1 /* 2 * Copyright (c) 2005-2011 Atheros Communications Inc. 3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 #include "mac.h" 19 20 #include <net/mac80211.h> 21 #include <linux/etherdevice.h> 22 #include <linux/acpi.h> 23 24 #include "hif.h" 25 #include "core.h" 26 #include "debug.h" 27 #include "wmi.h" 28 #include "htt.h" 29 #include "txrx.h" 30 #include "testmode.h" 31 #include "wmi.h" 32 #include "wmi-tlv.h" 33 #include "wmi-ops.h" 34 #include "wow.h" 35 36 /*********/ 37 /* Rates */ 38 /*********/ 39 40 static struct ieee80211_rate ath10k_rates[] = { 41 { .bitrate = 10, 42 .hw_value = ATH10K_HW_RATE_CCK_LP_1M }, 43 { .bitrate = 20, 44 .hw_value = ATH10K_HW_RATE_CCK_LP_2M, 45 .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M, 46 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 47 { .bitrate = 55, 48 .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M, 49 .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M, 50 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 51 { .bitrate = 110, 52 .hw_value = ATH10K_HW_RATE_CCK_LP_11M, 53 .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M, 54 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 55 56 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, 57 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, 58 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, 59 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, 60 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, 61 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, 62 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, 63 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, 64 }; 65 66 static struct ieee80211_rate ath10k_rates_rev2[] = { 67 { .bitrate = 10, 68 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M }, 69 { .bitrate = 20, 70 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M, 71 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M, 72 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 73 { .bitrate = 55, 74 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M, 75 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M, 76 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 77 { .bitrate = 110, 78 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M, 79 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M, 80 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 81 82 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, 83 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, 84 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, 85 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, 86 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, 87 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, 88 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, 89 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, 90 }; 91 92 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4 93 94 #define ath10k_a_rates (ath10k_rates + 
ATH10K_MAC_FIRST_OFDM_RATE_IDX) 95 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \ 96 ATH10K_MAC_FIRST_OFDM_RATE_IDX) 97 #define ath10k_g_rates (ath10k_rates + 0) 98 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) 99 100 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) 101 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) 102 103 static bool ath10k_mac_bitrate_is_cck(int bitrate) 104 { 105 switch (bitrate) { 106 case 10: 107 case 20: 108 case 55: 109 case 110: 110 return true; 111 } 112 113 return false; 114 } 115 116 static u8 ath10k_mac_bitrate_to_rate(int bitrate) 117 { 118 return DIV_ROUND_UP(bitrate, 5) | 119 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0); 120 } 121 122 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, 123 u8 hw_rate, bool cck) 124 { 125 const struct ieee80211_rate *rate; 126 int i; 127 128 for (i = 0; i < sband->n_bitrates; i++) { 129 rate = &sband->bitrates[i]; 130 131 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck) 132 continue; 133 134 if (rate->hw_value == hw_rate) 135 return i; 136 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && 137 rate->hw_value_short == hw_rate) 138 return i; 139 } 140 141 return 0; 142 } 143 144 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, 145 u32 bitrate) 146 { 147 int i; 148 149 for (i = 0; i < sband->n_bitrates; i++) 150 if (sband->bitrates[i].bitrate == bitrate) 151 return i; 152 153 return 0; 154 } 155 156 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) 157 { 158 switch ((mcs_map >> (2 * nss)) & 0x3) { 159 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; 160 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; 161 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; 162 } 163 return 0; 164 } 165 166 static u32 167 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 168 { 169 int nss; 170 171 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) 172 if (ht_mcs_mask[nss]) 173 return nss + 1; 174 175 return 1; 176 } 177 178 static u32 179 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 180 { 181 int nss; 182 183 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) 184 if (vht_mcs_mask[nss]) 185 return nss + 1; 186 187 return 1; 188 } 189 190 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val) 191 { 192 enum wmi_host_platform_type platform_type; 193 int ret; 194 195 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map)) 196 platform_type = WMI_HOST_PLATFORM_LOW_PERF; 197 else 198 platform_type = WMI_HOST_PLATFORM_HIGH_PERF; 199 200 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val); 201 202 if (ret && ret != -EOPNOTSUPP) { 203 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret); 204 return ret; 205 } 206 207 return 0; 208 } 209 210 /**********/ 211 /* Crypto */ 212 /**********/ 213 214 static int ath10k_send_key(struct ath10k_vif *arvif, 215 struct ieee80211_key_conf *key, 216 enum set_key_cmd cmd, 217 const u8 *macaddr, u32 flags) 218 { 219 struct ath10k *ar = arvif->ar; 220 struct wmi_vdev_install_key_arg arg = { 221 .vdev_id = arvif->vdev_id, 222 .key_idx = key->keyidx, 223 .key_len = key->keylen, 224 .key_data = key->key, 225 .key_flags = flags, 226 .macaddr = macaddr, 227 }; 228 229 lockdep_assert_held(&arvif->ar->conf_mutex); 230 231 switch (key->cipher) { 232 case WLAN_CIPHER_SUITE_CCMP: 233 arg.key_cipher = WMI_CIPHER_AES_CCM; 234 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; 235 break; 236 case 
WLAN_CIPHER_SUITE_TKIP: 237 arg.key_cipher = WMI_CIPHER_TKIP; 238 arg.key_txmic_len = 8; 239 arg.key_rxmic_len = 8; 240 break; 241 case WLAN_CIPHER_SUITE_WEP40: 242 case WLAN_CIPHER_SUITE_WEP104: 243 arg.key_cipher = WMI_CIPHER_WEP; 244 break; 245 case WLAN_CIPHER_SUITE_AES_CMAC: 246 WARN_ON(1); 247 return -EINVAL; 248 default: 249 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); 250 return -EOPNOTSUPP; 251 } 252 253 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 254 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 255 256 if (cmd == DISABLE_KEY) { 257 arg.key_cipher = WMI_CIPHER_NONE; 258 arg.key_data = NULL; 259 } 260 261 return ath10k_wmi_vdev_install_key(arvif->ar, &arg); 262 } 263 264 static int ath10k_install_key(struct ath10k_vif *arvif, 265 struct ieee80211_key_conf *key, 266 enum set_key_cmd cmd, 267 const u8 *macaddr, u32 flags) 268 { 269 struct ath10k *ar = arvif->ar; 270 int ret; 271 unsigned long time_left; 272 273 lockdep_assert_held(&ar->conf_mutex); 274 275 reinit_completion(&ar->install_key_done); 276 277 if (arvif->nohwcrypt) 278 return 1; 279 280 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags); 281 if (ret) 282 return ret; 283 284 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ); 285 if (time_left == 0) 286 return -ETIMEDOUT; 287 288 return 0; 289 } 290 291 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, 292 const u8 *addr) 293 { 294 struct ath10k *ar = arvif->ar; 295 struct ath10k_peer *peer; 296 int ret; 297 int i; 298 u32 flags; 299 300 lockdep_assert_held(&ar->conf_mutex); 301 302 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP && 303 arvif->vif->type != NL80211_IFTYPE_ADHOC && 304 arvif->vif->type != NL80211_IFTYPE_MESH_POINT)) 305 return -EINVAL; 306 307 spin_lock_bh(&ar->data_lock); 308 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); 309 spin_unlock_bh(&ar->data_lock); 310 311 if (!peer) 312 return -ENOENT; 313 314 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { 315 if (arvif->wep_keys[i] == NULL) 316 continue; 317 318 switch (arvif->vif->type) { 319 case NL80211_IFTYPE_AP: 320 flags = WMI_KEY_PAIRWISE; 321 322 if (arvif->def_wep_key_idx == i) 323 flags |= WMI_KEY_TX_USAGE; 324 325 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 326 SET_KEY, addr, flags); 327 if (ret < 0) 328 return ret; 329 break; 330 case NL80211_IFTYPE_ADHOC: 331 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 332 SET_KEY, addr, 333 WMI_KEY_PAIRWISE); 334 if (ret < 0) 335 return ret; 336 337 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 338 SET_KEY, addr, WMI_KEY_GROUP); 339 if (ret < 0) 340 return ret; 341 break; 342 default: 343 WARN_ON(1); 344 return -EINVAL; 345 } 346 347 spin_lock_bh(&ar->data_lock); 348 peer->keys[i] = arvif->wep_keys[i]; 349 spin_unlock_bh(&ar->data_lock); 350 } 351 352 /* In some cases (notably with static WEP IBSS with multiple keys) 353 * multicast Tx becomes broken. Both pairwise and groupwise keys are 354 * installed already. Using WMI_KEY_TX_USAGE in different combinations 355 * didn't seem help. Using def_keyid vdev parameter seems to be 356 * effective so use that. 357 * 358 * FIXME: Revisit. Perhaps this can be done in a less hacky way. 
359 */ 360 if (arvif->vif->type != NL80211_IFTYPE_ADHOC) 361 return 0; 362 363 if (arvif->def_wep_key_idx == -1) 364 return 0; 365 366 ret = ath10k_wmi_vdev_set_param(arvif->ar, 367 arvif->vdev_id, 368 arvif->ar->wmi.vdev_param->def_keyid, 369 arvif->def_wep_key_idx); 370 if (ret) { 371 ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n", 372 arvif->vdev_id, ret); 373 return ret; 374 } 375 376 return 0; 377 } 378 379 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, 380 const u8 *addr) 381 { 382 struct ath10k *ar = arvif->ar; 383 struct ath10k_peer *peer; 384 int first_errno = 0; 385 int ret; 386 int i; 387 u32 flags = 0; 388 389 lockdep_assert_held(&ar->conf_mutex); 390 391 spin_lock_bh(&ar->data_lock); 392 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); 393 spin_unlock_bh(&ar->data_lock); 394 395 if (!peer) 396 return -ENOENT; 397 398 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 399 if (peer->keys[i] == NULL) 400 continue; 401 402 /* key flags are not required to delete the key */ 403 ret = ath10k_install_key(arvif, peer->keys[i], 404 DISABLE_KEY, addr, flags); 405 if (ret < 0 && first_errno == 0) 406 first_errno = ret; 407 408 if (ret < 0) 409 ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", 410 i, ret); 411 412 spin_lock_bh(&ar->data_lock); 413 peer->keys[i] = NULL; 414 spin_unlock_bh(&ar->data_lock); 415 } 416 417 return first_errno; 418 } 419 420 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, 421 u8 keyidx) 422 { 423 struct ath10k_peer *peer; 424 int i; 425 426 lockdep_assert_held(&ar->data_lock); 427 428 /* We don't know which vdev this peer belongs to, 429 * since WMI doesn't give us that information. 430 * 431 * FIXME: multi-bss needs to be handled. 432 */ 433 peer = ath10k_peer_find(ar, 0, addr); 434 if (!peer) 435 return false; 436 437 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 438 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx) 439 return true; 440 } 441 442 return false; 443 } 444 445 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, 446 struct ieee80211_key_conf *key) 447 { 448 struct ath10k *ar = arvif->ar; 449 struct ath10k_peer *peer; 450 u8 addr[ETH_ALEN]; 451 int first_errno = 0; 452 int ret; 453 int i; 454 u32 flags = 0; 455 456 lockdep_assert_held(&ar->conf_mutex); 457 458 for (;;) { 459 /* since ath10k_install_key we can't hold data_lock all the 460 * time, so we try to remove the keys incrementally */ 461 spin_lock_bh(&ar->data_lock); 462 i = 0; 463 list_for_each_entry(peer, &ar->peers, list) { 464 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 465 if (peer->keys[i] == key) { 466 ether_addr_copy(addr, peer->addr); 467 peer->keys[i] = NULL; 468 break; 469 } 470 } 471 472 if (i < ARRAY_SIZE(peer->keys)) 473 break; 474 } 475 spin_unlock_bh(&ar->data_lock); 476 477 if (i == ARRAY_SIZE(peer->keys)) 478 break; 479 /* key flags are not required to delete the key */ 480 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags); 481 if (ret < 0 && first_errno == 0) 482 first_errno = ret; 483 484 if (ret) 485 ath10k_warn(ar, "failed to remove key for %pM: %d\n", 486 addr, ret); 487 } 488 489 return first_errno; 490 } 491 492 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif, 493 struct ieee80211_key_conf *key) 494 { 495 struct ath10k *ar = arvif->ar; 496 struct ath10k_peer *peer; 497 int ret; 498 499 lockdep_assert_held(&ar->conf_mutex); 500 501 list_for_each_entry(peer, &ar->peers, list) { 502 if (ether_addr_equal(peer->addr, arvif->vif->addr)) 503 continue; 504 505 if 
(ether_addr_equal(peer->addr, arvif->bssid)) 506 continue; 507 508 if (peer->keys[key->keyidx] == key) 509 continue; 510 511 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n", 512 arvif->vdev_id, key->keyidx); 513 514 ret = ath10k_install_peer_wep_keys(arvif, peer->addr); 515 if (ret) { 516 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n", 517 arvif->vdev_id, peer->addr, ret); 518 return ret; 519 } 520 } 521 522 return 0; 523 } 524 525 /*********************/ 526 /* General utilities */ 527 /*********************/ 528 529 static inline enum wmi_phy_mode 530 chan_to_phymode(const struct cfg80211_chan_def *chandef) 531 { 532 enum wmi_phy_mode phymode = MODE_UNKNOWN; 533 534 switch (chandef->chan->band) { 535 case NL80211_BAND_2GHZ: 536 switch (chandef->width) { 537 case NL80211_CHAN_WIDTH_20_NOHT: 538 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) 539 phymode = MODE_11B; 540 else 541 phymode = MODE_11G; 542 break; 543 case NL80211_CHAN_WIDTH_20: 544 phymode = MODE_11NG_HT20; 545 break; 546 case NL80211_CHAN_WIDTH_40: 547 phymode = MODE_11NG_HT40; 548 break; 549 case NL80211_CHAN_WIDTH_5: 550 case NL80211_CHAN_WIDTH_10: 551 case NL80211_CHAN_WIDTH_80: 552 case NL80211_CHAN_WIDTH_80P80: 553 case NL80211_CHAN_WIDTH_160: 554 phymode = MODE_UNKNOWN; 555 break; 556 } 557 break; 558 case NL80211_BAND_5GHZ: 559 switch (chandef->width) { 560 case NL80211_CHAN_WIDTH_20_NOHT: 561 phymode = MODE_11A; 562 break; 563 case NL80211_CHAN_WIDTH_20: 564 phymode = MODE_11NA_HT20; 565 break; 566 case NL80211_CHAN_WIDTH_40: 567 phymode = MODE_11NA_HT40; 568 break; 569 case NL80211_CHAN_WIDTH_80: 570 phymode = MODE_11AC_VHT80; 571 break; 572 case NL80211_CHAN_WIDTH_160: 573 phymode = MODE_11AC_VHT160; 574 break; 575 case NL80211_CHAN_WIDTH_80P80: 576 phymode = MODE_11AC_VHT80_80; 577 break; 578 case NL80211_CHAN_WIDTH_5: 579 case NL80211_CHAN_WIDTH_10: 580 phymode = MODE_UNKNOWN; 581 break; 582 } 583 break; 584 default: 585 break; 586 } 587 588 WARN_ON(phymode == MODE_UNKNOWN); 589 return phymode; 590 } 591 592 static u8 ath10k_parse_mpdudensity(u8 mpdudensity) 593 { 594 /* 595 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 596 * 0 for no restriction 597 * 1 for 1/4 us 598 * 2 for 1/2 us 599 * 3 for 1 us 600 * 4 for 2 us 601 * 5 for 4 us 602 * 6 for 8 us 603 * 7 for 16 us 604 */ 605 switch (mpdudensity) { 606 case 0: 607 return 0; 608 case 1: 609 case 2: 610 case 3: 611 /* Our lower layer calculations limit our precision to 612 1 microsecond */ 613 return 1; 614 case 4: 615 return 2; 616 case 5: 617 return 4; 618 case 6: 619 return 8; 620 case 7: 621 return 16; 622 default: 623 return 0; 624 } 625 } 626 627 int ath10k_mac_vif_chan(struct ieee80211_vif *vif, 628 struct cfg80211_chan_def *def) 629 { 630 struct ieee80211_chanctx_conf *conf; 631 632 rcu_read_lock(); 633 conf = rcu_dereference(vif->chanctx_conf); 634 if (!conf) { 635 rcu_read_unlock(); 636 return -ENOENT; 637 } 638 639 *def = conf->def; 640 rcu_read_unlock(); 641 642 return 0; 643 } 644 645 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw, 646 struct ieee80211_chanctx_conf *conf, 647 void *data) 648 { 649 int *num = data; 650 651 (*num)++; 652 } 653 654 static int ath10k_mac_num_chanctxs(struct ath10k *ar) 655 { 656 int num = 0; 657 658 ieee80211_iter_chan_contexts_atomic(ar->hw, 659 ath10k_mac_num_chanctxs_iter, 660 &num); 661 662 return num; 663 } 664 665 static void 666 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, 667 struct ieee80211_chanctx_conf 
*conf, 668 void *data) 669 { 670 struct cfg80211_chan_def **def = data; 671 672 *def = &conf->def; 673 } 674 675 static int ath10k_peer_create(struct ath10k *ar, 676 struct ieee80211_vif *vif, 677 struct ieee80211_sta *sta, 678 u32 vdev_id, 679 const u8 *addr, 680 enum wmi_peer_type peer_type) 681 { 682 struct ath10k_vif *arvif; 683 struct ath10k_peer *peer; 684 int num_peers = 0; 685 int ret; 686 687 lockdep_assert_held(&ar->conf_mutex); 688 689 num_peers = ar->num_peers; 690 691 /* Each vdev consumes a peer entry as well */ 692 list_for_each_entry(arvif, &ar->arvifs, list) 693 num_peers++; 694 695 if (num_peers >= ar->max_num_peers) 696 return -ENOBUFS; 697 698 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); 699 if (ret) { 700 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", 701 addr, vdev_id, ret); 702 return ret; 703 } 704 705 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); 706 if (ret) { 707 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", 708 addr, vdev_id, ret); 709 return ret; 710 } 711 712 spin_lock_bh(&ar->data_lock); 713 714 peer = ath10k_peer_find(ar, vdev_id, addr); 715 if (!peer) { 716 spin_unlock_bh(&ar->data_lock); 717 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", 718 addr, vdev_id); 719 ath10k_wmi_peer_delete(ar, vdev_id, addr); 720 return -ENOENT; 721 } 722 723 peer->vif = vif; 724 peer->sta = sta; 725 726 spin_unlock_bh(&ar->data_lock); 727 728 ar->num_peers++; 729 730 return 0; 731 } 732 733 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) 734 { 735 struct ath10k *ar = arvif->ar; 736 u32 param; 737 int ret; 738 739 param = ar->wmi.pdev_param->sta_kickout_th; 740 ret = ath10k_wmi_pdev_set_param(ar, param, 741 ATH10K_KICKOUT_THRESHOLD); 742 if (ret) { 743 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", 744 arvif->vdev_id, ret); 745 return ret; 746 } 747 748 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs; 749 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 750 ATH10K_KEEPALIVE_MIN_IDLE); 751 if (ret) { 752 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", 753 arvif->vdev_id, ret); 754 return ret; 755 } 756 757 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs; 758 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 759 ATH10K_KEEPALIVE_MAX_IDLE); 760 if (ret) { 761 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", 762 arvif->vdev_id, ret); 763 return ret; 764 } 765 766 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs; 767 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 768 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); 769 if (ret) { 770 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", 771 arvif->vdev_id, ret); 772 return ret; 773 } 774 775 return 0; 776 } 777 778 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) 779 { 780 struct ath10k *ar = arvif->ar; 781 u32 vdev_param; 782 783 vdev_param = ar->wmi.vdev_param->rts_threshold; 784 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); 785 } 786 787 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) 788 { 789 int ret; 790 791 lockdep_assert_held(&ar->conf_mutex); 792 793 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); 794 if (ret) 795 return ret; 796 797 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); 798 if (ret) 799 return ret; 800 801 ar->num_peers--; 802 803 return 
0; 804 } 805 806 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) 807 { 808 struct ath10k_peer *peer, *tmp; 809 int peer_id; 810 int i; 811 812 lockdep_assert_held(&ar->conf_mutex); 813 814 spin_lock_bh(&ar->data_lock); 815 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 816 if (peer->vdev_id != vdev_id) 817 continue; 818 819 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", 820 peer->addr, vdev_id); 821 822 for_each_set_bit(peer_id, peer->peer_ids, 823 ATH10K_MAX_NUM_PEER_IDS) { 824 ar->peer_map[peer_id] = NULL; 825 } 826 827 /* Double check that peer is properly un-referenced from 828 * the peer_map 829 */ 830 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 831 if (ar->peer_map[i] == peer) { 832 ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", 833 peer->addr, peer, i); 834 ar->peer_map[i] = NULL; 835 } 836 } 837 838 list_del(&peer->list); 839 kfree(peer); 840 ar->num_peers--; 841 } 842 spin_unlock_bh(&ar->data_lock); 843 } 844 845 static void ath10k_peer_cleanup_all(struct ath10k *ar) 846 { 847 struct ath10k_peer *peer, *tmp; 848 int i; 849 850 lockdep_assert_held(&ar->conf_mutex); 851 852 spin_lock_bh(&ar->data_lock); 853 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 854 list_del(&peer->list); 855 kfree(peer); 856 } 857 858 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) 859 ar->peer_map[i] = NULL; 860 861 spin_unlock_bh(&ar->data_lock); 862 863 ar->num_peers = 0; 864 ar->num_stations = 0; 865 } 866 867 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id, 868 struct ieee80211_sta *sta, 869 enum wmi_tdls_peer_state state) 870 { 871 int ret; 872 struct wmi_tdls_peer_update_cmd_arg arg = {}; 873 struct wmi_tdls_peer_capab_arg cap = {}; 874 struct wmi_channel_arg chan_arg = {}; 875 876 lockdep_assert_held(&ar->conf_mutex); 877 878 arg.vdev_id = vdev_id; 879 arg.peer_state = state; 880 ether_addr_copy(arg.addr, sta->addr); 881 882 cap.peer_max_sp = sta->max_sp; 883 cap.peer_uapsd_queues = sta->uapsd_queues; 884 885 if (state == WMI_TDLS_PEER_STATE_CONNECTED && 886 !sta->tdls_initiator) 887 cap.is_peer_responder = 1; 888 889 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg); 890 if (ret) { 891 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n", 892 arg.addr, vdev_id, ret); 893 return ret; 894 } 895 896 return 0; 897 } 898 899 /************************/ 900 /* Interface management */ 901 /************************/ 902 903 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) 904 { 905 struct ath10k *ar = arvif->ar; 906 907 lockdep_assert_held(&ar->data_lock); 908 909 if (!arvif->beacon) 910 return; 911 912 if (!arvif->beacon_buf) 913 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, 914 arvif->beacon->len, DMA_TO_DEVICE); 915 916 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED && 917 arvif->beacon_state != ATH10K_BEACON_SENT)) 918 return; 919 920 dev_kfree_skb_any(arvif->beacon); 921 922 arvif->beacon = NULL; 923 arvif->beacon_state = ATH10K_BEACON_SCHEDULED; 924 } 925 926 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) 927 { 928 struct ath10k *ar = arvif->ar; 929 930 lockdep_assert_held(&ar->data_lock); 931 932 ath10k_mac_vif_beacon_free(arvif); 933 934 if (arvif->beacon_buf) { 935 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 936 arvif->beacon_buf, arvif->beacon_paddr); 937 arvif->beacon_buf = NULL; 938 } 939 } 940 941 static inline int ath10k_vdev_setup_sync(struct ath10k *ar) 942 { 943 unsigned long time_left; 944 945 
lockdep_assert_held(&ar->conf_mutex); 946 947 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) 948 return -ESHUTDOWN; 949 950 time_left = wait_for_completion_timeout(&ar->vdev_setup_done, 951 ATH10K_VDEV_SETUP_TIMEOUT_HZ); 952 if (time_left == 0) 953 return -ETIMEDOUT; 954 955 return 0; 956 } 957 958 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) 959 { 960 struct cfg80211_chan_def *chandef = NULL; 961 struct ieee80211_channel *channel = NULL; 962 struct wmi_vdev_start_request_arg arg = {}; 963 int ret = 0; 964 965 lockdep_assert_held(&ar->conf_mutex); 966 967 ieee80211_iter_chan_contexts_atomic(ar->hw, 968 ath10k_mac_get_any_chandef_iter, 969 &chandef); 970 if (WARN_ON_ONCE(!chandef)) 971 return -ENOENT; 972 973 channel = chandef->chan; 974 975 arg.vdev_id = vdev_id; 976 arg.channel.freq = channel->center_freq; 977 arg.channel.band_center_freq1 = chandef->center_freq1; 978 arg.channel.band_center_freq2 = chandef->center_freq2; 979 980 /* TODO setup this dynamically, what in case we 981 don't have any vifs? */ 982 arg.channel.mode = chan_to_phymode(chandef); 983 arg.channel.chan_radar = 984 !!(channel->flags & IEEE80211_CHAN_RADAR); 985 986 arg.channel.min_power = 0; 987 arg.channel.max_power = channel->max_power * 2; 988 arg.channel.max_reg_power = channel->max_reg_power * 2; 989 arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; 990 991 reinit_completion(&ar->vdev_setup_done); 992 993 ret = ath10k_wmi_vdev_start(ar, &arg); 994 if (ret) { 995 ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n", 996 vdev_id, ret); 997 return ret; 998 } 999 1000 ret = ath10k_vdev_setup_sync(ar); 1001 if (ret) { 1002 ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n", 1003 vdev_id, ret); 1004 return ret; 1005 } 1006 1007 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); 1008 if (ret) { 1009 ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n", 1010 vdev_id, ret); 1011 goto vdev_stop; 1012 } 1013 1014 ar->monitor_vdev_id = vdev_id; 1015 1016 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n", 1017 ar->monitor_vdev_id); 1018 return 0; 1019 1020 vdev_stop: 1021 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 1022 if (ret) 1023 ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n", 1024 ar->monitor_vdev_id, ret); 1025 1026 return ret; 1027 } 1028 1029 static int ath10k_monitor_vdev_stop(struct ath10k *ar) 1030 { 1031 int ret = 0; 1032 1033 lockdep_assert_held(&ar->conf_mutex); 1034 1035 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); 1036 if (ret) 1037 ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n", 1038 ar->monitor_vdev_id, ret); 1039 1040 reinit_completion(&ar->vdev_setup_done); 1041 1042 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 1043 if (ret) 1044 ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n", 1045 ar->monitor_vdev_id, ret); 1046 1047 ret = ath10k_vdev_setup_sync(ar); 1048 if (ret) 1049 ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n", 1050 ar->monitor_vdev_id, ret); 1051 1052 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", 1053 ar->monitor_vdev_id); 1054 return ret; 1055 } 1056 1057 static int ath10k_monitor_vdev_create(struct ath10k *ar) 1058 { 1059 int bit, ret = 0; 1060 1061 lockdep_assert_held(&ar->conf_mutex); 1062 1063 if (ar->free_vdev_map == 0) { 1064 ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n"); 1065 return -ENOMEM; 1066 } 1067 1068 bit = __ffs64(ar->free_vdev_map); 1069 1070 
ar->monitor_vdev_id = bit; 1071 1072 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, 1073 WMI_VDEV_TYPE_MONITOR, 1074 0, ar->mac_addr); 1075 if (ret) { 1076 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n", 1077 ar->monitor_vdev_id, ret); 1078 return ret; 1079 } 1080 1081 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); 1082 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", 1083 ar->monitor_vdev_id); 1084 1085 return 0; 1086 } 1087 1088 static int ath10k_monitor_vdev_delete(struct ath10k *ar) 1089 { 1090 int ret = 0; 1091 1092 lockdep_assert_held(&ar->conf_mutex); 1093 1094 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); 1095 if (ret) { 1096 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n", 1097 ar->monitor_vdev_id, ret); 1098 return ret; 1099 } 1100 1101 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; 1102 1103 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", 1104 ar->monitor_vdev_id); 1105 return ret; 1106 } 1107 1108 static int ath10k_monitor_start(struct ath10k *ar) 1109 { 1110 int ret; 1111 1112 lockdep_assert_held(&ar->conf_mutex); 1113 1114 ret = ath10k_monitor_vdev_create(ar); 1115 if (ret) { 1116 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); 1117 return ret; 1118 } 1119 1120 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); 1121 if (ret) { 1122 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); 1123 ath10k_monitor_vdev_delete(ar); 1124 return ret; 1125 } 1126 1127 ar->monitor_started = true; 1128 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); 1129 1130 return 0; 1131 } 1132 1133 static int ath10k_monitor_stop(struct ath10k *ar) 1134 { 1135 int ret; 1136 1137 lockdep_assert_held(&ar->conf_mutex); 1138 1139 ret = ath10k_monitor_vdev_stop(ar); 1140 if (ret) { 1141 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); 1142 return ret; 1143 } 1144 1145 ret = ath10k_monitor_vdev_delete(ar); 1146 if (ret) { 1147 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); 1148 return ret; 1149 } 1150 1151 ar->monitor_started = false; 1152 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); 1153 1154 return 0; 1155 } 1156 1157 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar) 1158 { 1159 int num_ctx; 1160 1161 /* At least one chanctx is required to derive a channel to start 1162 * monitor vdev on. 1163 */ 1164 num_ctx = ath10k_mac_num_chanctxs(ar); 1165 if (num_ctx == 0) 1166 return false; 1167 1168 /* If there's already an existing special monitor interface then don't 1169 * bother creating another monitor vdev. 1170 */ 1171 if (ar->monitor_arvif) 1172 return false; 1173 1174 return ar->monitor || 1175 (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST, 1176 ar->running_fw->fw_file.fw_features) && 1177 (ar->filter_flags & FIF_OTHER_BSS)) || 1178 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1179 } 1180 1181 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar) 1182 { 1183 int num_ctx; 1184 1185 num_ctx = ath10k_mac_num_chanctxs(ar); 1186 1187 /* FIXME: Current interface combinations and cfg80211/mac80211 code 1188 * shouldn't allow this but make sure to prevent handling the following 1189 * case anyway since multi-channel DFS hasn't been tested at all. 
1190 */ 1191 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1) 1192 return false; 1193 1194 return true; 1195 } 1196 1197 static int ath10k_monitor_recalc(struct ath10k *ar) 1198 { 1199 bool needed; 1200 bool allowed; 1201 int ret; 1202 1203 lockdep_assert_held(&ar->conf_mutex); 1204 1205 needed = ath10k_mac_monitor_vdev_is_needed(ar); 1206 allowed = ath10k_mac_monitor_vdev_is_allowed(ar); 1207 1208 ath10k_dbg(ar, ATH10K_DBG_MAC, 1209 "mac monitor recalc started? %d needed? %d allowed? %d\n", 1210 ar->monitor_started, needed, allowed); 1211 1212 if (WARN_ON(needed && !allowed)) { 1213 if (ar->monitor_started) { 1214 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n"); 1215 1216 ret = ath10k_monitor_stop(ar); 1217 if (ret) 1218 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", 1219 ret); 1220 /* not serious */ 1221 } 1222 1223 return -EPERM; 1224 } 1225 1226 if (needed == ar->monitor_started) 1227 return 0; 1228 1229 if (needed) 1230 return ath10k_monitor_start(ar); 1231 else 1232 return ath10k_monitor_stop(ar); 1233 } 1234 1235 static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif) 1236 { 1237 struct ath10k *ar = arvif->ar; 1238 1239 lockdep_assert_held(&ar->conf_mutex); 1240 1241 if (!arvif->is_started) { 1242 ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n"); 1243 return false; 1244 } 1245 1246 return true; 1247 } 1248 1249 static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif) 1250 { 1251 struct ath10k *ar = arvif->ar; 1252 u32 vdev_param; 1253 1254 lockdep_assert_held(&ar->conf_mutex); 1255 1256 vdev_param = ar->wmi.vdev_param->protection_mode; 1257 1258 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n", 1259 arvif->vdev_id, arvif->use_cts_prot); 1260 1261 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1262 arvif->use_cts_prot ? 
1 : 0); 1263 } 1264 1265 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) 1266 { 1267 struct ath10k *ar = arvif->ar; 1268 u32 vdev_param, rts_cts = 0; 1269 1270 lockdep_assert_held(&ar->conf_mutex); 1271 1272 vdev_param = ar->wmi.vdev_param->enable_rtscts; 1273 1274 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET); 1275 1276 if (arvif->num_legacy_stations > 0) 1277 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES, 1278 WMI_RTSCTS_PROFILE); 1279 else 1280 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES, 1281 WMI_RTSCTS_PROFILE); 1282 1283 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n", 1284 arvif->vdev_id, rts_cts); 1285 1286 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1287 rts_cts); 1288 } 1289 1290 static int ath10k_start_cac(struct ath10k *ar) 1291 { 1292 int ret; 1293 1294 lockdep_assert_held(&ar->conf_mutex); 1295 1296 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1297 1298 ret = ath10k_monitor_recalc(ar); 1299 if (ret) { 1300 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); 1301 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1302 return ret; 1303 } 1304 1305 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", 1306 ar->monitor_vdev_id); 1307 1308 return 0; 1309 } 1310 1311 static int ath10k_stop_cac(struct ath10k *ar) 1312 { 1313 lockdep_assert_held(&ar->conf_mutex); 1314 1315 /* CAC is not running - do nothing */ 1316 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) 1317 return 0; 1318 1319 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1320 ath10k_monitor_stop(ar); 1321 1322 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); 1323 1324 return 0; 1325 } 1326 1327 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw, 1328 struct ieee80211_chanctx_conf *conf, 1329 void *data) 1330 { 1331 bool *ret = data; 1332 1333 if (!*ret && conf->radar_enabled) 1334 *ret = true; 1335 } 1336 1337 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar) 1338 { 1339 bool has_radar = false; 1340 1341 ieee80211_iter_chan_contexts_atomic(ar->hw, 1342 ath10k_mac_has_radar_iter, 1343 &has_radar); 1344 1345 return has_radar; 1346 } 1347 1348 static void ath10k_recalc_radar_detection(struct ath10k *ar) 1349 { 1350 int ret; 1351 1352 lockdep_assert_held(&ar->conf_mutex); 1353 1354 ath10k_stop_cac(ar); 1355 1356 if (!ath10k_mac_has_radar_enabled(ar)) 1357 return; 1358 1359 if (ar->num_started_vdevs > 0) 1360 return; 1361 1362 ret = ath10k_start_cac(ar); 1363 if (ret) { 1364 /* 1365 * Not possible to start CAC on current channel so starting 1366 * radiation is not allowed, make this channel DFS_UNAVAILABLE 1367 * by indicating that radar was detected. 
1368 */ 1369 ath10k_warn(ar, "failed to start CAC: %d\n", ret); 1370 ieee80211_radar_detected(ar->hw); 1371 } 1372 } 1373 1374 static int ath10k_vdev_stop(struct ath10k_vif *arvif) 1375 { 1376 struct ath10k *ar = arvif->ar; 1377 int ret; 1378 1379 lockdep_assert_held(&ar->conf_mutex); 1380 1381 reinit_completion(&ar->vdev_setup_done); 1382 1383 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); 1384 if (ret) { 1385 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n", 1386 arvif->vdev_id, ret); 1387 return ret; 1388 } 1389 1390 ret = ath10k_vdev_setup_sync(ar); 1391 if (ret) { 1392 ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n", 1393 arvif->vdev_id, ret); 1394 return ret; 1395 } 1396 1397 WARN_ON(ar->num_started_vdevs == 0); 1398 1399 if (ar->num_started_vdevs != 0) { 1400 ar->num_started_vdevs--; 1401 ath10k_recalc_radar_detection(ar); 1402 } 1403 1404 return ret; 1405 } 1406 1407 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, 1408 const struct cfg80211_chan_def *chandef, 1409 bool restart) 1410 { 1411 struct ath10k *ar = arvif->ar; 1412 struct wmi_vdev_start_request_arg arg = {}; 1413 int ret = 0; 1414 1415 lockdep_assert_held(&ar->conf_mutex); 1416 1417 reinit_completion(&ar->vdev_setup_done); 1418 1419 arg.vdev_id = arvif->vdev_id; 1420 arg.dtim_period = arvif->dtim_period; 1421 arg.bcn_intval = arvif->beacon_interval; 1422 1423 arg.channel.freq = chandef->chan->center_freq; 1424 arg.channel.band_center_freq1 = chandef->center_freq1; 1425 arg.channel.band_center_freq2 = chandef->center_freq2; 1426 arg.channel.mode = chan_to_phymode(chandef); 1427 1428 arg.channel.min_power = 0; 1429 arg.channel.max_power = chandef->chan->max_power * 2; 1430 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; 1431 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; 1432 1433 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 1434 arg.ssid = arvif->u.ap.ssid; 1435 arg.ssid_len = arvif->u.ap.ssid_len; 1436 arg.hidden_ssid = arvif->u.ap.hidden_ssid; 1437 1438 /* For now allow DFS for AP mode */ 1439 arg.channel.chan_radar = 1440 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); 1441 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 1442 arg.ssid = arvif->vif->bss_conf.ssid; 1443 arg.ssid_len = arvif->vif->bss_conf.ssid_len; 1444 } 1445 1446 ath10k_dbg(ar, ATH10K_DBG_MAC, 1447 "mac vdev %d start center_freq %d phymode %s\n", 1448 arg.vdev_id, arg.channel.freq, 1449 ath10k_wmi_phymode_str(arg.channel.mode)); 1450 1451 if (restart) 1452 ret = ath10k_wmi_vdev_restart(ar, &arg); 1453 else 1454 ret = ath10k_wmi_vdev_start(ar, &arg); 1455 1456 if (ret) { 1457 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n", 1458 arg.vdev_id, ret); 1459 return ret; 1460 } 1461 1462 ret = ath10k_vdev_setup_sync(ar); 1463 if (ret) { 1464 ath10k_warn(ar, 1465 "failed to synchronize setup for vdev %i restart %d: %d\n", 1466 arg.vdev_id, restart, ret); 1467 return ret; 1468 } 1469 1470 ar->num_started_vdevs++; 1471 ath10k_recalc_radar_detection(ar); 1472 1473 return ret; 1474 } 1475 1476 static int ath10k_vdev_start(struct ath10k_vif *arvif, 1477 const struct cfg80211_chan_def *def) 1478 { 1479 return ath10k_vdev_start_restart(arvif, def, false); 1480 } 1481 1482 static int ath10k_vdev_restart(struct ath10k_vif *arvif, 1483 const struct cfg80211_chan_def *def) 1484 { 1485 return ath10k_vdev_start_restart(arvif, def, true); 1486 } 1487 1488 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, 1489 struct sk_buff *bcn) 1490 { 1491 struct ath10k *ar = arvif->ar; 1492 struct 
ieee80211_mgmt *mgmt; 1493 const u8 *p2p_ie; 1494 int ret; 1495 1496 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p) 1497 return 0; 1498 1499 mgmt = (void *)bcn->data; 1500 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1501 mgmt->u.beacon.variable, 1502 bcn->len - (mgmt->u.beacon.variable - 1503 bcn->data)); 1504 if (!p2p_ie) 1505 return -ENOENT; 1506 1507 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); 1508 if (ret) { 1509 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", 1510 arvif->vdev_id, ret); 1511 return ret; 1512 } 1513 1514 return 0; 1515 } 1516 1517 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, 1518 u8 oui_type, size_t ie_offset) 1519 { 1520 size_t len; 1521 const u8 *next; 1522 const u8 *end; 1523 u8 *ie; 1524 1525 if (WARN_ON(skb->len < ie_offset)) 1526 return -EINVAL; 1527 1528 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, 1529 skb->data + ie_offset, 1530 skb->len - ie_offset); 1531 if (!ie) 1532 return -ENOENT; 1533 1534 len = ie[1] + 2; 1535 end = skb->data + skb->len; 1536 next = ie + len; 1537 1538 if (WARN_ON(next > end)) 1539 return -EINVAL; 1540 1541 memmove(ie, next, end - next); 1542 skb_trim(skb, skb->len - len); 1543 1544 return 0; 1545 } 1546 1547 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif) 1548 { 1549 struct ath10k *ar = arvif->ar; 1550 struct ieee80211_hw *hw = ar->hw; 1551 struct ieee80211_vif *vif = arvif->vif; 1552 struct ieee80211_mutable_offsets offs = {}; 1553 struct sk_buff *bcn; 1554 int ret; 1555 1556 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1557 return 0; 1558 1559 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 1560 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 1561 return 0; 1562 1563 bcn = ieee80211_beacon_get_template(hw, vif, &offs); 1564 if (!bcn) { 1565 ath10k_warn(ar, "failed to get beacon template from mac80211\n"); 1566 return -EPERM; 1567 } 1568 1569 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn); 1570 if (ret) { 1571 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); 1572 kfree_skb(bcn); 1573 return ret; 1574 } 1575 1576 /* P2P IE is inserted by firmware automatically (as configured above) 1577 * so remove it from the base beacon template to avoid duplicate P2P 1578 * IEs in beacon frames. 
1579 */ 1580 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1581 offsetof(struct ieee80211_mgmt, 1582 u.beacon.variable)); 1583 1584 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, 1585 0, NULL, 0); 1586 kfree_skb(bcn); 1587 1588 if (ret) { 1589 ath10k_warn(ar, "failed to submit beacon template command: %d\n", 1590 ret); 1591 return ret; 1592 } 1593 1594 return 0; 1595 } 1596 1597 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) 1598 { 1599 struct ath10k *ar = arvif->ar; 1600 struct ieee80211_hw *hw = ar->hw; 1601 struct ieee80211_vif *vif = arvif->vif; 1602 struct sk_buff *prb; 1603 int ret; 1604 1605 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1606 return 0; 1607 1608 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1609 return 0; 1610 1611 prb = ieee80211_proberesp_get(hw, vif); 1612 if (!prb) { 1613 ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); 1614 return -EPERM; 1615 } 1616 1617 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); 1618 kfree_skb(prb); 1619 1620 if (ret) { 1621 ath10k_warn(ar, "failed to submit probe resp template command: %d\n", 1622 ret); 1623 return ret; 1624 } 1625 1626 return 0; 1627 } 1628 1629 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif) 1630 { 1631 struct ath10k *ar = arvif->ar; 1632 struct cfg80211_chan_def def; 1633 int ret; 1634 1635 /* When originally vdev is started during assign_vif_chanctx() some 1636 * information is missing, notably SSID. Firmware revisions with beacon 1637 * offloading require the SSID to be provided during vdev (re)start to 1638 * handle hidden SSID properly. 1639 * 1640 * Vdev restart must be done after vdev has been both started and 1641 * upped. Otherwise some firmware revisions (at least 10.2) fail to 1642 * deliver vdev restart response event causing timeouts during vdev 1643 * syncing in ath10k. 1644 * 1645 * Note: The vdev down/up and template reinstallation could be skipped 1646 * since only wmi-tlv firmware are known to have beacon offload and 1647 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart 1648 * response delivery. It's probably more robust to keep it as is. 1649 */ 1650 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1651 return 0; 1652 1653 if (WARN_ON(!arvif->is_started)) 1654 return -EINVAL; 1655 1656 if (WARN_ON(!arvif->is_up)) 1657 return -EINVAL; 1658 1659 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 1660 return -EINVAL; 1661 1662 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1663 if (ret) { 1664 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n", 1665 arvif->vdev_id, ret); 1666 return ret; 1667 } 1668 1669 /* Vdev down reset beacon & presp templates. Reinstall them. Otherwise 1670 * firmware will crash upon vdev up. 
1671 */ 1672 1673 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1674 if (ret) { 1675 ath10k_warn(ar, "failed to update beacon template: %d\n", ret); 1676 return ret; 1677 } 1678 1679 ret = ath10k_mac_setup_prb_tmpl(arvif); 1680 if (ret) { 1681 ath10k_warn(ar, "failed to update presp template: %d\n", ret); 1682 return ret; 1683 } 1684 1685 ret = ath10k_vdev_restart(arvif, &def); 1686 if (ret) { 1687 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n", 1688 arvif->vdev_id, ret); 1689 return ret; 1690 } 1691 1692 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1693 arvif->bssid); 1694 if (ret) { 1695 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n", 1696 arvif->vdev_id, ret); 1697 return ret; 1698 } 1699 1700 return 0; 1701 } 1702 1703 static void ath10k_control_beaconing(struct ath10k_vif *arvif, 1704 struct ieee80211_bss_conf *info) 1705 { 1706 struct ath10k *ar = arvif->ar; 1707 int ret = 0; 1708 1709 lockdep_assert_held(&arvif->ar->conf_mutex); 1710 1711 if (!info->enable_beacon) { 1712 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1713 if (ret) 1714 ath10k_warn(ar, "failed to down vdev_id %i: %d\n", 1715 arvif->vdev_id, ret); 1716 1717 arvif->is_up = false; 1718 1719 spin_lock_bh(&arvif->ar->data_lock); 1720 ath10k_mac_vif_beacon_free(arvif); 1721 spin_unlock_bh(&arvif->ar->data_lock); 1722 1723 return; 1724 } 1725 1726 arvif->tx_seq_no = 0x1000; 1727 1728 arvif->aid = 0; 1729 ether_addr_copy(arvif->bssid, info->bssid); 1730 1731 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1732 arvif->bssid); 1733 if (ret) { 1734 ath10k_warn(ar, "failed to bring up vdev %d: %i\n", 1735 arvif->vdev_id, ret); 1736 return; 1737 } 1738 1739 arvif->is_up = true; 1740 1741 ret = ath10k_mac_vif_fix_hidden_ssid(arvif); 1742 if (ret) { 1743 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n", 1744 arvif->vdev_id, ret); 1745 return; 1746 } 1747 1748 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); 1749 } 1750 1751 static void ath10k_control_ibss(struct ath10k_vif *arvif, 1752 struct ieee80211_bss_conf *info, 1753 const u8 self_peer[ETH_ALEN]) 1754 { 1755 struct ath10k *ar = arvif->ar; 1756 u32 vdev_param; 1757 int ret = 0; 1758 1759 lockdep_assert_held(&arvif->ar->conf_mutex); 1760 1761 if (!info->ibss_joined) { 1762 if (is_zero_ether_addr(arvif->bssid)) 1763 return; 1764 1765 eth_zero_addr(arvif->bssid); 1766 1767 return; 1768 } 1769 1770 vdev_param = arvif->ar->wmi.vdev_param->atim_window; 1771 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, 1772 ATH10K_DEFAULT_ATIM); 1773 if (ret) 1774 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", 1775 arvif->vdev_id, ret); 1776 } 1777 1778 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif) 1779 { 1780 struct ath10k *ar = arvif->ar; 1781 u32 param; 1782 u32 value; 1783 int ret; 1784 1785 lockdep_assert_held(&arvif->ar->conf_mutex); 1786 1787 if (arvif->u.sta.uapsd) 1788 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER; 1789 else 1790 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; 1791 1792 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 1793 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); 1794 if (ret) { 1795 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n", 1796 value, arvif->vdev_id, ret); 1797 return ret; 1798 } 1799 1800 return 0; 1801 } 1802 1803 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) 1804 { 1805 struct ath10k *ar = arvif->ar; 1806 u32 param; 1807 u32 value; 1808 
int ret; 1809 1810 lockdep_assert_held(&arvif->ar->conf_mutex); 1811 1812 if (arvif->u.sta.uapsd) 1813 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD; 1814 else 1815 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; 1816 1817 param = WMI_STA_PS_PARAM_PSPOLL_COUNT; 1818 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 1819 param, value); 1820 if (ret) { 1821 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n", 1822 value, arvif->vdev_id, ret); 1823 return ret; 1824 } 1825 1826 return 0; 1827 } 1828 1829 static int ath10k_mac_num_vifs_started(struct ath10k *ar) 1830 { 1831 struct ath10k_vif *arvif; 1832 int num = 0; 1833 1834 lockdep_assert_held(&ar->conf_mutex); 1835 1836 list_for_each_entry(arvif, &ar->arvifs, list) 1837 if (arvif->is_started) 1838 num++; 1839 1840 return num; 1841 } 1842 1843 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) 1844 { 1845 struct ath10k *ar = arvif->ar; 1846 struct ieee80211_vif *vif = arvif->vif; 1847 struct ieee80211_conf *conf = &ar->hw->conf; 1848 enum wmi_sta_powersave_param param; 1849 enum wmi_sta_ps_mode psmode; 1850 int ret; 1851 int ps_timeout; 1852 bool enable_ps; 1853 1854 lockdep_assert_held(&arvif->ar->conf_mutex); 1855 1856 if (arvif->vif->type != NL80211_IFTYPE_STATION) 1857 return 0; 1858 1859 enable_ps = arvif->ps; 1860 1861 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && 1862 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, 1863 ar->running_fw->fw_file.fw_features)) { 1864 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", 1865 arvif->vdev_id); 1866 enable_ps = false; 1867 } 1868 1869 if (!arvif->is_started) { 1870 /* mac80211 can update vif powersave state while disconnected. 1871 * Firmware doesn't behave nicely and consumes more power than 1872 * necessary if PS is disabled on a non-started vdev. Hence 1873 * force-enable PS for non-running vdevs. 1874 */ 1875 psmode = WMI_STA_PS_MODE_ENABLED; 1876 } else if (enable_ps) { 1877 psmode = WMI_STA_PS_MODE_ENABLED; 1878 param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 1879 1880 ps_timeout = conf->dynamic_ps_timeout; 1881 if (ps_timeout == 0) { 1882 /* Firmware doesn't like 0 */ 1883 ps_timeout = ieee80211_tu_to_usec( 1884 vif->bss_conf.beacon_int) / 1000; 1885 } 1886 1887 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 1888 ps_timeout); 1889 if (ret) { 1890 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", 1891 arvif->vdev_id, ret); 1892 return ret; 1893 } 1894 } else { 1895 psmode = WMI_STA_PS_MODE_DISABLED; 1896 } 1897 1898 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", 1899 arvif->vdev_id, psmode ? "enable" : "disable"); 1900 1901 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); 1902 if (ret) { 1903 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", 1904 psmode, arvif->vdev_id, ret); 1905 return ret; 1906 } 1907 1908 return 0; 1909 } 1910 1911 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif) 1912 { 1913 struct ath10k *ar = arvif->ar; 1914 struct wmi_sta_keepalive_arg arg = {}; 1915 int ret; 1916 1917 lockdep_assert_held(&arvif->ar->conf_mutex); 1918 1919 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 1920 return 0; 1921 1922 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) 1923 return 0; 1924 1925 /* Some firmware revisions have a bug and ignore the `enabled` field. 1926 * Instead use the interval to disable the keepalive. 
1927 */ 1928 arg.vdev_id = arvif->vdev_id; 1929 arg.enabled = 1; 1930 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; 1931 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE; 1932 1933 ret = ath10k_wmi_sta_keepalive(ar, &arg); 1934 if (ret) { 1935 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", 1936 arvif->vdev_id, ret); 1937 return ret; 1938 } 1939 1940 return 0; 1941 } 1942 1943 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif) 1944 { 1945 struct ath10k *ar = arvif->ar; 1946 struct ieee80211_vif *vif = arvif->vif; 1947 int ret; 1948 1949 lockdep_assert_held(&arvif->ar->conf_mutex); 1950 1951 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))) 1952 return; 1953 1954 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1955 return; 1956 1957 if (!vif->csa_active) 1958 return; 1959 1960 if (!arvif->is_up) 1961 return; 1962 1963 if (!ieee80211_csa_is_complete(vif)) { 1964 ieee80211_csa_update_counter(vif); 1965 1966 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1967 if (ret) 1968 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 1969 ret); 1970 1971 ret = ath10k_mac_setup_prb_tmpl(arvif); 1972 if (ret) 1973 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 1974 ret); 1975 } else { 1976 ieee80211_csa_finish(vif); 1977 } 1978 } 1979 1980 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work) 1981 { 1982 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 1983 ap_csa_work); 1984 struct ath10k *ar = arvif->ar; 1985 1986 mutex_lock(&ar->conf_mutex); 1987 ath10k_mac_vif_ap_csa_count_down(arvif); 1988 mutex_unlock(&ar->conf_mutex); 1989 } 1990 1991 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, 1992 struct ieee80211_vif *vif) 1993 { 1994 struct sk_buff *skb = data; 1995 struct ieee80211_mgmt *mgmt = (void *)skb->data; 1996 struct ath10k_vif *arvif = (void *)vif->drv_priv; 1997 1998 if (vif->type != NL80211_IFTYPE_STATION) 1999 return; 2000 2001 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) 2002 return; 2003 2004 cancel_delayed_work(&arvif->connection_loss_work); 2005 } 2006 2007 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb) 2008 { 2009 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2010 IEEE80211_IFACE_ITER_NORMAL, 2011 ath10k_mac_handle_beacon_iter, 2012 skb); 2013 } 2014 2015 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, 2016 struct ieee80211_vif *vif) 2017 { 2018 u32 *vdev_id = data; 2019 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2020 struct ath10k *ar = arvif->ar; 2021 struct ieee80211_hw *hw = ar->hw; 2022 2023 if (arvif->vdev_id != *vdev_id) 2024 return; 2025 2026 if (!arvif->is_up) 2027 return; 2028 2029 ieee80211_beacon_loss(vif); 2030 2031 /* Firmware doesn't report beacon loss events repeatedly. If AP probe 2032 * (done by mac80211) succeeds but beacons do not resume then it 2033 * doesn't make sense to continue operation. Queue connection loss work 2034 * which can be cancelled when beacon is received. 
2035 */ 2036 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, 2037 ATH10K_CONNECTION_LOSS_HZ); 2038 } 2039 2040 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id) 2041 { 2042 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2043 IEEE80211_IFACE_ITER_NORMAL, 2044 ath10k_mac_handle_beacon_miss_iter, 2045 &vdev_id); 2046 } 2047 2048 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work) 2049 { 2050 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 2051 connection_loss_work.work); 2052 struct ieee80211_vif *vif = arvif->vif; 2053 2054 if (!arvif->is_up) 2055 return; 2056 2057 ieee80211_connection_loss(vif); 2058 } 2059 2060 /**********************/ 2061 /* Station management */ 2062 /**********************/ 2063 2064 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, 2065 struct ieee80211_vif *vif) 2066 { 2067 /* Some firmware revisions have unstable STA powersave when listen 2068 * interval is set too high (e.g. 5). The symptoms are firmware doesn't 2069 * generate NullFunc frames properly even if buffered frames have been 2070 * indicated in Beacon TIM. Firmware would seldom wake up to pull 2071 * buffered frames. Often pinging the device from AP would simply fail. 2072 * 2073 * As a workaround set it to 1. 2074 */ 2075 if (vif->type == NL80211_IFTYPE_STATION) 2076 return 1; 2077 2078 return ar->hw->conf.listen_interval; 2079 } 2080 2081 static void ath10k_peer_assoc_h_basic(struct ath10k *ar, 2082 struct ieee80211_vif *vif, 2083 struct ieee80211_sta *sta, 2084 struct wmi_peer_assoc_complete_arg *arg) 2085 { 2086 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2087 u32 aid; 2088 2089 lockdep_assert_held(&ar->conf_mutex); 2090 2091 if (vif->type == NL80211_IFTYPE_STATION) 2092 aid = vif->bss_conf.aid; 2093 else 2094 aid = sta->aid; 2095 2096 ether_addr_copy(arg->addr, sta->addr); 2097 arg->vdev_id = arvif->vdev_id; 2098 arg->peer_aid = aid; 2099 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth; 2100 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); 2101 arg->peer_num_spatial_streams = 1; 2102 arg->peer_caps = vif->bss_conf.assoc_capability; 2103 } 2104 2105 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, 2106 struct ieee80211_vif *vif, 2107 struct ieee80211_sta *sta, 2108 struct wmi_peer_assoc_complete_arg *arg) 2109 { 2110 struct ieee80211_bss_conf *info = &vif->bss_conf; 2111 struct cfg80211_chan_def def; 2112 struct cfg80211_bss *bss; 2113 const u8 *rsnie = NULL; 2114 const u8 *wpaie = NULL; 2115 2116 lockdep_assert_held(&ar->conf_mutex); 2117 2118 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2119 return; 2120 2121 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, 2122 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); 2123 if (bss) { 2124 const struct cfg80211_bss_ies *ies; 2125 2126 rcu_read_lock(); 2127 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); 2128 2129 ies = rcu_dereference(bss->ies); 2130 2131 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 2132 WLAN_OUI_TYPE_MICROSOFT_WPA, 2133 ies->data, 2134 ies->len); 2135 rcu_read_unlock(); 2136 cfg80211_put_bss(ar->hw->wiphy, bss); 2137 } 2138 2139 /* FIXME: base on RSN IE/WPA IE is a correct idea? 
*/ 2140 if (rsnie || wpaie) { 2141 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); 2142 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; 2143 } 2144 2145 if (wpaie) { 2146 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); 2147 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; 2148 } 2149 2150 if (sta->mfp && 2151 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, 2152 ar->running_fw->fw_file.fw_features)) { 2153 arg->peer_flags |= ar->wmi.peer_flags->pmf; 2154 } 2155 } 2156 2157 static void ath10k_peer_assoc_h_rates(struct ath10k *ar, 2158 struct ieee80211_vif *vif, 2159 struct ieee80211_sta *sta, 2160 struct wmi_peer_assoc_complete_arg *arg) 2161 { 2162 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2163 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 2164 struct cfg80211_chan_def def; 2165 const struct ieee80211_supported_band *sband; 2166 const struct ieee80211_rate *rates; 2167 enum nl80211_band band; 2168 u32 ratemask; 2169 u8 rate; 2170 int i; 2171 2172 lockdep_assert_held(&ar->conf_mutex); 2173 2174 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2175 return; 2176 2177 band = def.chan->band; 2178 sband = ar->hw->wiphy->bands[band]; 2179 ratemask = sta->supp_rates[band]; 2180 ratemask &= arvif->bitrate_mask.control[band].legacy; 2181 rates = sband->bitrates; 2182 2183 rateset->num_rates = 0; 2184 2185 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { 2186 if (!(ratemask & 1)) 2187 continue; 2188 2189 rate = ath10k_mac_bitrate_to_rate(rates->bitrate); 2190 rateset->rates[rateset->num_rates] = rate; 2191 rateset->num_rates++; 2192 } 2193 } 2194 2195 static bool 2196 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 2197 { 2198 int nss; 2199 2200 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) 2201 if (ht_mcs_mask[nss]) 2202 return false; 2203 2204 return true; 2205 } 2206 2207 static bool 2208 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 2209 { 2210 int nss; 2211 2212 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) 2213 if (vht_mcs_mask[nss]) 2214 return false; 2215 2216 return true; 2217 } 2218 2219 static void ath10k_peer_assoc_h_ht(struct ath10k *ar, 2220 struct ieee80211_vif *vif, 2221 struct ieee80211_sta *sta, 2222 struct wmi_peer_assoc_complete_arg *arg) 2223 { 2224 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2225 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2226 struct cfg80211_chan_def def; 2227 enum nl80211_band band; 2228 const u8 *ht_mcs_mask; 2229 const u16 *vht_mcs_mask; 2230 int i, n; 2231 u8 max_nss; 2232 u32 stbc; 2233 2234 lockdep_assert_held(&ar->conf_mutex); 2235 2236 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2237 return; 2238 2239 if (!ht_cap->ht_supported) 2240 return; 2241 2242 band = def.chan->band; 2243 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2244 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2245 2246 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) && 2247 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2248 return; 2249 2250 arg->peer_flags |= ar->wmi.peer_flags->ht; 2251 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2252 ht_cap->ampdu_factor)) - 1; 2253 2254 arg->peer_mpdu_density = 2255 ath10k_parse_mpdudensity(ht_cap->ampdu_density); 2256 2257 arg->peer_ht_caps = ht_cap->cap; 2258 arg->peer_rate_caps |= WMI_RC_HT_FLAG; 2259 2260 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 2261 arg->peer_flags |= ar->wmi.peer_flags->ldbc; 2262 2263 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { 2264 
arg->peer_flags |= ar->wmi.peer_flags->bw40; 2265 arg->peer_rate_caps |= WMI_RC_CW40_FLAG; 2266 } 2267 2268 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { 2269 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) 2270 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2271 2272 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) 2273 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2274 } 2275 2276 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { 2277 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; 2278 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2279 } 2280 2281 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { 2282 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; 2283 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; 2284 stbc = stbc << WMI_RC_RX_STBC_FLAG_S; 2285 arg->peer_rate_caps |= stbc; 2286 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2287 } 2288 2289 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 2290 arg->peer_rate_caps |= WMI_RC_TS_FLAG; 2291 else if (ht_cap->mcs.rx_mask[1]) 2292 arg->peer_rate_caps |= WMI_RC_DS_FLAG; 2293 2294 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) 2295 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && 2296 (ht_mcs_mask[i / 8] & BIT(i % 8))) { 2297 max_nss = (i / 8) + 1; 2298 arg->peer_ht_rates.rates[n++] = i; 2299 } 2300 2301 /* 2302 * This is a workaround for HT-enabled STAs which break the spec 2303 * and have no HT capabilities RX mask (no HT RX MCS map). 2304 * 2305 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), 2306 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. 2307 * 2308 * Firmware asserts if such situation occurs. 2309 */ 2310 if (n == 0) { 2311 arg->peer_ht_rates.num_rates = 8; 2312 for (i = 0; i < arg->peer_ht_rates.num_rates; i++) 2313 arg->peer_ht_rates.rates[i] = i; 2314 } else { 2315 arg->peer_ht_rates.num_rates = n; 2316 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2317 } 2318 2319 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 2320 arg->addr, 2321 arg->peer_ht_rates.num_rates, 2322 arg->peer_num_spatial_streams); 2323 } 2324 2325 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, 2326 struct ath10k_vif *arvif, 2327 struct ieee80211_sta *sta) 2328 { 2329 u32 uapsd = 0; 2330 u32 max_sp = 0; 2331 int ret = 0; 2332 2333 lockdep_assert_held(&ar->conf_mutex); 2334 2335 if (sta->wme && sta->uapsd_queues) { 2336 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 2337 sta->uapsd_queues, sta->max_sp); 2338 2339 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2340 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 2341 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 2342 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2343 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | 2344 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; 2345 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2346 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | 2347 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; 2348 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2349 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | 2350 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; 2351 2352 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 2353 max_sp = sta->max_sp; 2354 2355 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2356 sta->addr, 2357 WMI_AP_PS_PEER_PARAM_UAPSD, 2358 uapsd); 2359 if (ret) { 2360 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", 2361 arvif->vdev_id, ret); 2362 return ret; 2363 } 2364 2365 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2366 sta->addr, 2367 
WMI_AP_PS_PEER_PARAM_MAX_SP, 2368 max_sp); 2369 if (ret) { 2370 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", 2371 arvif->vdev_id, ret); 2372 return ret; 2373 } 2374 2375 /* TODO setup this based on STA listen interval and 2376 beacon interval. Currently we don't know 2377 sta->listen_interval - mac80211 patch required. 2378 Currently use 10 seconds */ 2379 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, 2380 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 2381 10); 2382 if (ret) { 2383 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", 2384 arvif->vdev_id, ret); 2385 return ret; 2386 } 2387 } 2388 2389 return 0; 2390 } 2391 2392 static u16 2393 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set, 2394 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) 2395 { 2396 int idx_limit; 2397 int nss; 2398 u16 mcs_map; 2399 u16 mcs; 2400 2401 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { 2402 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & 2403 vht_mcs_limit[nss]; 2404 2405 if (mcs_map) 2406 idx_limit = fls(mcs_map) - 1; 2407 else 2408 idx_limit = -1; 2409 2410 switch (idx_limit) { 2411 case 0: /* fall through */ 2412 case 1: /* fall through */ 2413 case 2: /* fall through */ 2414 case 3: /* fall through */ 2415 case 4: /* fall through */ 2416 case 5: /* fall through */ 2417 case 6: /* fall through */ 2418 default: 2419 /* see ath10k_mac_can_set_bitrate_mask() */ 2420 WARN_ON(1); 2421 /* fall through */ 2422 case -1: 2423 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; 2424 break; 2425 case 7: 2426 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; 2427 break; 2428 case 8: 2429 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; 2430 break; 2431 case 9: 2432 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; 2433 break; 2434 } 2435 2436 tx_mcs_set &= ~(0x3 << (nss * 2)); 2437 tx_mcs_set |= mcs << (nss * 2); 2438 } 2439 2440 return tx_mcs_set; 2441 } 2442 2443 static void ath10k_peer_assoc_h_vht(struct ath10k *ar, 2444 struct ieee80211_vif *vif, 2445 struct ieee80211_sta *sta, 2446 struct wmi_peer_assoc_complete_arg *arg) 2447 { 2448 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2449 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2450 struct cfg80211_chan_def def; 2451 enum nl80211_band band; 2452 const u16 *vht_mcs_mask; 2453 u8 ampdu_factor; 2454 2455 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2456 return; 2457 2458 if (!vht_cap->vht_supported) 2459 return; 2460 2461 band = def.chan->band; 2462 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2463 2464 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2465 return; 2466 2467 arg->peer_flags |= ar->wmi.peer_flags->vht; 2468 2469 if (def.chan->band == NL80211_BAND_2GHZ) 2470 arg->peer_flags |= ar->wmi.peer_flags->vht_2g; 2471 2472 arg->peer_vht_caps = vht_cap->cap; 2473 2474 ampdu_factor = (vht_cap->cap & 2475 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> 2476 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 2477 2478 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to 2479 * zero in VHT IE. Using it would result in degraded throughput. 2480 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep 2481 * it if VHT max_mpdu is smaller. 
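 * With IEEE80211_HT_MAX_AMPDU_FACTOR (13) the limit below works out to
 * (1 << (13 + exponent)) - 1 bytes, i.e. 8191 for exponent 0 up to
 * 1048575 for the maximum VHT exponent of 7; the larger of the HT and
 * VHT derived limits is kept.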
*/ 2482 arg->peer_max_mpdu = max(arg->peer_max_mpdu, 2483 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2484 ampdu_factor)) - 1); 2485 2486 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2487 arg->peer_flags |= ar->wmi.peer_flags->bw80; 2488 2489 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) 2490 arg->peer_flags |= ar->wmi.peer_flags->bw160; 2491 2492 arg->peer_vht_rates.rx_max_rate = 2493 __le16_to_cpu(vht_cap->vht_mcs.rx_highest); 2494 arg->peer_vht_rates.rx_mcs_set = 2495 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); 2496 arg->peer_vht_rates.tx_max_rate = 2497 __le16_to_cpu(vht_cap->vht_mcs.tx_highest); 2498 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit( 2499 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); 2500 2501 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", 2502 sta->addr, arg->peer_max_mpdu, arg->peer_flags); 2503 } 2504 2505 static void ath10k_peer_assoc_h_qos(struct ath10k *ar, 2506 struct ieee80211_vif *vif, 2507 struct ieee80211_sta *sta, 2508 struct wmi_peer_assoc_complete_arg *arg) 2509 { 2510 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2511 2512 switch (arvif->vdev_type) { 2513 case WMI_VDEV_TYPE_AP: 2514 if (sta->wme) 2515 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2516 2517 if (sta->wme && sta->uapsd_queues) { 2518 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd; 2519 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; 2520 } 2521 break; 2522 case WMI_VDEV_TYPE_STA: 2523 if (vif->bss_conf.qos) 2524 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2525 break; 2526 case WMI_VDEV_TYPE_IBSS: 2527 if (sta->wme) 2528 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2529 break; 2530 default: 2531 break; 2532 } 2533 2534 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", 2535 sta->addr, !!(arg->peer_flags & 2536 arvif->ar->wmi.peer_flags->qos)); 2537 } 2538 2539 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) 2540 { 2541 return sta->supp_rates[NL80211_BAND_2GHZ] >> 2542 ATH10K_MAC_FIRST_OFDM_RATE_IDX; 2543 } 2544 2545 static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, 2546 struct ieee80211_sta *sta) 2547 { 2548 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { 2549 switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 2550 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 2551 return MODE_11AC_VHT160; 2552 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: 2553 return MODE_11AC_VHT80_80; 2554 default: 2555 /* not sure if this is a valid case? 
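 * Fall back to VHT160 for reserved/unknown values of the supported
 * channel width field.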
*/ 2556 return MODE_11AC_VHT160; 2557 } 2558 } 2559 2560 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2561 return MODE_11AC_VHT80; 2562 2563 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2564 return MODE_11AC_VHT40; 2565 2566 if (sta->bandwidth == IEEE80211_STA_RX_BW_20) 2567 return MODE_11AC_VHT20; 2568 2569 return MODE_UNKNOWN; 2570 } 2571 2572 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, 2573 struct ieee80211_vif *vif, 2574 struct ieee80211_sta *sta, 2575 struct wmi_peer_assoc_complete_arg *arg) 2576 { 2577 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2578 struct cfg80211_chan_def def; 2579 enum nl80211_band band; 2580 const u8 *ht_mcs_mask; 2581 const u16 *vht_mcs_mask; 2582 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2583 2584 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2585 return; 2586 2587 band = def.chan->band; 2588 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2589 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2590 2591 switch (band) { 2592 case NL80211_BAND_2GHZ: 2593 if (sta->vht_cap.vht_supported && 2594 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2595 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2596 phymode = MODE_11AC_VHT40; 2597 else 2598 phymode = MODE_11AC_VHT20; 2599 } else if (sta->ht_cap.ht_supported && 2600 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2601 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2602 phymode = MODE_11NG_HT40; 2603 else 2604 phymode = MODE_11NG_HT20; 2605 } else if (ath10k_mac_sta_has_ofdm_only(sta)) { 2606 phymode = MODE_11G; 2607 } else { 2608 phymode = MODE_11B; 2609 } 2610 2611 break; 2612 case NL80211_BAND_5GHZ: 2613 /* 2614 * Check VHT first. 2615 */ 2616 if (sta->vht_cap.vht_supported && 2617 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2618 phymode = ath10k_mac_get_phymode_vht(ar, sta); 2619 } else if (sta->ht_cap.ht_supported && 2620 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2621 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) 2622 phymode = MODE_11NA_HT40; 2623 else 2624 phymode = MODE_11NA_HT20; 2625 } else { 2626 phymode = MODE_11A; 2627 } 2628 2629 break; 2630 default: 2631 break; 2632 } 2633 2634 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", 2635 sta->addr, ath10k_wmi_phymode_str(phymode)); 2636 2637 arg->peer_phymode = phymode; 2638 WARN_ON(phymode == MODE_UNKNOWN); 2639 } 2640 2641 static int ath10k_peer_assoc_prepare(struct ath10k *ar, 2642 struct ieee80211_vif *vif, 2643 struct ieee80211_sta *sta, 2644 struct wmi_peer_assoc_complete_arg *arg) 2645 { 2646 lockdep_assert_held(&ar->conf_mutex); 2647 2648 memset(arg, 0, sizeof(*arg)); 2649 2650 ath10k_peer_assoc_h_basic(ar, vif, sta, arg); 2651 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); 2652 ath10k_peer_assoc_h_rates(ar, vif, sta, arg); 2653 ath10k_peer_assoc_h_ht(ar, vif, sta, arg); 2654 ath10k_peer_assoc_h_vht(ar, vif, sta, arg); 2655 ath10k_peer_assoc_h_qos(ar, vif, sta, arg); 2656 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); 2657 2658 return 0; 2659 } 2660 2661 static const u32 ath10k_smps_map[] = { 2662 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, 2663 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, 2664 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 2665 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 2666 }; 2667 2668 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, 2669 const u8 *addr, 2670 const struct ieee80211_sta_ht_cap *ht_cap) 2671 { 2672 int smps; 2673 2674 if (!ht_cap->ht_supported) 2675 return 0; 2676 2677 smps = ht_cap->cap & 
IEEE80211_HT_CAP_SM_PS; 2678 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2679 2680 if (smps >= ARRAY_SIZE(ath10k_smps_map)) 2681 return -EINVAL; 2682 2683 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, 2684 WMI_PEER_SMPS_STATE, 2685 ath10k_smps_map[smps]); 2686 } 2687 2688 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, 2689 struct ieee80211_vif *vif, 2690 struct ieee80211_sta_vht_cap vht_cap) 2691 { 2692 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2693 int ret; 2694 u32 param; 2695 u32 value; 2696 2697 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) 2698 return 0; 2699 2700 if (!(ar->vht_cap_info & 2701 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2702 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 2703 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2704 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) 2705 return 0; 2706 2707 param = ar->wmi.vdev_param->txbf; 2708 value = 0; 2709 2710 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) 2711 return 0; 2712 2713 /* The following logic is correct. If a remote STA advertises support 2714 * for being a beamformer then we should enable us being a beamformee. 2715 */ 2716 2717 if (ar->vht_cap_info & 2718 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2719 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 2720 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 2721 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2722 2723 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 2724 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; 2725 } 2726 2727 if (ar->vht_cap_info & 2728 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2729 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 2730 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 2731 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2732 2733 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 2734 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; 2735 } 2736 2737 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) 2738 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2739 2740 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) 2741 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2742 2743 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); 2744 if (ret) { 2745 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", 2746 value, ret); 2747 return ret; 2748 } 2749 2750 return 0; 2751 } 2752 2753 /* can be called only in mac80211 callbacks due to `key_count` usage */ 2754 static void ath10k_bss_assoc(struct ieee80211_hw *hw, 2755 struct ieee80211_vif *vif, 2756 struct ieee80211_bss_conf *bss_conf) 2757 { 2758 struct ath10k *ar = hw->priv; 2759 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2760 struct ieee80211_sta_ht_cap ht_cap; 2761 struct ieee80211_sta_vht_cap vht_cap; 2762 struct wmi_peer_assoc_complete_arg peer_arg; 2763 struct ieee80211_sta *ap_sta; 2764 int ret; 2765 2766 lockdep_assert_held(&ar->conf_mutex); 2767 2768 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2769 arvif->vdev_id, arvif->bssid, arvif->aid); 2770 2771 rcu_read_lock(); 2772 2773 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2774 if (!ap_sta) { 2775 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", 2776 bss_conf->bssid, arvif->vdev_id); 2777 rcu_read_unlock(); 2778 return; 2779 } 2780 2781 /* ap_sta must be accessed only within rcu section which must be left 2782 * before calling ath10k_setup_peer_smps() which might sleep. 
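 * Hence the HT/VHT capabilities are copied by value and the peer assoc
 * arguments are prepared before rcu_read_unlock() is called.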
*/ 2783 ht_cap = ap_sta->ht_cap; 2784 vht_cap = ap_sta->vht_cap; 2785 2786 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); 2787 if (ret) { 2788 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", 2789 bss_conf->bssid, arvif->vdev_id, ret); 2790 rcu_read_unlock(); 2791 return; 2792 } 2793 2794 rcu_read_unlock(); 2795 2796 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2797 if (ret) { 2798 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", 2799 bss_conf->bssid, arvif->vdev_id, ret); 2800 return; 2801 } 2802 2803 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); 2804 if (ret) { 2805 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", 2806 arvif->vdev_id, ret); 2807 return; 2808 } 2809 2810 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2811 if (ret) { 2812 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", 2813 arvif->vdev_id, bss_conf->bssid, ret); 2814 return; 2815 } 2816 2817 ath10k_dbg(ar, ATH10K_DBG_MAC, 2818 "mac vdev %d up (associated) bssid %pM aid %d\n", 2819 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 2820 2821 WARN_ON(arvif->is_up); 2822 2823 arvif->aid = bss_conf->aid; 2824 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2825 2826 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2827 if (ret) { 2828 ath10k_warn(ar, "failed to set vdev %d up: %d\n", 2829 arvif->vdev_id, ret); 2830 return; 2831 } 2832 2833 arvif->is_up = true; 2834 2835 /* Workaround: Some firmware revisions (tested with qca6174 2836 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be 2837 * poked with peer param command. 2838 */ 2839 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, 2840 WMI_PEER_DUMMY_VAR, 1); 2841 if (ret) { 2842 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", 2843 arvif->bssid, arvif->vdev_id, ret); 2844 return; 2845 } 2846 } 2847 2848 static void ath10k_bss_disassoc(struct ieee80211_hw *hw, 2849 struct ieee80211_vif *vif) 2850 { 2851 struct ath10k *ar = hw->priv; 2852 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2853 struct ieee80211_sta_vht_cap vht_cap = {}; 2854 int ret; 2855 2856 lockdep_assert_held(&ar->conf_mutex); 2857 2858 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2859 arvif->vdev_id, arvif->bssid); 2860 2861 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 2862 if (ret) 2863 ath10k_warn(ar, "failed to down vdev %i: %d\n", 2864 arvif->vdev_id, ret); 2865 2866 arvif->def_wep_key_idx = -1; 2867 2868 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2869 if (ret) { 2870 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", 2871 arvif->vdev_id, ret); 2872 return; 2873 } 2874 2875 arvif->is_up = false; 2876 2877 cancel_delayed_work_sync(&arvif->connection_loss_work); 2878 } 2879 2880 static int ath10k_station_assoc(struct ath10k *ar, 2881 struct ieee80211_vif *vif, 2882 struct ieee80211_sta *sta, 2883 bool reassoc) 2884 { 2885 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2886 struct wmi_peer_assoc_complete_arg peer_arg; 2887 int ret = 0; 2888 2889 lockdep_assert_held(&ar->conf_mutex); 2890 2891 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); 2892 if (ret) { 2893 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", 2894 sta->addr, arvif->vdev_id, ret); 2895 return ret; 2896 } 2897 2898 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2899 if (ret) { 2900 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", 2901 sta->addr, 
arvif->vdev_id, ret); 2902 return ret; 2903 } 2904 2905 /* Re-assoc is run only to update supported rates for given station. It 2906 * doesn't make much sense to reconfigure the peer completely. 2907 */ 2908 if (!reassoc) { 2909 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, 2910 &sta->ht_cap); 2911 if (ret) { 2912 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", 2913 arvif->vdev_id, ret); 2914 return ret; 2915 } 2916 2917 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 2918 if (ret) { 2919 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", 2920 sta->addr, arvif->vdev_id, ret); 2921 return ret; 2922 } 2923 2924 if (!sta->wme) { 2925 arvif->num_legacy_stations++; 2926 ret = ath10k_recalc_rtscts_prot(arvif); 2927 if (ret) { 2928 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2929 arvif->vdev_id, ret); 2930 return ret; 2931 } 2932 } 2933 2934 /* Plumb cached keys only for static WEP */ 2935 if (arvif->def_wep_key_idx != -1) { 2936 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 2937 if (ret) { 2938 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", 2939 arvif->vdev_id, ret); 2940 return ret; 2941 } 2942 } 2943 } 2944 2945 return ret; 2946 } 2947 2948 static int ath10k_station_disassoc(struct ath10k *ar, 2949 struct ieee80211_vif *vif, 2950 struct ieee80211_sta *sta) 2951 { 2952 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2953 int ret = 0; 2954 2955 lockdep_assert_held(&ar->conf_mutex); 2956 2957 if (!sta->wme) { 2958 arvif->num_legacy_stations--; 2959 ret = ath10k_recalc_rtscts_prot(arvif); 2960 if (ret) { 2961 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2962 arvif->vdev_id, ret); 2963 return ret; 2964 } 2965 } 2966 2967 ret = ath10k_clear_peer_keys(arvif, sta->addr); 2968 if (ret) { 2969 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", 2970 arvif->vdev_id, ret); 2971 return ret; 2972 } 2973 2974 return ret; 2975 } 2976 2977 /**************/ 2978 /* Regulatory */ 2979 /**************/ 2980 2981 static int ath10k_update_channel_list(struct ath10k *ar) 2982 { 2983 struct ieee80211_hw *hw = ar->hw; 2984 struct ieee80211_supported_band **bands; 2985 enum nl80211_band band; 2986 struct ieee80211_channel *channel; 2987 struct wmi_scan_chan_list_arg arg = {0}; 2988 struct wmi_channel_arg *ch; 2989 bool passive; 2990 int len; 2991 int ret; 2992 int i; 2993 2994 lockdep_assert_held(&ar->conf_mutex); 2995 2996 bands = hw->wiphy->bands; 2997 for (band = 0; band < NUM_NL80211_BANDS; band++) { 2998 if (!bands[band]) 2999 continue; 3000 3001 for (i = 0; i < bands[band]->n_channels; i++) { 3002 if (bands[band]->channels[i].flags & 3003 IEEE80211_CHAN_DISABLED) 3004 continue; 3005 3006 arg.n_channels++; 3007 } 3008 } 3009 3010 len = sizeof(struct wmi_channel_arg) * arg.n_channels; 3011 arg.channels = kzalloc(len, GFP_KERNEL); 3012 if (!arg.channels) 3013 return -ENOMEM; 3014 3015 ch = arg.channels; 3016 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3017 if (!bands[band]) 3018 continue; 3019 3020 for (i = 0; i < bands[band]->n_channels; i++) { 3021 channel = &bands[band]->channels[i]; 3022 3023 if (channel->flags & IEEE80211_CHAN_DISABLED) 3024 continue; 3025 3026 ch->allow_ht = true; 3027 3028 /* FIXME: when should we really allow VHT? 
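 * For now VHT, like HT, is allowed on every channel that is not
 * disabled; IBSS, HT40+ and radar behaviour are still derived from the
 * regulatory channel flags below.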
*/ 3029 ch->allow_vht = true; 3030 3031 ch->allow_ibss = 3032 !(channel->flags & IEEE80211_CHAN_NO_IR); 3033 3034 ch->ht40plus = 3035 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); 3036 3037 ch->chan_radar = 3038 !!(channel->flags & IEEE80211_CHAN_RADAR); 3039 3040 passive = channel->flags & IEEE80211_CHAN_NO_IR; 3041 ch->passive = passive; 3042 3043 ch->freq = channel->center_freq; 3044 ch->band_center_freq1 = channel->center_freq; 3045 ch->min_power = 0; 3046 ch->max_power = channel->max_power * 2; 3047 ch->max_reg_power = channel->max_reg_power * 2; 3048 ch->max_antenna_gain = channel->max_antenna_gain * 2; 3049 ch->reg_class_id = 0; /* FIXME */ 3050 3051 /* FIXME: why use only legacy modes, why not any 3052 * HT/VHT modes? Would that even make any 3053 * difference? */ 3054 if (channel->band == NL80211_BAND_2GHZ) 3055 ch->mode = MODE_11G; 3056 else 3057 ch->mode = MODE_11A; 3058 3059 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) 3060 continue; 3061 3062 ath10k_dbg(ar, ATH10K_DBG_WMI, 3063 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", 3064 ch - arg.channels, arg.n_channels, 3065 ch->freq, ch->max_power, ch->max_reg_power, 3066 ch->max_antenna_gain, ch->mode); 3067 3068 ch++; 3069 } 3070 } 3071 3072 ret = ath10k_wmi_scan_chan_list(ar, &arg); 3073 kfree(arg.channels); 3074 3075 return ret; 3076 } 3077 3078 static enum wmi_dfs_region 3079 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) 3080 { 3081 switch (dfs_region) { 3082 case NL80211_DFS_UNSET: 3083 return WMI_UNINIT_DFS_DOMAIN; 3084 case NL80211_DFS_FCC: 3085 return WMI_FCC_DFS_DOMAIN; 3086 case NL80211_DFS_ETSI: 3087 return WMI_ETSI_DFS_DOMAIN; 3088 case NL80211_DFS_JP: 3089 return WMI_MKK4_DFS_DOMAIN; 3090 } 3091 return WMI_UNINIT_DFS_DOMAIN; 3092 } 3093 3094 static void ath10k_regd_update(struct ath10k *ar) 3095 { 3096 struct reg_dmn_pair_mapping *regpair; 3097 int ret; 3098 enum wmi_dfs_region wmi_dfs_reg; 3099 enum nl80211_dfs_regions nl_dfs_reg; 3100 3101 lockdep_assert_held(&ar->conf_mutex); 3102 3103 ret = ath10k_update_channel_list(ar); 3104 if (ret) 3105 ath10k_warn(ar, "failed to update channel list: %d\n", ret); 3106 3107 regpair = ar->ath_common.regulatory.regpair; 3108 3109 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3110 nl_dfs_reg = ar->dfs_detector->region; 3111 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); 3112 } else { 3113 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; 3114 } 3115 3116 /* Target allows setting up per-band regdomain but ath_common provides 3117 * a combined one only */ 3118 ret = ath10k_wmi_pdev_set_regdomain(ar, 3119 regpair->reg_domain, 3120 regpair->reg_domain, /* 2ghz */ 3121 regpair->reg_domain, /* 5ghz */ 3122 regpair->reg_2ghz_ctl, 3123 regpair->reg_5ghz_ctl, 3124 wmi_dfs_reg); 3125 if (ret) 3126 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); 3127 } 3128 3129 static void ath10k_reg_notifier(struct wiphy *wiphy, 3130 struct regulatory_request *request) 3131 { 3132 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 3133 struct ath10k *ar = hw->priv; 3134 bool result; 3135 3136 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); 3137 3138 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3139 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", 3140 request->dfs_region); 3141 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 3142 request->dfs_region); 3143 if (!result) 3144 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", 3145 
request->dfs_region); 3146 } 3147 3148 mutex_lock(&ar->conf_mutex); 3149 if (ar->state == ATH10K_STATE_ON) 3150 ath10k_regd_update(ar); 3151 mutex_unlock(&ar->conf_mutex); 3152 } 3153 3154 /***************/ 3155 /* TX handlers */ 3156 /***************/ 3157 3158 enum ath10k_mac_tx_path { 3159 ATH10K_MAC_TX_HTT, 3160 ATH10K_MAC_TX_HTT_MGMT, 3161 ATH10K_MAC_TX_WMI_MGMT, 3162 ATH10K_MAC_TX_UNKNOWN, 3163 }; 3164 3165 void ath10k_mac_tx_lock(struct ath10k *ar, int reason) 3166 { 3167 lockdep_assert_held(&ar->htt.tx_lock); 3168 3169 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3170 ar->tx_paused |= BIT(reason); 3171 ieee80211_stop_queues(ar->hw); 3172 } 3173 3174 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, 3175 struct ieee80211_vif *vif) 3176 { 3177 struct ath10k *ar = data; 3178 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3179 3180 if (arvif->tx_paused) 3181 return; 3182 3183 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3184 } 3185 3186 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) 3187 { 3188 lockdep_assert_held(&ar->htt.tx_lock); 3189 3190 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3191 ar->tx_paused &= ~BIT(reason); 3192 3193 if (ar->tx_paused) 3194 return; 3195 3196 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3197 IEEE80211_IFACE_ITER_RESUME_ALL, 3198 ath10k_mac_tx_unlock_iter, 3199 ar); 3200 3201 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue); 3202 } 3203 3204 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason) 3205 { 3206 struct ath10k *ar = arvif->ar; 3207 3208 lockdep_assert_held(&ar->htt.tx_lock); 3209 3210 WARN_ON(reason >= BITS_PER_LONG); 3211 arvif->tx_paused |= BIT(reason); 3212 ieee80211_stop_queue(ar->hw, arvif->vdev_id); 3213 } 3214 3215 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason) 3216 { 3217 struct ath10k *ar = arvif->ar; 3218 3219 lockdep_assert_held(&ar->htt.tx_lock); 3220 3221 WARN_ON(reason >= BITS_PER_LONG); 3222 arvif->tx_paused &= ~BIT(reason); 3223 3224 if (ar->tx_paused) 3225 return; 3226 3227 if (arvif->tx_paused) 3228 return; 3229 3230 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3231 } 3232 3233 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, 3234 enum wmi_tlv_tx_pause_id pause_id, 3235 enum wmi_tlv_tx_pause_action action) 3236 { 3237 struct ath10k *ar = arvif->ar; 3238 3239 lockdep_assert_held(&ar->htt.tx_lock); 3240 3241 switch (action) { 3242 case WMI_TLV_TX_PAUSE_ACTION_STOP: 3243 ath10k_mac_vif_tx_lock(arvif, pause_id); 3244 break; 3245 case WMI_TLV_TX_PAUSE_ACTION_WAKE: 3246 ath10k_mac_vif_tx_unlock(arvif, pause_id); 3247 break; 3248 default: 3249 ath10k_dbg(ar, ATH10K_DBG_BOOT, 3250 "received unknown tx pause action %d on vdev %i, ignoring\n", 3251 action, arvif->vdev_id); 3252 break; 3253 } 3254 } 3255 3256 struct ath10k_mac_tx_pause { 3257 u32 vdev_id; 3258 enum wmi_tlv_tx_pause_id pause_id; 3259 enum wmi_tlv_tx_pause_action action; 3260 }; 3261 3262 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, 3263 struct ieee80211_vif *vif) 3264 { 3265 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3266 struct ath10k_mac_tx_pause *arg = data; 3267 3268 if (arvif->vdev_id != arg->vdev_id) 3269 return; 3270 3271 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action); 3272 } 3273 3274 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, 3275 enum wmi_tlv_tx_pause_id pause_id, 3276 enum wmi_tlv_tx_pause_action action) 3277 { 3278 struct ath10k_mac_tx_pause arg = { 3279 .vdev_id = vdev_id, 3280 .pause_id = pause_id, 3281 
.action = action, 3282 }; 3283 3284 spin_lock_bh(&ar->htt.tx_lock); 3285 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3286 IEEE80211_IFACE_ITER_RESUME_ALL, 3287 ath10k_mac_handle_tx_pause_iter, 3288 &arg); 3289 spin_unlock_bh(&ar->htt.tx_lock); 3290 } 3291 3292 static enum ath10k_hw_txrx_mode 3293 ath10k_mac_tx_h_get_txmode(struct ath10k *ar, 3294 struct ieee80211_vif *vif, 3295 struct ieee80211_sta *sta, 3296 struct sk_buff *skb) 3297 { 3298 const struct ieee80211_hdr *hdr = (void *)skb->data; 3299 __le16 fc = hdr->frame_control; 3300 3301 if (!vif || vif->type == NL80211_IFTYPE_MONITOR) 3302 return ATH10K_HW_TXRX_RAW; 3303 3304 if (ieee80211_is_mgmt(fc)) 3305 return ATH10K_HW_TXRX_MGMT; 3306 3307 /* Workaround: 3308 * 3309 * NullFunc frames are mostly used to ping if a client or AP are still 3310 * reachable and responsive. This implies tx status reports must be 3311 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can 3312 * come to a conclusion that the other end disappeared and tear down 3313 * BSS connection or it can never disconnect from BSS/client (which is 3314 * the case). 3315 * 3316 * Firmware with HTT older than 3.0 delivers incorrect tx status for 3317 * NullFunc frames to driver. However there's a HTT Mgmt Tx command 3318 * which seems to deliver correct tx reports for NullFunc frames. The 3319 * downside of using it is it ignores client powersave state so it can 3320 * end up disconnecting sleeping clients in AP mode. It should fix STA 3321 * mode though because AP don't sleep. 3322 */ 3323 if (ar->htt.target_version_major < 3 && 3324 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && 3325 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3326 ar->running_fw->fw_file.fw_features)) 3327 return ATH10K_HW_TXRX_MGMT; 3328 3329 /* Workaround: 3330 * 3331 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for 3332 * NativeWifi txmode - it selects AP key instead of peer key. It seems 3333 * to work with Ethernet txmode so use it. 3334 * 3335 * FIXME: Check if raw mode works with TDLS. 3336 */ 3337 if (ieee80211_is_data_present(fc) && sta && sta->tdls) 3338 return ATH10K_HW_TXRX_ETHERNET; 3339 3340 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 3341 return ATH10K_HW_TXRX_RAW; 3342 3343 return ATH10K_HW_TXRX_NATIVE_WIFI; 3344 } 3345 3346 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, 3347 struct sk_buff *skb) 3348 { 3349 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3350 const struct ieee80211_hdr *hdr = (void *)skb->data; 3351 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | 3352 IEEE80211_TX_CTL_INJECTED; 3353 3354 if (!ieee80211_has_protected(hdr->frame_control)) 3355 return false; 3356 3357 if ((info->flags & mask) == mask) 3358 return false; 3359 3360 if (vif) 3361 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; 3362 3363 return true; 3364 } 3365 3366 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS 3367 * Control in the header. 
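 * ath10k_tx_h_nwifi() below strips the QoS Control field from QoS data
 * frames and clears the QoS-data subtype bit in the frame control.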
3368 */ 3369 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) 3370 { 3371 struct ieee80211_hdr *hdr = (void *)skb->data; 3372 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3373 u8 *qos_ctl; 3374 3375 if (!ieee80211_is_data_qos(hdr->frame_control)) 3376 return; 3377 3378 qos_ctl = ieee80211_get_qos_ctl(hdr); 3379 memmove(skb->data + IEEE80211_QOS_CTL_LEN, 3380 skb->data, (void *)qos_ctl - (void *)skb->data); 3381 skb_pull(skb, IEEE80211_QOS_CTL_LEN); 3382 3383 /* Some firmware revisions don't handle sending QoS NullFunc well. 3384 * These frames are mainly used for CQM purposes so it doesn't really 3385 * matter whether QoS NullFunc or NullFunc are sent. 3386 */ 3387 hdr = (void *)skb->data; 3388 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) 3389 cb->flags &= ~ATH10K_SKB_F_QOS; 3390 3391 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 3392 } 3393 3394 static void ath10k_tx_h_8023(struct sk_buff *skb) 3395 { 3396 struct ieee80211_hdr *hdr; 3397 struct rfc1042_hdr *rfc1042; 3398 struct ethhdr *eth; 3399 size_t hdrlen; 3400 u8 da[ETH_ALEN]; 3401 u8 sa[ETH_ALEN]; 3402 __be16 type; 3403 3404 hdr = (void *)skb->data; 3405 hdrlen = ieee80211_hdrlen(hdr->frame_control); 3406 rfc1042 = (void *)skb->data + hdrlen; 3407 3408 ether_addr_copy(da, ieee80211_get_DA(hdr)); 3409 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 3410 type = rfc1042->snap_type; 3411 3412 skb_pull(skb, hdrlen + sizeof(*rfc1042)); 3413 skb_push(skb, sizeof(*eth)); 3414 3415 eth = (void *)skb->data; 3416 ether_addr_copy(eth->h_dest, da); 3417 ether_addr_copy(eth->h_source, sa); 3418 eth->h_proto = type; 3419 } 3420 3421 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, 3422 struct ieee80211_vif *vif, 3423 struct sk_buff *skb) 3424 { 3425 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3426 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3427 3428 /* This is case only for P2P_GO */ 3429 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 3430 return; 3431 3432 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { 3433 spin_lock_bh(&ar->data_lock); 3434 if (arvif->u.ap.noa_data) 3435 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, 3436 GFP_ATOMIC)) 3437 memcpy(skb_put(skb, arvif->u.ap.noa_len), 3438 arvif->u.ap.noa_data, 3439 arvif->u.ap.noa_len); 3440 spin_unlock_bh(&ar->data_lock); 3441 } 3442 } 3443 3444 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, 3445 struct ieee80211_vif *vif, 3446 struct ieee80211_txq *txq, 3447 struct sk_buff *skb) 3448 { 3449 struct ieee80211_hdr *hdr = (void *)skb->data; 3450 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3451 3452 cb->flags = 0; 3453 if (!ath10k_tx_h_use_hwcrypto(vif, skb)) 3454 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3455 3456 if (ieee80211_is_mgmt(hdr->frame_control)) 3457 cb->flags |= ATH10K_SKB_F_MGMT; 3458 3459 if (ieee80211_is_data_qos(hdr->frame_control)) 3460 cb->flags |= ATH10K_SKB_F_QOS; 3461 3462 cb->vif = vif; 3463 cb->txq = txq; 3464 } 3465 3466 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) 3467 { 3468 /* FIXME: Not really sure since when the behaviour changed. At some 3469 * point new firmware stopped requiring creation of peer entries for 3470 * offchannel tx (and actually creating them causes issues with wmi-htc 3471 * tx credit replenishment and reliability). Assuming it's at least 3.4 3472 * because that's when the `freq` was introduced to TX_FRM HTT command. 
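 * When this returns true, ath10k_mac_tx() submits offchannel frames
 * directly instead of deferring them to the offchannel tx worker.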
3473 */ 3474 return (ar->htt.target_version_major >= 3 && 3475 ar->htt.target_version_minor >= 4 && 3476 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); 3477 } 3478 3479 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) 3480 { 3481 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 3482 int ret = 0; 3483 3484 spin_lock_bh(&ar->data_lock); 3485 3486 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { 3487 ath10k_warn(ar, "wmi mgmt tx queue is full\n"); 3488 ret = -ENOSPC; 3489 goto unlock; 3490 } 3491 3492 __skb_queue_tail(q, skb); 3493 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 3494 3495 unlock: 3496 spin_unlock_bh(&ar->data_lock); 3497 3498 return ret; 3499 } 3500 3501 static enum ath10k_mac_tx_path 3502 ath10k_mac_tx_h_get_txpath(struct ath10k *ar, 3503 struct sk_buff *skb, 3504 enum ath10k_hw_txrx_mode txmode) 3505 { 3506 switch (txmode) { 3507 case ATH10K_HW_TXRX_RAW: 3508 case ATH10K_HW_TXRX_NATIVE_WIFI: 3509 case ATH10K_HW_TXRX_ETHERNET: 3510 return ATH10K_MAC_TX_HTT; 3511 case ATH10K_HW_TXRX_MGMT: 3512 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3513 ar->running_fw->fw_file.fw_features)) 3514 return ATH10K_MAC_TX_WMI_MGMT; 3515 else if (ar->htt.target_version_major >= 3) 3516 return ATH10K_MAC_TX_HTT; 3517 else 3518 return ATH10K_MAC_TX_HTT_MGMT; 3519 } 3520 3521 return ATH10K_MAC_TX_UNKNOWN; 3522 } 3523 3524 static int ath10k_mac_tx_submit(struct ath10k *ar, 3525 enum ath10k_hw_txrx_mode txmode, 3526 enum ath10k_mac_tx_path txpath, 3527 struct sk_buff *skb) 3528 { 3529 struct ath10k_htt *htt = &ar->htt; 3530 int ret = -EINVAL; 3531 3532 switch (txpath) { 3533 case ATH10K_MAC_TX_HTT: 3534 ret = ath10k_htt_tx(htt, txmode, skb); 3535 break; 3536 case ATH10K_MAC_TX_HTT_MGMT: 3537 ret = ath10k_htt_mgmt_tx(htt, skb); 3538 break; 3539 case ATH10K_MAC_TX_WMI_MGMT: 3540 ret = ath10k_mac_tx_wmi_mgmt(ar, skb); 3541 break; 3542 case ATH10K_MAC_TX_UNKNOWN: 3543 WARN_ON_ONCE(1); 3544 ret = -EINVAL; 3545 break; 3546 } 3547 3548 if (ret) { 3549 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", 3550 ret); 3551 ieee80211_free_txskb(ar->hw, skb); 3552 } 3553 3554 return ret; 3555 } 3556 3557 /* This function consumes the sk_buff regardless of return value as far as 3558 * caller is concerned so no freeing is necessary afterwards. 
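 * On failure the skb is freed either here (e.g. raw mode frames when
 * raw mode is not enabled) or by ath10k_mac_tx_submit(), so the caller
 * must not free it again.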
3559 */ 3560 static int ath10k_mac_tx(struct ath10k *ar, 3561 struct ieee80211_vif *vif, 3562 enum ath10k_hw_txrx_mode txmode, 3563 enum ath10k_mac_tx_path txpath, 3564 struct sk_buff *skb) 3565 { 3566 struct ieee80211_hw *hw = ar->hw; 3567 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3568 int ret; 3569 3570 /* We should disable CCK RATE due to P2P */ 3571 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 3572 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); 3573 3574 switch (txmode) { 3575 case ATH10K_HW_TXRX_MGMT: 3576 case ATH10K_HW_TXRX_NATIVE_WIFI: 3577 ath10k_tx_h_nwifi(hw, skb); 3578 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); 3579 ath10k_tx_h_seq_no(vif, skb); 3580 break; 3581 case ATH10K_HW_TXRX_ETHERNET: 3582 ath10k_tx_h_8023(skb); 3583 break; 3584 case ATH10K_HW_TXRX_RAW: 3585 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 3586 WARN_ON_ONCE(1); 3587 ieee80211_free_txskb(hw, skb); 3588 return -ENOTSUPP; 3589 } 3590 } 3591 3592 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 3593 if (!ath10k_mac_tx_frm_has_freq(ar)) { 3594 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n", 3595 skb); 3596 3597 skb_queue_tail(&ar->offchan_tx_queue, skb); 3598 ieee80211_queue_work(hw, &ar->offchan_tx_work); 3599 return 0; 3600 } 3601 } 3602 3603 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb); 3604 if (ret) { 3605 ath10k_warn(ar, "failed to submit frame: %d\n", ret); 3606 return ret; 3607 } 3608 3609 return 0; 3610 } 3611 3612 void ath10k_offchan_tx_purge(struct ath10k *ar) 3613 { 3614 struct sk_buff *skb; 3615 3616 for (;;) { 3617 skb = skb_dequeue(&ar->offchan_tx_queue); 3618 if (!skb) 3619 break; 3620 3621 ieee80211_free_txskb(ar->hw, skb); 3622 } 3623 } 3624 3625 void ath10k_offchan_tx_work(struct work_struct *work) 3626 { 3627 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); 3628 struct ath10k_peer *peer; 3629 struct ath10k_vif *arvif; 3630 enum ath10k_hw_txrx_mode txmode; 3631 enum ath10k_mac_tx_path txpath; 3632 struct ieee80211_hdr *hdr; 3633 struct ieee80211_vif *vif; 3634 struct ieee80211_sta *sta; 3635 struct sk_buff *skb; 3636 const u8 *peer_addr; 3637 int vdev_id; 3638 int ret; 3639 unsigned long time_left; 3640 bool tmp_peer_created = false; 3641 3642 /* FW requirement: We must create a peer before FW will send out 3643 * an offchannel frame. Otherwise the frame will be stuck and 3644 * never transmitted. We delete the peer upon tx completion. 3645 * It is unlikely that a peer for offchannel tx will already be 3646 * present. However it may be in some rare cases so account for that. 3647 * Otherwise we might remove a legitimate peer and break stuff. */ 3648 3649 for (;;) { 3650 skb = skb_dequeue(&ar->offchan_tx_queue); 3651 if (!skb) 3652 break; 3653 3654 mutex_lock(&ar->conf_mutex); 3655 3656 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n", 3657 skb); 3658 3659 hdr = (struct ieee80211_hdr *)skb->data; 3660 peer_addr = ieee80211_get_DA(hdr); 3661 3662 spin_lock_bh(&ar->data_lock); 3663 vdev_id = ar->scan.vdev_id; 3664 peer = ath10k_peer_find(ar, vdev_id, peer_addr); 3665 spin_unlock_bh(&ar->data_lock); 3666 3667 if (peer) 3668 /* FIXME: should this use ath10k_warn()? 
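 * An already existing peer is simply reused; tmp_peer_created stays
 * false so the peer is not deleted once the offchannel tx completes.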
*/ 3669 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", 3670 peer_addr, vdev_id); 3671 3672 if (!peer) { 3673 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, 3674 peer_addr, 3675 WMI_PEER_TYPE_DEFAULT); 3676 if (ret) 3677 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", 3678 peer_addr, vdev_id, ret); 3679 tmp_peer_created = (ret == 0); 3680 } 3681 3682 spin_lock_bh(&ar->data_lock); 3683 reinit_completion(&ar->offchan_tx_completed); 3684 ar->offchan_tx_skb = skb; 3685 spin_unlock_bh(&ar->data_lock); 3686 3687 /* It's safe to access vif and sta - conf_mutex guarantees that 3688 * sta_state() and remove_interface() are locked exclusively 3689 * out wrt to this offchannel worker. 3690 */ 3691 arvif = ath10k_get_arvif(ar, vdev_id); 3692 if (arvif) { 3693 vif = arvif->vif; 3694 sta = ieee80211_find_sta(vif, peer_addr); 3695 } else { 3696 vif = NULL; 3697 sta = NULL; 3698 } 3699 3700 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3701 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3702 3703 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3704 if (ret) { 3705 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", 3706 ret); 3707 /* not serious */ 3708 } 3709 3710 time_left = 3711 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); 3712 if (time_left == 0) 3713 ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n", 3714 skb); 3715 3716 if (!peer && tmp_peer_created) { 3717 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); 3718 if (ret) 3719 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", 3720 peer_addr, vdev_id, ret); 3721 } 3722 3723 mutex_unlock(&ar->conf_mutex); 3724 } 3725 } 3726 3727 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) 3728 { 3729 struct sk_buff *skb; 3730 3731 for (;;) { 3732 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3733 if (!skb) 3734 break; 3735 3736 ieee80211_free_txskb(ar->hw, skb); 3737 } 3738 } 3739 3740 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) 3741 { 3742 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); 3743 struct sk_buff *skb; 3744 int ret; 3745 3746 for (;;) { 3747 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3748 if (!skb) 3749 break; 3750 3751 ret = ath10k_wmi_mgmt_tx(ar, skb); 3752 if (ret) { 3753 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", 3754 ret); 3755 ieee80211_free_txskb(ar->hw, skb); 3756 } 3757 } 3758 } 3759 3760 static void ath10k_mac_txq_init(struct ieee80211_txq *txq) 3761 { 3762 struct ath10k_txq *artxq; 3763 3764 if (!txq) 3765 return; 3766 3767 artxq = (void *)txq->drv_priv; 3768 INIT_LIST_HEAD(&artxq->list); 3769 } 3770 3771 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) 3772 { 3773 struct ath10k_txq *artxq; 3774 struct ath10k_skb_cb *cb; 3775 struct sk_buff *msdu; 3776 int msdu_id; 3777 3778 if (!txq) 3779 return; 3780 3781 artxq = (void *)txq->drv_priv; 3782 spin_lock_bh(&ar->txqs_lock); 3783 if (!list_empty(&artxq->list)) 3784 list_del_init(&artxq->list); 3785 spin_unlock_bh(&ar->txqs_lock); 3786 3787 spin_lock_bh(&ar->htt.tx_lock); 3788 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { 3789 cb = ATH10K_SKB_CB(msdu); 3790 if (cb->txq == txq) 3791 cb->txq = NULL; 3792 } 3793 spin_unlock_bh(&ar->htt.tx_lock); 3794 } 3795 3796 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, 3797 u16 peer_id, 3798 u8 tid) 3799 { 3800 struct ath10k_peer *peer; 3801 3802 lockdep_assert_held(&ar->data_lock); 3803 3804 peer = ar->peer_map[peer_id]; 3805 
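/* Bail out unless the peer lookup is still valid: the entry must exist,
 * must not be marked removed, and must have either a station or a vif
 * txq to hand back.
 */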
if (!peer) 3806 return NULL; 3807 3808 if (peer->removed) 3809 return NULL; 3810 3811 if (peer->sta) 3812 return peer->sta->txq[tid]; 3813 else if (peer->vif) 3814 return peer->vif->txq; 3815 else 3816 return NULL; 3817 } 3818 3819 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, 3820 struct ieee80211_txq *txq) 3821 { 3822 struct ath10k *ar = hw->priv; 3823 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3824 3825 /* No need to get locks */ 3826 3827 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) 3828 return true; 3829 3830 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) 3831 return true; 3832 3833 if (artxq->num_fw_queued < artxq->num_push_allowed) 3834 return true; 3835 3836 return false; 3837 } 3838 3839 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, 3840 struct ieee80211_txq *txq) 3841 { 3842 struct ath10k *ar = hw->priv; 3843 struct ath10k_htt *htt = &ar->htt; 3844 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3845 struct ieee80211_vif *vif = txq->vif; 3846 struct ieee80211_sta *sta = txq->sta; 3847 enum ath10k_hw_txrx_mode txmode; 3848 enum ath10k_mac_tx_path txpath; 3849 struct sk_buff *skb; 3850 struct ieee80211_hdr *hdr; 3851 size_t skb_len; 3852 bool is_mgmt, is_presp; 3853 int ret; 3854 3855 spin_lock_bh(&ar->htt.tx_lock); 3856 ret = ath10k_htt_tx_inc_pending(htt); 3857 spin_unlock_bh(&ar->htt.tx_lock); 3858 3859 if (ret) 3860 return ret; 3861 3862 skb = ieee80211_tx_dequeue(hw, txq); 3863 if (!skb) { 3864 spin_lock_bh(&ar->htt.tx_lock); 3865 ath10k_htt_tx_dec_pending(htt); 3866 spin_unlock_bh(&ar->htt.tx_lock); 3867 3868 return -ENOENT; 3869 } 3870 3871 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 3872 3873 skb_len = skb->len; 3874 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3875 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3876 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 3877 3878 if (is_mgmt) { 3879 hdr = (struct ieee80211_hdr *)skb->data; 3880 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 3881 3882 spin_lock_bh(&ar->htt.tx_lock); 3883 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 3884 3885 if (ret) { 3886 ath10k_htt_tx_dec_pending(htt); 3887 spin_unlock_bh(&ar->htt.tx_lock); 3888 return ret; 3889 } 3890 spin_unlock_bh(&ar->htt.tx_lock); 3891 } 3892 3893 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3894 if (unlikely(ret)) { 3895 ath10k_warn(ar, "failed to push frame: %d\n", ret); 3896 3897 spin_lock_bh(&ar->htt.tx_lock); 3898 ath10k_htt_tx_dec_pending(htt); 3899 if (is_mgmt) 3900 ath10k_htt_tx_mgmt_dec_pending(htt); 3901 spin_unlock_bh(&ar->htt.tx_lock); 3902 3903 return ret; 3904 } 3905 3906 spin_lock_bh(&ar->htt.tx_lock); 3907 artxq->num_fw_queued++; 3908 spin_unlock_bh(&ar->htt.tx_lock); 3909 3910 return skb_len; 3911 } 3912 3913 void ath10k_mac_tx_push_pending(struct ath10k *ar) 3914 { 3915 struct ieee80211_hw *hw = ar->hw; 3916 struct ieee80211_txq *txq; 3917 struct ath10k_txq *artxq; 3918 struct ath10k_txq *last; 3919 int ret; 3920 int max; 3921 3922 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) 3923 return; 3924 3925 spin_lock_bh(&ar->txqs_lock); 3926 rcu_read_lock(); 3927 3928 last = list_last_entry(&ar->txqs, struct ath10k_txq, list); 3929 while (!list_empty(&ar->txqs)) { 3930 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 3931 txq = container_of((void *)artxq, struct ieee80211_txq, 3932 drv_priv); 3933 3934 /* Prevent aggressive sta/tid taking over tx queue */ 3935 max = 16; 3936 ret = 0; 3937 while (ath10k_mac_tx_can_push(hw, txq) 
&& max--) { 3938 ret = ath10k_mac_tx_push_txq(hw, txq); 3939 if (ret < 0) 3940 break; 3941 } 3942 3943 list_del_init(&artxq->list); 3944 if (ret != -ENOENT) 3945 list_add_tail(&artxq->list, &ar->txqs); 3946 3947 ath10k_htt_tx_txq_update(hw, txq); 3948 3949 if (artxq == last || (ret < 0 && ret != -ENOENT)) 3950 break; 3951 } 3952 3953 rcu_read_unlock(); 3954 spin_unlock_bh(&ar->txqs_lock); 3955 } 3956 3957 /************/ 3958 /* Scanning */ 3959 /************/ 3960 3961 void __ath10k_scan_finish(struct ath10k *ar) 3962 { 3963 lockdep_assert_held(&ar->data_lock); 3964 3965 switch (ar->scan.state) { 3966 case ATH10K_SCAN_IDLE: 3967 break; 3968 case ATH10K_SCAN_RUNNING: 3969 case ATH10K_SCAN_ABORTING: 3970 if (!ar->scan.is_roc) { 3971 struct cfg80211_scan_info info = { 3972 .aborted = (ar->scan.state == 3973 ATH10K_SCAN_ABORTING), 3974 }; 3975 3976 ieee80211_scan_completed(ar->hw, &info); 3977 } else if (ar->scan.roc_notify) { 3978 ieee80211_remain_on_channel_expired(ar->hw); 3979 } 3980 /* fall through */ 3981 case ATH10K_SCAN_STARTING: 3982 ar->scan.state = ATH10K_SCAN_IDLE; 3983 ar->scan_channel = NULL; 3984 ar->scan.roc_freq = 0; 3985 ath10k_offchan_tx_purge(ar); 3986 cancel_delayed_work(&ar->scan.timeout); 3987 complete(&ar->scan.completed); 3988 break; 3989 } 3990 } 3991 3992 void ath10k_scan_finish(struct ath10k *ar) 3993 { 3994 spin_lock_bh(&ar->data_lock); 3995 __ath10k_scan_finish(ar); 3996 spin_unlock_bh(&ar->data_lock); 3997 } 3998 3999 static int ath10k_scan_stop(struct ath10k *ar) 4000 { 4001 struct wmi_stop_scan_arg arg = { 4002 .req_id = 1, /* FIXME */ 4003 .req_type = WMI_SCAN_STOP_ONE, 4004 .u.scan_id = ATH10K_SCAN_ID, 4005 }; 4006 int ret; 4007 4008 lockdep_assert_held(&ar->conf_mutex); 4009 4010 ret = ath10k_wmi_stop_scan(ar, &arg); 4011 if (ret) { 4012 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); 4013 goto out; 4014 } 4015 4016 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); 4017 if (ret == 0) { 4018 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); 4019 ret = -ETIMEDOUT; 4020 } else if (ret > 0) { 4021 ret = 0; 4022 } 4023 4024 out: 4025 /* Scan state should be updated upon scan completion but in case 4026 * firmware fails to deliver the event (for whatever reason) it is 4027 * desired to clean up scan state anyway. Firmware may have just 4028 * dropped the scan completion event delivery due to transport pipe 4029 * being overflown with data and/or it can recover on its own before 4030 * next scan request is submitted. 4031 */ 4032 spin_lock_bh(&ar->data_lock); 4033 if (ar->scan.state != ATH10K_SCAN_IDLE) 4034 __ath10k_scan_finish(ar); 4035 spin_unlock_bh(&ar->data_lock); 4036 4037 return ret; 4038 } 4039 4040 static void ath10k_scan_abort(struct ath10k *ar) 4041 { 4042 int ret; 4043 4044 lockdep_assert_held(&ar->conf_mutex); 4045 4046 spin_lock_bh(&ar->data_lock); 4047 4048 switch (ar->scan.state) { 4049 case ATH10K_SCAN_IDLE: 4050 /* This can happen if timeout worker kicked in and called 4051 * abortion while scan completion was being processed. 
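 * The scan state is already idle at this point so there is nothing
 * left to clean up here.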
4052 */ 4053 break; 4054 case ATH10K_SCAN_STARTING: 4055 case ATH10K_SCAN_ABORTING: 4056 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", 4057 ath10k_scan_state_str(ar->scan.state), 4058 ar->scan.state); 4059 break; 4060 case ATH10K_SCAN_RUNNING: 4061 ar->scan.state = ATH10K_SCAN_ABORTING; 4062 spin_unlock_bh(&ar->data_lock); 4063 4064 ret = ath10k_scan_stop(ar); 4065 if (ret) 4066 ath10k_warn(ar, "failed to abort scan: %d\n", ret); 4067 4068 spin_lock_bh(&ar->data_lock); 4069 break; 4070 } 4071 4072 spin_unlock_bh(&ar->data_lock); 4073 } 4074 4075 void ath10k_scan_timeout_work(struct work_struct *work) 4076 { 4077 struct ath10k *ar = container_of(work, struct ath10k, 4078 scan.timeout.work); 4079 4080 mutex_lock(&ar->conf_mutex); 4081 ath10k_scan_abort(ar); 4082 mutex_unlock(&ar->conf_mutex); 4083 } 4084 4085 static int ath10k_start_scan(struct ath10k *ar, 4086 const struct wmi_start_scan_arg *arg) 4087 { 4088 int ret; 4089 4090 lockdep_assert_held(&ar->conf_mutex); 4091 4092 ret = ath10k_wmi_start_scan(ar, arg); 4093 if (ret) 4094 return ret; 4095 4096 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); 4097 if (ret == 0) { 4098 ret = ath10k_scan_stop(ar); 4099 if (ret) 4100 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 4101 4102 return -ETIMEDOUT; 4103 } 4104 4105 /* If we failed to start the scan, return error code at 4106 * this point. This is probably due to some issue in the 4107 * firmware, but no need to wedge the driver due to that... 4108 */ 4109 spin_lock_bh(&ar->data_lock); 4110 if (ar->scan.state == ATH10K_SCAN_IDLE) { 4111 spin_unlock_bh(&ar->data_lock); 4112 return -EINVAL; 4113 } 4114 spin_unlock_bh(&ar->data_lock); 4115 4116 return 0; 4117 } 4118 4119 /**********************/ 4120 /* mac80211 callbacks */ 4121 /**********************/ 4122 4123 static void ath10k_mac_op_tx(struct ieee80211_hw *hw, 4124 struct ieee80211_tx_control *control, 4125 struct sk_buff *skb) 4126 { 4127 struct ath10k *ar = hw->priv; 4128 struct ath10k_htt *htt = &ar->htt; 4129 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 4130 struct ieee80211_vif *vif = info->control.vif; 4131 struct ieee80211_sta *sta = control->sta; 4132 struct ieee80211_txq *txq = NULL; 4133 struct ieee80211_hdr *hdr = (void *)skb->data; 4134 enum ath10k_hw_txrx_mode txmode; 4135 enum ath10k_mac_tx_path txpath; 4136 bool is_htt; 4137 bool is_mgmt; 4138 bool is_presp; 4139 int ret; 4140 4141 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 4142 4143 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4144 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4145 is_htt = (txpath == ATH10K_MAC_TX_HTT || 4146 txpath == ATH10K_MAC_TX_HTT_MGMT); 4147 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4148 4149 if (is_htt) { 4150 spin_lock_bh(&ar->htt.tx_lock); 4151 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4152 4153 ret = ath10k_htt_tx_inc_pending(htt); 4154 if (ret) { 4155 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", 4156 ret); 4157 spin_unlock_bh(&ar->htt.tx_lock); 4158 ieee80211_free_txskb(ar->hw, skb); 4159 return; 4160 } 4161 4162 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4163 if (ret) { 4164 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", 4165 ret); 4166 ath10k_htt_tx_dec_pending(htt); 4167 spin_unlock_bh(&ar->htt.tx_lock); 4168 ieee80211_free_txskb(ar->hw, skb); 4169 return; 4170 } 4171 spin_unlock_bh(&ar->htt.tx_lock); 4172 } 4173 4174 ret = ath10k_mac_tx(ar, vif, txmode, 
txpath, skb); 4175 if (ret) { 4176 ath10k_warn(ar, "failed to transmit frame: %d\n", ret); 4177 if (is_htt) { 4178 spin_lock_bh(&ar->htt.tx_lock); 4179 ath10k_htt_tx_dec_pending(htt); 4180 if (is_mgmt) 4181 ath10k_htt_tx_mgmt_dec_pending(htt); 4182 spin_unlock_bh(&ar->htt.tx_lock); 4183 } 4184 return; 4185 } 4186 } 4187 4188 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, 4189 struct ieee80211_txq *txq) 4190 { 4191 struct ath10k *ar = hw->priv; 4192 struct ath10k_txq *artxq = (void *)txq->drv_priv; 4193 struct ieee80211_txq *f_txq; 4194 struct ath10k_txq *f_artxq; 4195 int ret = 0; 4196 int max = 16; 4197 4198 spin_lock_bh(&ar->txqs_lock); 4199 if (list_empty(&artxq->list)) 4200 list_add_tail(&artxq->list, &ar->txqs); 4201 4202 f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 4203 f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv); 4204 list_del_init(&f_artxq->list); 4205 4206 while (ath10k_mac_tx_can_push(hw, f_txq) && max--) { 4207 ret = ath10k_mac_tx_push_txq(hw, f_txq); 4208 if (ret) 4209 break; 4210 } 4211 if (ret != -ENOENT) 4212 list_add_tail(&f_artxq->list, &ar->txqs); 4213 spin_unlock_bh(&ar->txqs_lock); 4214 4215 ath10k_htt_tx_txq_update(hw, f_txq); 4216 ath10k_htt_tx_txq_update(hw, txq); 4217 } 4218 4219 /* Must not be called with conf_mutex held as workers can use that also. */ 4220 void ath10k_drain_tx(struct ath10k *ar) 4221 { 4222 /* make sure rcu-protected mac80211 tx path itself is drained */ 4223 synchronize_net(); 4224 4225 ath10k_offchan_tx_purge(ar); 4226 ath10k_mgmt_over_wmi_tx_purge(ar); 4227 4228 cancel_work_sync(&ar->offchan_tx_work); 4229 cancel_work_sync(&ar->wmi_mgmt_tx_work); 4230 } 4231 4232 void ath10k_halt(struct ath10k *ar) 4233 { 4234 struct ath10k_vif *arvif; 4235 4236 lockdep_assert_held(&ar->conf_mutex); 4237 4238 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 4239 ar->filter_flags = 0; 4240 ar->monitor = false; 4241 ar->monitor_arvif = NULL; 4242 4243 if (ar->monitor_started) 4244 ath10k_monitor_stop(ar); 4245 4246 ar->monitor_started = false; 4247 ar->tx_paused = 0; 4248 4249 ath10k_scan_finish(ar); 4250 ath10k_peer_cleanup_all(ar); 4251 ath10k_core_stop(ar); 4252 ath10k_hif_power_down(ar); 4253 4254 spin_lock_bh(&ar->data_lock); 4255 list_for_each_entry(arvif, &ar->arvifs, list) 4256 ath10k_mac_vif_beacon_cleanup(arvif); 4257 spin_unlock_bh(&ar->data_lock); 4258 } 4259 4260 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 4261 { 4262 struct ath10k *ar = hw->priv; 4263 4264 mutex_lock(&ar->conf_mutex); 4265 4266 *tx_ant = ar->cfg_tx_chainmask; 4267 *rx_ant = ar->cfg_rx_chainmask; 4268 4269 mutex_unlock(&ar->conf_mutex); 4270 4271 return 0; 4272 } 4273 4274 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) 4275 { 4276 /* It is not clear that allowing gaps in chainmask 4277 * is helpful. Probably it will not do what user 4278 * is hoping for, so warn in that case. 4279 */ 4280 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) 4281 return; 4282 4283 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n", 4284 dbg, cm); 4285 } 4286 4287 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar) 4288 { 4289 int nsts = ar->vht_cap_info; 4290 4291 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4292 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4293 4294 /* If firmware does not deliver to host number of space-time 4295 * streams supported, assume it support up to 4 BF STS and return 4296 * the value for VHT CAP: nsts-1) 4297 */ 4298 if (nsts == 0) 4299 return 3; 4300 4301 return nsts; 4302 } 4303 4304 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar) 4305 { 4306 int sound_dim = ar->vht_cap_info; 4307 4308 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4309 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4310 4311 /* If the sounding dimension is not advertised by the firmware, 4312 * let's use a default value of 1 4313 */ 4314 if (sound_dim == 0) 4315 return 1; 4316 4317 return sound_dim; 4318 } 4319 4320 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) 4321 { 4322 struct ieee80211_sta_vht_cap vht_cap = {0}; 4323 u16 mcs_map; 4324 u32 val; 4325 int i; 4326 4327 vht_cap.vht_supported = 1; 4328 vht_cap.cap = ar->vht_cap_info; 4329 4330 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4331 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 4332 val = ath10k_mac_get_vht_cap_bf_sts(ar); 4333 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4334 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4335 4336 vht_cap.cap |= val; 4337 } 4338 4339 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4340 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 4341 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4342 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4343 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4344 4345 vht_cap.cap |= val; 4346 } 4347 4348 /* Currently the firmware seems to be buggy, don't enable 80+80 4349 * mode until that's resolved. 
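* Instead, advertise plain 160 MHz support below when the firmware reports short GI for 160 MHz without the 80+80 channel width.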
4350 */ 4351 if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) && 4352 !(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)) 4353 vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; 4354 4355 mcs_map = 0; 4356 for (i = 0; i < 8; i++) { 4357 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) 4358 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4359 else 4360 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4361 } 4362 4363 if (ar->cfg_tx_chainmask <= 1) 4364 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; 4365 4366 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); 4367 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); 4368 4369 return vht_cap; 4370 } 4371 4372 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) 4373 { 4374 int i; 4375 struct ieee80211_sta_ht_cap ht_cap = {0}; 4376 4377 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) 4378 return ht_cap; 4379 4380 ht_cap.ht_supported = 1; 4381 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4382 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 4383 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 4384 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 4385 ht_cap.cap |= 4386 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; 4387 4388 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) 4389 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 4390 4391 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) 4392 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 4393 4394 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { 4395 u32 smps; 4396 4397 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 4398 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 4399 4400 ht_cap.cap |= smps; 4401 } 4402 4403 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) 4404 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 4405 4406 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { 4407 u32 stbc; 4408 4409 stbc = ar->ht_cap_info; 4410 stbc &= WMI_HT_CAP_RX_STBC; 4411 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 4412 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 4413 stbc &= IEEE80211_HT_CAP_RX_STBC; 4414 4415 ht_cap.cap |= stbc; 4416 } 4417 4418 if (ar->ht_cap_info & WMI_HT_CAP_LDPC) 4419 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 4420 4421 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) 4422 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 4423 4424 /* max AMSDU is implicitly taken from vht_cap_info */ 4425 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 4426 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 4427 4428 for (i = 0; i < ar->num_rf_chains; i++) { 4429 if (ar->cfg_rx_chainmask & BIT(i)) 4430 ht_cap.mcs.rx_mask[i] = 0xFF; 4431 } 4432 4433 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 4434 4435 return ht_cap; 4436 } 4437 4438 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) 4439 { 4440 struct ieee80211_supported_band *band; 4441 struct ieee80211_sta_vht_cap vht_cap; 4442 struct ieee80211_sta_ht_cap ht_cap; 4443 4444 ht_cap = ath10k_get_ht_cap(ar); 4445 vht_cap = ath10k_create_vht_cap(ar); 4446 4447 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 4448 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 4449 band->ht_cap = ht_cap; 4450 } 4451 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 4452 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 4453 band->ht_cap = ht_cap; 4454 band->vht_cap = vht_cap; 4455 } 4456 } 4457 4458 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) 4459 { 4460 int ret; 4461 4462 lockdep_assert_held(&ar->conf_mutex); 4463 4464 ath10k_check_chain_mask(ar, tx_ant, "tx"); 4465 ath10k_check_chain_mask(ar, rx_ant, 
"rx"); 4466 4467 ar->cfg_tx_chainmask = tx_ant; 4468 ar->cfg_rx_chainmask = rx_ant; 4469 4470 if ((ar->state != ATH10K_STATE_ON) && 4471 (ar->state != ATH10K_STATE_RESTARTED)) 4472 return 0; 4473 4474 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, 4475 tx_ant); 4476 if (ret) { 4477 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n", 4478 ret, tx_ant); 4479 return ret; 4480 } 4481 4482 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, 4483 rx_ant); 4484 if (ret) { 4485 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", 4486 ret, rx_ant); 4487 return ret; 4488 } 4489 4490 /* Reload HT/VHT capability */ 4491 ath10k_mac_setup_ht_vht_cap(ar); 4492 4493 return 0; 4494 } 4495 4496 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 4497 { 4498 struct ath10k *ar = hw->priv; 4499 int ret; 4500 4501 mutex_lock(&ar->conf_mutex); 4502 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); 4503 mutex_unlock(&ar->conf_mutex); 4504 return ret; 4505 } 4506 4507 static int ath10k_start(struct ieee80211_hw *hw) 4508 { 4509 struct ath10k *ar = hw->priv; 4510 u32 param; 4511 int ret = 0; 4512 4513 /* 4514 * This makes sense only when restarting hw. It is harmless to call 4515 * unconditionally. This is necessary to make sure no HTT/WMI tx 4516 * commands will be submitted while restarting. 4517 */ 4518 ath10k_drain_tx(ar); 4519 4520 mutex_lock(&ar->conf_mutex); 4521 4522 switch (ar->state) { 4523 case ATH10K_STATE_OFF: 4524 ar->state = ATH10K_STATE_ON; 4525 break; 4526 case ATH10K_STATE_RESTARTING: 4527 ar->state = ATH10K_STATE_RESTARTED; 4528 break; 4529 case ATH10K_STATE_ON: 4530 case ATH10K_STATE_RESTARTED: 4531 case ATH10K_STATE_WEDGED: 4532 WARN_ON(1); 4533 ret = -EINVAL; 4534 goto err; 4535 case ATH10K_STATE_UTF: 4536 ret = -EBUSY; 4537 goto err; 4538 } 4539 4540 ret = ath10k_hif_power_up(ar); 4541 if (ret) { 4542 ath10k_err(ar, "Could not init hif: %d\n", ret); 4543 goto err_off; 4544 } 4545 4546 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, 4547 &ar->normal_mode_fw); 4548 if (ret) { 4549 ath10k_err(ar, "Could not init core: %d\n", ret); 4550 goto err_power_down; 4551 } 4552 4553 param = ar->wmi.pdev_param->pmf_qos; 4554 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4555 if (ret) { 4556 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); 4557 goto err_core_stop; 4558 } 4559 4560 param = ar->wmi.pdev_param->dynamic_bw; 4561 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4562 if (ret) { 4563 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); 4564 goto err_core_stop; 4565 } 4566 4567 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { 4568 ret = ath10k_wmi_adaptive_qcs(ar, true); 4569 if (ret) { 4570 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n", 4571 ret); 4572 goto err_core_stop; 4573 } 4574 } 4575 4576 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { 4577 param = ar->wmi.pdev_param->burst_enable; 4578 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4579 if (ret) { 4580 ath10k_warn(ar, "failed to disable burst: %d\n", ret); 4581 goto err_core_stop; 4582 } 4583 } 4584 4585 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); 4586 4587 /* 4588 * By default FW set ARP frames ac to voice (6). In that case ARP 4589 * exchange is not working properly for UAPSD enabled AP. ARP requests 4590 * which arrives with access category 0 are processed by network stack 4591 * and send back with access category 0, but FW changes access category 4592 * to 6. 
Set ARP frames access category to best effort (0) solves 4593 * this problem. 4594 */ 4595 4596 param = ar->wmi.pdev_param->arp_ac_override; 4597 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4598 if (ret) { 4599 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", 4600 ret); 4601 goto err_core_stop; 4602 } 4603 4604 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, 4605 ar->running_fw->fw_file.fw_features)) { 4606 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, 4607 WMI_CCA_DETECT_LEVEL_AUTO, 4608 WMI_CCA_DETECT_MARGIN_AUTO); 4609 if (ret) { 4610 ath10k_warn(ar, "failed to enable adaptive cca: %d\n", 4611 ret); 4612 goto err_core_stop; 4613 } 4614 } 4615 4616 param = ar->wmi.pdev_param->ani_enable; 4617 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4618 if (ret) { 4619 ath10k_warn(ar, "failed to enable ani by default: %d\n", 4620 ret); 4621 goto err_core_stop; 4622 } 4623 4624 ar->ani_enabled = true; 4625 4626 if (ath10k_peer_stats_enabled(ar)) { 4627 param = ar->wmi.pdev_param->peer_stats_update_period; 4628 ret = ath10k_wmi_pdev_set_param(ar, param, 4629 PEER_DEFAULT_STATS_UPDATE_PERIOD); 4630 if (ret) { 4631 ath10k_warn(ar, 4632 "failed to set peer stats period : %d\n", 4633 ret); 4634 goto err_core_stop; 4635 } 4636 } 4637 4638 param = ar->wmi.pdev_param->enable_btcoex; 4639 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && 4640 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, 4641 ar->running_fw->fw_file.fw_features)) { 4642 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4643 if (ret) { 4644 ath10k_warn(ar, 4645 "failed to set btcoex param: %d\n", ret); 4646 goto err_core_stop; 4647 } 4648 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 4649 } 4650 4651 ar->num_started_vdevs = 0; 4652 ath10k_regd_update(ar); 4653 4654 ath10k_spectral_start(ar); 4655 ath10k_thermal_set_throttling(ar); 4656 4657 mutex_unlock(&ar->conf_mutex); 4658 return 0; 4659 4660 err_core_stop: 4661 ath10k_core_stop(ar); 4662 4663 err_power_down: 4664 ath10k_hif_power_down(ar); 4665 4666 err_off: 4667 ar->state = ATH10K_STATE_OFF; 4668 4669 err: 4670 mutex_unlock(&ar->conf_mutex); 4671 return ret; 4672 } 4673 4674 static void ath10k_stop(struct ieee80211_hw *hw) 4675 { 4676 struct ath10k *ar = hw->priv; 4677 4678 ath10k_drain_tx(ar); 4679 4680 mutex_lock(&ar->conf_mutex); 4681 if (ar->state != ATH10K_STATE_OFF) { 4682 ath10k_halt(ar); 4683 ar->state = ATH10K_STATE_OFF; 4684 } 4685 mutex_unlock(&ar->conf_mutex); 4686 4687 cancel_delayed_work_sync(&ar->scan.timeout); 4688 cancel_work_sync(&ar->restart_work); 4689 } 4690 4691 static int ath10k_config_ps(struct ath10k *ar) 4692 { 4693 struct ath10k_vif *arvif; 4694 int ret = 0; 4695 4696 lockdep_assert_held(&ar->conf_mutex); 4697 4698 list_for_each_entry(arvif, &ar->arvifs, list) { 4699 ret = ath10k_mac_vif_setup_ps(arvif); 4700 if (ret) { 4701 ath10k_warn(ar, "failed to setup powersave: %d\n", ret); 4702 break; 4703 } 4704 } 4705 4706 return ret; 4707 } 4708 4709 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) 4710 { 4711 int ret; 4712 u32 param; 4713 4714 lockdep_assert_held(&ar->conf_mutex); 4715 4716 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower); 4717 4718 param = ar->wmi.pdev_param->txpower_limit2g; 4719 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4720 if (ret) { 4721 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", 4722 txpower, ret); 4723 return ret; 4724 } 4725 4726 param = ar->wmi.pdev_param->txpower_limit5g; 4727 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4728 if (ret) { 4729 
ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", 4730 txpower, ret); 4731 return ret; 4732 } 4733 4734 return 0; 4735 } 4736 4737 static int ath10k_mac_txpower_recalc(struct ath10k *ar) 4738 { 4739 struct ath10k_vif *arvif; 4740 int ret, txpower = -1; 4741 4742 lockdep_assert_held(&ar->conf_mutex); 4743 4744 list_for_each_entry(arvif, &ar->arvifs, list) { 4745 if (arvif->txpower <= 0) 4746 continue; 4747 4748 if (txpower == -1) 4749 txpower = arvif->txpower; 4750 else 4751 txpower = min(txpower, arvif->txpower); 4752 } 4753 4754 if (txpower == -1) 4755 return 0; 4756 4757 ret = ath10k_mac_txpower_setup(ar, txpower); 4758 if (ret) { 4759 ath10k_warn(ar, "failed to setup tx power %d: %d\n", 4760 txpower, ret); 4761 return ret; 4762 } 4763 4764 return 0; 4765 } 4766 4767 static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 4768 { 4769 struct ath10k *ar = hw->priv; 4770 struct ieee80211_conf *conf = &hw->conf; 4771 int ret = 0; 4772 4773 mutex_lock(&ar->conf_mutex); 4774 4775 if (changed & IEEE80211_CONF_CHANGE_PS) 4776 ath10k_config_ps(ar); 4777 4778 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 4779 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; 4780 ret = ath10k_monitor_recalc(ar); 4781 if (ret) 4782 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 4783 } 4784 4785 mutex_unlock(&ar->conf_mutex); 4786 return ret; 4787 } 4788 4789 static u32 get_nss_from_chainmask(u16 chain_mask) 4790 { 4791 if ((chain_mask & 0xf) == 0xf) 4792 return 4; 4793 else if ((chain_mask & 0x7) == 0x7) 4794 return 3; 4795 else if ((chain_mask & 0x3) == 0x3) 4796 return 2; 4797 return 1; 4798 } 4799 4800 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) 4801 { 4802 u32 value = 0; 4803 struct ath10k *ar = arvif->ar; 4804 int nsts; 4805 int sound_dim; 4806 4807 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) 4808 return 0; 4809 4810 nsts = ath10k_mac_get_vht_cap_bf_sts(ar); 4811 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4812 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) 4813 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 4814 4815 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4816 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4817 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) 4818 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); 4819 4820 if (!value) 4821 return 0; 4822 4823 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 4824 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 4825 4826 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 4827 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | 4828 WMI_VDEV_PARAM_TXBF_SU_TX_BFER); 4829 4830 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 4831 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 4832 4833 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 4834 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | 4835 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); 4836 4837 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 4838 ar->wmi.vdev_param->txbf, value); 4839 } 4840 4841 /* 4842 * TODO: 4843 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, 4844 * because we will send mgmt frames without CCK. This requirement 4845 * for P2P_FIND/GO_NEG should be handled by checking CCK flag 4846 * in the TX packet. 
4847 */ 4848 static int ath10k_add_interface(struct ieee80211_hw *hw, 4849 struct ieee80211_vif *vif) 4850 { 4851 struct ath10k *ar = hw->priv; 4852 struct ath10k_vif *arvif = (void *)vif->drv_priv; 4853 struct ath10k_peer *peer; 4854 enum wmi_sta_powersave_param param; 4855 int ret = 0; 4856 u32 value; 4857 int bit; 4858 int i; 4859 u32 vdev_param; 4860 4861 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 4862 4863 mutex_lock(&ar->conf_mutex); 4864 4865 memset(arvif, 0, sizeof(*arvif)); 4866 ath10k_mac_txq_init(vif->txq); 4867 4868 arvif->ar = ar; 4869 arvif->vif = vif; 4870 4871 INIT_LIST_HEAD(&arvif->list); 4872 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work); 4873 INIT_DELAYED_WORK(&arvif->connection_loss_work, 4874 ath10k_mac_vif_sta_connection_loss_work); 4875 4876 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 4877 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 4878 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 4879 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 4880 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 4881 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 4882 } 4883 4884 if (ar->num_peers >= ar->max_num_peers) { 4885 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); 4886 ret = -ENOBUFS; 4887 goto err; 4888 } 4889 4890 if (ar->free_vdev_map == 0) { 4891 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); 4892 ret = -EBUSY; 4893 goto err; 4894 } 4895 bit = __ffs64(ar->free_vdev_map); 4896 4897 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", 4898 bit, ar->free_vdev_map); 4899 4900 arvif->vdev_id = bit; 4901 arvif->vdev_subtype = 4902 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); 4903 4904 switch (vif->type) { 4905 case NL80211_IFTYPE_P2P_DEVICE: 4906 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4907 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4908 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); 4909 break; 4910 case NL80211_IFTYPE_UNSPECIFIED: 4911 case NL80211_IFTYPE_STATION: 4912 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4913 if (vif->p2p) 4914 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4915 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); 4916 break; 4917 case NL80211_IFTYPE_ADHOC: 4918 arvif->vdev_type = WMI_VDEV_TYPE_IBSS; 4919 break; 4920 case NL80211_IFTYPE_MESH_POINT: 4921 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { 4922 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4923 (ar, WMI_VDEV_SUBTYPE_MESH_11S); 4924 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 4925 ret = -EINVAL; 4926 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); 4927 goto err; 4928 } 4929 arvif->vdev_type = WMI_VDEV_TYPE_AP; 4930 break; 4931 case NL80211_IFTYPE_AP: 4932 arvif->vdev_type = WMI_VDEV_TYPE_AP; 4933 4934 if (vif->p2p) 4935 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4936 (ar, WMI_VDEV_SUBTYPE_P2P_GO); 4937 break; 4938 case NL80211_IFTYPE_MONITOR: 4939 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 4940 break; 4941 default: 4942 WARN_ON(1); 4943 break; 4944 } 4945 4946 /* Using vdev_id as queue number will make it very easy to do per-vif 4947 * tx queue locking. This shouldn't wrap due to interface combinations 4948 * but do a modulo for correctness sake and prevent using offchannel tx 4949 * queues for regular vif tx. 
4950 */ 4951 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 4952 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 4953 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 4954 4955 /* Some firmware revisions don't wait for beacon tx completion before 4956 * sending another SWBA event. This could lead to hardware using old 4957 * (freed) beacon data in some cases, e.g. tx credit starvation 4958 * combined with missed TBTT. This is very very rare. 4959 * 4960 * On non-IOMMU-enabled hosts this could be a possible security issue 4961 * because hw could beacon some random data on the air. On 4962 * IOMMU-enabled hosts DMAR faults would occur in most cases and target 4963 * device would crash. 4964 * 4965 * Since there are no beacon tx completions (implicit nor explicit) 4966 * propagated to host the only workaround for this is to allocate a 4967 * DMA-coherent buffer for a lifetime of a vif and use it for all 4968 * beacon tx commands. Worst case for this approach is some beacons may 4969 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. 4970 */ 4971 if (vif->type == NL80211_IFTYPE_ADHOC || 4972 vif->type == NL80211_IFTYPE_MESH_POINT || 4973 vif->type == NL80211_IFTYPE_AP) { 4974 arvif->beacon_buf = dma_zalloc_coherent(ar->dev, 4975 IEEE80211_MAX_FRAME_LEN, 4976 &arvif->beacon_paddr, 4977 GFP_ATOMIC); 4978 if (!arvif->beacon_buf) { 4979 ret = -ENOMEM; 4980 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 4981 ret); 4982 goto err; 4983 } 4984 } 4985 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)) 4986 arvif->nohwcrypt = true; 4987 4988 if (arvif->nohwcrypt && 4989 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 4990 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); 4991 goto err; 4992 } 4993 4994 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", 4995 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, 4996 arvif->beacon_buf ? "single-buf" : "per-skb"); 4997 4998 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 4999 arvif->vdev_subtype, vif->addr); 5000 if (ret) { 5001 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", 5002 arvif->vdev_id, ret); 5003 goto err; 5004 } 5005 5006 ar->free_vdev_map &= ~(1LL << arvif->vdev_id); 5007 spin_lock_bh(&ar->data_lock); 5008 list_add(&arvif->list, &ar->arvifs); 5009 spin_unlock_bh(&ar->data_lock); 5010 5011 /* It makes no sense to have firmware do keepalives. mac80211 already 5012 * takes care of this with idle connection polling. 5013 */ 5014 ret = ath10k_mac_vif_disable_keepalive(arvif); 5015 if (ret) { 5016 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", 5017 arvif->vdev_id, ret); 5018 goto err_vdev_delete; 5019 } 5020 5021 arvif->def_wep_key_idx = -1; 5022 5023 vdev_param = ar->wmi.vdev_param->tx_encap_type; 5024 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5025 ATH10K_HW_TXRX_NATIVE_WIFI); 5026 /* 10.X firmware does not support this VDEV parameter. Do not warn */ 5027 if (ret && ret != -EOPNOTSUPP) { 5028 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", 5029 arvif->vdev_id, ret); 5030 goto err_vdev_delete; 5031 } 5032 5033 /* Configuring number of spatial stream for monitor interface is causing 5034 * target assert in qca9888 and qca6174. 
5035 */ 5036 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { 5037 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); 5038 5039 vdev_param = ar->wmi.vdev_param->nss; 5040 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5041 nss); 5042 if (ret) { 5043 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", 5044 arvif->vdev_id, ar->cfg_tx_chainmask, nss, 5045 ret); 5046 goto err_vdev_delete; 5047 } 5048 } 5049 5050 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5051 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5052 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, 5053 vif->addr, WMI_PEER_TYPE_DEFAULT); 5054 if (ret) { 5055 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", 5056 arvif->vdev_id, ret); 5057 goto err_vdev_delete; 5058 } 5059 5060 spin_lock_bh(&ar->data_lock); 5061 5062 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); 5063 if (!peer) { 5064 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 5065 vif->addr, arvif->vdev_id); 5066 spin_unlock_bh(&ar->data_lock); 5067 ret = -ENOENT; 5068 goto err_peer_delete; 5069 } 5070 5071 arvif->peer_id = find_first_bit(peer->peer_ids, 5072 ATH10K_MAX_NUM_PEER_IDS); 5073 5074 spin_unlock_bh(&ar->data_lock); 5075 } else { 5076 arvif->peer_id = HTT_INVALID_PEERID; 5077 } 5078 5079 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 5080 ret = ath10k_mac_set_kickout(arvif); 5081 if (ret) { 5082 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", 5083 arvif->vdev_id, ret); 5084 goto err_peer_delete; 5085 } 5086 } 5087 5088 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 5089 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 5090 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 5091 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5092 param, value); 5093 if (ret) { 5094 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", 5095 arvif->vdev_id, ret); 5096 goto err_peer_delete; 5097 } 5098 5099 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 5100 if (ret) { 5101 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 5102 arvif->vdev_id, ret); 5103 goto err_peer_delete; 5104 } 5105 5106 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 5107 if (ret) { 5108 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 5109 arvif->vdev_id, ret); 5110 goto err_peer_delete; 5111 } 5112 } 5113 5114 ret = ath10k_mac_set_txbf_conf(arvif); 5115 if (ret) { 5116 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", 5117 arvif->vdev_id, ret); 5118 goto err_peer_delete; 5119 } 5120 5121 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 5122 if (ret) { 5123 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 5124 arvif->vdev_id, ret); 5125 goto err_peer_delete; 5126 } 5127 5128 arvif->txpower = vif->bss_conf.txpower; 5129 ret = ath10k_mac_txpower_recalc(ar); 5130 if (ret) { 5131 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5132 goto err_peer_delete; 5133 } 5134 5135 if (vif->type == NL80211_IFTYPE_MONITOR) { 5136 ar->monitor_arvif = arvif; 5137 ret = ath10k_monitor_recalc(ar); 5138 if (ret) { 5139 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5140 goto err_peer_delete; 5141 } 5142 } 5143 5144 spin_lock_bh(&ar->htt.tx_lock); 5145 if (!ar->tx_paused) 5146 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 5147 spin_unlock_bh(&ar->htt.tx_lock); 5148 5149 mutex_unlock(&ar->conf_mutex); 5150 return 0; 5151 5152 err_peer_delete: 5153 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5154 arvif->vdev_type == 
WMI_VDEV_TYPE_IBSS) 5155 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); 5156 5157 err_vdev_delete: 5158 ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5159 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5160 spin_lock_bh(&ar->data_lock); 5161 list_del(&arvif->list); 5162 spin_unlock_bh(&ar->data_lock); 5163 5164 err: 5165 if (arvif->beacon_buf) { 5166 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 5167 arvif->beacon_buf, arvif->beacon_paddr); 5168 arvif->beacon_buf = NULL; 5169 } 5170 5171 mutex_unlock(&ar->conf_mutex); 5172 5173 return ret; 5174 } 5175 5176 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif) 5177 { 5178 int i; 5179 5180 for (i = 0; i < BITS_PER_LONG; i++) 5181 ath10k_mac_vif_tx_unlock(arvif, i); 5182 } 5183 5184 static void ath10k_remove_interface(struct ieee80211_hw *hw, 5185 struct ieee80211_vif *vif) 5186 { 5187 struct ath10k *ar = hw->priv; 5188 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5189 struct ath10k_peer *peer; 5190 int ret; 5191 int i; 5192 5193 cancel_work_sync(&arvif->ap_csa_work); 5194 cancel_delayed_work_sync(&arvif->connection_loss_work); 5195 5196 mutex_lock(&ar->conf_mutex); 5197 5198 spin_lock_bh(&ar->data_lock); 5199 ath10k_mac_vif_beacon_cleanup(arvif); 5200 spin_unlock_bh(&ar->data_lock); 5201 5202 ret = ath10k_spectral_vif_stop(arvif); 5203 if (ret) 5204 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", 5205 arvif->vdev_id, ret); 5206 5207 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5208 spin_lock_bh(&ar->data_lock); 5209 list_del(&arvif->list); 5210 spin_unlock_bh(&ar->data_lock); 5211 5212 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5213 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5214 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, 5215 vif->addr); 5216 if (ret) 5217 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", 5218 arvif->vdev_id, ret); 5219 5220 kfree(arvif->u.ap.noa_data); 5221 } 5222 5223 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", 5224 arvif->vdev_id); 5225 5226 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5227 if (ret) 5228 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", 5229 arvif->vdev_id, ret); 5230 5231 /* Some firmware revisions don't notify host about self-peer removal 5232 * until after associated vdev is deleted. 
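* Wait for the removal explicitly so the peer accounting below stays consistent.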
5233 */ 5234 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5235 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5236 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, 5237 vif->addr); 5238 if (ret) 5239 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", 5240 arvif->vdev_id, ret); 5241 5242 spin_lock_bh(&ar->data_lock); 5243 ar->num_peers--; 5244 spin_unlock_bh(&ar->data_lock); 5245 } 5246 5247 spin_lock_bh(&ar->data_lock); 5248 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 5249 peer = ar->peer_map[i]; 5250 if (!peer) 5251 continue; 5252 5253 if (peer->vif == vif) { 5254 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", 5255 vif->addr, arvif->vdev_id); 5256 peer->vif = NULL; 5257 } 5258 } 5259 spin_unlock_bh(&ar->data_lock); 5260 5261 ath10k_peer_cleanup(ar, arvif->vdev_id); 5262 ath10k_mac_txq_unref(ar, vif->txq); 5263 5264 if (vif->type == NL80211_IFTYPE_MONITOR) { 5265 ar->monitor_arvif = NULL; 5266 ret = ath10k_monitor_recalc(ar); 5267 if (ret) 5268 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5269 } 5270 5271 ret = ath10k_mac_txpower_recalc(ar); 5272 if (ret) 5273 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5274 5275 spin_lock_bh(&ar->htt.tx_lock); 5276 ath10k_mac_vif_tx_unlock_all(arvif); 5277 spin_unlock_bh(&ar->htt.tx_lock); 5278 5279 ath10k_mac_txq_unref(ar, vif->txq); 5280 5281 mutex_unlock(&ar->conf_mutex); 5282 } 5283 5284 /* 5285 * FIXME: Has to be verified. 5286 */ 5287 #define SUPPORTED_FILTERS \ 5288 (FIF_ALLMULTI | \ 5289 FIF_CONTROL | \ 5290 FIF_PSPOLL | \ 5291 FIF_OTHER_BSS | \ 5292 FIF_BCN_PRBRESP_PROMISC | \ 5293 FIF_PROBE_REQ | \ 5294 FIF_FCSFAIL) 5295 5296 static void ath10k_configure_filter(struct ieee80211_hw *hw, 5297 unsigned int changed_flags, 5298 unsigned int *total_flags, 5299 u64 multicast) 5300 { 5301 struct ath10k *ar = hw->priv; 5302 int ret; 5303 5304 mutex_lock(&ar->conf_mutex); 5305 5306 changed_flags &= SUPPORTED_FILTERS; 5307 *total_flags &= SUPPORTED_FILTERS; 5308 ar->filter_flags = *total_flags; 5309 5310 ret = ath10k_monitor_recalc(ar); 5311 if (ret) 5312 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5313 5314 mutex_unlock(&ar->conf_mutex); 5315 } 5316 5317 static void ath10k_bss_info_changed(struct ieee80211_hw *hw, 5318 struct ieee80211_vif *vif, 5319 struct ieee80211_bss_conf *info, 5320 u32 changed) 5321 { 5322 struct ath10k *ar = hw->priv; 5323 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5324 int ret = 0; 5325 u32 vdev_param, pdev_param, slottime, preamble; 5326 5327 mutex_lock(&ar->conf_mutex); 5328 5329 if (changed & BSS_CHANGED_IBSS) 5330 ath10k_control_ibss(arvif, info, vif->addr); 5331 5332 if (changed & BSS_CHANGED_BEACON_INT) { 5333 arvif->beacon_interval = info->beacon_int; 5334 vdev_param = ar->wmi.vdev_param->beacon_interval; 5335 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5336 arvif->beacon_interval); 5337 ath10k_dbg(ar, ATH10K_DBG_MAC, 5338 "mac vdev %d beacon_interval %d\n", 5339 arvif->vdev_id, arvif->beacon_interval); 5340 5341 if (ret) 5342 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", 5343 arvif->vdev_id, ret); 5344 } 5345 5346 if (changed & BSS_CHANGED_BEACON) { 5347 ath10k_dbg(ar, ATH10K_DBG_MAC, 5348 "vdev %d set beacon tx mode to staggered\n", 5349 arvif->vdev_id); 5350 5351 pdev_param = ar->wmi.pdev_param->beacon_tx_mode; 5352 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 5353 WMI_BEACON_STAGGERED_MODE); 5354 if (ret) 5355 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", 
5356 arvif->vdev_id, ret); 5357 5358 ret = ath10k_mac_setup_bcn_tmpl(arvif); 5359 if (ret) 5360 ath10k_warn(ar, "failed to update beacon template: %d\n", 5361 ret); 5362 5363 if (ieee80211_vif_is_mesh(vif)) { 5364 /* mesh doesn't use SSID but firmware needs it */ 5365 strncpy(arvif->u.ap.ssid, "mesh", 5366 sizeof(arvif->u.ap.ssid)); 5367 arvif->u.ap.ssid_len = 4; 5368 } 5369 } 5370 5371 if (changed & BSS_CHANGED_AP_PROBE_RESP) { 5372 ret = ath10k_mac_setup_prb_tmpl(arvif); 5373 if (ret) 5374 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", 5375 arvif->vdev_id, ret); 5376 } 5377 5378 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { 5379 arvif->dtim_period = info->dtim_period; 5380 5381 ath10k_dbg(ar, ATH10K_DBG_MAC, 5382 "mac vdev %d dtim_period %d\n", 5383 arvif->vdev_id, arvif->dtim_period); 5384 5385 vdev_param = ar->wmi.vdev_param->dtim_period; 5386 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5387 arvif->dtim_period); 5388 if (ret) 5389 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", 5390 arvif->vdev_id, ret); 5391 } 5392 5393 if (changed & BSS_CHANGED_SSID && 5394 vif->type == NL80211_IFTYPE_AP) { 5395 arvif->u.ap.ssid_len = info->ssid_len; 5396 if (info->ssid_len) 5397 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); 5398 arvif->u.ap.hidden_ssid = info->hidden_ssid; 5399 } 5400 5401 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) 5402 ether_addr_copy(arvif->bssid, info->bssid); 5403 5404 if (changed & BSS_CHANGED_BEACON_ENABLED) 5405 ath10k_control_beaconing(arvif, info); 5406 5407 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 5408 arvif->use_cts_prot = info->use_cts_prot; 5409 5410 ret = ath10k_recalc_rtscts_prot(arvif); 5411 if (ret) 5412 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 5413 arvif->vdev_id, ret); 5414 5415 if (ath10k_mac_can_set_cts_prot(arvif)) { 5416 ret = ath10k_mac_set_cts_prot(arvif); 5417 if (ret) 5418 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 5419 arvif->vdev_id, ret); 5420 } 5421 } 5422 5423 if (changed & BSS_CHANGED_ERP_SLOT) { 5424 if (info->use_short_slot) 5425 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ 5426 5427 else 5428 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ 5429 5430 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", 5431 arvif->vdev_id, slottime); 5432 5433 vdev_param = ar->wmi.vdev_param->slot_time; 5434 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5435 slottime); 5436 if (ret) 5437 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", 5438 arvif->vdev_id, ret); 5439 } 5440 5441 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 5442 if (info->use_short_preamble) 5443 preamble = WMI_VDEV_PREAMBLE_SHORT; 5444 else 5445 preamble = WMI_VDEV_PREAMBLE_LONG; 5446 5447 ath10k_dbg(ar, ATH10K_DBG_MAC, 5448 "mac vdev %d preamble %dn", 5449 arvif->vdev_id, preamble); 5450 5451 vdev_param = ar->wmi.vdev_param->preamble; 5452 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5453 preamble); 5454 if (ret) 5455 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", 5456 arvif->vdev_id, ret); 5457 } 5458 5459 if (changed & BSS_CHANGED_ASSOC) { 5460 if (info->assoc) { 5461 /* Workaround: Make sure monitor vdev is not running 5462 * when associating to prevent some firmware revisions 5463 * (e.g. 10.1 and 10.2) from crashing. 
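* The monitor vdev is brought back via ath10k_monitor_recalc() once the association has been set up.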
5464 */ 5465 if (ar->monitor_started) 5466 ath10k_monitor_stop(ar); 5467 ath10k_bss_assoc(hw, vif, info); 5468 ath10k_monitor_recalc(ar); 5469 } else { 5470 ath10k_bss_disassoc(hw, vif); 5471 } 5472 } 5473 5474 if (changed & BSS_CHANGED_TXPOWER) { 5475 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", 5476 arvif->vdev_id, info->txpower); 5477 5478 arvif->txpower = info->txpower; 5479 ret = ath10k_mac_txpower_recalc(ar); 5480 if (ret) 5481 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5482 } 5483 5484 if (changed & BSS_CHANGED_PS) { 5485 arvif->ps = vif->bss_conf.ps; 5486 5487 ret = ath10k_config_ps(ar); 5488 if (ret) 5489 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", 5490 arvif->vdev_id, ret); 5491 } 5492 5493 mutex_unlock(&ar->conf_mutex); 5494 } 5495 5496 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value) 5497 { 5498 struct ath10k *ar = hw->priv; 5499 5500 /* This function should never be called if setting the coverage class 5501 * is not supported on this hardware. 5502 */ 5503 if (!ar->hw_params.hw_ops->set_coverage_class) { 5504 WARN_ON_ONCE(1); 5505 return; 5506 } 5507 ar->hw_params.hw_ops->set_coverage_class(ar, value); 5508 } 5509 5510 static int ath10k_hw_scan(struct ieee80211_hw *hw, 5511 struct ieee80211_vif *vif, 5512 struct ieee80211_scan_request *hw_req) 5513 { 5514 struct ath10k *ar = hw->priv; 5515 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5516 struct cfg80211_scan_request *req = &hw_req->req; 5517 struct wmi_start_scan_arg arg; 5518 int ret = 0; 5519 int i; 5520 5521 mutex_lock(&ar->conf_mutex); 5522 5523 spin_lock_bh(&ar->data_lock); 5524 switch (ar->scan.state) { 5525 case ATH10K_SCAN_IDLE: 5526 reinit_completion(&ar->scan.started); 5527 reinit_completion(&ar->scan.completed); 5528 ar->scan.state = ATH10K_SCAN_STARTING; 5529 ar->scan.is_roc = false; 5530 ar->scan.vdev_id = arvif->vdev_id; 5531 ret = 0; 5532 break; 5533 case ATH10K_SCAN_STARTING: 5534 case ATH10K_SCAN_RUNNING: 5535 case ATH10K_SCAN_ABORTING: 5536 ret = -EBUSY; 5537 break; 5538 } 5539 spin_unlock_bh(&ar->data_lock); 5540 5541 if (ret) 5542 goto exit; 5543 5544 memset(&arg, 0, sizeof(arg)); 5545 ath10k_wmi_start_scan_init(ar, &arg); 5546 arg.vdev_id = arvif->vdev_id; 5547 arg.scan_id = ATH10K_SCAN_ID; 5548 5549 if (req->ie_len) { 5550 arg.ie_len = req->ie_len; 5551 memcpy(arg.ie, req->ie, arg.ie_len); 5552 } 5553 5554 if (req->n_ssids) { 5555 arg.n_ssids = req->n_ssids; 5556 for (i = 0; i < arg.n_ssids; i++) { 5557 arg.ssids[i].len = req->ssids[i].ssid_len; 5558 arg.ssids[i].ssid = req->ssids[i].ssid; 5559 } 5560 } else { 5561 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 5562 } 5563 5564 if (req->n_channels) { 5565 arg.n_channels = req->n_channels; 5566 for (i = 0; i < arg.n_channels; i++) 5567 arg.channels[i] = req->channels[i]->center_freq; 5568 } 5569 5570 ret = ath10k_start_scan(ar, &arg); 5571 if (ret) { 5572 ath10k_warn(ar, "failed to start hw scan: %d\n", ret); 5573 spin_lock_bh(&ar->data_lock); 5574 ar->scan.state = ATH10K_SCAN_IDLE; 5575 spin_unlock_bh(&ar->data_lock); 5576 } 5577 5578 /* Add a 200ms margin to account for event/command processing */ 5579 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 5580 msecs_to_jiffies(arg.max_scan_time + 5581 200)); 5582 5583 exit: 5584 mutex_unlock(&ar->conf_mutex); 5585 return ret; 5586 } 5587 5588 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, 5589 struct ieee80211_vif *vif) 5590 { 5591 struct ath10k *ar = hw->priv; 5592 5593 mutex_lock(&ar->conf_mutex); 5594 
ath10k_scan_abort(ar); 5595 mutex_unlock(&ar->conf_mutex); 5596 5597 cancel_delayed_work_sync(&ar->scan.timeout); 5598 } 5599 5600 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, 5601 struct ath10k_vif *arvif, 5602 enum set_key_cmd cmd, 5603 struct ieee80211_key_conf *key) 5604 { 5605 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; 5606 int ret; 5607 5608 /* 10.1 firmware branch requires default key index to be set to group 5609 * key index after installing it. Otherwise FW/HW Txes corrupted 5610 * frames with multi-vif APs. This is not required for main firmware 5611 * branch (e.g. 636). 5612 * 5613 * This is also needed for 636 fw for IBSS-RSN to work more reliably. 5614 * 5615 * FIXME: It remains unknown if this is required for multi-vif STA 5616 * interfaces on 10.1. 5617 */ 5618 5619 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 5620 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 5621 return; 5622 5623 if (key->cipher == WLAN_CIPHER_SUITE_WEP40) 5624 return; 5625 5626 if (key->cipher == WLAN_CIPHER_SUITE_WEP104) 5627 return; 5628 5629 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5630 return; 5631 5632 if (cmd != SET_KEY) 5633 return; 5634 5635 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5636 key->keyidx); 5637 if (ret) 5638 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", 5639 arvif->vdev_id, ret); 5640 } 5641 5642 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 5643 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 5644 struct ieee80211_key_conf *key) 5645 { 5646 struct ath10k *ar = hw->priv; 5647 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5648 struct ath10k_peer *peer; 5649 const u8 *peer_addr; 5650 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || 5651 key->cipher == WLAN_CIPHER_SUITE_WEP104; 5652 int ret = 0; 5653 int ret2; 5654 u32 flags = 0; 5655 u32 flags2; 5656 5657 /* this one needs to be done in software */ 5658 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) 5659 return 1; 5660 5661 if (arvif->nohwcrypt) 5662 return 1; 5663 5664 if (key->keyidx > WMI_MAX_KEY_INDEX) 5665 return -ENOSPC; 5666 5667 mutex_lock(&ar->conf_mutex); 5668 5669 if (sta) 5670 peer_addr = sta->addr; 5671 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 5672 peer_addr = vif->bss_conf.bssid; 5673 else 5674 peer_addr = vif->addr; 5675 5676 key->hw_key_idx = key->keyidx; 5677 5678 if (is_wep) { 5679 if (cmd == SET_KEY) 5680 arvif->wep_keys[key->keyidx] = key; 5681 else 5682 arvif->wep_keys[key->keyidx] = NULL; 5683 } 5684 5685 /* the peer should not disappear in mid-way (unless FW goes awry) since 5686 * we already hold conf_mutex. we just make sure its there now. */ 5687 spin_lock_bh(&ar->data_lock); 5688 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5689 spin_unlock_bh(&ar->data_lock); 5690 5691 if (!peer) { 5692 if (cmd == SET_KEY) { 5693 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", 5694 peer_addr); 5695 ret = -EOPNOTSUPP; 5696 goto exit; 5697 } else { 5698 /* if the peer doesn't exist there is no key to disable 5699 * anymore */ 5700 goto exit; 5701 } 5702 } 5703 5704 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5705 flags |= WMI_KEY_PAIRWISE; 5706 else 5707 flags |= WMI_KEY_GROUP; 5708 5709 if (is_wep) { 5710 if (cmd == DISABLE_KEY) 5711 ath10k_clear_vdev_key(arvif, key); 5712 5713 /* When WEP keys are uploaded it's possible that there are 5714 * stations associated already (e.g. when merging) without any 5715 * keys. Static WEP needs an explicit per-peer key upload. 
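* For IBSS this is handled here so peers that joined before the key was set (e.g. when merging) receive it as well.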
5716 */ 5717 if (vif->type == NL80211_IFTYPE_ADHOC && 5718 cmd == SET_KEY) 5719 ath10k_mac_vif_update_wep_key(arvif, key); 5720 5721 /* 802.1x never sets the def_wep_key_idx so each set_key() 5722 * call changes default tx key. 5723 * 5724 * Static WEP sets def_wep_key_idx via .set_default_unicast_key 5725 * after first set_key(). 5726 */ 5727 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1) 5728 flags |= WMI_KEY_TX_USAGE; 5729 } 5730 5731 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); 5732 if (ret) { 5733 WARN_ON(ret > 0); 5734 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", 5735 arvif->vdev_id, peer_addr, ret); 5736 goto exit; 5737 } 5738 5739 /* mac80211 sets static WEP keys as groupwise while firmware requires 5740 * them to be installed twice as both pairwise and groupwise. 5741 */ 5742 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) { 5743 flags2 = flags; 5744 flags2 &= ~WMI_KEY_GROUP; 5745 flags2 |= WMI_KEY_PAIRWISE; 5746 5747 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); 5748 if (ret) { 5749 WARN_ON(ret > 0); 5750 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", 5751 arvif->vdev_id, peer_addr, ret); 5752 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, 5753 peer_addr, flags); 5754 if (ret2) { 5755 WARN_ON(ret2 > 0); 5756 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", 5757 arvif->vdev_id, peer_addr, ret2); 5758 } 5759 goto exit; 5760 } 5761 } 5762 5763 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); 5764 5765 spin_lock_bh(&ar->data_lock); 5766 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5767 if (peer && cmd == SET_KEY) 5768 peer->keys[key->keyidx] = key; 5769 else if (peer && cmd == DISABLE_KEY) 5770 peer->keys[key->keyidx] = NULL; 5771 else if (peer == NULL) 5772 /* impossible unless FW goes crazy */ 5773 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); 5774 spin_unlock_bh(&ar->data_lock); 5775 5776 exit: 5777 mutex_unlock(&ar->conf_mutex); 5778 return ret; 5779 } 5780 5781 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, 5782 struct ieee80211_vif *vif, 5783 int keyidx) 5784 { 5785 struct ath10k *ar = hw->priv; 5786 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5787 int ret; 5788 5789 mutex_lock(&arvif->ar->conf_mutex); 5790 5791 if (arvif->ar->state != ATH10K_STATE_ON) 5792 goto unlock; 5793 5794 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 5795 arvif->vdev_id, keyidx); 5796 5797 ret = ath10k_wmi_vdev_set_param(arvif->ar, 5798 arvif->vdev_id, 5799 arvif->ar->wmi.vdev_param->def_keyid, 5800 keyidx); 5801 5802 if (ret) { 5803 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", 5804 arvif->vdev_id, 5805 ret); 5806 goto unlock; 5807 } 5808 5809 arvif->def_wep_key_idx = keyidx; 5810 5811 unlock: 5812 mutex_unlock(&arvif->ar->conf_mutex); 5813 } 5814 5815 static void ath10k_sta_rc_update_wk(struct work_struct *wk) 5816 { 5817 struct ath10k *ar; 5818 struct ath10k_vif *arvif; 5819 struct ath10k_sta *arsta; 5820 struct ieee80211_sta *sta; 5821 struct cfg80211_chan_def def; 5822 enum nl80211_band band; 5823 const u8 *ht_mcs_mask; 5824 const u16 *vht_mcs_mask; 5825 u32 changed, bw, nss, smps; 5826 int err; 5827 5828 arsta = container_of(wk, struct ath10k_sta, update_wk); 5829 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 5830 arvif = arsta->arvif; 5831 ar = arvif->ar; 5832 5833 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 5834 return; 5835 5836 band = def.chan->band; 
5837 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 5838 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 5839 5840 spin_lock_bh(&ar->data_lock); 5841 5842 changed = arsta->changed; 5843 arsta->changed = 0; 5844 5845 bw = arsta->bw; 5846 nss = arsta->nss; 5847 smps = arsta->smps; 5848 5849 spin_unlock_bh(&ar->data_lock); 5850 5851 mutex_lock(&ar->conf_mutex); 5852 5853 nss = max_t(u32, 1, nss); 5854 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask), 5855 ath10k_mac_max_vht_nss(vht_mcs_mask))); 5856 5857 if (changed & IEEE80211_RC_BW_CHANGED) { 5858 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", 5859 sta->addr, bw); 5860 5861 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 5862 WMI_PEER_CHAN_WIDTH, bw); 5863 if (err) 5864 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", 5865 sta->addr, bw, err); 5866 } 5867 5868 if (changed & IEEE80211_RC_NSS_CHANGED) { 5869 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", 5870 sta->addr, nss); 5871 5872 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 5873 WMI_PEER_NSS, nss); 5874 if (err) 5875 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", 5876 sta->addr, nss, err); 5877 } 5878 5879 if (changed & IEEE80211_RC_SMPS_CHANGED) { 5880 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", 5881 sta->addr, smps); 5882 5883 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 5884 WMI_PEER_SMPS_STATE, smps); 5885 if (err) 5886 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", 5887 sta->addr, smps, err); 5888 } 5889 5890 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED || 5891 changed & IEEE80211_RC_NSS_CHANGED) { 5892 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n", 5893 sta->addr); 5894 5895 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 5896 if (err) 5897 ath10k_warn(ar, "failed to reassociate station: %pM\n", 5898 sta->addr); 5899 } 5900 5901 mutex_unlock(&ar->conf_mutex); 5902 } 5903 5904 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif, 5905 struct ieee80211_sta *sta) 5906 { 5907 struct ath10k *ar = arvif->ar; 5908 5909 lockdep_assert_held(&ar->conf_mutex); 5910 5911 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 5912 return 0; 5913 5914 if (ar->num_stations >= ar->max_num_stations) 5915 return -ENOBUFS; 5916 5917 ar->num_stations++; 5918 5919 return 0; 5920 } 5921 5922 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif, 5923 struct ieee80211_sta *sta) 5924 { 5925 struct ath10k *ar = arvif->ar; 5926 5927 lockdep_assert_held(&ar->conf_mutex); 5928 5929 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 5930 return; 5931 5932 ar->num_stations--; 5933 } 5934 5935 struct ath10k_mac_tdls_iter_data { 5936 u32 num_tdls_stations; 5937 struct ieee80211_vif *curr_vif; 5938 }; 5939 5940 static void ath10k_mac_tdls_vif_stations_count_iter(void *data, 5941 struct ieee80211_sta *sta) 5942 { 5943 struct ath10k_mac_tdls_iter_data *iter_data = data; 5944 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5945 struct ieee80211_vif *sta_vif = arsta->arvif->vif; 5946 5947 if (sta->tdls && sta_vif == iter_data->curr_vif) 5948 iter_data->num_tdls_stations++; 5949 } 5950 5951 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, 5952 struct ieee80211_vif *vif) 5953 { 5954 struct ath10k_mac_tdls_iter_data data = {}; 5955 5956 data.curr_vif = vif; 5957 5958 ieee80211_iterate_stations_atomic(hw, 5959 ath10k_mac_tdls_vif_stations_count_iter, 5960 &data); 
5961 return data.num_tdls_stations; 5962 } 5963 5964 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac, 5965 struct ieee80211_vif *vif) 5966 { 5967 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5968 int *num_tdls_vifs = data; 5969 5970 if (vif->type != NL80211_IFTYPE_STATION) 5971 return; 5972 5973 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0) 5974 (*num_tdls_vifs)++; 5975 } 5976 5977 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw) 5978 { 5979 int num_tdls_vifs = 0; 5980 5981 ieee80211_iterate_active_interfaces_atomic(hw, 5982 IEEE80211_IFACE_ITER_NORMAL, 5983 ath10k_mac_tdls_vifs_count_iter, 5984 &num_tdls_vifs); 5985 return num_tdls_vifs; 5986 } 5987 5988 static int ath10k_sta_state(struct ieee80211_hw *hw, 5989 struct ieee80211_vif *vif, 5990 struct ieee80211_sta *sta, 5991 enum ieee80211_sta_state old_state, 5992 enum ieee80211_sta_state new_state) 5993 { 5994 struct ath10k *ar = hw->priv; 5995 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5996 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5997 struct ath10k_peer *peer; 5998 int ret = 0; 5999 int i; 6000 6001 if (old_state == IEEE80211_STA_NOTEXIST && 6002 new_state == IEEE80211_STA_NONE) { 6003 memset(arsta, 0, sizeof(*arsta)); 6004 arsta->arvif = arvif; 6005 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); 6006 6007 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6008 ath10k_mac_txq_init(sta->txq[i]); 6009 } 6010 6011 /* cancel must be done outside the mutex to avoid deadlock */ 6012 if ((old_state == IEEE80211_STA_NONE && 6013 new_state == IEEE80211_STA_NOTEXIST)) 6014 cancel_work_sync(&arsta->update_wk); 6015 6016 mutex_lock(&ar->conf_mutex); 6017 6018 if (old_state == IEEE80211_STA_NOTEXIST && 6019 new_state == IEEE80211_STA_NONE) { 6020 /* 6021 * New station addition. 
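* Create a firmware peer for the station; for TDLS peers also enable TDLS on the vdev if this is its first TDLS station.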
6022 */ 6023 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT; 6024 u32 num_tdls_stations; 6025 u32 num_tdls_vifs; 6026 6027 ath10k_dbg(ar, ATH10K_DBG_MAC, 6028 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", 6029 arvif->vdev_id, sta->addr, 6030 ar->num_stations + 1, ar->max_num_stations, 6031 ar->num_peers + 1, ar->max_num_peers); 6032 6033 ret = ath10k_mac_inc_num_stations(arvif, sta); 6034 if (ret) { 6035 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", 6036 ar->max_num_stations); 6037 goto exit; 6038 } 6039 6040 if (sta->tdls) 6041 peer_type = WMI_PEER_TYPE_TDLS; 6042 6043 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, 6044 sta->addr, peer_type); 6045 if (ret) { 6046 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", 6047 sta->addr, arvif->vdev_id, ret); 6048 ath10k_mac_dec_num_stations(arvif, sta); 6049 goto exit; 6050 } 6051 6052 spin_lock_bh(&ar->data_lock); 6053 6054 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 6055 if (!peer) { 6056 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 6057 vif->addr, arvif->vdev_id); 6058 spin_unlock_bh(&ar->data_lock); 6059 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6060 ath10k_mac_dec_num_stations(arvif, sta); 6061 ret = -ENOENT; 6062 goto exit; 6063 } 6064 6065 arsta->peer_id = find_first_bit(peer->peer_ids, 6066 ATH10K_MAX_NUM_PEER_IDS); 6067 6068 spin_unlock_bh(&ar->data_lock); 6069 6070 if (!sta->tdls) 6071 goto exit; 6072 6073 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); 6074 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw); 6075 6076 if (num_tdls_vifs >= ar->max_num_tdls_vdevs && 6077 num_tdls_stations == 0) { 6078 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n", 6079 arvif->vdev_id, ar->max_num_tdls_vdevs); 6080 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6081 ath10k_mac_dec_num_stations(arvif, sta); 6082 ret = -ENOBUFS; 6083 goto exit; 6084 } 6085 6086 if (num_tdls_stations == 0) { 6087 /* This is the first tdls peer in current vif */ 6088 enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE; 6089 6090 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6091 state); 6092 if (ret) { 6093 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6094 arvif->vdev_id, ret); 6095 ath10k_peer_delete(ar, arvif->vdev_id, 6096 sta->addr); 6097 ath10k_mac_dec_num_stations(arvif, sta); 6098 goto exit; 6099 } 6100 } 6101 6102 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6103 WMI_TDLS_PEER_STATE_PEERING); 6104 if (ret) { 6105 ath10k_warn(ar, 6106 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n", 6107 sta->addr, arvif->vdev_id, ret); 6108 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6109 ath10k_mac_dec_num_stations(arvif, sta); 6110 6111 if (num_tdls_stations != 0) 6112 goto exit; 6113 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6114 WMI_TDLS_DISABLE); 6115 } 6116 } else if ((old_state == IEEE80211_STA_NONE && 6117 new_state == IEEE80211_STA_NOTEXIST)) { 6118 /* 6119 * Existing station deletion. 
6120 */ 6121 ath10k_dbg(ar, ATH10K_DBG_MAC, 6122 "mac vdev %d peer delete %pM sta %pK (sta gone)\n", 6123 arvif->vdev_id, sta->addr, sta); 6124 6125 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6126 if (ret) 6127 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", 6128 sta->addr, arvif->vdev_id, ret); 6129 6130 ath10k_mac_dec_num_stations(arvif, sta); 6131 6132 spin_lock_bh(&ar->data_lock); 6133 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 6134 peer = ar->peer_map[i]; 6135 if (!peer) 6136 continue; 6137 6138 if (peer->sta == sta) { 6139 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n", 6140 sta->addr, peer, i, arvif->vdev_id); 6141 peer->sta = NULL; 6142 6143 /* Clean up the peer object as well since we 6144 * must have failed to do this above. 6145 */ 6146 list_del(&peer->list); 6147 ar->peer_map[i] = NULL; 6148 kfree(peer); 6149 ar->num_peers--; 6150 } 6151 } 6152 spin_unlock_bh(&ar->data_lock); 6153 6154 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6155 ath10k_mac_txq_unref(ar, sta->txq[i]); 6156 6157 if (!sta->tdls) 6158 goto exit; 6159 6160 if (ath10k_mac_tdls_vif_stations_count(hw, vif)) 6161 goto exit; 6162 6163 /* This was the last tdls peer in current vif */ 6164 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6165 WMI_TDLS_DISABLE); 6166 if (ret) { 6167 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6168 arvif->vdev_id, ret); 6169 } 6170 } else if (old_state == IEEE80211_STA_AUTH && 6171 new_state == IEEE80211_STA_ASSOC && 6172 (vif->type == NL80211_IFTYPE_AP || 6173 vif->type == NL80211_IFTYPE_MESH_POINT || 6174 vif->type == NL80211_IFTYPE_ADHOC)) { 6175 /* 6176 * New association. 6177 */ 6178 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", 6179 sta->addr); 6180 6181 ret = ath10k_station_assoc(ar, vif, sta, false); 6182 if (ret) 6183 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", 6184 sta->addr, arvif->vdev_id, ret); 6185 } else if (old_state == IEEE80211_STA_ASSOC && 6186 new_state == IEEE80211_STA_AUTHORIZED && 6187 sta->tdls) { 6188 /* 6189 * Tdls station authorized. 6190 */ 6191 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n", 6192 sta->addr); 6193 6194 ret = ath10k_station_assoc(ar, vif, sta, false); 6195 if (ret) { 6196 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n", 6197 sta->addr, arvif->vdev_id, ret); 6198 goto exit; 6199 } 6200 6201 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6202 WMI_TDLS_PEER_STATE_CONNECTED); 6203 if (ret) 6204 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n", 6205 sta->addr, arvif->vdev_id, ret); 6206 } else if (old_state == IEEE80211_STA_ASSOC && 6207 new_state == IEEE80211_STA_AUTH && 6208 (vif->type == NL80211_IFTYPE_AP || 6209 vif->type == NL80211_IFTYPE_MESH_POINT || 6210 vif->type == NL80211_IFTYPE_ADHOC)) { 6211 /* 6212 * Disassociation. 
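 * The station moves from ASSOC back to AUTH on an AP, mesh or IBSS
 * interface, so undo the firmware association state set up in
 * ath10k_station_assoc().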
6213 */ 6214 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", 6215 sta->addr); 6216 6217 ret = ath10k_station_disassoc(ar, vif, sta); 6218 if (ret) 6219 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", 6220 sta->addr, arvif->vdev_id, ret); 6221 } 6222 exit: 6223 mutex_unlock(&ar->conf_mutex); 6224 return ret; 6225 } 6226 6227 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, 6228 u16 ac, bool enable) 6229 { 6230 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6231 struct wmi_sta_uapsd_auto_trig_arg arg = {}; 6232 u32 prio = 0, acc = 0; 6233 u32 value = 0; 6234 int ret = 0; 6235 6236 lockdep_assert_held(&ar->conf_mutex); 6237 6238 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 6239 return 0; 6240 6241 switch (ac) { 6242 case IEEE80211_AC_VO: 6243 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 6244 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 6245 prio = 7; 6246 acc = 3; 6247 break; 6248 case IEEE80211_AC_VI: 6249 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 6250 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 6251 prio = 5; 6252 acc = 2; 6253 break; 6254 case IEEE80211_AC_BE: 6255 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 6256 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 6257 prio = 2; 6258 acc = 1; 6259 break; 6260 case IEEE80211_AC_BK: 6261 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 6262 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 6263 prio = 0; 6264 acc = 0; 6265 break; 6266 } 6267 6268 if (enable) 6269 arvif->u.sta.uapsd |= value; 6270 else 6271 arvif->u.sta.uapsd &= ~value; 6272 6273 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6274 WMI_STA_PS_PARAM_UAPSD, 6275 arvif->u.sta.uapsd); 6276 if (ret) { 6277 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); 6278 goto exit; 6279 } 6280 6281 if (arvif->u.sta.uapsd) 6282 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 6283 else 6284 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 6285 6286 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6287 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 6288 value); 6289 if (ret) 6290 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); 6291 6292 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 6293 if (ret) { 6294 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 6295 arvif->vdev_id, ret); 6296 return ret; 6297 } 6298 6299 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 6300 if (ret) { 6301 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 6302 arvif->vdev_id, ret); 6303 return ret; 6304 } 6305 6306 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || 6307 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { 6308 /* Only userspace can make an educated decision when to send 6309 * trigger frame. The following effectively disables u-UAPSD 6310 * autotrigger in firmware (which is enabled by default 6311 * provided the autotrigger service is available). 
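 * This is achieved below by requesting a zero service interval and
 * raising both the suspend and delay intervals to
 * WMI_STA_UAPSD_MAX_INTERVAL_MSEC.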
6312 */ 6313 6314 arg.wmm_ac = acc; 6315 arg.user_priority = prio; 6316 arg.service_interval = 0; 6317 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6318 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6319 6320 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, 6321 arvif->bssid, &arg, 1); 6322 if (ret) { 6323 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", 6324 ret); 6325 return ret; 6326 } 6327 } 6328 6329 exit: 6330 return ret; 6331 } 6332 6333 static int ath10k_conf_tx(struct ieee80211_hw *hw, 6334 struct ieee80211_vif *vif, u16 ac, 6335 const struct ieee80211_tx_queue_params *params) 6336 { 6337 struct ath10k *ar = hw->priv; 6338 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6339 struct wmi_wmm_params_arg *p = NULL; 6340 int ret; 6341 6342 mutex_lock(&ar->conf_mutex); 6343 6344 switch (ac) { 6345 case IEEE80211_AC_VO: 6346 p = &arvif->wmm_params.ac_vo; 6347 break; 6348 case IEEE80211_AC_VI: 6349 p = &arvif->wmm_params.ac_vi; 6350 break; 6351 case IEEE80211_AC_BE: 6352 p = &arvif->wmm_params.ac_be; 6353 break; 6354 case IEEE80211_AC_BK: 6355 p = &arvif->wmm_params.ac_bk; 6356 break; 6357 } 6358 6359 if (WARN_ON(!p)) { 6360 ret = -EINVAL; 6361 goto exit; 6362 } 6363 6364 p->cwmin = params->cw_min; 6365 p->cwmax = params->cw_max; 6366 p->aifs = params->aifs; 6367 6368 /* 6369 * The channel time duration programmed in the HW is in absolute 6370 * microseconds, while mac80211 gives the txop in units of 6371 * 32 microseconds. 6372 */ 6373 p->txop = params->txop * 32; 6374 6375 if (ar->wmi.ops->gen_vdev_wmm_conf) { 6376 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, 6377 &arvif->wmm_params); 6378 if (ret) { 6379 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", 6380 arvif->vdev_id, ret); 6381 goto exit; 6382 } 6383 } else { 6384 /* This won't work well with multi-interface cases but it's 6385 * better than nothing. 
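 * The pdev WMM parameters are shared by all vdevs, so with several
 * interfaces the most recent conf_tx() call simply wins.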
6386 */ 6387 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); 6388 if (ret) { 6389 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); 6390 goto exit; 6391 } 6392 } 6393 6394 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 6395 if (ret) 6396 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); 6397 6398 exit: 6399 mutex_unlock(&ar->conf_mutex); 6400 return ret; 6401 } 6402 6403 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ) 6404 6405 static int ath10k_remain_on_channel(struct ieee80211_hw *hw, 6406 struct ieee80211_vif *vif, 6407 struct ieee80211_channel *chan, 6408 int duration, 6409 enum ieee80211_roc_type type) 6410 { 6411 struct ath10k *ar = hw->priv; 6412 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6413 struct wmi_start_scan_arg arg; 6414 int ret = 0; 6415 u32 scan_time_msec; 6416 6417 mutex_lock(&ar->conf_mutex); 6418 6419 spin_lock_bh(&ar->data_lock); 6420 switch (ar->scan.state) { 6421 case ATH10K_SCAN_IDLE: 6422 reinit_completion(&ar->scan.started); 6423 reinit_completion(&ar->scan.completed); 6424 reinit_completion(&ar->scan.on_channel); 6425 ar->scan.state = ATH10K_SCAN_STARTING; 6426 ar->scan.is_roc = true; 6427 ar->scan.vdev_id = arvif->vdev_id; 6428 ar->scan.roc_freq = chan->center_freq; 6429 ar->scan.roc_notify = true; 6430 ret = 0; 6431 break; 6432 case ATH10K_SCAN_STARTING: 6433 case ATH10K_SCAN_RUNNING: 6434 case ATH10K_SCAN_ABORTING: 6435 ret = -EBUSY; 6436 break; 6437 } 6438 spin_unlock_bh(&ar->data_lock); 6439 6440 if (ret) 6441 goto exit; 6442 6443 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; 6444 6445 memset(&arg, 0, sizeof(arg)); 6446 ath10k_wmi_start_scan_init(ar, &arg); 6447 arg.vdev_id = arvif->vdev_id; 6448 arg.scan_id = ATH10K_SCAN_ID; 6449 arg.n_channels = 1; 6450 arg.channels[0] = chan->center_freq; 6451 arg.dwell_time_active = scan_time_msec; 6452 arg.dwell_time_passive = scan_time_msec; 6453 arg.max_scan_time = scan_time_msec; 6454 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 6455 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; 6456 arg.burst_duration_ms = duration; 6457 6458 ret = ath10k_start_scan(ar, &arg); 6459 if (ret) { 6460 ath10k_warn(ar, "failed to start roc scan: %d\n", ret); 6461 spin_lock_bh(&ar->data_lock); 6462 ar->scan.state = ATH10K_SCAN_IDLE; 6463 spin_unlock_bh(&ar->data_lock); 6464 goto exit; 6465 } 6466 6467 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); 6468 if (ret == 0) { 6469 ath10k_warn(ar, "failed to switch to channel for roc scan\n"); 6470 6471 ret = ath10k_scan_stop(ar); 6472 if (ret) 6473 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 6474 6475 ret = -ETIMEDOUT; 6476 goto exit; 6477 } 6478 6479 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 6480 msecs_to_jiffies(duration)); 6481 6482 ret = 0; 6483 exit: 6484 mutex_unlock(&ar->conf_mutex); 6485 return ret; 6486 } 6487 6488 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) 6489 { 6490 struct ath10k *ar = hw->priv; 6491 6492 mutex_lock(&ar->conf_mutex); 6493 6494 spin_lock_bh(&ar->data_lock); 6495 ar->scan.roc_notify = false; 6496 spin_unlock_bh(&ar->data_lock); 6497 6498 ath10k_scan_abort(ar); 6499 6500 mutex_unlock(&ar->conf_mutex); 6501 6502 cancel_delayed_work_sync(&ar->scan.timeout); 6503 6504 return 0; 6505 } 6506 6507 /* 6508 * Both RTS and Fragmentation threshold are interface-specific 6509 * in ath10k, but device-specific in mac80211. 
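 * ath10k_set_rts_threshold() therefore pushes the single value from
 * mac80211 to every vdev, while fragmentation is rejected outright in
 * ath10k_mac_op_set_frag_threshold().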
6510 */ 6511 6512 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 6513 { 6514 struct ath10k *ar = hw->priv; 6515 struct ath10k_vif *arvif; 6516 int ret = 0; 6517 6518 mutex_lock(&ar->conf_mutex); 6519 list_for_each_entry(arvif, &ar->arvifs, list) { 6520 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", 6521 arvif->vdev_id, value); 6522 6523 ret = ath10k_mac_set_rts(arvif, value); 6524 if (ret) { 6525 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 6526 arvif->vdev_id, ret); 6527 break; 6528 } 6529 } 6530 mutex_unlock(&ar->conf_mutex); 6531 6532 return ret; 6533 } 6534 6535 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) 6536 { 6537 /* Even though there's a WMI enum for fragmentation threshold no known 6538 * firmware actually implements it. Moreover it is not possible to rely 6539 * frame fragmentation to mac80211 because firmware clears the "more 6540 * fragments" bit in frame control making it impossible for remote 6541 * devices to reassemble frames. 6542 * 6543 * Hence implement a dummy callback just to say fragmentation isn't 6544 * supported. This effectively prevents mac80211 from doing frame 6545 * fragmentation in software. 6546 */ 6547 return -EOPNOTSUPP; 6548 } 6549 6550 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 6551 u32 queues, bool drop) 6552 { 6553 struct ath10k *ar = hw->priv; 6554 bool skip; 6555 long time_left; 6556 6557 /* mac80211 doesn't care if we really xmit queued frames or not 6558 * we'll collect those frames either way if we stop/delete vdevs */ 6559 if (drop) 6560 return; 6561 6562 mutex_lock(&ar->conf_mutex); 6563 6564 if (ar->state == ATH10K_STATE_WEDGED) 6565 goto skip; 6566 6567 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({ 6568 bool empty; 6569 6570 spin_lock_bh(&ar->htt.tx_lock); 6571 empty = (ar->htt.num_pending_tx == 0); 6572 spin_unlock_bh(&ar->htt.tx_lock); 6573 6574 skip = (ar->state == ATH10K_STATE_WEDGED) || 6575 test_bit(ATH10K_FLAG_CRASH_FLUSH, 6576 &ar->dev_flags); 6577 6578 (empty || skip); 6579 }), ATH10K_FLUSH_TIMEOUT_HZ); 6580 6581 if (time_left == 0 || skip) 6582 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n", 6583 skip, ar->state, time_left); 6584 6585 skip: 6586 mutex_unlock(&ar->conf_mutex); 6587 } 6588 6589 /* TODO: Implement this function properly 6590 * For now it is needed to reply to Probe Requests in IBSS mode. 6591 * Propably we need this information from FW. 6592 */ 6593 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw) 6594 { 6595 return 1; 6596 } 6597 6598 static void ath10k_reconfig_complete(struct ieee80211_hw *hw, 6599 enum ieee80211_reconfig_type reconfig_type) 6600 { 6601 struct ath10k *ar = hw->priv; 6602 6603 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) 6604 return; 6605 6606 mutex_lock(&ar->conf_mutex); 6607 6608 /* If device failed to restart it will be in a different state, e.g. 
6609 * ATH10K_STATE_WEDGED */ 6610 if (ar->state == ATH10K_STATE_RESTARTED) { 6611 ath10k_info(ar, "device successfully recovered\n"); 6612 ar->state = ATH10K_STATE_ON; 6613 ieee80211_wake_queues(ar->hw); 6614 } 6615 6616 mutex_unlock(&ar->conf_mutex); 6617 } 6618 6619 static void 6620 ath10k_mac_update_bss_chan_survey(struct ath10k *ar, 6621 struct ieee80211_channel *channel) 6622 { 6623 int ret; 6624 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; 6625 6626 lockdep_assert_held(&ar->conf_mutex); 6627 6628 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) || 6629 (ar->rx_channel != channel)) 6630 return; 6631 6632 if (ar->scan.state != ATH10K_SCAN_IDLE) { 6633 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n"); 6634 return; 6635 } 6636 6637 reinit_completion(&ar->bss_survey_done); 6638 6639 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type); 6640 if (ret) { 6641 ath10k_warn(ar, "failed to send pdev bss chan info request\n"); 6642 return; 6643 } 6644 6645 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); 6646 if (!ret) { 6647 ath10k_warn(ar, "bss channel survey timed out\n"); 6648 return; 6649 } 6650 } 6651 6652 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, 6653 struct survey_info *survey) 6654 { 6655 struct ath10k *ar = hw->priv; 6656 struct ieee80211_supported_band *sband; 6657 struct survey_info *ar_survey = &ar->survey[idx]; 6658 int ret = 0; 6659 6660 mutex_lock(&ar->conf_mutex); 6661 6662 sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; 6663 if (sband && idx >= sband->n_channels) { 6664 idx -= sband->n_channels; 6665 sband = NULL; 6666 } 6667 6668 if (!sband) 6669 sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; 6670 6671 if (!sband || idx >= sband->n_channels) { 6672 ret = -ENOENT; 6673 goto exit; 6674 } 6675 6676 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); 6677 6678 spin_lock_bh(&ar->data_lock); 6679 memcpy(survey, ar_survey, sizeof(*survey)); 6680 spin_unlock_bh(&ar->data_lock); 6681 6682 survey->channel = &sband->channels[idx]; 6683 6684 if (ar->rx_channel == survey->channel) 6685 survey->filled |= SURVEY_INFO_IN_USE; 6686 6687 exit: 6688 mutex_unlock(&ar->conf_mutex); 6689 return ret; 6690 } 6691 6692 static bool 6693 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, 6694 enum nl80211_band band, 6695 const struct cfg80211_bitrate_mask *mask) 6696 { 6697 int num_rates = 0; 6698 int i; 6699 6700 num_rates += hweight32(mask->control[band].legacy); 6701 6702 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 6703 num_rates += hweight8(mask->control[band].ht_mcs[i]); 6704 6705 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) 6706 num_rates += hweight16(mask->control[band].vht_mcs[i]); 6707 6708 return num_rates == 1; 6709 } 6710 6711 static bool 6712 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, 6713 enum nl80211_band band, 6714 const struct cfg80211_bitrate_mask *mask, 6715 int *nss) 6716 { 6717 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6718 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 6719 u8 ht_nss_mask = 0; 6720 u8 vht_nss_mask = 0; 6721 int i; 6722 6723 if (mask->control[band].legacy) 6724 return false; 6725 6726 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6727 if (mask->control[band].ht_mcs[i] == 0) 6728 continue; 6729 else if (mask->control[band].ht_mcs[i] == 6730 sband->ht_cap.mcs.rx_mask[i]) 6731 ht_nss_mask |= BIT(i); 6732 else 6733 return false; 6734 } 
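	/* Same idea for VHT: a stream contributes to vht_nss_mask only if
	 * its mask covers exactly the MCS range advertised for that stream
	 * in the VHT MCS map (e.g. 0x03ff for MCS 0-9).
	 */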
6735 6736 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6737 if (mask->control[band].vht_mcs[i] == 0) 6738 continue; 6739 else if (mask->control[band].vht_mcs[i] == 6740 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 6741 vht_nss_mask |= BIT(i); 6742 else 6743 return false; 6744 } 6745 6746 if (ht_nss_mask != vht_nss_mask) 6747 return false; 6748 6749 if (ht_nss_mask == 0) 6750 return false; 6751 6752 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 6753 return false; 6754 6755 *nss = fls(ht_nss_mask); 6756 6757 return true; 6758 } 6759 6760 static int 6761 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, 6762 enum nl80211_band band, 6763 const struct cfg80211_bitrate_mask *mask, 6764 u8 *rate, u8 *nss) 6765 { 6766 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6767 int rate_idx; 6768 int i; 6769 u16 bitrate; 6770 u8 preamble; 6771 u8 hw_rate; 6772 6773 if (hweight32(mask->control[band].legacy) == 1) { 6774 rate_idx = ffs(mask->control[band].legacy) - 1; 6775 6776 hw_rate = sband->bitrates[rate_idx].hw_value; 6777 bitrate = sband->bitrates[rate_idx].bitrate; 6778 6779 if (ath10k_mac_bitrate_is_cck(bitrate)) 6780 preamble = WMI_RATE_PREAMBLE_CCK; 6781 else 6782 preamble = WMI_RATE_PREAMBLE_OFDM; 6783 6784 *nss = 1; 6785 *rate = preamble << 6 | 6786 (*nss - 1) << 4 | 6787 hw_rate << 0; 6788 6789 return 0; 6790 } 6791 6792 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6793 if (hweight8(mask->control[band].ht_mcs[i]) == 1) { 6794 *nss = i + 1; 6795 *rate = WMI_RATE_PREAMBLE_HT << 6 | 6796 (*nss - 1) << 4 | 6797 (ffs(mask->control[band].ht_mcs[i]) - 1); 6798 6799 return 0; 6800 } 6801 } 6802 6803 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6804 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 6805 *nss = i + 1; 6806 *rate = WMI_RATE_PREAMBLE_VHT << 6 | 6807 (*nss - 1) << 4 | 6808 (ffs(mask->control[band].vht_mcs[i]) - 1); 6809 6810 return 0; 6811 } 6812 } 6813 6814 return -EINVAL; 6815 } 6816 6817 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, 6818 u8 rate, u8 nss, u8 sgi, u8 ldpc) 6819 { 6820 struct ath10k *ar = arvif->ar; 6821 u32 vdev_param; 6822 int ret; 6823 6824 lockdep_assert_held(&ar->conf_mutex); 6825 6826 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n", 6827 arvif->vdev_id, rate, nss, sgi); 6828 6829 vdev_param = ar->wmi.vdev_param->fixed_rate; 6830 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate); 6831 if (ret) { 6832 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", 6833 rate, ret); 6834 return ret; 6835 } 6836 6837 vdev_param = ar->wmi.vdev_param->nss; 6838 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss); 6839 if (ret) { 6840 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret); 6841 return ret; 6842 } 6843 6844 vdev_param = ar->wmi.vdev_param->sgi; 6845 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi); 6846 if (ret) { 6847 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret); 6848 return ret; 6849 } 6850 6851 vdev_param = ar->wmi.vdev_param->ldpc; 6852 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc); 6853 if (ret) { 6854 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret); 6855 return ret; 6856 } 6857 6858 return 0; 6859 } 6860 6861 static bool 6862 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, 6863 enum nl80211_band band, 6864 const struct cfg80211_bitrate_mask *mask) 6865 { 6866 int i; 6867 
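	/* Per the switch below, only per-stream masks of 0x0000 (no rates),
	 * 0x00ff (MCS 0-7), 0x01ff (MCS 0-8) and 0x03ff (MCS 0-9) can be
	 * expressed towards the firmware.
	 */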
u16 vht_mcs; 6868 6869 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible 6870 * to express all VHT MCS rate masks. Effectively only the following 6871 * ranges can be used: none, 0-7, 0-8 and 0-9. 6872 */ 6873 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 6874 vht_mcs = mask->control[band].vht_mcs[i]; 6875 6876 switch (vht_mcs) { 6877 case 0: 6878 case BIT(8) - 1: 6879 case BIT(9) - 1: 6880 case BIT(10) - 1: 6881 break; 6882 default: 6883 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n"); 6884 return false; 6885 } 6886 } 6887 6888 return true; 6889 } 6890 6891 static void ath10k_mac_set_bitrate_mask_iter(void *data, 6892 struct ieee80211_sta *sta) 6893 { 6894 struct ath10k_vif *arvif = data; 6895 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6896 struct ath10k *ar = arvif->ar; 6897 6898 if (arsta->arvif != arvif) 6899 return; 6900 6901 spin_lock_bh(&ar->data_lock); 6902 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 6903 spin_unlock_bh(&ar->data_lock); 6904 6905 ieee80211_queue_work(ar->hw, &arsta->update_wk); 6906 } 6907 6908 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 6909 struct ieee80211_vif *vif, 6910 const struct cfg80211_bitrate_mask *mask) 6911 { 6912 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6913 struct cfg80211_chan_def def; 6914 struct ath10k *ar = arvif->ar; 6915 enum nl80211_band band; 6916 const u8 *ht_mcs_mask; 6917 const u16 *vht_mcs_mask; 6918 u8 rate; 6919 u8 nss; 6920 u8 sgi; 6921 u8 ldpc; 6922 int single_nss; 6923 int ret; 6924 6925 if (ath10k_mac_vif_chan(vif, &def)) 6926 return -EPERM; 6927 6928 band = def.chan->band; 6929 ht_mcs_mask = mask->control[band].ht_mcs; 6930 vht_mcs_mask = mask->control[band].vht_mcs; 6931 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 6932 6933 sgi = mask->control[band].gi; 6934 if (sgi == NL80211_TXRATE_FORCE_LGI) 6935 return -EINVAL; 6936 6937 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) { 6938 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, 6939 &rate, &nss); 6940 if (ret) { 6941 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n", 6942 arvif->vdev_id, ret); 6943 return ret; 6944 } 6945 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask, 6946 &single_nss)) { 6947 rate = WMI_FIXED_RATE_NONE; 6948 nss = single_nss; 6949 } else { 6950 rate = WMI_FIXED_RATE_NONE; 6951 nss = min(ar->num_rf_chains, 6952 max(ath10k_mac_max_ht_nss(ht_mcs_mask), 6953 ath10k_mac_max_vht_nss(vht_mcs_mask))); 6954 6955 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask)) 6956 return -EINVAL; 6957 6958 mutex_lock(&ar->conf_mutex); 6959 6960 arvif->bitrate_mask = *mask; 6961 ieee80211_iterate_stations_atomic(ar->hw, 6962 ath10k_mac_set_bitrate_mask_iter, 6963 arvif); 6964 6965 mutex_unlock(&ar->conf_mutex); 6966 } 6967 6968 mutex_lock(&ar->conf_mutex); 6969 6970 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); 6971 if (ret) { 6972 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", 6973 arvif->vdev_id, ret); 6974 goto exit; 6975 } 6976 6977 exit: 6978 mutex_unlock(&ar->conf_mutex); 6979 6980 return ret; 6981 } 6982 6983 static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 6984 struct ieee80211_vif *vif, 6985 struct ieee80211_sta *sta, 6986 u32 changed) 6987 { 6988 struct ath10k *ar = hw->priv; 6989 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6990 u32 bw, smps; 6991 6992 spin_lock_bh(&ar->data_lock); 6993 6994 ath10k_dbg(ar, ATH10K_DBG_MAC, 6995 "mac sta rc 
update for %pM changed %08x bw %d nss %d smps %d\n", 6996 sta->addr, changed, sta->bandwidth, sta->rx_nss, 6997 sta->smps_mode); 6998 6999 if (changed & IEEE80211_RC_BW_CHANGED) { 7000 bw = WMI_PEER_CHWIDTH_20MHZ; 7001 7002 switch (sta->bandwidth) { 7003 case IEEE80211_STA_RX_BW_20: 7004 bw = WMI_PEER_CHWIDTH_20MHZ; 7005 break; 7006 case IEEE80211_STA_RX_BW_40: 7007 bw = WMI_PEER_CHWIDTH_40MHZ; 7008 break; 7009 case IEEE80211_STA_RX_BW_80: 7010 bw = WMI_PEER_CHWIDTH_80MHZ; 7011 break; 7012 case IEEE80211_STA_RX_BW_160: 7013 bw = WMI_PEER_CHWIDTH_160MHZ; 7014 break; 7015 default: 7016 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", 7017 sta->bandwidth, sta->addr); 7018 bw = WMI_PEER_CHWIDTH_20MHZ; 7019 break; 7020 } 7021 7022 arsta->bw = bw; 7023 } 7024 7025 if (changed & IEEE80211_RC_NSS_CHANGED) 7026 arsta->nss = sta->rx_nss; 7027 7028 if (changed & IEEE80211_RC_SMPS_CHANGED) { 7029 smps = WMI_PEER_SMPS_PS_NONE; 7030 7031 switch (sta->smps_mode) { 7032 case IEEE80211_SMPS_AUTOMATIC: 7033 case IEEE80211_SMPS_OFF: 7034 smps = WMI_PEER_SMPS_PS_NONE; 7035 break; 7036 case IEEE80211_SMPS_STATIC: 7037 smps = WMI_PEER_SMPS_STATIC; 7038 break; 7039 case IEEE80211_SMPS_DYNAMIC: 7040 smps = WMI_PEER_SMPS_DYNAMIC; 7041 break; 7042 case IEEE80211_SMPS_NUM_MODES: 7043 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", 7044 sta->smps_mode, sta->addr); 7045 smps = WMI_PEER_SMPS_PS_NONE; 7046 break; 7047 } 7048 7049 arsta->smps = smps; 7050 } 7051 7052 arsta->changed |= changed; 7053 7054 spin_unlock_bh(&ar->data_lock); 7055 7056 ieee80211_queue_work(hw, &arsta->update_wk); 7057 } 7058 7059 static void ath10k_offset_tsf(struct ieee80211_hw *hw, 7060 struct ieee80211_vif *vif, s64 tsf_offset) 7061 { 7062 struct ath10k *ar = hw->priv; 7063 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7064 u32 offset, vdev_param; 7065 int ret; 7066 7067 if (tsf_offset < 0) { 7068 vdev_param = ar->wmi.vdev_param->dec_tsf; 7069 offset = -tsf_offset; 7070 } else { 7071 vdev_param = ar->wmi.vdev_param->inc_tsf; 7072 offset = tsf_offset; 7073 } 7074 7075 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 7076 vdev_param, offset); 7077 7078 if (ret && ret != -EOPNOTSUPP) 7079 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n", 7080 offset, vdev_param, ret); 7081 } 7082 7083 static int ath10k_ampdu_action(struct ieee80211_hw *hw, 7084 struct ieee80211_vif *vif, 7085 struct ieee80211_ampdu_params *params) 7086 { 7087 struct ath10k *ar = hw->priv; 7088 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7089 struct ieee80211_sta *sta = params->sta; 7090 enum ieee80211_ampdu_mlme_action action = params->action; 7091 u16 tid = params->tid; 7092 7093 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", 7094 arvif->vdev_id, sta->addr, tid, action); 7095 7096 switch (action) { 7097 case IEEE80211_AMPDU_RX_START: 7098 case IEEE80211_AMPDU_RX_STOP: 7099 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session 7100 * creation/removal. Do we need to verify this? 7101 */ 7102 return 0; 7103 case IEEE80211_AMPDU_TX_START: 7104 case IEEE80211_AMPDU_TX_STOP_CONT: 7105 case IEEE80211_AMPDU_TX_STOP_FLUSH: 7106 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 7107 case IEEE80211_AMPDU_TX_OPERATIONAL: 7108 /* Firmware offloads Tx aggregation entirely so deny mac80211 7109 * Tx aggregation requests. 
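 * Returning -EOPNOTSUPP for the TX cases below keeps mac80211 from
 * trying to negotiate A-MPDU sessions of its own.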
7110 */ 7111 return -EOPNOTSUPP; 7112 } 7113 7114 return -EINVAL; 7115 } 7116 7117 static void 7118 ath10k_mac_update_rx_channel(struct ath10k *ar, 7119 struct ieee80211_chanctx_conf *ctx, 7120 struct ieee80211_vif_chanctx_switch *vifs, 7121 int n_vifs) 7122 { 7123 struct cfg80211_chan_def *def = NULL; 7124 7125 /* Both locks are required because ar->rx_channel is modified. This 7126 * allows readers to hold either lock. 7127 */ 7128 lockdep_assert_held(&ar->conf_mutex); 7129 lockdep_assert_held(&ar->data_lock); 7130 7131 WARN_ON(ctx && vifs); 7132 WARN_ON(vifs && n_vifs != 1); 7133 7134 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are 7135 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each 7136 * ppdu on Rx may reduce performance on low-end systems. It should be 7137 * possible to make tables/hashmaps to speed the lookup up (be vary of 7138 * cpu data cache lines though regarding sizes) but to keep the initial 7139 * implementation simple and less intrusive fallback to the slow lookup 7140 * only for multi-channel cases. Single-channel cases will remain to 7141 * use the old channel derival and thus performance should not be 7142 * affected much. 7143 */ 7144 rcu_read_lock(); 7145 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) { 7146 ieee80211_iter_chan_contexts_atomic(ar->hw, 7147 ath10k_mac_get_any_chandef_iter, 7148 &def); 7149 7150 if (vifs) 7151 def = &vifs[0].new_ctx->def; 7152 7153 ar->rx_channel = def->chan; 7154 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) || 7155 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) { 7156 /* During driver restart due to firmware assert, since mac80211 7157 * already has valid channel context for given radio, channel 7158 * context iteration return num_chanctx > 0. So fix rx_channel 7159 * when restart is in progress. 7160 */ 7161 ar->rx_channel = ctx->def.chan; 7162 } else { 7163 ar->rx_channel = NULL; 7164 } 7165 rcu_read_unlock(); 7166 } 7167 7168 static void 7169 ath10k_mac_update_vif_chan(struct ath10k *ar, 7170 struct ieee80211_vif_chanctx_switch *vifs, 7171 int n_vifs) 7172 { 7173 struct ath10k_vif *arvif; 7174 int ret; 7175 int i; 7176 7177 lockdep_assert_held(&ar->conf_mutex); 7178 7179 /* First stop monitor interface. Some FW versions crash if there's a 7180 * lone monitor interface. 7181 */ 7182 if (ar->monitor_started) 7183 ath10k_monitor_stop(ar); 7184 7185 for (i = 0; i < n_vifs; i++) { 7186 arvif = (void *)vifs[i].vif->drv_priv; 7187 7188 ath10k_dbg(ar, ATH10K_DBG_MAC, 7189 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n", 7190 arvif->vdev_id, 7191 vifs[i].old_ctx->def.chan->center_freq, 7192 vifs[i].new_ctx->def.chan->center_freq, 7193 vifs[i].old_ctx->def.width, 7194 vifs[i].new_ctx->def.width); 7195 7196 if (WARN_ON(!arvif->is_started)) 7197 continue; 7198 7199 if (WARN_ON(!arvif->is_up)) 7200 continue; 7201 7202 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7203 if (ret) { 7204 ath10k_warn(ar, "failed to down vdev %d: %d\n", 7205 arvif->vdev_id, ret); 7206 continue; 7207 } 7208 } 7209 7210 /* All relevant vdevs are downed and associated channel resources 7211 * should be available for the channel switch now. 
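 * Update the cached rx channel, refresh the beacon and probe response
 * templates and then restart each vdev on its new channel definition
 * before bringing it back up.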
7212 */ 7213 7214 spin_lock_bh(&ar->data_lock); 7215 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs); 7216 spin_unlock_bh(&ar->data_lock); 7217 7218 for (i = 0; i < n_vifs; i++) { 7219 arvif = (void *)vifs[i].vif->drv_priv; 7220 7221 if (WARN_ON(!arvif->is_started)) 7222 continue; 7223 7224 if (WARN_ON(!arvif->is_up)) 7225 continue; 7226 7227 ret = ath10k_mac_setup_bcn_tmpl(arvif); 7228 if (ret) 7229 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 7230 ret); 7231 7232 ret = ath10k_mac_setup_prb_tmpl(arvif); 7233 if (ret) 7234 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 7235 ret); 7236 7237 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); 7238 if (ret) { 7239 ath10k_warn(ar, "failed to restart vdev %d: %d\n", 7240 arvif->vdev_id, ret); 7241 continue; 7242 } 7243 7244 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 7245 arvif->bssid); 7246 if (ret) { 7247 ath10k_warn(ar, "failed to bring vdev up %d: %d\n", 7248 arvif->vdev_id, ret); 7249 continue; 7250 } 7251 } 7252 7253 ath10k_monitor_recalc(ar); 7254 } 7255 7256 static int 7257 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, 7258 struct ieee80211_chanctx_conf *ctx) 7259 { 7260 struct ath10k *ar = hw->priv; 7261 7262 ath10k_dbg(ar, ATH10K_DBG_MAC, 7263 "mac chanctx add freq %hu width %d ptr %pK\n", 7264 ctx->def.chan->center_freq, ctx->def.width, ctx); 7265 7266 mutex_lock(&ar->conf_mutex); 7267 7268 spin_lock_bh(&ar->data_lock); 7269 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0); 7270 spin_unlock_bh(&ar->data_lock); 7271 7272 ath10k_recalc_radar_detection(ar); 7273 ath10k_monitor_recalc(ar); 7274 7275 mutex_unlock(&ar->conf_mutex); 7276 7277 return 0; 7278 } 7279 7280 static void 7281 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 7282 struct ieee80211_chanctx_conf *ctx) 7283 { 7284 struct ath10k *ar = hw->priv; 7285 7286 ath10k_dbg(ar, ATH10K_DBG_MAC, 7287 "mac chanctx remove freq %hu width %d ptr %pK\n", 7288 ctx->def.chan->center_freq, ctx->def.width, ctx); 7289 7290 mutex_lock(&ar->conf_mutex); 7291 7292 spin_lock_bh(&ar->data_lock); 7293 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0); 7294 spin_unlock_bh(&ar->data_lock); 7295 7296 ath10k_recalc_radar_detection(ar); 7297 ath10k_monitor_recalc(ar); 7298 7299 mutex_unlock(&ar->conf_mutex); 7300 } 7301 7302 struct ath10k_mac_change_chanctx_arg { 7303 struct ieee80211_chanctx_conf *ctx; 7304 struct ieee80211_vif_chanctx_switch *vifs; 7305 int n_vifs; 7306 int next_vif; 7307 }; 7308 7309 static void 7310 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, 7311 struct ieee80211_vif *vif) 7312 { 7313 struct ath10k_mac_change_chanctx_arg *arg = data; 7314 7315 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx) 7316 return; 7317 7318 arg->n_vifs++; 7319 } 7320 7321 static void 7322 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac, 7323 struct ieee80211_vif *vif) 7324 { 7325 struct ath10k_mac_change_chanctx_arg *arg = data; 7326 struct ieee80211_chanctx_conf *ctx; 7327 7328 ctx = rcu_access_pointer(vif->chanctx_conf); 7329 if (ctx != arg->ctx) 7330 return; 7331 7332 if (WARN_ON(arg->next_vif == arg->n_vifs)) 7333 return; 7334 7335 arg->vifs[arg->next_vif].vif = vif; 7336 arg->vifs[arg->next_vif].old_ctx = ctx; 7337 arg->vifs[arg->next_vif].new_ctx = ctx; 7338 arg->next_vif++; 7339 } 7340 7341 static void 7342 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, 7343 struct ieee80211_chanctx_conf *ctx, 7344 u32 changed) 7345 { 7346 struct ath10k *ar = hw->priv; 7347 struct ath10k_mac_change_chanctx_arg arg = 
{ .ctx = ctx }; 7348 7349 mutex_lock(&ar->conf_mutex); 7350 7351 ath10k_dbg(ar, ATH10K_DBG_MAC, 7352 "mac chanctx change freq %hu width %d ptr %pK changed %x\n", 7353 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 7354 7355 /* This shouldn't really happen because channel switching should use 7356 * switch_vif_chanctx(). 7357 */ 7358 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 7359 goto unlock; 7360 7361 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) { 7362 ieee80211_iterate_active_interfaces_atomic( 7363 hw, 7364 IEEE80211_IFACE_ITER_NORMAL, 7365 ath10k_mac_change_chanctx_cnt_iter, 7366 &arg); 7367 if (arg.n_vifs == 0) 7368 goto radar; 7369 7370 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), 7371 GFP_KERNEL); 7372 if (!arg.vifs) 7373 goto radar; 7374 7375 ieee80211_iterate_active_interfaces_atomic( 7376 hw, 7377 IEEE80211_IFACE_ITER_NORMAL, 7378 ath10k_mac_change_chanctx_fill_iter, 7379 &arg); 7380 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); 7381 kfree(arg.vifs); 7382 } 7383 7384 radar: 7385 ath10k_recalc_radar_detection(ar); 7386 7387 /* FIXME: How to configure Rx chains properly? */ 7388 7389 /* No other actions are actually necessary. Firmware maintains channel 7390 * definitions per vdev internally and there's no host-side channel 7391 * context abstraction to configure, e.g. channel width. 7392 */ 7393 7394 unlock: 7395 mutex_unlock(&ar->conf_mutex); 7396 } 7397 7398 static int 7399 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 7400 struct ieee80211_vif *vif, 7401 struct ieee80211_chanctx_conf *ctx) 7402 { 7403 struct ath10k *ar = hw->priv; 7404 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7405 int ret; 7406 7407 mutex_lock(&ar->conf_mutex); 7408 7409 ath10k_dbg(ar, ATH10K_DBG_MAC, 7410 "mac chanctx assign ptr %pK vdev_id %i\n", 7411 ctx, arvif->vdev_id); 7412 7413 if (WARN_ON(arvif->is_started)) { 7414 mutex_unlock(&ar->conf_mutex); 7415 return -EBUSY; 7416 } 7417 7418 ret = ath10k_vdev_start(arvif, &ctx->def); 7419 if (ret) { 7420 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n", 7421 arvif->vdev_id, vif->addr, 7422 ctx->def.chan->center_freq, ret); 7423 goto err; 7424 } 7425 7426 arvif->is_started = true; 7427 7428 ret = ath10k_mac_vif_setup_ps(arvif); 7429 if (ret) { 7430 ath10k_warn(ar, "failed to update vdev %i ps: %d\n", 7431 arvif->vdev_id, ret); 7432 goto err_stop; 7433 } 7434 7435 if (vif->type == NL80211_IFTYPE_MONITOR) { 7436 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); 7437 if (ret) { 7438 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n", 7439 arvif->vdev_id, ret); 7440 goto err_stop; 7441 } 7442 7443 arvif->is_up = true; 7444 } 7445 7446 if (ath10k_mac_can_set_cts_prot(arvif)) { 7447 ret = ath10k_mac_set_cts_prot(arvif); 7448 if (ret) 7449 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 7450 arvif->vdev_id, ret); 7451 } 7452 7453 mutex_unlock(&ar->conf_mutex); 7454 return 0; 7455 7456 err_stop: 7457 ath10k_vdev_stop(arvif); 7458 arvif->is_started = false; 7459 ath10k_mac_vif_setup_ps(arvif); 7460 7461 err: 7462 mutex_unlock(&ar->conf_mutex); 7463 return ret; 7464 } 7465 7466 static void 7467 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 7468 struct ieee80211_vif *vif, 7469 struct ieee80211_chanctx_conf *ctx) 7470 { 7471 struct ath10k *ar = hw->priv; 7472 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7473 int ret; 7474 7475 mutex_lock(&ar->conf_mutex); 7476 7477 ath10k_dbg(ar, ATH10K_DBG_MAC, 7478 "mac chanctx unassign ptr %pK vdev_id %i\n", 
7479 ctx, arvif->vdev_id); 7480 7481 WARN_ON(!arvif->is_started); 7482 7483 if (vif->type == NL80211_IFTYPE_MONITOR) { 7484 WARN_ON(!arvif->is_up); 7485 7486 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7487 if (ret) 7488 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n", 7489 arvif->vdev_id, ret); 7490 7491 arvif->is_up = false; 7492 } 7493 7494 ret = ath10k_vdev_stop(arvif); 7495 if (ret) 7496 ath10k_warn(ar, "failed to stop vdev %i: %d\n", 7497 arvif->vdev_id, ret); 7498 7499 arvif->is_started = false; 7500 7501 mutex_unlock(&ar->conf_mutex); 7502 } 7503 7504 static int 7505 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 7506 struct ieee80211_vif_chanctx_switch *vifs, 7507 int n_vifs, 7508 enum ieee80211_chanctx_switch_mode mode) 7509 { 7510 struct ath10k *ar = hw->priv; 7511 7512 mutex_lock(&ar->conf_mutex); 7513 7514 ath10k_dbg(ar, ATH10K_DBG_MAC, 7515 "mac chanctx switch n_vifs %d mode %d\n", 7516 n_vifs, mode); 7517 ath10k_mac_update_vif_chan(ar, vifs, n_vifs); 7518 7519 mutex_unlock(&ar->conf_mutex); 7520 return 0; 7521 } 7522 7523 static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, 7524 struct ieee80211_vif *vif, 7525 struct ieee80211_sta *sta) 7526 { 7527 struct ath10k *ar; 7528 struct ath10k_peer *peer; 7529 7530 ar = hw->priv; 7531 7532 list_for_each_entry(peer, &ar->peers, list) 7533 if (peer->sta == sta) 7534 peer->removed = true; 7535 } 7536 7537 static const struct ieee80211_ops ath10k_ops = { 7538 .tx = ath10k_mac_op_tx, 7539 .wake_tx_queue = ath10k_mac_op_wake_tx_queue, 7540 .start = ath10k_start, 7541 .stop = ath10k_stop, 7542 .config = ath10k_config, 7543 .add_interface = ath10k_add_interface, 7544 .remove_interface = ath10k_remove_interface, 7545 .configure_filter = ath10k_configure_filter, 7546 .bss_info_changed = ath10k_bss_info_changed, 7547 .set_coverage_class = ath10k_mac_op_set_coverage_class, 7548 .hw_scan = ath10k_hw_scan, 7549 .cancel_hw_scan = ath10k_cancel_hw_scan, 7550 .set_key = ath10k_set_key, 7551 .set_default_unicast_key = ath10k_set_default_unicast_key, 7552 .sta_state = ath10k_sta_state, 7553 .conf_tx = ath10k_conf_tx, 7554 .remain_on_channel = ath10k_remain_on_channel, 7555 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, 7556 .set_rts_threshold = ath10k_set_rts_threshold, 7557 .set_frag_threshold = ath10k_mac_op_set_frag_threshold, 7558 .flush = ath10k_flush, 7559 .tx_last_beacon = ath10k_tx_last_beacon, 7560 .set_antenna = ath10k_set_antenna, 7561 .get_antenna = ath10k_get_antenna, 7562 .reconfig_complete = ath10k_reconfig_complete, 7563 .get_survey = ath10k_get_survey, 7564 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, 7565 .sta_rc_update = ath10k_sta_rc_update, 7566 .offset_tsf = ath10k_offset_tsf, 7567 .ampdu_action = ath10k_ampdu_action, 7568 .get_et_sset_count = ath10k_debug_get_et_sset_count, 7569 .get_et_stats = ath10k_debug_get_et_stats, 7570 .get_et_strings = ath10k_debug_get_et_strings, 7571 .add_chanctx = ath10k_mac_op_add_chanctx, 7572 .remove_chanctx = ath10k_mac_op_remove_chanctx, 7573 .change_chanctx = ath10k_mac_op_change_chanctx, 7574 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, 7575 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, 7576 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, 7577 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, 7578 7579 CFG80211_TESTMODE_CMD(ath10k_tm_cmd) 7580 7581 #ifdef CONFIG_PM 7582 .suspend = ath10k_wow_op_suspend, 7583 .resume = ath10k_wow_op_resume, 7584 #endif 7585 #ifdef CONFIG_MAC80211_DEBUGFS 7586 
.sta_add_debugfs = ath10k_sta_add_debugfs, 7587 .sta_statistics = ath10k_sta_statistics, 7588 #endif 7589 }; 7590 7591 #define CHAN2G(_channel, _freq, _flags) { \ 7592 .band = NL80211_BAND_2GHZ, \ 7593 .hw_value = (_channel), \ 7594 .center_freq = (_freq), \ 7595 .flags = (_flags), \ 7596 .max_antenna_gain = 0, \ 7597 .max_power = 30, \ 7598 } 7599 7600 #define CHAN5G(_channel, _freq, _flags) { \ 7601 .band = NL80211_BAND_5GHZ, \ 7602 .hw_value = (_channel), \ 7603 .center_freq = (_freq), \ 7604 .flags = (_flags), \ 7605 .max_antenna_gain = 0, \ 7606 .max_power = 30, \ 7607 } 7608 7609 static const struct ieee80211_channel ath10k_2ghz_channels[] = { 7610 CHAN2G(1, 2412, 0), 7611 CHAN2G(2, 2417, 0), 7612 CHAN2G(3, 2422, 0), 7613 CHAN2G(4, 2427, 0), 7614 CHAN2G(5, 2432, 0), 7615 CHAN2G(6, 2437, 0), 7616 CHAN2G(7, 2442, 0), 7617 CHAN2G(8, 2447, 0), 7618 CHAN2G(9, 2452, 0), 7619 CHAN2G(10, 2457, 0), 7620 CHAN2G(11, 2462, 0), 7621 CHAN2G(12, 2467, 0), 7622 CHAN2G(13, 2472, 0), 7623 CHAN2G(14, 2484, 0), 7624 }; 7625 7626 static const struct ieee80211_channel ath10k_5ghz_channels[] = { 7627 CHAN5G(36, 5180, 0), 7628 CHAN5G(40, 5200, 0), 7629 CHAN5G(44, 5220, 0), 7630 CHAN5G(48, 5240, 0), 7631 CHAN5G(52, 5260, 0), 7632 CHAN5G(56, 5280, 0), 7633 CHAN5G(60, 5300, 0), 7634 CHAN5G(64, 5320, 0), 7635 CHAN5G(100, 5500, 0), 7636 CHAN5G(104, 5520, 0), 7637 CHAN5G(108, 5540, 0), 7638 CHAN5G(112, 5560, 0), 7639 CHAN5G(116, 5580, 0), 7640 CHAN5G(120, 5600, 0), 7641 CHAN5G(124, 5620, 0), 7642 CHAN5G(128, 5640, 0), 7643 CHAN5G(132, 5660, 0), 7644 CHAN5G(136, 5680, 0), 7645 CHAN5G(140, 5700, 0), 7646 CHAN5G(144, 5720, 0), 7647 CHAN5G(149, 5745, 0), 7648 CHAN5G(153, 5765, 0), 7649 CHAN5G(157, 5785, 0), 7650 CHAN5G(161, 5805, 0), 7651 CHAN5G(165, 5825, 0), 7652 CHAN5G(169, 5845, 0), 7653 }; 7654 7655 struct ath10k *ath10k_mac_create(size_t priv_size) 7656 { 7657 struct ieee80211_hw *hw; 7658 struct ieee80211_ops *ops; 7659 struct ath10k *ar; 7660 7661 ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); 7662 if (!ops) 7663 return NULL; 7664 7665 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops); 7666 if (!hw) { 7667 kfree(ops); 7668 return NULL; 7669 } 7670 7671 ar = hw->priv; 7672 ar->hw = hw; 7673 ar->ops = ops; 7674 7675 return ar; 7676 } 7677 7678 void ath10k_mac_destroy(struct ath10k *ar) 7679 { 7680 struct ieee80211_ops *ops = ar->ops; 7681 7682 ieee80211_free_hw(ar->hw); 7683 kfree(ops); 7684 } 7685 7686 static const struct ieee80211_iface_limit ath10k_if_limits[] = { 7687 { 7688 .max = 8, 7689 .types = BIT(NL80211_IFTYPE_STATION) 7690 | BIT(NL80211_IFTYPE_P2P_CLIENT) 7691 }, 7692 { 7693 .max = 3, 7694 .types = BIT(NL80211_IFTYPE_P2P_GO) 7695 }, 7696 { 7697 .max = 1, 7698 .types = BIT(NL80211_IFTYPE_P2P_DEVICE) 7699 }, 7700 { 7701 .max = 7, 7702 .types = BIT(NL80211_IFTYPE_AP) 7703 #ifdef CONFIG_MAC80211_MESH 7704 | BIT(NL80211_IFTYPE_MESH_POINT) 7705 #endif 7706 }, 7707 }; 7708 7709 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = { 7710 { 7711 .max = 8, 7712 .types = BIT(NL80211_IFTYPE_AP) 7713 #ifdef CONFIG_MAC80211_MESH 7714 | BIT(NL80211_IFTYPE_MESH_POINT) 7715 #endif 7716 }, 7717 { 7718 .max = 1, 7719 .types = BIT(NL80211_IFTYPE_STATION) 7720 }, 7721 }; 7722 7723 static const struct ieee80211_iface_combination ath10k_if_comb[] = { 7724 { 7725 .limits = ath10k_if_limits, 7726 .n_limits = ARRAY_SIZE(ath10k_if_limits), 7727 .max_interfaces = 8, 7728 .num_different_channels = 1, 7729 .beacon_int_infra_match = true, 7730 }, 7731 }; 7732 7733 static const struct 
ieee80211_iface_combination ath10k_10x_if_comb[] = { 7734 { 7735 .limits = ath10k_10x_if_limits, 7736 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits), 7737 .max_interfaces = 8, 7738 .num_different_channels = 1, 7739 .beacon_int_infra_match = true, 7740 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 7741 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 7742 BIT(NL80211_CHAN_WIDTH_20) | 7743 BIT(NL80211_CHAN_WIDTH_40) | 7744 BIT(NL80211_CHAN_WIDTH_80), 7745 #endif 7746 }, 7747 }; 7748 7749 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = { 7750 { 7751 .max = 2, 7752 .types = BIT(NL80211_IFTYPE_STATION), 7753 }, 7754 { 7755 .max = 2, 7756 .types = BIT(NL80211_IFTYPE_AP) | 7757 #ifdef CONFIG_MAC80211_MESH 7758 BIT(NL80211_IFTYPE_MESH_POINT) | 7759 #endif 7760 BIT(NL80211_IFTYPE_P2P_CLIENT) | 7761 BIT(NL80211_IFTYPE_P2P_GO), 7762 }, 7763 { 7764 .max = 1, 7765 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 7766 }, 7767 }; 7768 7769 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = { 7770 { 7771 .max = 2, 7772 .types = BIT(NL80211_IFTYPE_STATION), 7773 }, 7774 { 7775 .max = 2, 7776 .types = BIT(NL80211_IFTYPE_P2P_CLIENT), 7777 }, 7778 { 7779 .max = 1, 7780 .types = BIT(NL80211_IFTYPE_AP) | 7781 #ifdef CONFIG_MAC80211_MESH 7782 BIT(NL80211_IFTYPE_MESH_POINT) | 7783 #endif 7784 BIT(NL80211_IFTYPE_P2P_GO), 7785 }, 7786 { 7787 .max = 1, 7788 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 7789 }, 7790 }; 7791 7792 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = { 7793 { 7794 .max = 1, 7795 .types = BIT(NL80211_IFTYPE_STATION), 7796 }, 7797 { 7798 .max = 1, 7799 .types = BIT(NL80211_IFTYPE_ADHOC), 7800 }, 7801 }; 7802 7803 /* FIXME: This is not thouroughly tested. These combinations may over- or 7804 * underestimate hw/fw capabilities. 
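 * The TLV combinations below allow up to four interfaces; the QCS
 * variant additionally permits concurrent operation on two different
 * channels.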
7805 */ 7806 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { 7807 { 7808 .limits = ath10k_tlv_if_limit, 7809 .num_different_channels = 1, 7810 .max_interfaces = 4, 7811 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 7812 }, 7813 { 7814 .limits = ath10k_tlv_if_limit_ibss, 7815 .num_different_channels = 1, 7816 .max_interfaces = 2, 7817 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 7818 }, 7819 }; 7820 7821 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { 7822 { 7823 .limits = ath10k_tlv_if_limit, 7824 .num_different_channels = 1, 7825 .max_interfaces = 4, 7826 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 7827 }, 7828 { 7829 .limits = ath10k_tlv_qcs_if_limit, 7830 .num_different_channels = 2, 7831 .max_interfaces = 4, 7832 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit), 7833 }, 7834 { 7835 .limits = ath10k_tlv_if_limit_ibss, 7836 .num_different_channels = 1, 7837 .max_interfaces = 2, 7838 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 7839 }, 7840 }; 7841 7842 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = { 7843 { 7844 .max = 1, 7845 .types = BIT(NL80211_IFTYPE_STATION), 7846 }, 7847 { 7848 .max = 16, 7849 .types = BIT(NL80211_IFTYPE_AP) 7850 #ifdef CONFIG_MAC80211_MESH 7851 | BIT(NL80211_IFTYPE_MESH_POINT) 7852 #endif 7853 }, 7854 }; 7855 7856 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { 7857 { 7858 .limits = ath10k_10_4_if_limits, 7859 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), 7860 .max_interfaces = 16, 7861 .num_different_channels = 1, 7862 .beacon_int_infra_match = true, 7863 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 7864 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 7865 BIT(NL80211_CHAN_WIDTH_20) | 7866 BIT(NL80211_CHAN_WIDTH_40) | 7867 BIT(NL80211_CHAN_WIDTH_80), 7868 #endif 7869 }, 7870 }; 7871 7872 static void ath10k_get_arvif_iter(void *data, u8 *mac, 7873 struct ieee80211_vif *vif) 7874 { 7875 struct ath10k_vif_iter *arvif_iter = data; 7876 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7877 7878 if (arvif->vdev_id == arvif_iter->vdev_id) 7879 arvif_iter->arvif = arvif; 7880 } 7881 7882 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) 7883 { 7884 struct ath10k_vif_iter arvif_iter; 7885 u32 flags; 7886 7887 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); 7888 arvif_iter.vdev_id = vdev_id; 7889 7890 flags = IEEE80211_IFACE_ITER_RESUME_ALL; 7891 ieee80211_iterate_active_interfaces_atomic(ar->hw, 7892 flags, 7893 ath10k_get_arvif_iter, 7894 &arvif_iter); 7895 if (!arvif_iter.arvif) { 7896 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); 7897 return NULL; 7898 } 7899 7900 return arvif_iter.arvif; 7901 } 7902 7903 #define WRD_METHOD "WRDD" 7904 #define WRDD_WIFI (0x07) 7905 7906 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd) 7907 { 7908 union acpi_object *mcc_pkg; 7909 union acpi_object *domain_type; 7910 union acpi_object *mcc_value; 7911 u32 i; 7912 7913 if (wrdd->type != ACPI_TYPE_PACKAGE || 7914 wrdd->package.count < 2 || 7915 wrdd->package.elements[0].type != ACPI_TYPE_INTEGER || 7916 wrdd->package.elements[0].integer.value != 0) { 7917 ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n"); 7918 return 0; 7919 } 7920 7921 for (i = 1; i < wrdd->package.count; ++i) { 7922 mcc_pkg = &wrdd->package.elements[i]; 7923 7924 if (mcc_pkg->type != ACPI_TYPE_PACKAGE) 7925 continue; 7926 if (mcc_pkg->package.count < 2) 7927 continue; 7928 if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || 7929 
mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) 7930 continue; 7931 7932 domain_type = &mcc_pkg->package.elements[0]; 7933 if (domain_type->integer.value != WRDD_WIFI) 7934 continue; 7935 7936 mcc_value = &mcc_pkg->package.elements[1]; 7937 return mcc_value->integer.value; 7938 } 7939 return 0; 7940 } 7941 7942 static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd) 7943 { 7944 struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev); 7945 acpi_handle root_handle; 7946 acpi_handle handle; 7947 struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL}; 7948 acpi_status status; 7949 u32 alpha2_code; 7950 char alpha2[3]; 7951 7952 root_handle = ACPI_HANDLE(&pdev->dev); 7953 if (!root_handle) 7954 return -EOPNOTSUPP; 7955 7956 status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle); 7957 if (ACPI_FAILURE(status)) { 7958 ath10k_dbg(ar, ATH10K_DBG_BOOT, 7959 "failed to get wrd method %d\n", status); 7960 return -EIO; 7961 } 7962 7963 status = acpi_evaluate_object(handle, NULL, NULL, &wrdd); 7964 if (ACPI_FAILURE(status)) { 7965 ath10k_dbg(ar, ATH10K_DBG_BOOT, 7966 "failed to call wrdc %d\n", status); 7967 return -EIO; 7968 } 7969 7970 alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer); 7971 kfree(wrdd.pointer); 7972 if (!alpha2_code) 7973 return -EIO; 7974 7975 alpha2[0] = (alpha2_code >> 8) & 0xff; 7976 alpha2[1] = (alpha2_code >> 0) & 0xff; 7977 alpha2[2] = '\0'; 7978 7979 ath10k_dbg(ar, ATH10K_DBG_BOOT, 7980 "regulatory hint from WRDD (alpha2-code): %s\n", alpha2); 7981 7982 *rd = ath_regd_find_country_by_name(alpha2); 7983 if (*rd == 0xffff) 7984 return -EIO; 7985 7986 *rd |= COUNTRY_ERD_FLAG; 7987 return 0; 7988 } 7989 7990 static int ath10k_mac_init_rd(struct ath10k *ar) 7991 { 7992 int ret; 7993 u16 rd; 7994 7995 ret = ath10k_mac_get_wrdd_regulatory(ar, &rd); 7996 if (ret) { 7997 ath10k_dbg(ar, ATH10K_DBG_BOOT, 7998 "fallback to eeprom programmed regulatory settings\n"); 7999 rd = ar->hw_eeprom_rd; 8000 } 8001 8002 ar->ath_common.regulatory.current_rd = rd; 8003 return 0; 8004 } 8005 8006 int ath10k_mac_register(struct ath10k *ar) 8007 { 8008 static const u32 cipher_suites[] = { 8009 WLAN_CIPHER_SUITE_WEP40, 8010 WLAN_CIPHER_SUITE_WEP104, 8011 WLAN_CIPHER_SUITE_TKIP, 8012 WLAN_CIPHER_SUITE_CCMP, 8013 WLAN_CIPHER_SUITE_AES_CMAC, 8014 }; 8015 struct ieee80211_supported_band *band; 8016 void *channels; 8017 int ret; 8018 8019 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); 8020 8021 SET_IEEE80211_DEV(ar->hw, ar->dev); 8022 8023 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) + 8024 ARRAY_SIZE(ath10k_5ghz_channels)) != 8025 ATH10K_NUM_CHANS); 8026 8027 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 8028 channels = kmemdup(ath10k_2ghz_channels, 8029 sizeof(ath10k_2ghz_channels), 8030 GFP_KERNEL); 8031 if (!channels) { 8032 ret = -ENOMEM; 8033 goto err_free; 8034 } 8035 8036 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 8037 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); 8038 band->channels = channels; 8039 8040 if (ar->hw_params.cck_rate_map_rev2) { 8041 band->n_bitrates = ath10k_g_rates_rev2_size; 8042 band->bitrates = ath10k_g_rates_rev2; 8043 } else { 8044 band->n_bitrates = ath10k_g_rates_size; 8045 band->bitrates = ath10k_g_rates; 8046 } 8047 8048 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; 8049 } 8050 8051 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 8052 channels = kmemdup(ath10k_5ghz_channels, 8053 sizeof(ath10k_5ghz_channels), 8054 GFP_KERNEL); 8055 if (!channels) { 8056 ret = -ENOMEM; 8057 goto err_free; 8058 } 8059 8060 
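		/* Register the 5 GHz band. Unlike the 2 GHz path above there
		 * is only a single legacy (OFDM-only) rate table to pick from.
		 */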
band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 8061 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels); 8062 band->channels = channels; 8063 band->n_bitrates = ath10k_a_rates_size; 8064 band->bitrates = ath10k_a_rates; 8065 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; 8066 } 8067 8068 ath10k_mac_setup_ht_vht_cap(ar); 8069 8070 ar->hw->wiphy->interface_modes = 8071 BIT(NL80211_IFTYPE_STATION) | 8072 BIT(NL80211_IFTYPE_AP) | 8073 BIT(NL80211_IFTYPE_MESH_POINT); 8074 8075 ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask; 8076 ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask; 8077 8078 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features)) 8079 ar->hw->wiphy->interface_modes |= 8080 BIT(NL80211_IFTYPE_P2P_DEVICE) | 8081 BIT(NL80211_IFTYPE_P2P_CLIENT) | 8082 BIT(NL80211_IFTYPE_P2P_GO); 8083 8084 ieee80211_hw_set(ar->hw, SIGNAL_DBM); 8085 ieee80211_hw_set(ar->hw, SUPPORTS_PS); 8086 ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); 8087 ieee80211_hw_set(ar->hw, MFP_CAPABLE); 8088 ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS); 8089 ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL); 8090 ieee80211_hw_set(ar->hw, AP_LINK_PS); 8091 ieee80211_hw_set(ar->hw, SPECTRUM_MGMT); 8092 ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); 8093 ieee80211_hw_set(ar->hw, CONNECTION_MONITOR); 8094 ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK); 8095 ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF); 8096 ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA); 8097 ieee80211_hw_set(ar->hw, QUEUE_CONTROL); 8098 ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG); 8099 ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK); 8100 8101 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 8102 ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); 8103 8104 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; 8105 ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 8106 8107 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) 8108 ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; 8109 8110 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) { 8111 ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION); 8112 ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW); 8113 } 8114 8115 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; 8116 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; 8117 8118 ar->hw->vif_data_size = sizeof(struct ath10k_vif); 8119 ar->hw->sta_data_size = sizeof(struct ath10k_sta); 8120 ar->hw->txq_data_size = sizeof(struct ath10k_txq); 8121 8122 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; 8123 8124 if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) { 8125 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; 8126 8127 /* Firmware delivers WPS/P2P Probe Requests frames to driver so 8128 * that userspace (e.g. wpa_supplicant/hostapd) can generate 8129 * correct Probe Responses. This is more of a hack advert.. 
		 */
		ar->hw->wiphy->probe_resp_offload |=
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	}

	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;

	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	ar->hw->wiphy->max_remain_on_channel_duration = 5000;

	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
				   NL80211_FEATURE_AP_SCAN;

	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;

	ret = ath10k_wow_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to init wow: %d\n", ret);
		goto err_free;
	}

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);

	/* On LL hardware the queues are managed entirely by the firmware,
	 * so all we advertise to mac80211 is that we can handle the maximum
	 * number of queues.
	 */
	ar->hw->queues = IEEE80211_MAX_QUEUES;

	/* vdev_ids are used as hw queue numbers. Make sure the offchannel tx
	 * queue is something that vdev_ids can't reach so that we don't stop
	 * the queue accidentally.
	 */
	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;

	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_if_comb);
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_tlv_qcs_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
		} else {
			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_if_comb);
		}
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
	case ATH10K_FW_WMI_OP_VERSION_10_2:
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10x_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10_4_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		WARN_ON(1);
		ret = -EINVAL;
		goto err_free;
	}

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ar->hw->netdev_features = NETIF_F_HW_CSUM;

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
		/* Init ath dfs pattern detector */
		ar->ath_common.debug_mask = ATH_DBG_DFS;
		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
							     NL80211_DFS_UNSET);

		if (!ar->dfs_detector)
			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
	}

	/* The current wake_tx_queue implementation imposes a significant
	 * performance penalty in some setups. The tx scheduling code needs
	 * more work anyway, so disable wake_tx_queue unless the firmware
	 * supports the pull-push mechanism.
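	 * (Pull-push support corresponds to the ATH10K_FW_FEATURE_PEER_FLOW_CONTROL
	 * firmware feature flag checked below.)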
	 */
	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		ar->ops->wake_tx_queue = NULL;

	ret = ath10k_mac_init_rd(ar);
	if (ret) {
		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	/* Disable set_coverage_class for chipsets that do not support it. */
	if (!ar->hw_params.hw_ops->set_coverage_class)
		ar->ops->set_coverage_class = NULL;

	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
			    ath10k_reg_notifier);
	if (ret) {
		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
		goto err_dfs_detector_exit;
	}

	ar->hw->wiphy->cipher_suites = cipher_suites;
	ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	ret = ieee80211_register_hw(ar->hw);
	if (ret) {
		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
		ret = regulatory_hint(ar->hw->wiphy,
				      ar->ath_common.regulatory.alpha2);
		if (ret)
			goto err_unregister;
	}

	return 0;

err_unregister:
	ieee80211_unregister_hw(ar->hw);

err_dfs_detector_exit:
	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

err_free:
	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
	return ret;
}

void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}