1 // SPDX-License-Identifier: ISC 2 /* 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 6 */ 7 8 #include "mac.h" 9 10 #include <net/cfg80211.h> 11 #include <net/mac80211.h> 12 #include <linux/etherdevice.h> 13 #include <linux/acpi.h> 14 #include <linux/of.h> 15 16 #include "hif.h" 17 #include "core.h" 18 #include "debug.h" 19 #include "wmi.h" 20 #include "htt.h" 21 #include "txrx.h" 22 #include "testmode.h" 23 #include "wmi-tlv.h" 24 #include "wmi-ops.h" 25 #include "wow.h" 26 27 /*********/ 28 /* Rates */ 29 /*********/ 30 31 static struct ieee80211_rate ath10k_rates[] = { 32 { .bitrate = 10, 33 .hw_value = ATH10K_HW_RATE_CCK_LP_1M }, 34 { .bitrate = 20, 35 .hw_value = ATH10K_HW_RATE_CCK_LP_2M, 36 .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M, 37 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 38 { .bitrate = 55, 39 .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M, 40 .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M, 41 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 42 { .bitrate = 110, 43 .hw_value = ATH10K_HW_RATE_CCK_LP_11M, 44 .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M, 45 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 46 47 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, 48 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, 49 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, 50 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, 51 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, 52 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, 53 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, 54 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, 55 }; 56 57 static struct ieee80211_rate ath10k_rates_rev2[] = { 58 { .bitrate = 10, 59 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M }, 60 { .bitrate = 20, 61 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M, 62 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M, 63 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 64 { .bitrate = 55, 65 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M, 66 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M, 67 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 68 { .bitrate = 110, 69 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M, 70 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M, 71 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 72 73 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, 74 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, 75 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, 76 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, 77 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, 78 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, 79 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, 80 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, 81 }; 82 83 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4 84 85 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX) 86 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \ 87 ATH10K_MAC_FIRST_OFDM_RATE_IDX) 88 #define ath10k_g_rates (ath10k_rates + 0) 89 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) 90 91 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) 92 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) 93 94 #define ath10k_wmi_legacy_rates ath10k_rates 95 96 static bool ath10k_mac_bitrate_is_cck(int bitrate) 97 { 98 switch (bitrate) { 99 case 10: 100 case 20: 101 case 55: 102 case 110: 103 return true; 104 } 105 106 return false; 107 } 108 109 static u8 
ath10k_mac_bitrate_to_rate(int bitrate) 110 { 111 return DIV_ROUND_UP(bitrate, 5) | 112 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0); 113 } 114 115 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, 116 u8 hw_rate, bool cck) 117 { 118 const struct ieee80211_rate *rate; 119 int i; 120 121 for (i = 0; i < sband->n_bitrates; i++) { 122 rate = &sband->bitrates[i]; 123 124 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck) 125 continue; 126 127 if (rate->hw_value == hw_rate) 128 return i; 129 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && 130 rate->hw_value_short == hw_rate) 131 return i; 132 } 133 134 return 0; 135 } 136 137 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, 138 u32 bitrate) 139 { 140 int i; 141 142 for (i = 0; i < sband->n_bitrates; i++) 143 if (sband->bitrates[i].bitrate == bitrate) 144 return i; 145 146 return 0; 147 } 148 149 static int ath10k_mac_get_rate_hw_value(int bitrate) 150 { 151 int i; 152 u8 hw_value_prefix = 0; 153 154 if (ath10k_mac_bitrate_is_cck(bitrate)) 155 hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6; 156 157 for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) { 158 if (ath10k_rates[i].bitrate == bitrate) 159 return hw_value_prefix | ath10k_rates[i].hw_value; 160 } 161 162 return -EINVAL; 163 } 164 165 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) 166 { 167 switch ((mcs_map >> (2 * nss)) & 0x3) { 168 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; 169 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; 170 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; 171 } 172 return 0; 173 } 174 175 static u32 176 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 177 { 178 int nss; 179 180 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) 181 if (ht_mcs_mask[nss]) 182 return nss + 1; 183 184 return 1; 185 } 186 187 static u32 188 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 189 { 190 int nss; 191 192 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) 193 if (vht_mcs_mask[nss]) 194 return nss + 1; 195 196 return 1; 197 } 198 199 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val) 200 { 201 enum wmi_host_platform_type platform_type; 202 int ret; 203 204 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map)) 205 platform_type = WMI_HOST_PLATFORM_LOW_PERF; 206 else 207 platform_type = WMI_HOST_PLATFORM_HIGH_PERF; 208 209 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val); 210 211 if (ret && ret != -EOPNOTSUPP) { 212 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret); 213 return ret; 214 } 215 216 return 0; 217 } 218 219 /**********/ 220 /* Crypto */ 221 /**********/ 222 223 static int ath10k_send_key(struct ath10k_vif *arvif, 224 struct ieee80211_key_conf *key, 225 enum set_key_cmd cmd, 226 const u8 *macaddr, u32 flags) 227 { 228 struct ath10k *ar = arvif->ar; 229 struct wmi_vdev_install_key_arg arg = { 230 .vdev_id = arvif->vdev_id, 231 .key_idx = key->keyidx, 232 .key_len = key->keylen, 233 .key_data = key->key, 234 .key_flags = flags, 235 .macaddr = macaddr, 236 }; 237 238 lockdep_assert_held(&arvif->ar->conf_mutex); 239 240 switch (key->cipher) { 241 case WLAN_CIPHER_SUITE_CCMP: 242 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM]; 243 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; 244 break; 245 case WLAN_CIPHER_SUITE_TKIP: 246 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_TKIP]; 247 arg.key_txmic_len = 8; 248 arg.key_rxmic_len = 8; 249 break; 250 case 
WLAN_CIPHER_SUITE_WEP40: 251 case WLAN_CIPHER_SUITE_WEP104: 252 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_WEP]; 253 break; 254 case WLAN_CIPHER_SUITE_CCMP_256: 255 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM]; 256 break; 257 case WLAN_CIPHER_SUITE_GCMP: 258 case WLAN_CIPHER_SUITE_GCMP_256: 259 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_GCM]; 260 break; 261 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 262 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 263 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 264 case WLAN_CIPHER_SUITE_AES_CMAC: 265 WARN_ON(1); 266 return -EINVAL; 267 default: 268 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); 269 return -EOPNOTSUPP; 270 } 271 272 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 273 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 274 275 if (cmd == DISABLE_KEY) { 276 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE]; 277 arg.key_data = NULL; 278 } 279 280 return ath10k_wmi_vdev_install_key(arvif->ar, &arg); 281 } 282 283 static int ath10k_install_key(struct ath10k_vif *arvif, 284 struct ieee80211_key_conf *key, 285 enum set_key_cmd cmd, 286 const u8 *macaddr, u32 flags) 287 { 288 struct ath10k *ar = arvif->ar; 289 int ret; 290 unsigned long time_left; 291 292 lockdep_assert_held(&ar->conf_mutex); 293 294 reinit_completion(&ar->install_key_done); 295 296 if (arvif->nohwcrypt) 297 return 1; 298 299 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags); 300 if (ret) 301 return ret; 302 303 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ); 304 if (time_left == 0) 305 return -ETIMEDOUT; 306 307 return 0; 308 } 309 310 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, 311 const u8 *addr) 312 { 313 struct ath10k *ar = arvif->ar; 314 struct ath10k_peer *peer; 315 int ret; 316 int i; 317 u32 flags; 318 319 lockdep_assert_held(&ar->conf_mutex); 320 321 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP && 322 arvif->vif->type != NL80211_IFTYPE_ADHOC && 323 arvif->vif->type != NL80211_IFTYPE_MESH_POINT)) 324 return -EINVAL; 325 326 spin_lock_bh(&ar->data_lock); 327 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); 328 spin_unlock_bh(&ar->data_lock); 329 330 if (!peer) 331 return -ENOENT; 332 333 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { 334 if (arvif->wep_keys[i] == NULL) 335 continue; 336 337 switch (arvif->vif->type) { 338 case NL80211_IFTYPE_AP: 339 flags = WMI_KEY_PAIRWISE; 340 341 if (arvif->def_wep_key_idx == i) 342 flags |= WMI_KEY_TX_USAGE; 343 344 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 345 SET_KEY, addr, flags); 346 if (ret < 0) 347 return ret; 348 break; 349 case NL80211_IFTYPE_ADHOC: 350 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 351 SET_KEY, addr, 352 WMI_KEY_PAIRWISE); 353 if (ret < 0) 354 return ret; 355 356 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 357 SET_KEY, addr, WMI_KEY_GROUP); 358 if (ret < 0) 359 return ret; 360 break; 361 default: 362 WARN_ON(1); 363 return -EINVAL; 364 } 365 366 spin_lock_bh(&ar->data_lock); 367 peer->keys[i] = arvif->wep_keys[i]; 368 spin_unlock_bh(&ar->data_lock); 369 } 370 371 /* In some cases (notably with static WEP IBSS with multiple keys) 372 * multicast Tx becomes broken. Both pairwise and groupwise keys are 373 * installed already. Using WMI_KEY_TX_USAGE in different combinations 374 * didn't seem help. Using def_keyid vdev parameter seems to be 375 * effective so use that. 376 * 377 * FIXME: Revisit. Perhaps this can be done in a less hacky way. 
378 */ 379 if (arvif->vif->type != NL80211_IFTYPE_ADHOC) 380 return 0; 381 382 if (arvif->def_wep_key_idx == -1) 383 return 0; 384 385 ret = ath10k_wmi_vdev_set_param(arvif->ar, 386 arvif->vdev_id, 387 arvif->ar->wmi.vdev_param->def_keyid, 388 arvif->def_wep_key_idx); 389 if (ret) { 390 ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n", 391 arvif->vdev_id, ret); 392 return ret; 393 } 394 395 return 0; 396 } 397 398 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, 399 const u8 *addr) 400 { 401 struct ath10k *ar = arvif->ar; 402 struct ath10k_peer *peer; 403 int first_errno = 0; 404 int ret; 405 int i; 406 u32 flags = 0; 407 408 lockdep_assert_held(&ar->conf_mutex); 409 410 spin_lock_bh(&ar->data_lock); 411 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); 412 spin_unlock_bh(&ar->data_lock); 413 414 if (!peer) 415 return -ENOENT; 416 417 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 418 if (peer->keys[i] == NULL) 419 continue; 420 421 /* key flags are not required to delete the key */ 422 ret = ath10k_install_key(arvif, peer->keys[i], 423 DISABLE_KEY, addr, flags); 424 if (ret < 0 && first_errno == 0) 425 first_errno = ret; 426 427 if (ret < 0) 428 ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", 429 i, ret); 430 431 spin_lock_bh(&ar->data_lock); 432 peer->keys[i] = NULL; 433 spin_unlock_bh(&ar->data_lock); 434 } 435 436 return first_errno; 437 } 438 439 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, 440 u8 keyidx) 441 { 442 struct ath10k_peer *peer; 443 int i; 444 445 lockdep_assert_held(&ar->data_lock); 446 447 /* We don't know which vdev this peer belongs to, 448 * since WMI doesn't give us that information. 449 * 450 * FIXME: multi-bss needs to be handled. 451 */ 452 peer = ath10k_peer_find(ar, 0, addr); 453 if (!peer) 454 return false; 455 456 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 457 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx) 458 return true; 459 } 460 461 return false; 462 } 463 464 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, 465 struct ieee80211_key_conf *key) 466 { 467 struct ath10k *ar = arvif->ar; 468 struct ath10k_peer *peer; 469 u8 addr[ETH_ALEN]; 470 int first_errno = 0; 471 int ret; 472 int i; 473 u32 flags = 0; 474 475 lockdep_assert_held(&ar->conf_mutex); 476 477 for (;;) { 478 /* since ath10k_install_key we can't hold data_lock all the 479 * time, so we try to remove the keys incrementally 480 */ 481 spin_lock_bh(&ar->data_lock); 482 i = 0; 483 list_for_each_entry(peer, &ar->peers, list) { 484 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 485 if (peer->keys[i] == key) { 486 ether_addr_copy(addr, peer->addr); 487 peer->keys[i] = NULL; 488 break; 489 } 490 } 491 492 if (i < ARRAY_SIZE(peer->keys)) 493 break; 494 } 495 spin_unlock_bh(&ar->data_lock); 496 497 if (i == ARRAY_SIZE(peer->keys)) 498 break; 499 /* key flags are not required to delete the key */ 500 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags); 501 if (ret < 0 && first_errno == 0) 502 first_errno = ret; 503 504 if (ret) 505 ath10k_warn(ar, "failed to remove key for %pM: %d\n", 506 addr, ret); 507 } 508 509 return first_errno; 510 } 511 512 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif, 513 struct ieee80211_key_conf *key) 514 { 515 struct ath10k *ar = arvif->ar; 516 struct ath10k_peer *peer; 517 int ret; 518 519 lockdep_assert_held(&ar->conf_mutex); 520 521 list_for_each_entry(peer, &ar->peers, list) { 522 if (ether_addr_equal(peer->addr, arvif->vif->addr)) 523 continue; 524 525 if 
(ether_addr_equal(peer->addr, arvif->bssid)) 526 continue; 527 528 if (peer->keys[key->keyidx] == key) 529 continue; 530 531 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n", 532 arvif->vdev_id, key->keyidx); 533 534 ret = ath10k_install_peer_wep_keys(arvif, peer->addr); 535 if (ret) { 536 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n", 537 arvif->vdev_id, peer->addr, ret); 538 return ret; 539 } 540 } 541 542 return 0; 543 } 544 545 /*********************/ 546 /* General utilities */ 547 /*********************/ 548 549 static inline enum wmi_phy_mode 550 chan_to_phymode(const struct cfg80211_chan_def *chandef) 551 { 552 enum wmi_phy_mode phymode = MODE_UNKNOWN; 553 554 switch (chandef->chan->band) { 555 case NL80211_BAND_2GHZ: 556 switch (chandef->width) { 557 case NL80211_CHAN_WIDTH_20_NOHT: 558 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) 559 phymode = MODE_11B; 560 else 561 phymode = MODE_11G; 562 break; 563 case NL80211_CHAN_WIDTH_20: 564 phymode = MODE_11NG_HT20; 565 break; 566 case NL80211_CHAN_WIDTH_40: 567 phymode = MODE_11NG_HT40; 568 break; 569 case NL80211_CHAN_WIDTH_5: 570 case NL80211_CHAN_WIDTH_10: 571 case NL80211_CHAN_WIDTH_80: 572 case NL80211_CHAN_WIDTH_80P80: 573 case NL80211_CHAN_WIDTH_160: 574 phymode = MODE_UNKNOWN; 575 break; 576 } 577 break; 578 case NL80211_BAND_5GHZ: 579 switch (chandef->width) { 580 case NL80211_CHAN_WIDTH_20_NOHT: 581 phymode = MODE_11A; 582 break; 583 case NL80211_CHAN_WIDTH_20: 584 phymode = MODE_11NA_HT20; 585 break; 586 case NL80211_CHAN_WIDTH_40: 587 phymode = MODE_11NA_HT40; 588 break; 589 case NL80211_CHAN_WIDTH_80: 590 phymode = MODE_11AC_VHT80; 591 break; 592 case NL80211_CHAN_WIDTH_160: 593 phymode = MODE_11AC_VHT160; 594 break; 595 case NL80211_CHAN_WIDTH_80P80: 596 phymode = MODE_11AC_VHT80_80; 597 break; 598 case NL80211_CHAN_WIDTH_5: 599 case NL80211_CHAN_WIDTH_10: 600 phymode = MODE_UNKNOWN; 601 break; 602 } 603 break; 604 default: 605 break; 606 } 607 608 WARN_ON(phymode == MODE_UNKNOWN); 609 return phymode; 610 } 611 612 static u8 ath10k_parse_mpdudensity(u8 mpdudensity) 613 { 614 /* 615 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 616 * 0 for no restriction 617 * 1 for 1/4 us 618 * 2 for 1/2 us 619 * 3 for 1 us 620 * 4 for 2 us 621 * 5 for 4 us 622 * 6 for 8 us 623 * 7 for 16 us 624 */ 625 switch (mpdudensity) { 626 case 0: 627 return 0; 628 case 1: 629 case 2: 630 case 3: 631 /* Our lower layer calculations limit our precision to 632 * 1 microsecond 633 */ 634 return 1; 635 case 4: 636 return 2; 637 case 5: 638 return 4; 639 case 6: 640 return 8; 641 case 7: 642 return 16; 643 default: 644 return 0; 645 } 646 } 647 648 int ath10k_mac_vif_chan(struct ieee80211_vif *vif, 649 struct cfg80211_chan_def *def) 650 { 651 struct ieee80211_chanctx_conf *conf; 652 653 rcu_read_lock(); 654 conf = rcu_dereference(vif->chanctx_conf); 655 if (!conf) { 656 rcu_read_unlock(); 657 return -ENOENT; 658 } 659 660 *def = conf->def; 661 rcu_read_unlock(); 662 663 return 0; 664 } 665 666 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw, 667 struct ieee80211_chanctx_conf *conf, 668 void *data) 669 { 670 int *num = data; 671 672 (*num)++; 673 } 674 675 static int ath10k_mac_num_chanctxs(struct ath10k *ar) 676 { 677 int num = 0; 678 679 ieee80211_iter_chan_contexts_atomic(ar->hw, 680 ath10k_mac_num_chanctxs_iter, 681 &num); 682 683 return num; 684 } 685 686 static void 687 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, 688 struct 
ieee80211_chanctx_conf *conf, 689 void *data) 690 { 691 struct cfg80211_chan_def **def = data; 692 693 *def = &conf->def; 694 } 695 696 static int ath10k_peer_create(struct ath10k *ar, 697 struct ieee80211_vif *vif, 698 struct ieee80211_sta *sta, 699 u32 vdev_id, 700 const u8 *addr, 701 enum wmi_peer_type peer_type) 702 { 703 struct ath10k_vif *arvif; 704 struct ath10k_peer *peer; 705 int num_peers = 0; 706 int ret; 707 708 lockdep_assert_held(&ar->conf_mutex); 709 710 num_peers = ar->num_peers; 711 712 /* Each vdev consumes a peer entry as well */ 713 list_for_each_entry(arvif, &ar->arvifs, list) 714 num_peers++; 715 716 if (num_peers >= ar->max_num_peers) 717 return -ENOBUFS; 718 719 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); 720 if (ret) { 721 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", 722 addr, vdev_id, ret); 723 return ret; 724 } 725 726 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); 727 if (ret) { 728 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", 729 addr, vdev_id, ret); 730 return ret; 731 } 732 733 spin_lock_bh(&ar->data_lock); 734 735 peer = ath10k_peer_find(ar, vdev_id, addr); 736 if (!peer) { 737 spin_unlock_bh(&ar->data_lock); 738 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", 739 addr, vdev_id); 740 ath10k_wmi_peer_delete(ar, vdev_id, addr); 741 return -ENOENT; 742 } 743 744 peer->vif = vif; 745 peer->sta = sta; 746 747 spin_unlock_bh(&ar->data_lock); 748 749 ar->num_peers++; 750 751 return 0; 752 } 753 754 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) 755 { 756 struct ath10k *ar = arvif->ar; 757 u32 param; 758 int ret; 759 760 param = ar->wmi.pdev_param->sta_kickout_th; 761 ret = ath10k_wmi_pdev_set_param(ar, param, 762 ATH10K_KICKOUT_THRESHOLD); 763 if (ret) { 764 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", 765 arvif->vdev_id, ret); 766 return ret; 767 } 768 769 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs; 770 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 771 ATH10K_KEEPALIVE_MIN_IDLE); 772 if (ret) { 773 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", 774 arvif->vdev_id, ret); 775 return ret; 776 } 777 778 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs; 779 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 780 ATH10K_KEEPALIVE_MAX_IDLE); 781 if (ret) { 782 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", 783 arvif->vdev_id, ret); 784 return ret; 785 } 786 787 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs; 788 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 789 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); 790 if (ret) { 791 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", 792 arvif->vdev_id, ret); 793 return ret; 794 } 795 796 return 0; 797 } 798 799 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) 800 { 801 struct ath10k *ar = arvif->ar; 802 u32 vdev_param; 803 804 vdev_param = ar->wmi.vdev_param->rts_threshold; 805 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); 806 } 807 808 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) 809 { 810 int ret; 811 812 lockdep_assert_held(&ar->conf_mutex); 813 814 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); 815 if (ret) 816 return ret; 817 818 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); 819 if (ret) 820 return ret; 821 822 
ar->num_peers--; 823 824 return 0; 825 } 826 827 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) 828 { 829 struct ath10k_peer *peer, *tmp; 830 int peer_id; 831 int i; 832 833 lockdep_assert_held(&ar->conf_mutex); 834 835 spin_lock_bh(&ar->data_lock); 836 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 837 if (peer->vdev_id != vdev_id) 838 continue; 839 840 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", 841 peer->addr, vdev_id); 842 843 for_each_set_bit(peer_id, peer->peer_ids, 844 ATH10K_MAX_NUM_PEER_IDS) { 845 ar->peer_map[peer_id] = NULL; 846 } 847 848 /* Double check that peer is properly un-referenced from 849 * the peer_map 850 */ 851 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 852 if (ar->peer_map[i] == peer) { 853 ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", 854 peer->addr, peer, i); 855 ar->peer_map[i] = NULL; 856 } 857 } 858 859 list_del(&peer->list); 860 kfree(peer); 861 ar->num_peers--; 862 } 863 spin_unlock_bh(&ar->data_lock); 864 } 865 866 static void ath10k_peer_cleanup_all(struct ath10k *ar) 867 { 868 struct ath10k_peer *peer, *tmp; 869 int i; 870 871 lockdep_assert_held(&ar->conf_mutex); 872 873 spin_lock_bh(&ar->data_lock); 874 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 875 list_del(&peer->list); 876 kfree(peer); 877 } 878 879 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) 880 ar->peer_map[i] = NULL; 881 882 spin_unlock_bh(&ar->data_lock); 883 884 ar->num_peers = 0; 885 ar->num_stations = 0; 886 } 887 888 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id, 889 struct ieee80211_sta *sta, 890 enum wmi_tdls_peer_state state) 891 { 892 int ret; 893 struct wmi_tdls_peer_update_cmd_arg arg = {}; 894 struct wmi_tdls_peer_capab_arg cap = {}; 895 struct wmi_channel_arg chan_arg = {}; 896 897 lockdep_assert_held(&ar->conf_mutex); 898 899 arg.vdev_id = vdev_id; 900 arg.peer_state = state; 901 ether_addr_copy(arg.addr, sta->addr); 902 903 cap.peer_max_sp = sta->max_sp; 904 cap.peer_uapsd_queues = sta->uapsd_queues; 905 906 if (state == WMI_TDLS_PEER_STATE_CONNECTED && 907 !sta->tdls_initiator) 908 cap.is_peer_responder = 1; 909 910 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg); 911 if (ret) { 912 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n", 913 arg.addr, vdev_id, ret); 914 return ret; 915 } 916 917 return 0; 918 } 919 920 /************************/ 921 /* Interface management */ 922 /************************/ 923 924 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) 925 { 926 struct ath10k *ar = arvif->ar; 927 928 lockdep_assert_held(&ar->data_lock); 929 930 if (!arvif->beacon) 931 return; 932 933 if (!arvif->beacon_buf) 934 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, 935 arvif->beacon->len, DMA_TO_DEVICE); 936 937 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED && 938 arvif->beacon_state != ATH10K_BEACON_SENT)) 939 return; 940 941 dev_kfree_skb_any(arvif->beacon); 942 943 arvif->beacon = NULL; 944 arvif->beacon_state = ATH10K_BEACON_SCHEDULED; 945 } 946 947 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) 948 { 949 struct ath10k *ar = arvif->ar; 950 951 lockdep_assert_held(&ar->data_lock); 952 953 ath10k_mac_vif_beacon_free(arvif); 954 955 if (arvif->beacon_buf) { 956 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 957 arvif->beacon_buf, arvif->beacon_paddr); 958 arvif->beacon_buf = NULL; 959 } 960 } 961 962 static inline int ath10k_vdev_setup_sync(struct ath10k *ar) 963 { 964 
unsigned long time_left; 965 966 lockdep_assert_held(&ar->conf_mutex); 967 968 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) 969 return -ESHUTDOWN; 970 971 time_left = wait_for_completion_timeout(&ar->vdev_setup_done, 972 ATH10K_VDEV_SETUP_TIMEOUT_HZ); 973 if (time_left == 0) 974 return -ETIMEDOUT; 975 976 return ar->last_wmi_vdev_start_status; 977 } 978 979 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) 980 { 981 struct cfg80211_chan_def *chandef = NULL; 982 struct ieee80211_channel *channel = NULL; 983 struct wmi_vdev_start_request_arg arg = {}; 984 int ret = 0; 985 986 lockdep_assert_held(&ar->conf_mutex); 987 988 ieee80211_iter_chan_contexts_atomic(ar->hw, 989 ath10k_mac_get_any_chandef_iter, 990 &chandef); 991 if (WARN_ON_ONCE(!chandef)) 992 return -ENOENT; 993 994 channel = chandef->chan; 995 996 arg.vdev_id = vdev_id; 997 arg.channel.freq = channel->center_freq; 998 arg.channel.band_center_freq1 = chandef->center_freq1; 999 arg.channel.band_center_freq2 = chandef->center_freq2; 1000 1001 /* TODO setup this dynamically, what in case we 1002 * don't have any vifs? 1003 */ 1004 arg.channel.mode = chan_to_phymode(chandef); 1005 arg.channel.chan_radar = 1006 !!(channel->flags & IEEE80211_CHAN_RADAR); 1007 1008 arg.channel.min_power = 0; 1009 arg.channel.max_power = channel->max_power * 2; 1010 arg.channel.max_reg_power = channel->max_reg_power * 2; 1011 arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; 1012 1013 reinit_completion(&ar->vdev_setup_done); 1014 1015 ret = ath10k_wmi_vdev_start(ar, &arg); 1016 if (ret) { 1017 ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n", 1018 vdev_id, ret); 1019 return ret; 1020 } 1021 1022 ret = ath10k_vdev_setup_sync(ar); 1023 if (ret) { 1024 ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n", 1025 vdev_id, ret); 1026 return ret; 1027 } 1028 1029 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); 1030 if (ret) { 1031 ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n", 1032 vdev_id, ret); 1033 goto vdev_stop; 1034 } 1035 1036 ar->monitor_vdev_id = vdev_id; 1037 1038 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n", 1039 ar->monitor_vdev_id); 1040 return 0; 1041 1042 vdev_stop: 1043 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 1044 if (ret) 1045 ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n", 1046 ar->monitor_vdev_id, ret); 1047 1048 return ret; 1049 } 1050 1051 static int ath10k_monitor_vdev_stop(struct ath10k *ar) 1052 { 1053 int ret = 0; 1054 1055 lockdep_assert_held(&ar->conf_mutex); 1056 1057 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); 1058 if (ret) 1059 ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n", 1060 ar->monitor_vdev_id, ret); 1061 1062 reinit_completion(&ar->vdev_setup_done); 1063 1064 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 1065 if (ret) 1066 ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n", 1067 ar->monitor_vdev_id, ret); 1068 1069 ret = ath10k_vdev_setup_sync(ar); 1070 if (ret) 1071 ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n", 1072 ar->monitor_vdev_id, ret); 1073 1074 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", 1075 ar->monitor_vdev_id); 1076 return ret; 1077 } 1078 1079 static int ath10k_monitor_vdev_create(struct ath10k *ar) 1080 { 1081 int bit, ret = 0; 1082 1083 lockdep_assert_held(&ar->conf_mutex); 1084 1085 if (ar->free_vdev_map == 0) { 1086 ath10k_warn(ar, "failed to find free vdev id for monitor 
vdev\n"); 1087 return -ENOMEM; 1088 } 1089 1090 bit = __ffs64(ar->free_vdev_map); 1091 1092 ar->monitor_vdev_id = bit; 1093 1094 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, 1095 WMI_VDEV_TYPE_MONITOR, 1096 0, ar->mac_addr); 1097 if (ret) { 1098 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n", 1099 ar->monitor_vdev_id, ret); 1100 return ret; 1101 } 1102 1103 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); 1104 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", 1105 ar->monitor_vdev_id); 1106 1107 return 0; 1108 } 1109 1110 static int ath10k_monitor_vdev_delete(struct ath10k *ar) 1111 { 1112 int ret = 0; 1113 1114 lockdep_assert_held(&ar->conf_mutex); 1115 1116 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); 1117 if (ret) { 1118 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n", 1119 ar->monitor_vdev_id, ret); 1120 return ret; 1121 } 1122 1123 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; 1124 1125 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", 1126 ar->monitor_vdev_id); 1127 return ret; 1128 } 1129 1130 static int ath10k_monitor_start(struct ath10k *ar) 1131 { 1132 int ret; 1133 1134 lockdep_assert_held(&ar->conf_mutex); 1135 1136 ret = ath10k_monitor_vdev_create(ar); 1137 if (ret) { 1138 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); 1139 return ret; 1140 } 1141 1142 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); 1143 if (ret) { 1144 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); 1145 ath10k_monitor_vdev_delete(ar); 1146 return ret; 1147 } 1148 1149 ar->monitor_started = true; 1150 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); 1151 1152 return 0; 1153 } 1154 1155 static int ath10k_monitor_stop(struct ath10k *ar) 1156 { 1157 int ret; 1158 1159 lockdep_assert_held(&ar->conf_mutex); 1160 1161 ret = ath10k_monitor_vdev_stop(ar); 1162 if (ret) { 1163 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); 1164 return ret; 1165 } 1166 1167 ret = ath10k_monitor_vdev_delete(ar); 1168 if (ret) { 1169 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); 1170 return ret; 1171 } 1172 1173 ar->monitor_started = false; 1174 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); 1175 1176 return 0; 1177 } 1178 1179 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar) 1180 { 1181 int num_ctx; 1182 1183 /* At least one chanctx is required to derive a channel to start 1184 * monitor vdev on. 1185 */ 1186 num_ctx = ath10k_mac_num_chanctxs(ar); 1187 if (num_ctx == 0) 1188 return false; 1189 1190 /* If there's already an existing special monitor interface then don't 1191 * bother creating another monitor vdev. 1192 */ 1193 if (ar->monitor_arvif) 1194 return false; 1195 1196 return ar->monitor || 1197 (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST, 1198 ar->running_fw->fw_file.fw_features) && 1199 (ar->filter_flags & FIF_OTHER_BSS)) || 1200 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1201 } 1202 1203 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar) 1204 { 1205 int num_ctx; 1206 1207 num_ctx = ath10k_mac_num_chanctxs(ar); 1208 1209 /* FIXME: Current interface combinations and cfg80211/mac80211 code 1210 * shouldn't allow this but make sure to prevent handling the following 1211 * case anyway since multi-channel DFS hasn't been tested at all. 
1212 */ 1213 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1) 1214 return false; 1215 1216 return true; 1217 } 1218 1219 static int ath10k_monitor_recalc(struct ath10k *ar) 1220 { 1221 bool needed; 1222 bool allowed; 1223 int ret; 1224 1225 lockdep_assert_held(&ar->conf_mutex); 1226 1227 needed = ath10k_mac_monitor_vdev_is_needed(ar); 1228 allowed = ath10k_mac_monitor_vdev_is_allowed(ar); 1229 1230 ath10k_dbg(ar, ATH10K_DBG_MAC, 1231 "mac monitor recalc started? %d needed? %d allowed? %d\n", 1232 ar->monitor_started, needed, allowed); 1233 1234 if (WARN_ON(needed && !allowed)) { 1235 if (ar->monitor_started) { 1236 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n"); 1237 1238 ret = ath10k_monitor_stop(ar); 1239 if (ret) 1240 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", 1241 ret); 1242 /* not serious */ 1243 } 1244 1245 return -EPERM; 1246 } 1247 1248 if (needed == ar->monitor_started) 1249 return 0; 1250 1251 if (needed) 1252 return ath10k_monitor_start(ar); 1253 else 1254 return ath10k_monitor_stop(ar); 1255 } 1256 1257 static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif) 1258 { 1259 struct ath10k *ar = arvif->ar; 1260 1261 lockdep_assert_held(&ar->conf_mutex); 1262 1263 if (!arvif->is_started) { 1264 ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n"); 1265 return false; 1266 } 1267 1268 return true; 1269 } 1270 1271 static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif) 1272 { 1273 struct ath10k *ar = arvif->ar; 1274 u32 vdev_param; 1275 1276 lockdep_assert_held(&ar->conf_mutex); 1277 1278 vdev_param = ar->wmi.vdev_param->protection_mode; 1279 1280 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n", 1281 arvif->vdev_id, arvif->use_cts_prot); 1282 1283 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1284 arvif->use_cts_prot ? 
1 : 0); 1285 } 1286 1287 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) 1288 { 1289 struct ath10k *ar = arvif->ar; 1290 u32 vdev_param, rts_cts = 0; 1291 1292 lockdep_assert_held(&ar->conf_mutex); 1293 1294 vdev_param = ar->wmi.vdev_param->enable_rtscts; 1295 1296 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET); 1297 1298 if (arvif->num_legacy_stations > 0) 1299 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES, 1300 WMI_RTSCTS_PROFILE); 1301 else 1302 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES, 1303 WMI_RTSCTS_PROFILE); 1304 1305 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n", 1306 arvif->vdev_id, rts_cts); 1307 1308 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1309 rts_cts); 1310 } 1311 1312 static int ath10k_start_cac(struct ath10k *ar) 1313 { 1314 int ret; 1315 1316 lockdep_assert_held(&ar->conf_mutex); 1317 1318 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1319 1320 ret = ath10k_monitor_recalc(ar); 1321 if (ret) { 1322 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); 1323 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1324 return ret; 1325 } 1326 1327 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", 1328 ar->monitor_vdev_id); 1329 1330 return 0; 1331 } 1332 1333 static int ath10k_stop_cac(struct ath10k *ar) 1334 { 1335 lockdep_assert_held(&ar->conf_mutex); 1336 1337 /* CAC is not running - do nothing */ 1338 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) 1339 return 0; 1340 1341 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1342 ath10k_monitor_stop(ar); 1343 1344 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); 1345 1346 return 0; 1347 } 1348 1349 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw, 1350 struct ieee80211_chanctx_conf *conf, 1351 void *data) 1352 { 1353 bool *ret = data; 1354 1355 if (!*ret && conf->radar_enabled) 1356 *ret = true; 1357 } 1358 1359 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar) 1360 { 1361 bool has_radar = false; 1362 1363 ieee80211_iter_chan_contexts_atomic(ar->hw, 1364 ath10k_mac_has_radar_iter, 1365 &has_radar); 1366 1367 return has_radar; 1368 } 1369 1370 static void ath10k_recalc_radar_detection(struct ath10k *ar) 1371 { 1372 int ret; 1373 1374 lockdep_assert_held(&ar->conf_mutex); 1375 1376 ath10k_stop_cac(ar); 1377 1378 if (!ath10k_mac_has_radar_enabled(ar)) 1379 return; 1380 1381 if (ar->num_started_vdevs > 0) 1382 return; 1383 1384 ret = ath10k_start_cac(ar); 1385 if (ret) { 1386 /* 1387 * Not possible to start CAC on current channel so starting 1388 * radiation is not allowed, make this channel DFS_UNAVAILABLE 1389 * by indicating that radar was detected. 
1390 */ 1391 ath10k_warn(ar, "failed to start CAC: %d\n", ret); 1392 ieee80211_radar_detected(ar->hw); 1393 } 1394 } 1395 1396 static int ath10k_vdev_stop(struct ath10k_vif *arvif) 1397 { 1398 struct ath10k *ar = arvif->ar; 1399 int ret; 1400 1401 lockdep_assert_held(&ar->conf_mutex); 1402 1403 reinit_completion(&ar->vdev_setup_done); 1404 1405 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); 1406 if (ret) { 1407 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n", 1408 arvif->vdev_id, ret); 1409 return ret; 1410 } 1411 1412 ret = ath10k_vdev_setup_sync(ar); 1413 if (ret) { 1414 ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n", 1415 arvif->vdev_id, ret); 1416 return ret; 1417 } 1418 1419 WARN_ON(ar->num_started_vdevs == 0); 1420 1421 if (ar->num_started_vdevs != 0) { 1422 ar->num_started_vdevs--; 1423 ath10k_recalc_radar_detection(ar); 1424 } 1425 1426 return ret; 1427 } 1428 1429 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, 1430 const struct cfg80211_chan_def *chandef, 1431 bool restart) 1432 { 1433 struct ath10k *ar = arvif->ar; 1434 struct wmi_vdev_start_request_arg arg = {}; 1435 int ret = 0; 1436 1437 lockdep_assert_held(&ar->conf_mutex); 1438 1439 reinit_completion(&ar->vdev_setup_done); 1440 1441 arg.vdev_id = arvif->vdev_id; 1442 arg.dtim_period = arvif->dtim_period; 1443 arg.bcn_intval = arvif->beacon_interval; 1444 1445 arg.channel.freq = chandef->chan->center_freq; 1446 arg.channel.band_center_freq1 = chandef->center_freq1; 1447 arg.channel.band_center_freq2 = chandef->center_freq2; 1448 arg.channel.mode = chan_to_phymode(chandef); 1449 1450 arg.channel.min_power = 0; 1451 arg.channel.max_power = chandef->chan->max_power * 2; 1452 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; 1453 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; 1454 1455 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 1456 arg.ssid = arvif->u.ap.ssid; 1457 arg.ssid_len = arvif->u.ap.ssid_len; 1458 arg.hidden_ssid = arvif->u.ap.hidden_ssid; 1459 1460 /* For now allow DFS for AP mode */ 1461 arg.channel.chan_radar = 1462 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); 1463 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 1464 arg.ssid = arvif->vif->bss_conf.ssid; 1465 arg.ssid_len = arvif->vif->bss_conf.ssid_len; 1466 } 1467 1468 ath10k_dbg(ar, ATH10K_DBG_MAC, 1469 "mac vdev %d start center_freq %d phymode %s\n", 1470 arg.vdev_id, arg.channel.freq, 1471 ath10k_wmi_phymode_str(arg.channel.mode)); 1472 1473 if (restart) 1474 ret = ath10k_wmi_vdev_restart(ar, &arg); 1475 else 1476 ret = ath10k_wmi_vdev_start(ar, &arg); 1477 1478 if (ret) { 1479 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n", 1480 arg.vdev_id, ret); 1481 return ret; 1482 } 1483 1484 ret = ath10k_vdev_setup_sync(ar); 1485 if (ret) { 1486 ath10k_warn(ar, 1487 "failed to synchronize setup for vdev %i restart %d: %d\n", 1488 arg.vdev_id, restart, ret); 1489 return ret; 1490 } 1491 1492 ar->num_started_vdevs++; 1493 ath10k_recalc_radar_detection(ar); 1494 1495 return ret; 1496 } 1497 1498 static int ath10k_vdev_start(struct ath10k_vif *arvif, 1499 const struct cfg80211_chan_def *def) 1500 { 1501 return ath10k_vdev_start_restart(arvif, def, false); 1502 } 1503 1504 static int ath10k_vdev_restart(struct ath10k_vif *arvif, 1505 const struct cfg80211_chan_def *def) 1506 { 1507 return ath10k_vdev_start_restart(arvif, def, true); 1508 } 1509 1510 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, 1511 struct sk_buff *bcn) 1512 { 1513 struct ath10k *ar = arvif->ar; 1514 struct 
ieee80211_mgmt *mgmt; 1515 const u8 *p2p_ie; 1516 int ret; 1517 1518 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p) 1519 return 0; 1520 1521 mgmt = (void *)bcn->data; 1522 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1523 mgmt->u.beacon.variable, 1524 bcn->len - (mgmt->u.beacon.variable - 1525 bcn->data)); 1526 if (!p2p_ie) 1527 return -ENOENT; 1528 1529 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); 1530 if (ret) { 1531 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", 1532 arvif->vdev_id, ret); 1533 return ret; 1534 } 1535 1536 return 0; 1537 } 1538 1539 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, 1540 u8 oui_type, size_t ie_offset) 1541 { 1542 size_t len; 1543 const u8 *next; 1544 const u8 *end; 1545 u8 *ie; 1546 1547 if (WARN_ON(skb->len < ie_offset)) 1548 return -EINVAL; 1549 1550 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, 1551 skb->data + ie_offset, 1552 skb->len - ie_offset); 1553 if (!ie) 1554 return -ENOENT; 1555 1556 len = ie[1] + 2; 1557 end = skb->data + skb->len; 1558 next = ie + len; 1559 1560 if (WARN_ON(next > end)) 1561 return -EINVAL; 1562 1563 memmove(ie, next, end - next); 1564 skb_trim(skb, skb->len - len); 1565 1566 return 0; 1567 } 1568 1569 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif) 1570 { 1571 struct ath10k *ar = arvif->ar; 1572 struct ieee80211_hw *hw = ar->hw; 1573 struct ieee80211_vif *vif = arvif->vif; 1574 struct ieee80211_mutable_offsets offs = {}; 1575 struct sk_buff *bcn; 1576 int ret; 1577 1578 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1579 return 0; 1580 1581 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 1582 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 1583 return 0; 1584 1585 bcn = ieee80211_beacon_get_template(hw, vif, &offs); 1586 if (!bcn) { 1587 ath10k_warn(ar, "failed to get beacon template from mac80211\n"); 1588 return -EPERM; 1589 } 1590 1591 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn); 1592 if (ret) { 1593 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); 1594 kfree_skb(bcn); 1595 return ret; 1596 } 1597 1598 /* P2P IE is inserted by firmware automatically (as configured above) 1599 * so remove it from the base beacon template to avoid duplicate P2P 1600 * IEs in beacon frames. 
1601 */ 1602 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1603 offsetof(struct ieee80211_mgmt, 1604 u.beacon.variable)); 1605 1606 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, 1607 0, NULL, 0); 1608 kfree_skb(bcn); 1609 1610 if (ret) { 1611 ath10k_warn(ar, "failed to submit beacon template command: %d\n", 1612 ret); 1613 return ret; 1614 } 1615 1616 return 0; 1617 } 1618 1619 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) 1620 { 1621 struct ath10k *ar = arvif->ar; 1622 struct ieee80211_hw *hw = ar->hw; 1623 struct ieee80211_vif *vif = arvif->vif; 1624 struct sk_buff *prb; 1625 int ret; 1626 1627 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1628 return 0; 1629 1630 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1631 return 0; 1632 1633 prb = ieee80211_proberesp_get(hw, vif); 1634 if (!prb) { 1635 ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); 1636 return -EPERM; 1637 } 1638 1639 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); 1640 kfree_skb(prb); 1641 1642 if (ret) { 1643 ath10k_warn(ar, "failed to submit probe resp template command: %d\n", 1644 ret); 1645 return ret; 1646 } 1647 1648 return 0; 1649 } 1650 1651 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif) 1652 { 1653 struct ath10k *ar = arvif->ar; 1654 struct cfg80211_chan_def def; 1655 int ret; 1656 1657 /* When originally vdev is started during assign_vif_chanctx() some 1658 * information is missing, notably SSID. Firmware revisions with beacon 1659 * offloading require the SSID to be provided during vdev (re)start to 1660 * handle hidden SSID properly. 1661 * 1662 * Vdev restart must be done after vdev has been both started and 1663 * upped. Otherwise some firmware revisions (at least 10.2) fail to 1664 * deliver vdev restart response event causing timeouts during vdev 1665 * syncing in ath10k. 1666 * 1667 * Note: The vdev down/up and template reinstallation could be skipped 1668 * since only wmi-tlv firmware are known to have beacon offload and 1669 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart 1670 * response delivery. It's probably more robust to keep it as is. 1671 */ 1672 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1673 return 0; 1674 1675 if (WARN_ON(!arvif->is_started)) 1676 return -EINVAL; 1677 1678 if (WARN_ON(!arvif->is_up)) 1679 return -EINVAL; 1680 1681 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 1682 return -EINVAL; 1683 1684 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1685 if (ret) { 1686 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n", 1687 arvif->vdev_id, ret); 1688 return ret; 1689 } 1690 1691 /* Vdev down reset beacon & presp templates. Reinstall them. Otherwise 1692 * firmware will crash upon vdev up. 
1693 */ 1694 1695 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1696 if (ret) { 1697 ath10k_warn(ar, "failed to update beacon template: %d\n", ret); 1698 return ret; 1699 } 1700 1701 ret = ath10k_mac_setup_prb_tmpl(arvif); 1702 if (ret) { 1703 ath10k_warn(ar, "failed to update presp template: %d\n", ret); 1704 return ret; 1705 } 1706 1707 ret = ath10k_vdev_restart(arvif, &def); 1708 if (ret) { 1709 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n", 1710 arvif->vdev_id, ret); 1711 return ret; 1712 } 1713 1714 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1715 arvif->bssid); 1716 if (ret) { 1717 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n", 1718 arvif->vdev_id, ret); 1719 return ret; 1720 } 1721 1722 return 0; 1723 } 1724 1725 static void ath10k_control_beaconing(struct ath10k_vif *arvif, 1726 struct ieee80211_bss_conf *info) 1727 { 1728 struct ath10k *ar = arvif->ar; 1729 int ret = 0; 1730 1731 lockdep_assert_held(&arvif->ar->conf_mutex); 1732 1733 if (!info->enable_beacon) { 1734 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1735 if (ret) 1736 ath10k_warn(ar, "failed to down vdev_id %i: %d\n", 1737 arvif->vdev_id, ret); 1738 1739 arvif->is_up = false; 1740 1741 spin_lock_bh(&arvif->ar->data_lock); 1742 ath10k_mac_vif_beacon_free(arvif); 1743 spin_unlock_bh(&arvif->ar->data_lock); 1744 1745 return; 1746 } 1747 1748 arvif->tx_seq_no = 0x1000; 1749 1750 arvif->aid = 0; 1751 ether_addr_copy(arvif->bssid, info->bssid); 1752 1753 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1754 arvif->bssid); 1755 if (ret) { 1756 ath10k_warn(ar, "failed to bring up vdev %d: %i\n", 1757 arvif->vdev_id, ret); 1758 return; 1759 } 1760 1761 arvif->is_up = true; 1762 1763 ret = ath10k_mac_vif_fix_hidden_ssid(arvif); 1764 if (ret) { 1765 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n", 1766 arvif->vdev_id, ret); 1767 return; 1768 } 1769 1770 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); 1771 } 1772 1773 static void ath10k_control_ibss(struct ath10k_vif *arvif, 1774 struct ieee80211_bss_conf *info, 1775 const u8 self_peer[ETH_ALEN]) 1776 { 1777 struct ath10k *ar = arvif->ar; 1778 u32 vdev_param; 1779 int ret = 0; 1780 1781 lockdep_assert_held(&arvif->ar->conf_mutex); 1782 1783 if (!info->ibss_joined) { 1784 if (is_zero_ether_addr(arvif->bssid)) 1785 return; 1786 1787 eth_zero_addr(arvif->bssid); 1788 1789 return; 1790 } 1791 1792 vdev_param = arvif->ar->wmi.vdev_param->atim_window; 1793 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, 1794 ATH10K_DEFAULT_ATIM); 1795 if (ret) 1796 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", 1797 arvif->vdev_id, ret); 1798 } 1799 1800 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif) 1801 { 1802 struct ath10k *ar = arvif->ar; 1803 u32 param; 1804 u32 value; 1805 int ret; 1806 1807 lockdep_assert_held(&arvif->ar->conf_mutex); 1808 1809 if (arvif->u.sta.uapsd) 1810 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER; 1811 else 1812 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; 1813 1814 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 1815 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); 1816 if (ret) { 1817 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n", 1818 value, arvif->vdev_id, ret); 1819 return ret; 1820 } 1821 1822 return 0; 1823 } 1824 1825 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) 1826 { 1827 struct ath10k *ar = arvif->ar; 1828 u32 param; 1829 u32 value; 1830 
int ret; 1831 1832 lockdep_assert_held(&arvif->ar->conf_mutex); 1833 1834 if (arvif->u.sta.uapsd) 1835 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD; 1836 else 1837 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; 1838 1839 param = WMI_STA_PS_PARAM_PSPOLL_COUNT; 1840 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 1841 param, value); 1842 if (ret) { 1843 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n", 1844 value, arvif->vdev_id, ret); 1845 return ret; 1846 } 1847 1848 return 0; 1849 } 1850 1851 static int ath10k_mac_num_vifs_started(struct ath10k *ar) 1852 { 1853 struct ath10k_vif *arvif; 1854 int num = 0; 1855 1856 lockdep_assert_held(&ar->conf_mutex); 1857 1858 list_for_each_entry(arvif, &ar->arvifs, list) 1859 if (arvif->is_started) 1860 num++; 1861 1862 return num; 1863 } 1864 1865 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) 1866 { 1867 struct ath10k *ar = arvif->ar; 1868 struct ieee80211_vif *vif = arvif->vif; 1869 struct ieee80211_conf *conf = &ar->hw->conf; 1870 enum wmi_sta_powersave_param param; 1871 enum wmi_sta_ps_mode psmode; 1872 int ret; 1873 int ps_timeout; 1874 bool enable_ps; 1875 1876 lockdep_assert_held(&arvif->ar->conf_mutex); 1877 1878 if (arvif->vif->type != NL80211_IFTYPE_STATION) 1879 return 0; 1880 1881 enable_ps = arvif->ps; 1882 1883 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && 1884 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, 1885 ar->running_fw->fw_file.fw_features)) { 1886 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", 1887 arvif->vdev_id); 1888 enable_ps = false; 1889 } 1890 1891 if (!arvif->is_started) { 1892 /* mac80211 can update vif powersave state while disconnected. 1893 * Firmware doesn't behave nicely and consumes more power than 1894 * necessary if PS is disabled on a non-started vdev. Hence 1895 * force-enable PS for non-running vdevs. 1896 */ 1897 psmode = WMI_STA_PS_MODE_ENABLED; 1898 } else if (enable_ps) { 1899 psmode = WMI_STA_PS_MODE_ENABLED; 1900 param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 1901 1902 ps_timeout = conf->dynamic_ps_timeout; 1903 if (ps_timeout == 0) { 1904 /* Firmware doesn't like 0 */ 1905 ps_timeout = ieee80211_tu_to_usec( 1906 vif->bss_conf.beacon_int) / 1000; 1907 } 1908 1909 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 1910 ps_timeout); 1911 if (ret) { 1912 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", 1913 arvif->vdev_id, ret); 1914 return ret; 1915 } 1916 } else { 1917 psmode = WMI_STA_PS_MODE_DISABLED; 1918 } 1919 1920 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", 1921 arvif->vdev_id, psmode ? "enable" : "disable"); 1922 1923 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); 1924 if (ret) { 1925 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", 1926 psmode, arvif->vdev_id, ret); 1927 return ret; 1928 } 1929 1930 return 0; 1931 } 1932 1933 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif) 1934 { 1935 struct ath10k *ar = arvif->ar; 1936 struct wmi_sta_keepalive_arg arg = {}; 1937 int ret; 1938 1939 lockdep_assert_held(&arvif->ar->conf_mutex); 1940 1941 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 1942 return 0; 1943 1944 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) 1945 return 0; 1946 1947 /* Some firmware revisions have a bug and ignore the `enabled` field. 1948 * Instead use the interval to disable the keepalive. 
1949 */ 1950 arg.vdev_id = arvif->vdev_id; 1951 arg.enabled = 1; 1952 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; 1953 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE; 1954 1955 ret = ath10k_wmi_sta_keepalive(ar, &arg); 1956 if (ret) { 1957 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", 1958 arvif->vdev_id, ret); 1959 return ret; 1960 } 1961 1962 return 0; 1963 } 1964 1965 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif) 1966 { 1967 struct ath10k *ar = arvif->ar; 1968 struct ieee80211_vif *vif = arvif->vif; 1969 int ret; 1970 1971 lockdep_assert_held(&arvif->ar->conf_mutex); 1972 1973 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))) 1974 return; 1975 1976 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1977 return; 1978 1979 if (!vif->csa_active) 1980 return; 1981 1982 if (!arvif->is_up) 1983 return; 1984 1985 if (!ieee80211_csa_is_complete(vif)) { 1986 ieee80211_csa_update_counter(vif); 1987 1988 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1989 if (ret) 1990 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 1991 ret); 1992 1993 ret = ath10k_mac_setup_prb_tmpl(arvif); 1994 if (ret) 1995 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 1996 ret); 1997 } else { 1998 ieee80211_csa_finish(vif); 1999 } 2000 } 2001 2002 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work) 2003 { 2004 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 2005 ap_csa_work); 2006 struct ath10k *ar = arvif->ar; 2007 2008 mutex_lock(&ar->conf_mutex); 2009 ath10k_mac_vif_ap_csa_count_down(arvif); 2010 mutex_unlock(&ar->conf_mutex); 2011 } 2012 2013 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, 2014 struct ieee80211_vif *vif) 2015 { 2016 struct sk_buff *skb = data; 2017 struct ieee80211_mgmt *mgmt = (void *)skb->data; 2018 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2019 2020 if (vif->type != NL80211_IFTYPE_STATION) 2021 return; 2022 2023 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) 2024 return; 2025 2026 cancel_delayed_work(&arvif->connection_loss_work); 2027 } 2028 2029 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb) 2030 { 2031 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2032 IEEE80211_IFACE_ITER_NORMAL, 2033 ath10k_mac_handle_beacon_iter, 2034 skb); 2035 } 2036 2037 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, 2038 struct ieee80211_vif *vif) 2039 { 2040 u32 *vdev_id = data; 2041 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2042 struct ath10k *ar = arvif->ar; 2043 struct ieee80211_hw *hw = ar->hw; 2044 2045 if (arvif->vdev_id != *vdev_id) 2046 return; 2047 2048 if (!arvif->is_up) 2049 return; 2050 2051 ieee80211_beacon_loss(vif); 2052 2053 /* Firmware doesn't report beacon loss events repeatedly. If AP probe 2054 * (done by mac80211) succeeds but beacons do not resume then it 2055 * doesn't make sense to continue operation. Queue connection loss work 2056 * which can be cancelled when beacon is received. 
2057 */ 2058 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, 2059 ATH10K_CONNECTION_LOSS_HZ); 2060 } 2061 2062 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id) 2063 { 2064 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2065 IEEE80211_IFACE_ITER_NORMAL, 2066 ath10k_mac_handle_beacon_miss_iter, 2067 &vdev_id); 2068 } 2069 2070 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work) 2071 { 2072 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 2073 connection_loss_work.work); 2074 struct ieee80211_vif *vif = arvif->vif; 2075 2076 if (!arvif->is_up) 2077 return; 2078 2079 ieee80211_connection_loss(vif); 2080 } 2081 2082 /**********************/ 2083 /* Station management */ 2084 /**********************/ 2085 2086 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, 2087 struct ieee80211_vif *vif) 2088 { 2089 /* Some firmware revisions have unstable STA powersave when listen 2090 * interval is set too high (e.g. 5). The symptoms are firmware doesn't 2091 * generate NullFunc frames properly even if buffered frames have been 2092 * indicated in Beacon TIM. Firmware would seldom wake up to pull 2093 * buffered frames. Often pinging the device from AP would simply fail. 2094 * 2095 * As a workaround set it to 1. 2096 */ 2097 if (vif->type == NL80211_IFTYPE_STATION) 2098 return 1; 2099 2100 return ar->hw->conf.listen_interval; 2101 } 2102 2103 static void ath10k_peer_assoc_h_basic(struct ath10k *ar, 2104 struct ieee80211_vif *vif, 2105 struct ieee80211_sta *sta, 2106 struct wmi_peer_assoc_complete_arg *arg) 2107 { 2108 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2109 u32 aid; 2110 2111 lockdep_assert_held(&ar->conf_mutex); 2112 2113 if (vif->type == NL80211_IFTYPE_STATION) 2114 aid = vif->bss_conf.aid; 2115 else 2116 aid = sta->aid; 2117 2118 ether_addr_copy(arg->addr, sta->addr); 2119 arg->vdev_id = arvif->vdev_id; 2120 arg->peer_aid = aid; 2121 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth; 2122 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); 2123 arg->peer_num_spatial_streams = 1; 2124 arg->peer_caps = vif->bss_conf.assoc_capability; 2125 } 2126 2127 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, 2128 struct ieee80211_vif *vif, 2129 struct ieee80211_sta *sta, 2130 struct wmi_peer_assoc_complete_arg *arg) 2131 { 2132 struct ieee80211_bss_conf *info = &vif->bss_conf; 2133 struct cfg80211_chan_def def; 2134 struct cfg80211_bss *bss; 2135 const u8 *rsnie = NULL; 2136 const u8 *wpaie = NULL; 2137 2138 lockdep_assert_held(&ar->conf_mutex); 2139 2140 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2141 return; 2142 2143 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, 2144 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); 2145 if (bss) { 2146 const struct cfg80211_bss_ies *ies; 2147 2148 rcu_read_lock(); 2149 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); 2150 2151 ies = rcu_dereference(bss->ies); 2152 2153 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 2154 WLAN_OUI_TYPE_MICROSOFT_WPA, 2155 ies->data, 2156 ies->len); 2157 rcu_read_unlock(); 2158 cfg80211_put_bss(ar->hw->wiphy, bss); 2159 } 2160 2161 /* FIXME: base on RSN IE/WPA IE is a correct idea? 
*/ 2162 if (rsnie || wpaie) { 2163 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); 2164 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; 2165 } 2166 2167 if (wpaie) { 2168 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); 2169 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; 2170 } 2171 2172 if (sta->mfp && 2173 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, 2174 ar->running_fw->fw_file.fw_features)) { 2175 arg->peer_flags |= ar->wmi.peer_flags->pmf; 2176 } 2177 } 2178 2179 static void ath10k_peer_assoc_h_rates(struct ath10k *ar, 2180 struct ieee80211_vif *vif, 2181 struct ieee80211_sta *sta, 2182 struct wmi_peer_assoc_complete_arg *arg) 2183 { 2184 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2185 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 2186 struct cfg80211_chan_def def; 2187 const struct ieee80211_supported_band *sband; 2188 const struct ieee80211_rate *rates; 2189 enum nl80211_band band; 2190 u32 ratemask; 2191 u8 rate; 2192 int i; 2193 2194 lockdep_assert_held(&ar->conf_mutex); 2195 2196 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2197 return; 2198 2199 band = def.chan->band; 2200 sband = ar->hw->wiphy->bands[band]; 2201 ratemask = sta->supp_rates[band]; 2202 ratemask &= arvif->bitrate_mask.control[band].legacy; 2203 rates = sband->bitrates; 2204 2205 rateset->num_rates = 0; 2206 2207 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { 2208 if (!(ratemask & 1)) 2209 continue; 2210 2211 rate = ath10k_mac_bitrate_to_rate(rates->bitrate); 2212 rateset->rates[rateset->num_rates] = rate; 2213 rateset->num_rates++; 2214 } 2215 } 2216 2217 static bool 2218 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 2219 { 2220 int nss; 2221 2222 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) 2223 if (ht_mcs_mask[nss]) 2224 return false; 2225 2226 return true; 2227 } 2228 2229 static bool 2230 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 2231 { 2232 int nss; 2233 2234 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) 2235 if (vht_mcs_mask[nss]) 2236 return false; 2237 2238 return true; 2239 } 2240 2241 static void ath10k_peer_assoc_h_ht(struct ath10k *ar, 2242 struct ieee80211_vif *vif, 2243 struct ieee80211_sta *sta, 2244 struct wmi_peer_assoc_complete_arg *arg) 2245 { 2246 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2247 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2248 struct cfg80211_chan_def def; 2249 enum nl80211_band band; 2250 const u8 *ht_mcs_mask; 2251 const u16 *vht_mcs_mask; 2252 int i, n; 2253 u8 max_nss; 2254 u32 stbc; 2255 2256 lockdep_assert_held(&ar->conf_mutex); 2257 2258 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2259 return; 2260 2261 if (!ht_cap->ht_supported) 2262 return; 2263 2264 band = def.chan->band; 2265 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2266 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2267 2268 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) && 2269 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2270 return; 2271 2272 arg->peer_flags |= ar->wmi.peer_flags->ht; 2273 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2274 ht_cap->ampdu_factor)) - 1; 2275 2276 arg->peer_mpdu_density = 2277 ath10k_parse_mpdudensity(ht_cap->ampdu_density); 2278 2279 arg->peer_ht_caps = ht_cap->cap; 2280 arg->peer_rate_caps |= WMI_RC_HT_FLAG; 2281 2282 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 2283 arg->peer_flags |= ar->wmi.peer_flags->ldbc; 2284 2285 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { 2286 
arg->peer_flags |= ar->wmi.peer_flags->bw40; 2287 arg->peer_rate_caps |= WMI_RC_CW40_FLAG; 2288 } 2289 2290 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { 2291 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) 2292 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2293 2294 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) 2295 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2296 } 2297 2298 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { 2299 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; 2300 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2301 } 2302 2303 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { 2304 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; 2305 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; 2306 stbc = stbc << WMI_RC_RX_STBC_FLAG_S; 2307 arg->peer_rate_caps |= stbc; 2308 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2309 } 2310 2311 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 2312 arg->peer_rate_caps |= WMI_RC_TS_FLAG; 2313 else if (ht_cap->mcs.rx_mask[1]) 2314 arg->peer_rate_caps |= WMI_RC_DS_FLAG; 2315 2316 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) 2317 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && 2318 (ht_mcs_mask[i / 8] & BIT(i % 8))) { 2319 max_nss = (i / 8) + 1; 2320 arg->peer_ht_rates.rates[n++] = i; 2321 } 2322 2323 /* 2324 * This is a workaround for HT-enabled STAs which break the spec 2325 * and have no HT capabilities RX mask (no HT RX MCS map). 2326 * 2327 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), 2328 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. 2329 * 2330 * Firmware asserts if such situation occurs. 2331 */ 2332 if (n == 0) { 2333 arg->peer_ht_rates.num_rates = 8; 2334 for (i = 0; i < arg->peer_ht_rates.num_rates; i++) 2335 arg->peer_ht_rates.rates[i] = i; 2336 } else { 2337 arg->peer_ht_rates.num_rates = n; 2338 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2339 } 2340 2341 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 2342 arg->addr, 2343 arg->peer_ht_rates.num_rates, 2344 arg->peer_num_spatial_streams); 2345 } 2346 2347 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, 2348 struct ath10k_vif *arvif, 2349 struct ieee80211_sta *sta) 2350 { 2351 u32 uapsd = 0; 2352 u32 max_sp = 0; 2353 int ret = 0; 2354 2355 lockdep_assert_held(&ar->conf_mutex); 2356 2357 if (sta->wme && sta->uapsd_queues) { 2358 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 2359 sta->uapsd_queues, sta->max_sp); 2360 2361 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2362 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 2363 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 2364 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2365 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | 2366 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; 2367 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2368 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | 2369 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; 2370 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2371 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | 2372 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; 2373 2374 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 2375 max_sp = sta->max_sp; 2376 2377 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2378 sta->addr, 2379 WMI_AP_PS_PEER_PARAM_UAPSD, 2380 uapsd); 2381 if (ret) { 2382 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", 2383 arvif->vdev_id, ret); 2384 return ret; 2385 } 2386 2387 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2388 sta->addr, 2389 
WMI_AP_PS_PEER_PARAM_MAX_SP, 2390 max_sp); 2391 if (ret) { 2392 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", 2393 arvif->vdev_id, ret); 2394 return ret; 2395 } 2396 2397 /* TODO setup this based on STA listen interval and 2398 * beacon interval. Currently we don't know 2399 * sta->listen_interval - mac80211 patch required. 2400 * Currently use 10 seconds 2401 */ 2402 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, 2403 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 2404 10); 2405 if (ret) { 2406 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", 2407 arvif->vdev_id, ret); 2408 return ret; 2409 } 2410 } 2411 2412 return 0; 2413 } 2414 2415 static u16 2416 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set, 2417 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) 2418 { 2419 int idx_limit; 2420 int nss; 2421 u16 mcs_map; 2422 u16 mcs; 2423 2424 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { 2425 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & 2426 vht_mcs_limit[nss]; 2427 2428 if (mcs_map) 2429 idx_limit = fls(mcs_map) - 1; 2430 else 2431 idx_limit = -1; 2432 2433 switch (idx_limit) { 2434 case 0: /* fall through */ 2435 case 1: /* fall through */ 2436 case 2: /* fall through */ 2437 case 3: /* fall through */ 2438 case 4: /* fall through */ 2439 case 5: /* fall through */ 2440 case 6: /* fall through */ 2441 default: 2442 /* see ath10k_mac_can_set_bitrate_mask() */ 2443 WARN_ON(1); 2444 /* fall through */ 2445 case -1: 2446 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; 2447 break; 2448 case 7: 2449 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; 2450 break; 2451 case 8: 2452 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; 2453 break; 2454 case 9: 2455 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; 2456 break; 2457 } 2458 2459 tx_mcs_set &= ~(0x3 << (nss * 2)); 2460 tx_mcs_set |= mcs << (nss * 2); 2461 } 2462 2463 return tx_mcs_set; 2464 } 2465 2466 static void ath10k_peer_assoc_h_vht(struct ath10k *ar, 2467 struct ieee80211_vif *vif, 2468 struct ieee80211_sta *sta, 2469 struct wmi_peer_assoc_complete_arg *arg) 2470 { 2471 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2472 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2473 struct cfg80211_chan_def def; 2474 enum nl80211_band band; 2475 const u16 *vht_mcs_mask; 2476 u8 ampdu_factor; 2477 u8 max_nss, vht_mcs; 2478 int i; 2479 2480 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2481 return; 2482 2483 if (!vht_cap->vht_supported) 2484 return; 2485 2486 band = def.chan->band; 2487 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2488 2489 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2490 return; 2491 2492 arg->peer_flags |= ar->wmi.peer_flags->vht; 2493 2494 if (def.chan->band == NL80211_BAND_2GHZ) 2495 arg->peer_flags |= ar->wmi.peer_flags->vht_2g; 2496 2497 arg->peer_vht_caps = vht_cap->cap; 2498 2499 ampdu_factor = (vht_cap->cap & 2500 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> 2501 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 2502 2503 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to 2504 * zero in VHT IE. Using it would result in degraded throughput. 2505 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep 2506 * it if VHT max_mpdu is smaller. 
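 * (The VHT candidate computed below is
 * 2^(IEEE80211_HT_MAX_AMPDU_FACTOR + ampdu_factor) - 1 octets, e.g. the
 * maximum exponent of 7 with the standard factor of 13 gives
 * 2^20 - 1 = 1048575.)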
2507 */ 2508 arg->peer_max_mpdu = max(arg->peer_max_mpdu, 2509 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2510 ampdu_factor)) - 1); 2511 2512 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2513 arg->peer_flags |= ar->wmi.peer_flags->bw80; 2514 2515 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) 2516 arg->peer_flags |= ar->wmi.peer_flags->bw160; 2517 2518 /* Calculate peer NSS capability from VHT capabilities if STA 2519 * supports VHT. 2520 */ 2521 for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) { 2522 vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> 2523 (2 * i) & 3; 2524 2525 if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) && 2526 vht_mcs_mask[i]) 2527 max_nss = i + 1; 2528 } 2529 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2530 arg->peer_vht_rates.rx_max_rate = 2531 __le16_to_cpu(vht_cap->vht_mcs.rx_highest); 2532 arg->peer_vht_rates.rx_mcs_set = 2533 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); 2534 arg->peer_vht_rates.tx_max_rate = 2535 __le16_to_cpu(vht_cap->vht_mcs.tx_highest); 2536 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit( 2537 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); 2538 2539 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", 2540 sta->addr, arg->peer_max_mpdu, arg->peer_flags); 2541 2542 if (arg->peer_vht_rates.rx_max_rate && 2543 (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) { 2544 switch (arg->peer_vht_rates.rx_max_rate) { 2545 case 1560: 2546 /* Must be 2x2 at 160Mhz is all it can do. */ 2547 arg->peer_bw_rxnss_override = 2; 2548 break; 2549 case 780: 2550 /* Can only do 1x1 at 160Mhz (Long Guard Interval) */ 2551 arg->peer_bw_rxnss_override = 1; 2552 break; 2553 } 2554 } 2555 } 2556 2557 static void ath10k_peer_assoc_h_qos(struct ath10k *ar, 2558 struct ieee80211_vif *vif, 2559 struct ieee80211_sta *sta, 2560 struct wmi_peer_assoc_complete_arg *arg) 2561 { 2562 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2563 2564 switch (arvif->vdev_type) { 2565 case WMI_VDEV_TYPE_AP: 2566 if (sta->wme) 2567 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2568 2569 if (sta->wme && sta->uapsd_queues) { 2570 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd; 2571 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; 2572 } 2573 break; 2574 case WMI_VDEV_TYPE_STA: 2575 if (sta->wme) 2576 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2577 break; 2578 case WMI_VDEV_TYPE_IBSS: 2579 if (sta->wme) 2580 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2581 break; 2582 default: 2583 break; 2584 } 2585 2586 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", 2587 sta->addr, !!(arg->peer_flags & 2588 arvif->ar->wmi.peer_flags->qos)); 2589 } 2590 2591 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) 2592 { 2593 return sta->supp_rates[NL80211_BAND_2GHZ] >> 2594 ATH10K_MAC_FIRST_OFDM_RATE_IDX; 2595 } 2596 2597 static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, 2598 struct ieee80211_sta *sta) 2599 { 2600 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { 2601 switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 2602 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 2603 return MODE_11AC_VHT160; 2604 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: 2605 return MODE_11AC_VHT80_80; 2606 default: 2607 /* not sure if this is a valid case? 
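 * Unexpected SUPP_CHAN_WIDTH encodings simply fall back to VHT160 below.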
*/ 2608 return MODE_11AC_VHT160; 2609 } 2610 } 2611 2612 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2613 return MODE_11AC_VHT80; 2614 2615 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2616 return MODE_11AC_VHT40; 2617 2618 if (sta->bandwidth == IEEE80211_STA_RX_BW_20) 2619 return MODE_11AC_VHT20; 2620 2621 return MODE_UNKNOWN; 2622 } 2623 2624 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, 2625 struct ieee80211_vif *vif, 2626 struct ieee80211_sta *sta, 2627 struct wmi_peer_assoc_complete_arg *arg) 2628 { 2629 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2630 struct cfg80211_chan_def def; 2631 enum nl80211_band band; 2632 const u8 *ht_mcs_mask; 2633 const u16 *vht_mcs_mask; 2634 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2635 2636 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2637 return; 2638 2639 band = def.chan->band; 2640 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2641 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2642 2643 switch (band) { 2644 case NL80211_BAND_2GHZ: 2645 if (sta->vht_cap.vht_supported && 2646 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2647 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2648 phymode = MODE_11AC_VHT40; 2649 else 2650 phymode = MODE_11AC_VHT20; 2651 } else if (sta->ht_cap.ht_supported && 2652 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2653 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2654 phymode = MODE_11NG_HT40; 2655 else 2656 phymode = MODE_11NG_HT20; 2657 } else if (ath10k_mac_sta_has_ofdm_only(sta)) { 2658 phymode = MODE_11G; 2659 } else { 2660 phymode = MODE_11B; 2661 } 2662 2663 break; 2664 case NL80211_BAND_5GHZ: 2665 /* 2666 * Check VHT first. 2667 */ 2668 if (sta->vht_cap.vht_supported && 2669 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2670 phymode = ath10k_mac_get_phymode_vht(ar, sta); 2671 } else if (sta->ht_cap.ht_supported && 2672 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2673 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) 2674 phymode = MODE_11NA_HT40; 2675 else 2676 phymode = MODE_11NA_HT20; 2677 } else { 2678 phymode = MODE_11A; 2679 } 2680 2681 break; 2682 default: 2683 break; 2684 } 2685 2686 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", 2687 sta->addr, ath10k_wmi_phymode_str(phymode)); 2688 2689 arg->peer_phymode = phymode; 2690 WARN_ON(phymode == MODE_UNKNOWN); 2691 } 2692 2693 static int ath10k_peer_assoc_prepare(struct ath10k *ar, 2694 struct ieee80211_vif *vif, 2695 struct ieee80211_sta *sta, 2696 struct wmi_peer_assoc_complete_arg *arg) 2697 { 2698 lockdep_assert_held(&ar->conf_mutex); 2699 2700 memset(arg, 0, sizeof(*arg)); 2701 2702 ath10k_peer_assoc_h_basic(ar, vif, sta, arg); 2703 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); 2704 ath10k_peer_assoc_h_rates(ar, vif, sta, arg); 2705 ath10k_peer_assoc_h_ht(ar, vif, sta, arg); 2706 ath10k_peer_assoc_h_vht(ar, vif, sta, arg); 2707 ath10k_peer_assoc_h_qos(ar, vif, sta, arg); 2708 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); 2709 2710 return 0; 2711 } 2712 2713 static const u32 ath10k_smps_map[] = { 2714 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, 2715 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, 2716 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 2717 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 2718 }; 2719 2720 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, 2721 const u8 *addr, 2722 const struct ieee80211_sta_ht_cap *ht_cap) 2723 { 2724 int smps; 2725 2726 if (!ht_cap->ht_supported) 2727 return 0; 2728 2729 smps = ht_cap->cap & 
IEEE80211_HT_CAP_SM_PS; 2730 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2731 2732 if (smps >= ARRAY_SIZE(ath10k_smps_map)) 2733 return -EINVAL; 2734 2735 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, 2736 WMI_PEER_SMPS_STATE, 2737 ath10k_smps_map[smps]); 2738 } 2739 2740 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, 2741 struct ieee80211_vif *vif, 2742 struct ieee80211_sta_vht_cap vht_cap) 2743 { 2744 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2745 int ret; 2746 u32 param; 2747 u32 value; 2748 2749 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) 2750 return 0; 2751 2752 if (!(ar->vht_cap_info & 2753 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2754 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 2755 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2756 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) 2757 return 0; 2758 2759 param = ar->wmi.vdev_param->txbf; 2760 value = 0; 2761 2762 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) 2763 return 0; 2764 2765 /* The following logic is correct. If a remote STA advertises support 2766 * for being a beamformer then we should enable us being a beamformee. 2767 */ 2768 2769 if (ar->vht_cap_info & 2770 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2771 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 2772 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 2773 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2774 2775 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 2776 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; 2777 } 2778 2779 if (ar->vht_cap_info & 2780 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2781 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 2782 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 2783 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2784 2785 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 2786 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; 2787 } 2788 2789 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) 2790 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2791 2792 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) 2793 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2794 2795 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); 2796 if (ret) { 2797 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", 2798 value, ret); 2799 return ret; 2800 } 2801 2802 return 0; 2803 } 2804 2805 /* can be called only in mac80211 callbacks due to `key_count` usage */ 2806 static void ath10k_bss_assoc(struct ieee80211_hw *hw, 2807 struct ieee80211_vif *vif, 2808 struct ieee80211_bss_conf *bss_conf) 2809 { 2810 struct ath10k *ar = hw->priv; 2811 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2812 struct ieee80211_sta_ht_cap ht_cap; 2813 struct ieee80211_sta_vht_cap vht_cap; 2814 struct wmi_peer_assoc_complete_arg peer_arg; 2815 struct ieee80211_sta *ap_sta; 2816 int ret; 2817 2818 lockdep_assert_held(&ar->conf_mutex); 2819 2820 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2821 arvif->vdev_id, arvif->bssid, arvif->aid); 2822 2823 rcu_read_lock(); 2824 2825 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2826 if (!ap_sta) { 2827 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", 2828 bss_conf->bssid, arvif->vdev_id); 2829 rcu_read_unlock(); 2830 return; 2831 } 2832 2833 /* ap_sta must be accessed only within rcu section which must be left 2834 * before calling ath10k_setup_peer_smps() which might sleep. 
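 * The HT/VHT capabilities are therefore copied by value so they remain
 * usable after rcu_read_unlock().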
2835 */ 2836 ht_cap = ap_sta->ht_cap; 2837 vht_cap = ap_sta->vht_cap; 2838 2839 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); 2840 if (ret) { 2841 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", 2842 bss_conf->bssid, arvif->vdev_id, ret); 2843 rcu_read_unlock(); 2844 return; 2845 } 2846 2847 rcu_read_unlock(); 2848 2849 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2850 if (ret) { 2851 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", 2852 bss_conf->bssid, arvif->vdev_id, ret); 2853 return; 2854 } 2855 2856 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); 2857 if (ret) { 2858 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", 2859 arvif->vdev_id, ret); 2860 return; 2861 } 2862 2863 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2864 if (ret) { 2865 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", 2866 arvif->vdev_id, bss_conf->bssid, ret); 2867 return; 2868 } 2869 2870 ath10k_dbg(ar, ATH10K_DBG_MAC, 2871 "mac vdev %d up (associated) bssid %pM aid %d\n", 2872 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 2873 2874 WARN_ON(arvif->is_up); 2875 2876 arvif->aid = bss_conf->aid; 2877 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2878 2879 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2880 if (ret) { 2881 ath10k_warn(ar, "failed to set vdev %d up: %d\n", 2882 arvif->vdev_id, ret); 2883 return; 2884 } 2885 2886 arvif->is_up = true; 2887 2888 /* Workaround: Some firmware revisions (tested with qca6174 2889 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be 2890 * poked with peer param command. 2891 */ 2892 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, 2893 WMI_PEER_DUMMY_VAR, 1); 2894 if (ret) { 2895 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", 2896 arvif->bssid, arvif->vdev_id, ret); 2897 return; 2898 } 2899 } 2900 2901 static void ath10k_bss_disassoc(struct ieee80211_hw *hw, 2902 struct ieee80211_vif *vif) 2903 { 2904 struct ath10k *ar = hw->priv; 2905 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2906 struct ieee80211_sta_vht_cap vht_cap = {}; 2907 int ret; 2908 2909 lockdep_assert_held(&ar->conf_mutex); 2910 2911 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2912 arvif->vdev_id, arvif->bssid); 2913 2914 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 2915 if (ret) 2916 ath10k_warn(ar, "failed to down vdev %i: %d\n", 2917 arvif->vdev_id, ret); 2918 2919 arvif->def_wep_key_idx = -1; 2920 2921 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2922 if (ret) { 2923 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", 2924 arvif->vdev_id, ret); 2925 return; 2926 } 2927 2928 arvif->is_up = false; 2929 2930 cancel_delayed_work_sync(&arvif->connection_loss_work); 2931 } 2932 2933 static int ath10k_station_assoc(struct ath10k *ar, 2934 struct ieee80211_vif *vif, 2935 struct ieee80211_sta *sta, 2936 bool reassoc) 2937 { 2938 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2939 struct wmi_peer_assoc_complete_arg peer_arg; 2940 int ret = 0; 2941 2942 lockdep_assert_held(&ar->conf_mutex); 2943 2944 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); 2945 if (ret) { 2946 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", 2947 sta->addr, arvif->vdev_id, ret); 2948 return ret; 2949 } 2950 2951 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2952 if (ret) { 2953 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", 2954 
sta->addr, arvif->vdev_id, ret); 2955 return ret; 2956 } 2957 2958 /* Re-assoc is run only to update supported rates for given station. It 2959 * doesn't make much sense to reconfigure the peer completely. 2960 */ 2961 if (!reassoc) { 2962 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, 2963 &sta->ht_cap); 2964 if (ret) { 2965 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", 2966 arvif->vdev_id, ret); 2967 return ret; 2968 } 2969 2970 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 2971 if (ret) { 2972 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", 2973 sta->addr, arvif->vdev_id, ret); 2974 return ret; 2975 } 2976 2977 if (!sta->wme) { 2978 arvif->num_legacy_stations++; 2979 ret = ath10k_recalc_rtscts_prot(arvif); 2980 if (ret) { 2981 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2982 arvif->vdev_id, ret); 2983 return ret; 2984 } 2985 } 2986 2987 /* Plumb cached keys only for static WEP */ 2988 if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) { 2989 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 2990 if (ret) { 2991 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", 2992 arvif->vdev_id, ret); 2993 return ret; 2994 } 2995 } 2996 } 2997 2998 return ret; 2999 } 3000 3001 static int ath10k_station_disassoc(struct ath10k *ar, 3002 struct ieee80211_vif *vif, 3003 struct ieee80211_sta *sta) 3004 { 3005 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3006 int ret = 0; 3007 3008 lockdep_assert_held(&ar->conf_mutex); 3009 3010 if (!sta->wme) { 3011 arvif->num_legacy_stations--; 3012 ret = ath10k_recalc_rtscts_prot(arvif); 3013 if (ret) { 3014 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 3015 arvif->vdev_id, ret); 3016 return ret; 3017 } 3018 } 3019 3020 ret = ath10k_clear_peer_keys(arvif, sta->addr); 3021 if (ret) { 3022 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", 3023 arvif->vdev_id, ret); 3024 return ret; 3025 } 3026 3027 return ret; 3028 } 3029 3030 /**************/ 3031 /* Regulatory */ 3032 /**************/ 3033 3034 static int ath10k_update_channel_list(struct ath10k *ar) 3035 { 3036 struct ieee80211_hw *hw = ar->hw; 3037 struct ieee80211_supported_band **bands; 3038 enum nl80211_band band; 3039 struct ieee80211_channel *channel; 3040 struct wmi_scan_chan_list_arg arg = {0}; 3041 struct wmi_channel_arg *ch; 3042 bool passive; 3043 int len; 3044 int ret; 3045 int i; 3046 3047 lockdep_assert_held(&ar->conf_mutex); 3048 3049 bands = hw->wiphy->bands; 3050 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3051 if (!bands[band]) 3052 continue; 3053 3054 for (i = 0; i < bands[band]->n_channels; i++) { 3055 if (bands[band]->channels[i].flags & 3056 IEEE80211_CHAN_DISABLED) 3057 continue; 3058 3059 arg.n_channels++; 3060 } 3061 } 3062 3063 len = sizeof(struct wmi_channel_arg) * arg.n_channels; 3064 arg.channels = kzalloc(len, GFP_KERNEL); 3065 if (!arg.channels) 3066 return -ENOMEM; 3067 3068 ch = arg.channels; 3069 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3070 if (!bands[band]) 3071 continue; 3072 3073 for (i = 0; i < bands[band]->n_channels; i++) { 3074 channel = &bands[band]->channels[i]; 3075 3076 if (channel->flags & IEEE80211_CHAN_DISABLED) 3077 continue; 3078 3079 ch->allow_ht = true; 3080 3081 /* FIXME: when should we really allow VHT? 
*/ 3082 ch->allow_vht = true; 3083 3084 ch->allow_ibss = 3085 !(channel->flags & IEEE80211_CHAN_NO_IR); 3086 3087 ch->ht40plus = 3088 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); 3089 3090 ch->chan_radar = 3091 !!(channel->flags & IEEE80211_CHAN_RADAR); 3092 3093 passive = channel->flags & IEEE80211_CHAN_NO_IR; 3094 ch->passive = passive; 3095 3096 /* the firmware is ignoring the "radar" flag of the 3097 * channel and is scanning actively using Probe Requests 3098 * on "Radar detection"/DFS channels which are not 3099 * marked as "available" 3100 */ 3101 ch->passive |= ch->chan_radar; 3102 3103 ch->freq = channel->center_freq; 3104 ch->band_center_freq1 = channel->center_freq; 3105 ch->min_power = 0; 3106 ch->max_power = channel->max_power * 2; 3107 ch->max_reg_power = channel->max_reg_power * 2; 3108 ch->max_antenna_gain = channel->max_antenna_gain * 2; 3109 ch->reg_class_id = 0; /* FIXME */ 3110 3111 /* FIXME: why use only legacy modes, why not any 3112 * HT/VHT modes? Would that even make any 3113 * difference? 3114 */ 3115 if (channel->band == NL80211_BAND_2GHZ) 3116 ch->mode = MODE_11G; 3117 else 3118 ch->mode = MODE_11A; 3119 3120 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) 3121 continue; 3122 3123 ath10k_dbg(ar, ATH10K_DBG_WMI, 3124 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", 3125 ch - arg.channels, arg.n_channels, 3126 ch->freq, ch->max_power, ch->max_reg_power, 3127 ch->max_antenna_gain, ch->mode); 3128 3129 ch++; 3130 } 3131 } 3132 3133 ret = ath10k_wmi_scan_chan_list(ar, &arg); 3134 kfree(arg.channels); 3135 3136 return ret; 3137 } 3138 3139 static enum wmi_dfs_region 3140 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) 3141 { 3142 switch (dfs_region) { 3143 case NL80211_DFS_UNSET: 3144 return WMI_UNINIT_DFS_DOMAIN; 3145 case NL80211_DFS_FCC: 3146 return WMI_FCC_DFS_DOMAIN; 3147 case NL80211_DFS_ETSI: 3148 return WMI_ETSI_DFS_DOMAIN; 3149 case NL80211_DFS_JP: 3150 return WMI_MKK4_DFS_DOMAIN; 3151 } 3152 return WMI_UNINIT_DFS_DOMAIN; 3153 } 3154 3155 static void ath10k_regd_update(struct ath10k *ar) 3156 { 3157 struct reg_dmn_pair_mapping *regpair; 3158 int ret; 3159 enum wmi_dfs_region wmi_dfs_reg; 3160 enum nl80211_dfs_regions nl_dfs_reg; 3161 3162 lockdep_assert_held(&ar->conf_mutex); 3163 3164 ret = ath10k_update_channel_list(ar); 3165 if (ret) 3166 ath10k_warn(ar, "failed to update channel list: %d\n", ret); 3167 3168 regpair = ar->ath_common.regulatory.regpair; 3169 3170 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3171 nl_dfs_reg = ar->dfs_detector->region; 3172 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); 3173 } else { 3174 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; 3175 } 3176 3177 /* Target allows setting up per-band regdomain but ath_common provides 3178 * a combined one only 3179 */ 3180 ret = ath10k_wmi_pdev_set_regdomain(ar, 3181 regpair->reg_domain, 3182 regpair->reg_domain, /* 2ghz */ 3183 regpair->reg_domain, /* 5ghz */ 3184 regpair->reg_2ghz_ctl, 3185 regpair->reg_5ghz_ctl, 3186 wmi_dfs_reg); 3187 if (ret) 3188 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); 3189 } 3190 3191 static void ath10k_mac_update_channel_list(struct ath10k *ar, 3192 struct ieee80211_supported_band *band) 3193 { 3194 int i; 3195 3196 if (ar->low_5ghz_chan && ar->high_5ghz_chan) { 3197 for (i = 0; i < band->n_channels; i++) { 3198 if (band->channels[i].center_freq < ar->low_5ghz_chan || 3199 band->channels[i].center_freq > ar->high_5ghz_chan) 3200 band->channels[i].flags |= 3201 IEEE80211_CHAN_DISABLED; 
3202 } 3203 } 3204 } 3205 3206 static void ath10k_reg_notifier(struct wiphy *wiphy, 3207 struct regulatory_request *request) 3208 { 3209 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 3210 struct ath10k *ar = hw->priv; 3211 bool result; 3212 3213 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); 3214 3215 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3216 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", 3217 request->dfs_region); 3218 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 3219 request->dfs_region); 3220 if (!result) 3221 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", 3222 request->dfs_region); 3223 } 3224 3225 mutex_lock(&ar->conf_mutex); 3226 if (ar->state == ATH10K_STATE_ON) 3227 ath10k_regd_update(ar); 3228 mutex_unlock(&ar->conf_mutex); 3229 3230 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 3231 ath10k_mac_update_channel_list(ar, 3232 ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); 3233 } 3234 3235 static void ath10k_stop_radar_confirmation(struct ath10k *ar) 3236 { 3237 spin_lock_bh(&ar->data_lock); 3238 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED; 3239 spin_unlock_bh(&ar->data_lock); 3240 3241 cancel_work_sync(&ar->radar_confirmation_work); 3242 } 3243 3244 /***************/ 3245 /* TX handlers */ 3246 /***************/ 3247 3248 enum ath10k_mac_tx_path { 3249 ATH10K_MAC_TX_HTT, 3250 ATH10K_MAC_TX_HTT_MGMT, 3251 ATH10K_MAC_TX_WMI_MGMT, 3252 ATH10K_MAC_TX_UNKNOWN, 3253 }; 3254 3255 void ath10k_mac_tx_lock(struct ath10k *ar, int reason) 3256 { 3257 lockdep_assert_held(&ar->htt.tx_lock); 3258 3259 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3260 ar->tx_paused |= BIT(reason); 3261 ieee80211_stop_queues(ar->hw); 3262 } 3263 3264 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, 3265 struct ieee80211_vif *vif) 3266 { 3267 struct ath10k *ar = data; 3268 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3269 3270 if (arvif->tx_paused) 3271 return; 3272 3273 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3274 } 3275 3276 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) 3277 { 3278 lockdep_assert_held(&ar->htt.tx_lock); 3279 3280 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3281 ar->tx_paused &= ~BIT(reason); 3282 3283 if (ar->tx_paused) 3284 return; 3285 3286 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3287 IEEE80211_IFACE_ITER_RESUME_ALL, 3288 ath10k_mac_tx_unlock_iter, 3289 ar); 3290 3291 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue); 3292 } 3293 3294 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason) 3295 { 3296 struct ath10k *ar = arvif->ar; 3297 3298 lockdep_assert_held(&ar->htt.tx_lock); 3299 3300 WARN_ON(reason >= BITS_PER_LONG); 3301 arvif->tx_paused |= BIT(reason); 3302 ieee80211_stop_queue(ar->hw, arvif->vdev_id); 3303 } 3304 3305 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason) 3306 { 3307 struct ath10k *ar = arvif->ar; 3308 3309 lockdep_assert_held(&ar->htt.tx_lock); 3310 3311 WARN_ON(reason >= BITS_PER_LONG); 3312 arvif->tx_paused &= ~BIT(reason); 3313 3314 if (ar->tx_paused) 3315 return; 3316 3317 if (arvif->tx_paused) 3318 return; 3319 3320 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3321 } 3322 3323 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, 3324 enum wmi_tlv_tx_pause_id pause_id, 3325 enum wmi_tlv_tx_pause_action action) 3326 { 3327 struct ath10k *ar = arvif->ar; 3328 3329 lockdep_assert_held(&ar->htt.tx_lock); 3330 3331 switch (action) { 3332 case 
WMI_TLV_TX_PAUSE_ACTION_STOP: 3333 ath10k_mac_vif_tx_lock(arvif, pause_id); 3334 break; 3335 case WMI_TLV_TX_PAUSE_ACTION_WAKE: 3336 ath10k_mac_vif_tx_unlock(arvif, pause_id); 3337 break; 3338 default: 3339 ath10k_dbg(ar, ATH10K_DBG_BOOT, 3340 "received unknown tx pause action %d on vdev %i, ignoring\n", 3341 action, arvif->vdev_id); 3342 break; 3343 } 3344 } 3345 3346 struct ath10k_mac_tx_pause { 3347 u32 vdev_id; 3348 enum wmi_tlv_tx_pause_id pause_id; 3349 enum wmi_tlv_tx_pause_action action; 3350 }; 3351 3352 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, 3353 struct ieee80211_vif *vif) 3354 { 3355 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3356 struct ath10k_mac_tx_pause *arg = data; 3357 3358 if (arvif->vdev_id != arg->vdev_id) 3359 return; 3360 3361 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action); 3362 } 3363 3364 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, 3365 enum wmi_tlv_tx_pause_id pause_id, 3366 enum wmi_tlv_tx_pause_action action) 3367 { 3368 struct ath10k_mac_tx_pause arg = { 3369 .vdev_id = vdev_id, 3370 .pause_id = pause_id, 3371 .action = action, 3372 }; 3373 3374 spin_lock_bh(&ar->htt.tx_lock); 3375 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3376 IEEE80211_IFACE_ITER_RESUME_ALL, 3377 ath10k_mac_handle_tx_pause_iter, 3378 &arg); 3379 spin_unlock_bh(&ar->htt.tx_lock); 3380 } 3381 3382 static enum ath10k_hw_txrx_mode 3383 ath10k_mac_tx_h_get_txmode(struct ath10k *ar, 3384 struct ieee80211_vif *vif, 3385 struct ieee80211_sta *sta, 3386 struct sk_buff *skb) 3387 { 3388 const struct ieee80211_hdr *hdr = (void *)skb->data; 3389 const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); 3390 __le16 fc = hdr->frame_control; 3391 3392 if (!vif || vif->type == NL80211_IFTYPE_MONITOR) 3393 return ATH10K_HW_TXRX_RAW; 3394 3395 if (ieee80211_is_mgmt(fc)) 3396 return ATH10K_HW_TXRX_MGMT; 3397 3398 /* Workaround: 3399 * 3400 * NullFunc frames are mostly used to ping if a client or AP are still 3401 * reachable and responsive. This implies tx status reports must be 3402 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can 3403 * come to a conclusion that the other end disappeared and tear down 3404 * BSS connection or it can never disconnect from BSS/client (which is 3405 * the case). 3406 * 3407 * Firmware with HTT older than 3.0 delivers incorrect tx status for 3408 * NullFunc frames to driver. However there's a HTT Mgmt Tx command 3409 * which seems to deliver correct tx reports for NullFunc frames. The 3410 * downside of using it is it ignores client powersave state so it can 3411 * end up disconnecting sleeping clients in AP mode. It should fix STA 3412 * mode though because AP don't sleep. 3413 */ 3414 if (ar->htt.target_version_major < 3 && 3415 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && 3416 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3417 ar->running_fw->fw_file.fw_features)) 3418 return ATH10K_HW_TXRX_MGMT; 3419 3420 /* Workaround: 3421 * 3422 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for 3423 * NativeWifi txmode - it selects AP key instead of peer key. It seems 3424 * to work with Ethernet txmode so use it. 3425 * 3426 * FIXME: Check if raw mode works with TDLS. 
3427 */ 3428 if (ieee80211_is_data_present(fc) && sta && sta->tdls) 3429 return ATH10K_HW_TXRX_ETHERNET; 3430 3431 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) || 3432 skb_cb->flags & ATH10K_SKB_F_RAW_TX) 3433 return ATH10K_HW_TXRX_RAW; 3434 3435 return ATH10K_HW_TXRX_NATIVE_WIFI; 3436 } 3437 3438 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, 3439 struct sk_buff *skb) 3440 { 3441 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3442 const struct ieee80211_hdr *hdr = (void *)skb->data; 3443 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | 3444 IEEE80211_TX_CTL_INJECTED; 3445 3446 if (!ieee80211_has_protected(hdr->frame_control)) 3447 return false; 3448 3449 if ((info->flags & mask) == mask) 3450 return false; 3451 3452 if (vif) 3453 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; 3454 3455 return true; 3456 } 3457 3458 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS 3459 * Control in the header. 3460 */ 3461 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) 3462 { 3463 struct ieee80211_hdr *hdr = (void *)skb->data; 3464 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3465 u8 *qos_ctl; 3466 3467 if (!ieee80211_is_data_qos(hdr->frame_control)) 3468 return; 3469 3470 qos_ctl = ieee80211_get_qos_ctl(hdr); 3471 memmove(skb->data + IEEE80211_QOS_CTL_LEN, 3472 skb->data, (void *)qos_ctl - (void *)skb->data); 3473 skb_pull(skb, IEEE80211_QOS_CTL_LEN); 3474 3475 /* Some firmware revisions don't handle sending QoS NullFunc well. 3476 * These frames are mainly used for CQM purposes so it doesn't really 3477 * matter whether QoS NullFunc or NullFunc are sent. 3478 */ 3479 hdr = (void *)skb->data; 3480 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) 3481 cb->flags &= ~ATH10K_SKB_F_QOS; 3482 3483 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 3484 } 3485 3486 static void ath10k_tx_h_8023(struct sk_buff *skb) 3487 { 3488 struct ieee80211_hdr *hdr; 3489 struct rfc1042_hdr *rfc1042; 3490 struct ethhdr *eth; 3491 size_t hdrlen; 3492 u8 da[ETH_ALEN]; 3493 u8 sa[ETH_ALEN]; 3494 __be16 type; 3495 3496 hdr = (void *)skb->data; 3497 hdrlen = ieee80211_hdrlen(hdr->frame_control); 3498 rfc1042 = (void *)skb->data + hdrlen; 3499 3500 ether_addr_copy(da, ieee80211_get_DA(hdr)); 3501 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 3502 type = rfc1042->snap_type; 3503 3504 skb_pull(skb, hdrlen + sizeof(*rfc1042)); 3505 skb_push(skb, sizeof(*eth)); 3506 3507 eth = (void *)skb->data; 3508 ether_addr_copy(eth->h_dest, da); 3509 ether_addr_copy(eth->h_source, sa); 3510 eth->h_proto = type; 3511 } 3512 3513 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, 3514 struct ieee80211_vif *vif, 3515 struct sk_buff *skb) 3516 { 3517 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3518 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3519 3520 /* This is case only for P2P_GO */ 3521 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 3522 return; 3523 3524 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { 3525 spin_lock_bh(&ar->data_lock); 3526 if (arvif->u.ap.noa_data) 3527 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, 3528 GFP_ATOMIC)) 3529 skb_put_data(skb, arvif->u.ap.noa_data, 3530 arvif->u.ap.noa_len); 3531 spin_unlock_bh(&ar->data_lock); 3532 } 3533 } 3534 3535 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, 3536 struct ieee80211_vif *vif, 3537 struct ieee80211_txq *txq, 3538 struct sk_buff *skb, u16 airtime) 3539 { 3540 struct ieee80211_hdr *hdr = (void *)skb->data; 
3541 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3542 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3543 bool is_data = ieee80211_is_data(hdr->frame_control) || 3544 ieee80211_is_data_qos(hdr->frame_control); 3545 3546 cb->flags = 0; 3547 if (!ath10k_tx_h_use_hwcrypto(vif, skb)) 3548 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3549 3550 if (ieee80211_is_mgmt(hdr->frame_control)) 3551 cb->flags |= ATH10K_SKB_F_MGMT; 3552 3553 if (ieee80211_is_data_qos(hdr->frame_control)) 3554 cb->flags |= ATH10K_SKB_F_QOS; 3555 3556 /* Data frames encrypted in software will be posted to firmware 3557 * with tx encap mode set to RAW. Ex: Multicast traffic generated 3558 * for a specific VLAN group will always be encrypted in software. 3559 */ 3560 if (is_data && ieee80211_has_protected(hdr->frame_control) && 3561 !info->control.hw_key) { 3562 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3563 cb->flags |= ATH10K_SKB_F_RAW_TX; 3564 } 3565 3566 cb->vif = vif; 3567 cb->txq = txq; 3568 cb->airtime_est = airtime; 3569 } 3570 3571 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) 3572 { 3573 /* FIXME: Not really sure since when the behaviour changed. At some 3574 * point new firmware stopped requiring creation of peer entries for 3575 * offchannel tx (and actually creating them causes issues with wmi-htc 3576 * tx credit replenishment and reliability). Assuming it's at least 3.4 3577 * because that's when the `freq` was introduced to TX_FRM HTT command. 3578 */ 3579 return (ar->htt.target_version_major >= 3 && 3580 ar->htt.target_version_minor >= 4 && 3581 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); 3582 } 3583 3584 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) 3585 { 3586 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 3587 int ret = 0; 3588 3589 spin_lock_bh(&ar->data_lock); 3590 3591 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { 3592 ath10k_warn(ar, "wmi mgmt tx queue is full\n"); 3593 ret = -ENOSPC; 3594 goto unlock; 3595 } 3596 3597 __skb_queue_tail(q, skb); 3598 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 3599 3600 unlock: 3601 spin_unlock_bh(&ar->data_lock); 3602 3603 return ret; 3604 } 3605 3606 static enum ath10k_mac_tx_path 3607 ath10k_mac_tx_h_get_txpath(struct ath10k *ar, 3608 struct sk_buff *skb, 3609 enum ath10k_hw_txrx_mode txmode) 3610 { 3611 switch (txmode) { 3612 case ATH10K_HW_TXRX_RAW: 3613 case ATH10K_HW_TXRX_NATIVE_WIFI: 3614 case ATH10K_HW_TXRX_ETHERNET: 3615 return ATH10K_MAC_TX_HTT; 3616 case ATH10K_HW_TXRX_MGMT: 3617 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3618 ar->running_fw->fw_file.fw_features) || 3619 test_bit(WMI_SERVICE_MGMT_TX_WMI, 3620 ar->wmi.svc_map)) 3621 return ATH10K_MAC_TX_WMI_MGMT; 3622 else if (ar->htt.target_version_major >= 3) 3623 return ATH10K_MAC_TX_HTT; 3624 else 3625 return ATH10K_MAC_TX_HTT_MGMT; 3626 } 3627 3628 return ATH10K_MAC_TX_UNKNOWN; 3629 } 3630 3631 static int ath10k_mac_tx_submit(struct ath10k *ar, 3632 enum ath10k_hw_txrx_mode txmode, 3633 enum ath10k_mac_tx_path txpath, 3634 struct sk_buff *skb) 3635 { 3636 struct ath10k_htt *htt = &ar->htt; 3637 int ret = -EINVAL; 3638 3639 switch (txpath) { 3640 case ATH10K_MAC_TX_HTT: 3641 ret = ath10k_htt_tx(htt, txmode, skb); 3642 break; 3643 case ATH10K_MAC_TX_HTT_MGMT: 3644 ret = ath10k_htt_mgmt_tx(htt, skb); 3645 break; 3646 case ATH10K_MAC_TX_WMI_MGMT: 3647 ret = ath10k_mac_tx_wmi_mgmt(ar, skb); 3648 break; 3649 case ATH10K_MAC_TX_UNKNOWN: 3650 WARN_ON_ONCE(1); 3651 ret = -EINVAL; 3652 break; 3653 } 3654 3655 if (ret) { 
3656 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", 3657 ret); 3658 ieee80211_free_txskb(ar->hw, skb); 3659 } 3660 3661 return ret; 3662 } 3663 3664 /* This function consumes the sk_buff regardless of return value as far as 3665 * caller is concerned so no freeing is necessary afterwards. 3666 */ 3667 static int ath10k_mac_tx(struct ath10k *ar, 3668 struct ieee80211_vif *vif, 3669 enum ath10k_hw_txrx_mode txmode, 3670 enum ath10k_mac_tx_path txpath, 3671 struct sk_buff *skb) 3672 { 3673 struct ieee80211_hw *hw = ar->hw; 3674 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3675 const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); 3676 int ret; 3677 3678 /* We should disable CCK RATE due to P2P */ 3679 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 3680 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); 3681 3682 switch (txmode) { 3683 case ATH10K_HW_TXRX_MGMT: 3684 case ATH10K_HW_TXRX_NATIVE_WIFI: 3685 ath10k_tx_h_nwifi(hw, skb); 3686 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); 3687 ath10k_tx_h_seq_no(vif, skb); 3688 break; 3689 case ATH10K_HW_TXRX_ETHERNET: 3690 ath10k_tx_h_8023(skb); 3691 break; 3692 case ATH10K_HW_TXRX_RAW: 3693 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) && 3694 !(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) { 3695 WARN_ON_ONCE(1); 3696 ieee80211_free_txskb(hw, skb); 3697 return -ENOTSUPP; 3698 } 3699 } 3700 3701 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 3702 if (!ath10k_mac_tx_frm_has_freq(ar)) { 3703 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n", 3704 skb); 3705 3706 skb_queue_tail(&ar->offchan_tx_queue, skb); 3707 ieee80211_queue_work(hw, &ar->offchan_tx_work); 3708 return 0; 3709 } 3710 } 3711 3712 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb); 3713 if (ret) { 3714 ath10k_warn(ar, "failed to submit frame: %d\n", ret); 3715 return ret; 3716 } 3717 3718 return 0; 3719 } 3720 3721 void ath10k_offchan_tx_purge(struct ath10k *ar) 3722 { 3723 struct sk_buff *skb; 3724 3725 for (;;) { 3726 skb = skb_dequeue(&ar->offchan_tx_queue); 3727 if (!skb) 3728 break; 3729 3730 ieee80211_free_txskb(ar->hw, skb); 3731 } 3732 } 3733 3734 void ath10k_offchan_tx_work(struct work_struct *work) 3735 { 3736 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); 3737 struct ath10k_peer *peer; 3738 struct ath10k_vif *arvif; 3739 enum ath10k_hw_txrx_mode txmode; 3740 enum ath10k_mac_tx_path txpath; 3741 struct ieee80211_hdr *hdr; 3742 struct ieee80211_vif *vif; 3743 struct ieee80211_sta *sta; 3744 struct sk_buff *skb; 3745 const u8 *peer_addr; 3746 int vdev_id; 3747 int ret; 3748 unsigned long time_left; 3749 bool tmp_peer_created = false; 3750 3751 /* FW requirement: We must create a peer before FW will send out 3752 * an offchannel frame. Otherwise the frame will be stuck and 3753 * never transmitted. We delete the peer upon tx completion. 3754 * It is unlikely that a peer for offchannel tx will already be 3755 * present. However it may be in some rare cases so account for that. 3756 * Otherwise we might remove a legitimate peer and break stuff. 
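 * tmp_peer_created below records whether this worker created the peer,
 * so only such temporary peers are deleted once tx completes.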
3757 */ 3758 3759 for (;;) { 3760 skb = skb_dequeue(&ar->offchan_tx_queue); 3761 if (!skb) 3762 break; 3763 3764 mutex_lock(&ar->conf_mutex); 3765 3766 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n", 3767 skb); 3768 3769 hdr = (struct ieee80211_hdr *)skb->data; 3770 peer_addr = ieee80211_get_DA(hdr); 3771 3772 spin_lock_bh(&ar->data_lock); 3773 vdev_id = ar->scan.vdev_id; 3774 peer = ath10k_peer_find(ar, vdev_id, peer_addr); 3775 spin_unlock_bh(&ar->data_lock); 3776 3777 if (peer) 3778 /* FIXME: should this use ath10k_warn()? */ 3779 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", 3780 peer_addr, vdev_id); 3781 3782 if (!peer) { 3783 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, 3784 peer_addr, 3785 WMI_PEER_TYPE_DEFAULT); 3786 if (ret) 3787 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", 3788 peer_addr, vdev_id, ret); 3789 tmp_peer_created = (ret == 0); 3790 } 3791 3792 spin_lock_bh(&ar->data_lock); 3793 reinit_completion(&ar->offchan_tx_completed); 3794 ar->offchan_tx_skb = skb; 3795 spin_unlock_bh(&ar->data_lock); 3796 3797 /* It's safe to access vif and sta - conf_mutex guarantees that 3798 * sta_state() and remove_interface() are locked exclusively 3799 * out wrt to this offchannel worker. 3800 */ 3801 arvif = ath10k_get_arvif(ar, vdev_id); 3802 if (arvif) { 3803 vif = arvif->vif; 3804 sta = ieee80211_find_sta(vif, peer_addr); 3805 } else { 3806 vif = NULL; 3807 sta = NULL; 3808 } 3809 3810 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3811 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3812 3813 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3814 if (ret) { 3815 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", 3816 ret); 3817 /* not serious */ 3818 } 3819 3820 time_left = 3821 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); 3822 if (time_left == 0) 3823 ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n", 3824 skb); 3825 3826 if (!peer && tmp_peer_created) { 3827 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); 3828 if (ret) 3829 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", 3830 peer_addr, vdev_id, ret); 3831 } 3832 3833 mutex_unlock(&ar->conf_mutex); 3834 } 3835 } 3836 3837 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) 3838 { 3839 struct sk_buff *skb; 3840 3841 for (;;) { 3842 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3843 if (!skb) 3844 break; 3845 3846 ieee80211_free_txskb(ar->hw, skb); 3847 } 3848 } 3849 3850 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) 3851 { 3852 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); 3853 struct sk_buff *skb; 3854 dma_addr_t paddr; 3855 int ret; 3856 3857 for (;;) { 3858 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3859 if (!skb) 3860 break; 3861 3862 if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, 3863 ar->running_fw->fw_file.fw_features)) { 3864 paddr = dma_map_single(ar->dev, skb->data, 3865 skb->len, DMA_TO_DEVICE); 3866 if (!paddr) 3867 continue; 3868 ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr); 3869 if (ret) { 3870 ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", 3871 ret); 3872 dma_unmap_single(ar->dev, paddr, skb->len, 3873 DMA_TO_DEVICE); 3874 ieee80211_free_txskb(ar->hw, skb); 3875 } 3876 } else { 3877 ret = ath10k_wmi_mgmt_tx(ar, skb); 3878 if (ret) { 3879 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", 3880 ret); 3881 ieee80211_free_txskb(ar->hw, skb); 3882 } 3883 } 3884 } 3885 } 3886 3887 static void 
ath10k_mac_txq_init(struct ieee80211_txq *txq) 3888 { 3889 struct ath10k_txq *artxq; 3890 3891 if (!txq) 3892 return; 3893 3894 artxq = (void *)txq->drv_priv; 3895 INIT_LIST_HEAD(&artxq->list); 3896 } 3897 3898 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) 3899 { 3900 struct ath10k_skb_cb *cb; 3901 struct sk_buff *msdu; 3902 int msdu_id; 3903 3904 if (!txq) 3905 return; 3906 3907 spin_lock_bh(&ar->htt.tx_lock); 3908 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { 3909 cb = ATH10K_SKB_CB(msdu); 3910 if (cb->txq == txq) 3911 cb->txq = NULL; 3912 } 3913 spin_unlock_bh(&ar->htt.tx_lock); 3914 } 3915 3916 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, 3917 u16 peer_id, 3918 u8 tid) 3919 { 3920 struct ath10k_peer *peer; 3921 3922 lockdep_assert_held(&ar->data_lock); 3923 3924 peer = ar->peer_map[peer_id]; 3925 if (!peer) 3926 return NULL; 3927 3928 if (peer->removed) 3929 return NULL; 3930 3931 if (peer->sta) 3932 return peer->sta->txq[tid]; 3933 else if (peer->vif) 3934 return peer->vif->txq; 3935 else 3936 return NULL; 3937 } 3938 3939 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, 3940 struct ieee80211_txq *txq) 3941 { 3942 struct ath10k *ar = hw->priv; 3943 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3944 3945 /* No need to get locks */ 3946 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) 3947 return true; 3948 3949 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) 3950 return true; 3951 3952 if (artxq->num_fw_queued < artxq->num_push_allowed) 3953 return true; 3954 3955 return false; 3956 } 3957 3958 /* Return estimated airtime in microsecond, which is calculated using last 3959 * reported TX rate. This is just a rough estimation because host driver has no 3960 * knowledge of the actual transmit rate, retries or aggregation. If actual 3961 * airtime can be reported by firmware, then delta between estimated and actual 3962 * airtime can be adjusted from deficit. 3963 */ 3964 #define IEEE80211_ATF_OVERHEAD 100 /* IFS + some slot time */ 3965 #define IEEE80211_ATF_OVERHEAD_IFS 16 /* IFS only */ 3966 static u16 ath10k_mac_update_airtime(struct ath10k *ar, 3967 struct ieee80211_txq *txq, 3968 struct sk_buff *skb) 3969 { 3970 struct ath10k_sta *arsta; 3971 u32 pktlen; 3972 u16 airtime = 0; 3973 3974 if (!txq || !txq->sta) 3975 return airtime; 3976 3977 if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map)) 3978 return airtime; 3979 3980 spin_lock_bh(&ar->data_lock); 3981 arsta = (struct ath10k_sta *)txq->sta->drv_priv; 3982 3983 pktlen = skb->len + 38; /* Assume MAC header 30, SNAP 8 for most case */ 3984 if (arsta->last_tx_bitrate) { 3985 /* airtime in us, last_tx_bitrate in 100kbps */ 3986 airtime = (pktlen * 8 * (1000 / 100)) 3987 / arsta->last_tx_bitrate; 3988 /* overhead for media access time and IFS */ 3989 airtime += IEEE80211_ATF_OVERHEAD_IFS; 3990 } else { 3991 /* This is mostly for throttle excessive BC/MC frames, and the 3992 * airtime/rate doesn't need be exact. Airtime of BC/MC frames 3993 * in 2G get some discount, which helps prevent very low rate 3994 * frames from being blocked for too long. 
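 * Example: a 1500-octet MSDU (pktlen ~1538 with the assumed 38-byte
 * header overhead) estimates to 1538 * 8 * 10 / 60 ~ 2050 us at the
 * assumed 6 Mbps rate, before IEEE80211_ATF_OVERHEAD is added.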
3995 */ 3996 airtime = (pktlen * 8 * (1000 / 100)) / 60; /* 6M */ 3997 airtime += IEEE80211_ATF_OVERHEAD; 3998 } 3999 spin_unlock_bh(&ar->data_lock); 4000 4001 return airtime; 4002 } 4003 4004 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, 4005 struct ieee80211_txq *txq) 4006 { 4007 struct ath10k *ar = hw->priv; 4008 struct ath10k_htt *htt = &ar->htt; 4009 struct ath10k_txq *artxq = (void *)txq->drv_priv; 4010 struct ieee80211_vif *vif = txq->vif; 4011 struct ieee80211_sta *sta = txq->sta; 4012 enum ath10k_hw_txrx_mode txmode; 4013 enum ath10k_mac_tx_path txpath; 4014 struct sk_buff *skb; 4015 struct ieee80211_hdr *hdr; 4016 size_t skb_len; 4017 bool is_mgmt, is_presp; 4018 int ret; 4019 u16 airtime; 4020 4021 spin_lock_bh(&ar->htt.tx_lock); 4022 ret = ath10k_htt_tx_inc_pending(htt); 4023 spin_unlock_bh(&ar->htt.tx_lock); 4024 4025 if (ret) 4026 return ret; 4027 4028 skb = ieee80211_tx_dequeue(hw, txq); 4029 if (!skb) { 4030 spin_lock_bh(&ar->htt.tx_lock); 4031 ath10k_htt_tx_dec_pending(htt); 4032 spin_unlock_bh(&ar->htt.tx_lock); 4033 4034 return -ENOENT; 4035 } 4036 4037 airtime = ath10k_mac_update_airtime(ar, txq, skb); 4038 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime); 4039 4040 skb_len = skb->len; 4041 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4042 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4043 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4044 4045 if (is_mgmt) { 4046 hdr = (struct ieee80211_hdr *)skb->data; 4047 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4048 4049 spin_lock_bh(&ar->htt.tx_lock); 4050 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4051 4052 if (ret) { 4053 ath10k_htt_tx_dec_pending(htt); 4054 spin_unlock_bh(&ar->htt.tx_lock); 4055 return ret; 4056 } 4057 spin_unlock_bh(&ar->htt.tx_lock); 4058 } 4059 4060 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 4061 if (unlikely(ret)) { 4062 ath10k_warn(ar, "failed to push frame: %d\n", ret); 4063 4064 spin_lock_bh(&ar->htt.tx_lock); 4065 ath10k_htt_tx_dec_pending(htt); 4066 if (is_mgmt) 4067 ath10k_htt_tx_mgmt_dec_pending(htt); 4068 spin_unlock_bh(&ar->htt.tx_lock); 4069 4070 return ret; 4071 } 4072 4073 spin_lock_bh(&ar->htt.tx_lock); 4074 artxq->num_fw_queued++; 4075 spin_unlock_bh(&ar->htt.tx_lock); 4076 4077 return skb_len; 4078 } 4079 4080 static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac) 4081 { 4082 struct ieee80211_txq *txq; 4083 int ret = 0; 4084 4085 ieee80211_txq_schedule_start(hw, ac); 4086 while ((txq = ieee80211_next_txq(hw, ac))) { 4087 while (ath10k_mac_tx_can_push(hw, txq)) { 4088 ret = ath10k_mac_tx_push_txq(hw, txq); 4089 if (ret < 0) 4090 break; 4091 } 4092 ieee80211_return_txq(hw, txq); 4093 ath10k_htt_tx_txq_update(hw, txq); 4094 if (ret == -EBUSY) 4095 break; 4096 } 4097 ieee80211_txq_schedule_end(hw, ac); 4098 4099 return ret; 4100 } 4101 4102 void ath10k_mac_tx_push_pending(struct ath10k *ar) 4103 { 4104 struct ieee80211_hw *hw = ar->hw; 4105 u32 ac; 4106 4107 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH) 4108 return; 4109 4110 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) 4111 return; 4112 4113 rcu_read_lock(); 4114 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 4115 if (ath10k_mac_schedule_txq(hw, ac) == -EBUSY) 4116 break; 4117 } 4118 rcu_read_unlock(); 4119 } 4120 EXPORT_SYMBOL(ath10k_mac_tx_push_pending); 4121 4122 /************/ 4123 /* Scanning */ 4124 /************/ 4125 4126 void __ath10k_scan_finish(struct ath10k *ar) 4127 { 4128 lockdep_assert_held(&ar->data_lock); 4129 4130 
switch (ar->scan.state) { 4131 case ATH10K_SCAN_IDLE: 4132 break; 4133 case ATH10K_SCAN_RUNNING: 4134 case ATH10K_SCAN_ABORTING: 4135 if (!ar->scan.is_roc) { 4136 struct cfg80211_scan_info info = { 4137 .aborted = (ar->scan.state == 4138 ATH10K_SCAN_ABORTING), 4139 }; 4140 4141 ieee80211_scan_completed(ar->hw, &info); 4142 } else if (ar->scan.roc_notify) { 4143 ieee80211_remain_on_channel_expired(ar->hw); 4144 } 4145 /* fall through */ 4146 case ATH10K_SCAN_STARTING: 4147 ar->scan.state = ATH10K_SCAN_IDLE; 4148 ar->scan_channel = NULL; 4149 ar->scan.roc_freq = 0; 4150 ath10k_offchan_tx_purge(ar); 4151 cancel_delayed_work(&ar->scan.timeout); 4152 complete(&ar->scan.completed); 4153 break; 4154 } 4155 } 4156 4157 void ath10k_scan_finish(struct ath10k *ar) 4158 { 4159 spin_lock_bh(&ar->data_lock); 4160 __ath10k_scan_finish(ar); 4161 spin_unlock_bh(&ar->data_lock); 4162 } 4163 4164 static int ath10k_scan_stop(struct ath10k *ar) 4165 { 4166 struct wmi_stop_scan_arg arg = { 4167 .req_id = 1, /* FIXME */ 4168 .req_type = WMI_SCAN_STOP_ONE, 4169 .u.scan_id = ATH10K_SCAN_ID, 4170 }; 4171 int ret; 4172 4173 lockdep_assert_held(&ar->conf_mutex); 4174 4175 ret = ath10k_wmi_stop_scan(ar, &arg); 4176 if (ret) { 4177 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); 4178 goto out; 4179 } 4180 4181 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); 4182 if (ret == 0) { 4183 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); 4184 ret = -ETIMEDOUT; 4185 } else if (ret > 0) { 4186 ret = 0; 4187 } 4188 4189 out: 4190 /* Scan state should be updated upon scan completion but in case 4191 * firmware fails to deliver the event (for whatever reason) it is 4192 * desired to clean up scan state anyway. Firmware may have just 4193 * dropped the scan completion event delivery due to transport pipe 4194 * being overflown with data and/or it can recover on its own before 4195 * next scan request is submitted. 4196 */ 4197 spin_lock_bh(&ar->data_lock); 4198 if (ar->scan.state != ATH10K_SCAN_IDLE) 4199 __ath10k_scan_finish(ar); 4200 spin_unlock_bh(&ar->data_lock); 4201 4202 return ret; 4203 } 4204 4205 static void ath10k_scan_abort(struct ath10k *ar) 4206 { 4207 int ret; 4208 4209 lockdep_assert_held(&ar->conf_mutex); 4210 4211 spin_lock_bh(&ar->data_lock); 4212 4213 switch (ar->scan.state) { 4214 case ATH10K_SCAN_IDLE: 4215 /* This can happen if timeout worker kicked in and called 4216 * abortion while scan completion was being processed. 
4217 */ 4218 break; 4219 case ATH10K_SCAN_STARTING: 4220 case ATH10K_SCAN_ABORTING: 4221 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", 4222 ath10k_scan_state_str(ar->scan.state), 4223 ar->scan.state); 4224 break; 4225 case ATH10K_SCAN_RUNNING: 4226 ar->scan.state = ATH10K_SCAN_ABORTING; 4227 spin_unlock_bh(&ar->data_lock); 4228 4229 ret = ath10k_scan_stop(ar); 4230 if (ret) 4231 ath10k_warn(ar, "failed to abort scan: %d\n", ret); 4232 4233 spin_lock_bh(&ar->data_lock); 4234 break; 4235 } 4236 4237 spin_unlock_bh(&ar->data_lock); 4238 } 4239 4240 void ath10k_scan_timeout_work(struct work_struct *work) 4241 { 4242 struct ath10k *ar = container_of(work, struct ath10k, 4243 scan.timeout.work); 4244 4245 mutex_lock(&ar->conf_mutex); 4246 ath10k_scan_abort(ar); 4247 mutex_unlock(&ar->conf_mutex); 4248 } 4249 4250 static int ath10k_start_scan(struct ath10k *ar, 4251 const struct wmi_start_scan_arg *arg) 4252 { 4253 int ret; 4254 4255 lockdep_assert_held(&ar->conf_mutex); 4256 4257 ret = ath10k_wmi_start_scan(ar, arg); 4258 if (ret) 4259 return ret; 4260 4261 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); 4262 if (ret == 0) { 4263 ret = ath10k_scan_stop(ar); 4264 if (ret) 4265 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 4266 4267 return -ETIMEDOUT; 4268 } 4269 4270 /* If we failed to start the scan, return error code at 4271 * this point. This is probably due to some issue in the 4272 * firmware, but no need to wedge the driver due to that... 4273 */ 4274 spin_lock_bh(&ar->data_lock); 4275 if (ar->scan.state == ATH10K_SCAN_IDLE) { 4276 spin_unlock_bh(&ar->data_lock); 4277 return -EINVAL; 4278 } 4279 spin_unlock_bh(&ar->data_lock); 4280 4281 return 0; 4282 } 4283 4284 /**********************/ 4285 /* mac80211 callbacks */ 4286 /**********************/ 4287 4288 static void ath10k_mac_op_tx(struct ieee80211_hw *hw, 4289 struct ieee80211_tx_control *control, 4290 struct sk_buff *skb) 4291 { 4292 struct ath10k *ar = hw->priv; 4293 struct ath10k_htt *htt = &ar->htt; 4294 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 4295 struct ieee80211_vif *vif = info->control.vif; 4296 struct ieee80211_sta *sta = control->sta; 4297 struct ieee80211_txq *txq = NULL; 4298 struct ieee80211_hdr *hdr = (void *)skb->data; 4299 enum ath10k_hw_txrx_mode txmode; 4300 enum ath10k_mac_tx_path txpath; 4301 bool is_htt; 4302 bool is_mgmt; 4303 bool is_presp; 4304 int ret; 4305 u16 airtime; 4306 4307 airtime = ath10k_mac_update_airtime(ar, txq, skb); 4308 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime); 4309 4310 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4311 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4312 is_htt = (txpath == ATH10K_MAC_TX_HTT || 4313 txpath == ATH10K_MAC_TX_HTT_MGMT); 4314 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4315 4316 if (is_htt) { 4317 spin_lock_bh(&ar->htt.tx_lock); 4318 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4319 4320 ret = ath10k_htt_tx_inc_pending(htt); 4321 if (ret) { 4322 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", 4323 ret); 4324 spin_unlock_bh(&ar->htt.tx_lock); 4325 ieee80211_free_txskb(ar->hw, skb); 4326 return; 4327 } 4328 4329 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4330 if (ret) { 4331 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", 4332 ret); 4333 ath10k_htt_tx_dec_pending(htt); 4334 spin_unlock_bh(&ar->htt.tx_lock); 4335 ieee80211_free_txskb(ar->hw, skb); 4336 return; 4337 } 4338 
spin_unlock_bh(&ar->htt.tx_lock); 4339 } 4340 4341 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 4342 if (ret) { 4343 ath10k_warn(ar, "failed to transmit frame: %d\n", ret); 4344 if (is_htt) { 4345 spin_lock_bh(&ar->htt.tx_lock); 4346 ath10k_htt_tx_dec_pending(htt); 4347 if (is_mgmt) 4348 ath10k_htt_tx_mgmt_dec_pending(htt); 4349 spin_unlock_bh(&ar->htt.tx_lock); 4350 } 4351 return; 4352 } 4353 } 4354 4355 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, 4356 struct ieee80211_txq *txq) 4357 { 4358 struct ath10k *ar = hw->priv; 4359 int ret; 4360 u8 ac; 4361 4362 ath10k_htt_tx_txq_update(hw, txq); 4363 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH) 4364 return; 4365 4366 ac = txq->ac; 4367 ieee80211_txq_schedule_start(hw, ac); 4368 txq = ieee80211_next_txq(hw, ac); 4369 if (!txq) 4370 goto out; 4371 4372 while (ath10k_mac_tx_can_push(hw, txq)) { 4373 ret = ath10k_mac_tx_push_txq(hw, txq); 4374 if (ret < 0) 4375 break; 4376 } 4377 ieee80211_return_txq(hw, txq); 4378 ath10k_htt_tx_txq_update(hw, txq); 4379 out: 4380 ieee80211_txq_schedule_end(hw, ac); 4381 } 4382 4383 /* Must not be called with conf_mutex held as workers can use that also. */ 4384 void ath10k_drain_tx(struct ath10k *ar) 4385 { 4386 /* make sure rcu-protected mac80211 tx path itself is drained */ 4387 synchronize_net(); 4388 4389 ath10k_offchan_tx_purge(ar); 4390 ath10k_mgmt_over_wmi_tx_purge(ar); 4391 4392 cancel_work_sync(&ar->offchan_tx_work); 4393 cancel_work_sync(&ar->wmi_mgmt_tx_work); 4394 } 4395 4396 void ath10k_halt(struct ath10k *ar) 4397 { 4398 struct ath10k_vif *arvif; 4399 4400 lockdep_assert_held(&ar->conf_mutex); 4401 4402 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 4403 ar->filter_flags = 0; 4404 ar->monitor = false; 4405 ar->monitor_arvif = NULL; 4406 4407 if (ar->monitor_started) 4408 ath10k_monitor_stop(ar); 4409 4410 ar->monitor_started = false; 4411 ar->tx_paused = 0; 4412 4413 ath10k_scan_finish(ar); 4414 ath10k_peer_cleanup_all(ar); 4415 ath10k_stop_radar_confirmation(ar); 4416 ath10k_core_stop(ar); 4417 ath10k_hif_power_down(ar); 4418 4419 spin_lock_bh(&ar->data_lock); 4420 list_for_each_entry(arvif, &ar->arvifs, list) 4421 ath10k_mac_vif_beacon_cleanup(arvif); 4422 spin_unlock_bh(&ar->data_lock); 4423 } 4424 4425 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 4426 { 4427 struct ath10k *ar = hw->priv; 4428 4429 mutex_lock(&ar->conf_mutex); 4430 4431 *tx_ant = ar->cfg_tx_chainmask; 4432 *rx_ant = ar->cfg_rx_chainmask; 4433 4434 mutex_unlock(&ar->conf_mutex); 4435 4436 return 0; 4437 } 4438 4439 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) 4440 { 4441 /* It is not clear that allowing gaps in chainmask 4442 * is helpful. Probably it will not do what user 4443 * is hoping for, so warn in that case. 4444 */ 4445 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) 4446 return; 4447 4448 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n", 4449 dbg, cm); 4450 } 4451 4452 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar) 4453 { 4454 int nsts = ar->vht_cap_info; 4455 4456 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4457 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4458 4459 /* If firmware does not deliver to the host the number of space-time 4460 * streams supported, assume it supports up to 4 BF STS and return 4461 * the value for the VHT CAP field (nsts - 1) 4462 */ 4463 if (nsts == 0) 4464 return 3; 4465 4466 return nsts; 4467 } 4468 4469 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar) 4470 { 4471 int sound_dim = ar->vht_cap_info; 4472 4473 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4474 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4475 4476 /* If the sounding dimension is not advertised by the firmware, 4477 * let's use a default value of 1 4478 */ 4479 if (sound_dim == 0) 4480 return 1; 4481 4482 return sound_dim; 4483 } 4484 4485 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) 4486 { 4487 struct ieee80211_sta_vht_cap vht_cap = {0}; 4488 struct ath10k_hw_params *hw = &ar->hw_params; 4489 u16 mcs_map; 4490 u32 val; 4491 int i; 4492 4493 vht_cap.vht_supported = 1; 4494 vht_cap.cap = ar->vht_cap_info; 4495 4496 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4497 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 4498 val = ath10k_mac_get_vht_cap_bf_sts(ar); 4499 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4500 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4501 4502 vht_cap.cap |= val; 4503 } 4504 4505 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4506 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 4507 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4508 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4509 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4510 4511 vht_cap.cap |= val; 4512 } 4513 4514 /* Currently the firmware seems to be buggy, don't enable 80+80 4515 * mode until that's resolved. 4516 */ 4517 if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) && 4518 (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0) 4519 vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; 4520 4521 mcs_map = 0; 4522 for (i = 0; i < 8; i++) { 4523 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) 4524 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4525 else 4526 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4527 } 4528 4529 if (ar->cfg_tx_chainmask <= 1) 4530 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; 4531 4532 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); 4533 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); 4534 4535 /* If we are supporting 160 MHz or 80+80, then the NIC may be able to do 4536 * a restricted NSS for 160 or 80+80 vs what it can do for 80 MHz. Give 4537 * user-space a clue if that is the case.
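* The per-chip vht160_mcs_rx_highest and vht160_mcs_tx_highest values from hw_params are exported below via rx_highest/tx_highest for exactly that purpose.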
4538 */ 4539 if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) && 4540 (hw->vht160_mcs_rx_highest != 0 || 4541 hw->vht160_mcs_tx_highest != 0)) { 4542 vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest); 4543 vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest); 4544 } 4545 4546 return vht_cap; 4547 } 4548 4549 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) 4550 { 4551 int i; 4552 struct ieee80211_sta_ht_cap ht_cap = {0}; 4553 4554 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) 4555 return ht_cap; 4556 4557 ht_cap.ht_supported = 1; 4558 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4559 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 4560 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 4561 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 4562 ht_cap.cap |= 4563 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; 4564 4565 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) 4566 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 4567 4568 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) 4569 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 4570 4571 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { 4572 u32 smps; 4573 4574 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 4575 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 4576 4577 ht_cap.cap |= smps; 4578 } 4579 4580 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) 4581 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 4582 4583 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { 4584 u32 stbc; 4585 4586 stbc = ar->ht_cap_info; 4587 stbc &= WMI_HT_CAP_RX_STBC; 4588 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 4589 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 4590 stbc &= IEEE80211_HT_CAP_RX_STBC; 4591 4592 ht_cap.cap |= stbc; 4593 } 4594 4595 if (ar->ht_cap_info & WMI_HT_CAP_LDPC || (ar->ht_cap_info & 4596 WMI_HT_CAP_RX_LDPC && (ar->ht_cap_info & WMI_HT_CAP_TX_LDPC))) 4597 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 4598 4599 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) 4600 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 4601 4602 /* max AMSDU is implicitly taken from vht_cap_info */ 4603 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 4604 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 4605 4606 for (i = 0; i < ar->num_rf_chains; i++) { 4607 if (ar->cfg_rx_chainmask & BIT(i)) 4608 ht_cap.mcs.rx_mask[i] = 0xFF; 4609 } 4610 4611 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 4612 4613 return ht_cap; 4614 } 4615 4616 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) 4617 { 4618 struct ieee80211_supported_band *band; 4619 struct ieee80211_sta_vht_cap vht_cap; 4620 struct ieee80211_sta_ht_cap ht_cap; 4621 4622 ht_cap = ath10k_get_ht_cap(ar); 4623 vht_cap = ath10k_create_vht_cap(ar); 4624 4625 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 4626 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 4627 band->ht_cap = ht_cap; 4628 } 4629 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 4630 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 4631 band->ht_cap = ht_cap; 4632 band->vht_cap = vht_cap; 4633 } 4634 } 4635 4636 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) 4637 { 4638 int ret; 4639 4640 lockdep_assert_held(&ar->conf_mutex); 4641 4642 ath10k_check_chain_mask(ar, tx_ant, "tx"); 4643 ath10k_check_chain_mask(ar, rx_ant, "rx"); 4644 4645 ar->cfg_tx_chainmask = tx_ant; 4646 ar->cfg_rx_chainmask = rx_ant; 4647 4648 if ((ar->state != ATH10K_STATE_ON) && 4649 (ar->state != ATH10K_STATE_RESTARTED)) 4650 return 0; 4651 4652 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, 4653 
tx_ant); 4654 if (ret) { 4655 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n", 4656 ret, tx_ant); 4657 return ret; 4658 } 4659 4660 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, 4661 rx_ant); 4662 if (ret) { 4663 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", 4664 ret, rx_ant); 4665 return ret; 4666 } 4667 4668 /* Reload HT/VHT capability */ 4669 ath10k_mac_setup_ht_vht_cap(ar); 4670 4671 return 0; 4672 } 4673 4674 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 4675 { 4676 struct ath10k *ar = hw->priv; 4677 int ret; 4678 4679 mutex_lock(&ar->conf_mutex); 4680 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); 4681 mutex_unlock(&ar->conf_mutex); 4682 return ret; 4683 } 4684 4685 static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar, 4686 struct wmi_bb_timing_cfg_arg *bb_timing) 4687 { 4688 struct device_node *node; 4689 const char *fem_name; 4690 int ret; 4691 4692 node = ar->dev->of_node; 4693 if (!node) 4694 return -ENOENT; 4695 4696 ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name); 4697 if (ret) 4698 return -ENOENT; 4699 4700 /* 4701 * If an external Front End Module is used in the hardware, the default baseband timing 4702 * parameters cannot be used since they were fine tuned for the reference hardware, 4703 * so choose different values suitable for that external FEM. 4704 */ 4705 if (!strcmp("microsemi-lx5586", fem_name)) { 4706 bb_timing->bb_tx_timing = 0x00; 4707 bb_timing->bb_xpa_timing = 0x0101; 4708 } else { 4709 return -ENOENT; 4710 } 4711 4712 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot bb_tx_timing 0x%x bb_xpa_timing 0x%x\n", 4713 bb_timing->bb_tx_timing, bb_timing->bb_xpa_timing); 4714 return 0; 4715 } 4716 4717 static int ath10k_start(struct ieee80211_hw *hw) 4718 { 4719 struct ath10k *ar = hw->priv; 4720 u32 param; 4721 int ret = 0; 4722 struct wmi_bb_timing_cfg_arg bb_timing = {0}; 4723 4724 /* 4725 * This makes sense only when restarting hw. It is harmless to call 4726 * unconditionally. This is necessary to make sure no HTT/WMI tx 4727 * commands will be submitted while restarting.
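* Note that ath10k_drain_tx() must be called before conf_mutex is taken, see the comment above its definition.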
4728 */ 4729 ath10k_drain_tx(ar); 4730 4731 mutex_lock(&ar->conf_mutex); 4732 4733 switch (ar->state) { 4734 case ATH10K_STATE_OFF: 4735 ar->state = ATH10K_STATE_ON; 4736 break; 4737 case ATH10K_STATE_RESTARTING: 4738 ar->state = ATH10K_STATE_RESTARTED; 4739 break; 4740 case ATH10K_STATE_ON: 4741 case ATH10K_STATE_RESTARTED: 4742 case ATH10K_STATE_WEDGED: 4743 WARN_ON(1); 4744 ret = -EINVAL; 4745 goto err; 4746 case ATH10K_STATE_UTF: 4747 ret = -EBUSY; 4748 goto err; 4749 } 4750 4751 ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL); 4752 if (ret) { 4753 ath10k_err(ar, "Could not init hif: %d\n", ret); 4754 goto err_off; 4755 } 4756 4757 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, 4758 &ar->normal_mode_fw); 4759 if (ret) { 4760 ath10k_err(ar, "Could not init core: %d\n", ret); 4761 goto err_power_down; 4762 } 4763 4764 param = ar->wmi.pdev_param->pmf_qos; 4765 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4766 if (ret) { 4767 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); 4768 goto err_core_stop; 4769 } 4770 4771 param = ar->wmi.pdev_param->dynamic_bw; 4772 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4773 if (ret) { 4774 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); 4775 goto err_core_stop; 4776 } 4777 4778 if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) { 4779 ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr); 4780 if (ret) { 4781 ath10k_err(ar, "failed to set prob req oui: %i\n", ret); 4782 goto err_core_stop; 4783 } 4784 } 4785 4786 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { 4787 ret = ath10k_wmi_adaptive_qcs(ar, true); 4788 if (ret) { 4789 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n", 4790 ret); 4791 goto err_core_stop; 4792 } 4793 } 4794 4795 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { 4796 param = ar->wmi.pdev_param->burst_enable; 4797 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4798 if (ret) { 4799 ath10k_warn(ar, "failed to disable burst: %d\n", ret); 4800 goto err_core_stop; 4801 } 4802 } 4803 4804 param = ar->wmi.pdev_param->idle_ps_config; 4805 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4806 if (ret && ret != -EOPNOTSUPP) { 4807 ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret); 4808 goto err_core_stop; 4809 } 4810 4811 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); 4812 4813 /* 4814 * By default FW sets the ARP frames' AC to voice (6). In that case the ARP 4815 * exchange does not work properly for a UAPSD enabled AP. ARP requests 4816 * which arrive with access category 0 are processed by the network stack 4817 * and sent back with access category 0, but FW changes the access category 4818 * to 6. Setting the ARP frames' access category to best effort (0) solves 4819 * this problem.
4820 */ 4821 4822 param = ar->wmi.pdev_param->arp_ac_override; 4823 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4824 if (ret) { 4825 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", 4826 ret); 4827 goto err_core_stop; 4828 } 4829 4830 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, 4831 ar->running_fw->fw_file.fw_features)) { 4832 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, 4833 WMI_CCA_DETECT_LEVEL_AUTO, 4834 WMI_CCA_DETECT_MARGIN_AUTO); 4835 if (ret) { 4836 ath10k_warn(ar, "failed to enable adaptive cca: %d\n", 4837 ret); 4838 goto err_core_stop; 4839 } 4840 } 4841 4842 param = ar->wmi.pdev_param->ani_enable; 4843 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4844 if (ret) { 4845 ath10k_warn(ar, "failed to enable ani by default: %d\n", 4846 ret); 4847 goto err_core_stop; 4848 } 4849 4850 ar->ani_enabled = true; 4851 4852 if (ath10k_peer_stats_enabled(ar)) { 4853 param = ar->wmi.pdev_param->peer_stats_update_period; 4854 ret = ath10k_wmi_pdev_set_param(ar, param, 4855 PEER_DEFAULT_STATS_UPDATE_PERIOD); 4856 if (ret) { 4857 ath10k_warn(ar, 4858 "failed to set peer stats period : %d\n", 4859 ret); 4860 goto err_core_stop; 4861 } 4862 } 4863 4864 param = ar->wmi.pdev_param->enable_btcoex; 4865 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && 4866 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, 4867 ar->running_fw->fw_file.fw_features)) { 4868 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4869 if (ret) { 4870 ath10k_warn(ar, 4871 "failed to set btcoex param: %d\n", ret); 4872 goto err_core_stop; 4873 } 4874 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 4875 } 4876 4877 if (test_bit(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, ar->wmi.svc_map)) { 4878 ret = __ath10k_fetch_bb_timing_dt(ar, &bb_timing); 4879 if (!ret) { 4880 ret = ath10k_wmi_pdev_bb_timing(ar, &bb_timing); 4881 if (ret) { 4882 ath10k_warn(ar, 4883 "failed to set bb timings: %d\n", 4884 ret); 4885 goto err_core_stop; 4886 } 4887 } 4888 } 4889 4890 ar->num_started_vdevs = 0; 4891 ath10k_regd_update(ar); 4892 4893 ath10k_spectral_start(ar); 4894 ath10k_thermal_set_throttling(ar); 4895 4896 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE; 4897 4898 mutex_unlock(&ar->conf_mutex); 4899 return 0; 4900 4901 err_core_stop: 4902 ath10k_core_stop(ar); 4903 4904 err_power_down: 4905 ath10k_hif_power_down(ar); 4906 4907 err_off: 4908 ar->state = ATH10K_STATE_OFF; 4909 4910 err: 4911 mutex_unlock(&ar->conf_mutex); 4912 return ret; 4913 } 4914 4915 static void ath10k_stop(struct ieee80211_hw *hw) 4916 { 4917 struct ath10k *ar = hw->priv; 4918 4919 ath10k_drain_tx(ar); 4920 4921 mutex_lock(&ar->conf_mutex); 4922 if (ar->state != ATH10K_STATE_OFF) { 4923 ath10k_halt(ar); 4924 ar->state = ATH10K_STATE_OFF; 4925 } 4926 mutex_unlock(&ar->conf_mutex); 4927 4928 cancel_work_sync(&ar->set_coverage_class_work); 4929 cancel_delayed_work_sync(&ar->scan.timeout); 4930 cancel_work_sync(&ar->restart_work); 4931 } 4932 4933 static int ath10k_config_ps(struct ath10k *ar) 4934 { 4935 struct ath10k_vif *arvif; 4936 int ret = 0; 4937 4938 lockdep_assert_held(&ar->conf_mutex); 4939 4940 list_for_each_entry(arvif, &ar->arvifs, list) { 4941 ret = ath10k_mac_vif_setup_ps(arvif); 4942 if (ret) { 4943 ath10k_warn(ar, "failed to setup powersave: %d\n", ret); 4944 break; 4945 } 4946 } 4947 4948 return ret; 4949 } 4950 4951 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) 4952 { 4953 int ret; 4954 u32 param; 4955 4956 lockdep_assert_held(&ar->conf_mutex); 4957 4958 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", 
txpower); 4959 4960 param = ar->wmi.pdev_param->txpower_limit2g; 4961 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4962 if (ret) { 4963 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", 4964 txpower, ret); 4965 return ret; 4966 } 4967 4968 param = ar->wmi.pdev_param->txpower_limit5g; 4969 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4970 if (ret) { 4971 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", 4972 txpower, ret); 4973 return ret; 4974 } 4975 4976 return 0; 4977 } 4978 4979 static int ath10k_mac_txpower_recalc(struct ath10k *ar) 4980 { 4981 struct ath10k_vif *arvif; 4982 int ret, txpower = -1; 4983 4984 lockdep_assert_held(&ar->conf_mutex); 4985 4986 list_for_each_entry(arvif, &ar->arvifs, list) { 4987 if (arvif->txpower <= 0) 4988 continue; 4989 4990 if (txpower == -1) 4991 txpower = arvif->txpower; 4992 else 4993 txpower = min(txpower, arvif->txpower); 4994 } 4995 4996 if (txpower == -1) 4997 return 0; 4998 4999 ret = ath10k_mac_txpower_setup(ar, txpower); 5000 if (ret) { 5001 ath10k_warn(ar, "failed to setup tx power %d: %d\n", 5002 txpower, ret); 5003 return ret; 5004 } 5005 5006 return 0; 5007 } 5008 5009 static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 5010 { 5011 struct ath10k *ar = hw->priv; 5012 struct ieee80211_conf *conf = &hw->conf; 5013 int ret = 0; 5014 5015 mutex_lock(&ar->conf_mutex); 5016 5017 if (changed & IEEE80211_CONF_CHANGE_PS) 5018 ath10k_config_ps(ar); 5019 5020 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 5021 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; 5022 ret = ath10k_monitor_recalc(ar); 5023 if (ret) 5024 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5025 } 5026 5027 mutex_unlock(&ar->conf_mutex); 5028 return ret; 5029 } 5030 5031 static u32 get_nss_from_chainmask(u16 chain_mask) 5032 { 5033 if ((chain_mask & 0xf) == 0xf) 5034 return 4; 5035 else if ((chain_mask & 0x7) == 0x7) 5036 return 3; 5037 else if ((chain_mask & 0x3) == 0x3) 5038 return 2; 5039 return 1; 5040 } 5041 5042 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) 5043 { 5044 u32 value = 0; 5045 struct ath10k *ar = arvif->ar; 5046 int nsts; 5047 int sound_dim; 5048 5049 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) 5050 return 0; 5051 5052 nsts = ath10k_mac_get_vht_cap_bf_sts(ar); 5053 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 5054 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) 5055 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 5056 5057 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 5058 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 5059 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) 5060 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); 5061 5062 if (!value) 5063 return 0; 5064 5065 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 5066 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 5067 5068 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 5069 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | 5070 WMI_VDEV_PARAM_TXBF_SU_TX_BFER); 5071 5072 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 5073 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 5074 5075 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 5076 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | 5077 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); 5078 5079 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 5080 ar->wmi.vdev_param->txbf, value); 5081 } 5082 5083 /* 5084 * TODO: 5085 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, 5086 * because we will send mgmt frames 
without CCK. This requirement 5087 * for P2P_FIND/GO_NEG should be handled by checking CCK flag 5088 * in the TX packet. 5089 */ 5090 static int ath10k_add_interface(struct ieee80211_hw *hw, 5091 struct ieee80211_vif *vif) 5092 { 5093 struct ath10k *ar = hw->priv; 5094 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5095 struct ath10k_peer *peer; 5096 enum wmi_sta_powersave_param param; 5097 int ret = 0; 5098 u32 value; 5099 int bit; 5100 int i; 5101 u32 vdev_param; 5102 5103 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 5104 5105 mutex_lock(&ar->conf_mutex); 5106 5107 memset(arvif, 0, sizeof(*arvif)); 5108 ath10k_mac_txq_init(vif->txq); 5109 5110 arvif->ar = ar; 5111 arvif->vif = vif; 5112 5113 INIT_LIST_HEAD(&arvif->list); 5114 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work); 5115 INIT_DELAYED_WORK(&arvif->connection_loss_work, 5116 ath10k_mac_vif_sta_connection_loss_work); 5117 5118 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 5119 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 5120 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 5121 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 5122 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 5123 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 5124 } 5125 5126 if (ar->num_peers >= ar->max_num_peers) { 5127 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); 5128 ret = -ENOBUFS; 5129 goto err; 5130 } 5131 5132 if (ar->free_vdev_map == 0) { 5133 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); 5134 ret = -EBUSY; 5135 goto err; 5136 } 5137 bit = __ffs64(ar->free_vdev_map); 5138 5139 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", 5140 bit, ar->free_vdev_map); 5141 5142 arvif->vdev_id = bit; 5143 arvif->vdev_subtype = 5144 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); 5145 5146 switch (vif->type) { 5147 case NL80211_IFTYPE_P2P_DEVICE: 5148 arvif->vdev_type = WMI_VDEV_TYPE_STA; 5149 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5150 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); 5151 break; 5152 case NL80211_IFTYPE_UNSPECIFIED: 5153 case NL80211_IFTYPE_STATION: 5154 arvif->vdev_type = WMI_VDEV_TYPE_STA; 5155 if (vif->p2p) 5156 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5157 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); 5158 break; 5159 case NL80211_IFTYPE_ADHOC: 5160 arvif->vdev_type = WMI_VDEV_TYPE_IBSS; 5161 break; 5162 case NL80211_IFTYPE_MESH_POINT: 5163 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { 5164 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5165 (ar, WMI_VDEV_SUBTYPE_MESH_11S); 5166 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 5167 ret = -EINVAL; 5168 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); 5169 goto err; 5170 } 5171 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5172 break; 5173 case NL80211_IFTYPE_AP: 5174 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5175 5176 if (vif->p2p) 5177 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5178 (ar, WMI_VDEV_SUBTYPE_P2P_GO); 5179 break; 5180 case NL80211_IFTYPE_MONITOR: 5181 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 5182 break; 5183 default: 5184 WARN_ON(1); 5185 break; 5186 } 5187 5188 /* Using vdev_id as queue number will make it very easy to do per-vif 5189 * tx queue locking. This shouldn't wrap due to interface combinations 5190 * but do a modulo for correctness sake and prevent using offchannel tx 5191 * queues for regular vif tx. 
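* For example, with IEEE80211_MAX_QUEUES == 16 the vdev ids map onto hw queues 0..14 while queue 15 stays reserved for mac80211's offchannel tx queue.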
5192 */ 5193 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5194 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 5195 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5196 5197 /* Some firmware revisions don't wait for beacon tx completion before 5198 * sending another SWBA event. This could lead to hardware using old 5199 * (freed) beacon data in some cases, e.g. tx credit starvation 5200 * combined with missed TBTT. This is very very rare. 5201 * 5202 * On non-IOMMU-enabled hosts this could be a possible security issue 5203 * because hw could beacon some random data on the air. On 5204 * IOMMU-enabled hosts DMAR faults would occur in most cases and target 5205 * device would crash. 5206 * 5207 * Since there are no beacon tx completions (implicit or explicit) 5208 * propagated to the host the only workaround for this is to allocate a 5209 * DMA-coherent buffer for the lifetime of a vif and use it for all 5210 * beacon tx commands. Worst case for this approach is that some beacons may 5211 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. 5212 */ 5213 if (vif->type == NL80211_IFTYPE_ADHOC || 5214 vif->type == NL80211_IFTYPE_MESH_POINT || 5215 vif->type == NL80211_IFTYPE_AP) { 5216 arvif->beacon_buf = dma_alloc_coherent(ar->dev, 5217 IEEE80211_MAX_FRAME_LEN, 5218 &arvif->beacon_paddr, 5219 GFP_ATOMIC); 5220 if (!arvif->beacon_buf) { 5221 ret = -ENOMEM; 5222 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 5223 ret); 5224 goto err; 5225 } 5226 } 5227 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)) 5228 arvif->nohwcrypt = true; 5229 5230 if (arvif->nohwcrypt && 5231 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 5232 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); ret = -EINVAL; 5233 goto err; 5234 } 5235 5236 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", 5237 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, 5238 arvif->beacon_buf ? "single-buf" : "per-skb"); 5239 5240 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 5241 arvif->vdev_subtype, vif->addr); 5242 if (ret) { 5243 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", 5244 arvif->vdev_id, ret); 5245 goto err; 5246 } 5247 5248 if (test_bit(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, 5249 ar->wmi.svc_map)) { 5250 vdev_param = ar->wmi.vdev_param->disable_4addr_src_lrn; 5251 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5252 WMI_VDEV_DISABLE_4_ADDR_SRC_LRN); 5253 if (ret && ret != -EOPNOTSUPP) { 5254 ath10k_warn(ar, "failed to disable 4addr src lrn vdev %i: %d\n", 5255 arvif->vdev_id, ret); 5256 } 5257 } 5258 5259 ar->free_vdev_map &= ~(1LL << arvif->vdev_id); 5260 spin_lock_bh(&ar->data_lock); 5261 list_add(&arvif->list, &ar->arvifs); 5262 spin_unlock_bh(&ar->data_lock); 5263 5264 /* It makes no sense to have firmware do keepalives. mac80211 already 5265 * takes care of this with idle connection polling. 5266 */ 5267 ret = ath10k_mac_vif_disable_keepalive(arvif); 5268 if (ret) { 5269 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", 5270 arvif->vdev_id, ret); 5271 goto err_vdev_delete; 5272 } 5273 5274 arvif->def_wep_key_idx = -1; 5275 5276 vdev_param = ar->wmi.vdev_param->tx_encap_type; 5277 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5278 ATH10K_HW_TXRX_NATIVE_WIFI); 5279 /* 10.X firmware does not support this VDEV parameter.
Do not warn */ 5280 if (ret && ret != -EOPNOTSUPP) { 5281 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", 5282 arvif->vdev_id, ret); 5283 goto err_vdev_delete; 5284 } 5285 5286 /* Configuring number of spatial stream for monitor interface is causing 5287 * target assert in qca9888 and qca6174. 5288 */ 5289 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { 5290 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); 5291 5292 vdev_param = ar->wmi.vdev_param->nss; 5293 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5294 nss); 5295 if (ret) { 5296 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", 5297 arvif->vdev_id, ar->cfg_tx_chainmask, nss, 5298 ret); 5299 goto err_vdev_delete; 5300 } 5301 } 5302 5303 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5304 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5305 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, 5306 vif->addr, WMI_PEER_TYPE_DEFAULT); 5307 if (ret) { 5308 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", 5309 arvif->vdev_id, ret); 5310 goto err_vdev_delete; 5311 } 5312 5313 spin_lock_bh(&ar->data_lock); 5314 5315 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); 5316 if (!peer) { 5317 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 5318 vif->addr, arvif->vdev_id); 5319 spin_unlock_bh(&ar->data_lock); 5320 ret = -ENOENT; 5321 goto err_peer_delete; 5322 } 5323 5324 arvif->peer_id = find_first_bit(peer->peer_ids, 5325 ATH10K_MAX_NUM_PEER_IDS); 5326 5327 spin_unlock_bh(&ar->data_lock); 5328 } else { 5329 arvif->peer_id = HTT_INVALID_PEERID; 5330 } 5331 5332 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 5333 ret = ath10k_mac_set_kickout(arvif); 5334 if (ret) { 5335 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", 5336 arvif->vdev_id, ret); 5337 goto err_peer_delete; 5338 } 5339 } 5340 5341 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 5342 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 5343 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 5344 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5345 param, value); 5346 if (ret) { 5347 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", 5348 arvif->vdev_id, ret); 5349 goto err_peer_delete; 5350 } 5351 5352 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 5353 if (ret) { 5354 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 5355 arvif->vdev_id, ret); 5356 goto err_peer_delete; 5357 } 5358 5359 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 5360 if (ret) { 5361 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 5362 arvif->vdev_id, ret); 5363 goto err_peer_delete; 5364 } 5365 } 5366 5367 ret = ath10k_mac_set_txbf_conf(arvif); 5368 if (ret) { 5369 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", 5370 arvif->vdev_id, ret); 5371 goto err_peer_delete; 5372 } 5373 5374 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 5375 if (ret) { 5376 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 5377 arvif->vdev_id, ret); 5378 goto err_peer_delete; 5379 } 5380 5381 arvif->txpower = vif->bss_conf.txpower; 5382 ret = ath10k_mac_txpower_recalc(ar); 5383 if (ret) { 5384 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5385 goto err_peer_delete; 5386 } 5387 5388 if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) { 5389 vdev_param = ar->wmi.vdev_param->rtt_responder_role; 5390 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5391 arvif->ftm_responder); 5392 5393 /* It is harmless to 
not set FTM role. Do not warn */ 5394 if (ret && ret != -EOPNOTSUPP) 5395 ath10k_warn(ar, "failed to set vdev %i FTM Responder: %d\n", 5396 arvif->vdev_id, ret); 5397 } 5398 5399 if (vif->type == NL80211_IFTYPE_MONITOR) { 5400 ar->monitor_arvif = arvif; 5401 ret = ath10k_monitor_recalc(ar); 5402 if (ret) { 5403 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5404 goto err_peer_delete; 5405 } 5406 } 5407 5408 spin_lock_bh(&ar->htt.tx_lock); 5409 if (!ar->tx_paused) 5410 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 5411 spin_unlock_bh(&ar->htt.tx_lock); 5412 5413 mutex_unlock(&ar->conf_mutex); 5414 return 0; 5415 5416 err_peer_delete: 5417 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5418 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) 5419 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); 5420 5421 err_vdev_delete: 5422 ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5423 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5424 spin_lock_bh(&ar->data_lock); 5425 list_del(&arvif->list); 5426 spin_unlock_bh(&ar->data_lock); 5427 5428 err: 5429 if (arvif->beacon_buf) { 5430 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 5431 arvif->beacon_buf, arvif->beacon_paddr); 5432 arvif->beacon_buf = NULL; 5433 } 5434 5435 mutex_unlock(&ar->conf_mutex); 5436 5437 return ret; 5438 } 5439 5440 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif) 5441 { 5442 int i; 5443 5444 for (i = 0; i < BITS_PER_LONG; i++) 5445 ath10k_mac_vif_tx_unlock(arvif, i); 5446 } 5447 5448 static void ath10k_remove_interface(struct ieee80211_hw *hw, 5449 struct ieee80211_vif *vif) 5450 { 5451 struct ath10k *ar = hw->priv; 5452 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5453 struct ath10k_peer *peer; 5454 int ret; 5455 int i; 5456 5457 cancel_work_sync(&arvif->ap_csa_work); 5458 cancel_delayed_work_sync(&arvif->connection_loss_work); 5459 5460 mutex_lock(&ar->conf_mutex); 5461 5462 spin_lock_bh(&ar->data_lock); 5463 ath10k_mac_vif_beacon_cleanup(arvif); 5464 spin_unlock_bh(&ar->data_lock); 5465 5466 ret = ath10k_spectral_vif_stop(arvif); 5467 if (ret) 5468 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", 5469 arvif->vdev_id, ret); 5470 5471 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5472 spin_lock_bh(&ar->data_lock); 5473 list_del(&arvif->list); 5474 spin_unlock_bh(&ar->data_lock); 5475 5476 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5477 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5478 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, 5479 vif->addr); 5480 if (ret) 5481 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", 5482 arvif->vdev_id, ret); 5483 5484 kfree(arvif->u.ap.noa_data); 5485 } 5486 5487 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", 5488 arvif->vdev_id); 5489 5490 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5491 if (ret) 5492 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", 5493 arvif->vdev_id, ret); 5494 5495 /* Some firmware revisions don't notify host about self-peer removal 5496 * until after associated vdev is deleted. 
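* That is why ath10k_wait_for_peer_deleted() below is called only after the WMI vdev delete above has been issued.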
5497 */ 5498 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5499 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5500 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, 5501 vif->addr); 5502 if (ret) 5503 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", 5504 arvif->vdev_id, ret); 5505 5506 spin_lock_bh(&ar->data_lock); 5507 ar->num_peers--; 5508 spin_unlock_bh(&ar->data_lock); 5509 } 5510 5511 spin_lock_bh(&ar->data_lock); 5512 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 5513 peer = ar->peer_map[i]; 5514 if (!peer) 5515 continue; 5516 5517 if (peer->vif == vif) { 5518 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", 5519 vif->addr, arvif->vdev_id); 5520 peer->vif = NULL; 5521 } 5522 } 5523 spin_unlock_bh(&ar->data_lock); 5524 5525 ath10k_peer_cleanup(ar, arvif->vdev_id); 5526 ath10k_mac_txq_unref(ar, vif->txq); 5527 5528 if (vif->type == NL80211_IFTYPE_MONITOR) { 5529 ar->monitor_arvif = NULL; 5530 ret = ath10k_monitor_recalc(ar); 5531 if (ret) 5532 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5533 } 5534 5535 ret = ath10k_mac_txpower_recalc(ar); 5536 if (ret) 5537 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5538 5539 spin_lock_bh(&ar->htt.tx_lock); 5540 ath10k_mac_vif_tx_unlock_all(arvif); 5541 spin_unlock_bh(&ar->htt.tx_lock); 5542 5543 ath10k_mac_txq_unref(ar, vif->txq); 5544 5545 mutex_unlock(&ar->conf_mutex); 5546 } 5547 5548 /* 5549 * FIXME: Has to be verified. 5550 */ 5551 #define SUPPORTED_FILTERS \ 5552 (FIF_ALLMULTI | \ 5553 FIF_CONTROL | \ 5554 FIF_PSPOLL | \ 5555 FIF_OTHER_BSS | \ 5556 FIF_BCN_PRBRESP_PROMISC | \ 5557 FIF_PROBE_REQ | \ 5558 FIF_FCSFAIL) 5559 5560 static void ath10k_configure_filter(struct ieee80211_hw *hw, 5561 unsigned int changed_flags, 5562 unsigned int *total_flags, 5563 u64 multicast) 5564 { 5565 struct ath10k *ar = hw->priv; 5566 int ret; 5567 5568 mutex_lock(&ar->conf_mutex); 5569 5570 changed_flags &= SUPPORTED_FILTERS; 5571 *total_flags &= SUPPORTED_FILTERS; 5572 ar->filter_flags = *total_flags; 5573 5574 ret = ath10k_monitor_recalc(ar); 5575 if (ret) 5576 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5577 5578 mutex_unlock(&ar->conf_mutex); 5579 } 5580 5581 static void ath10k_bss_info_changed(struct ieee80211_hw *hw, 5582 struct ieee80211_vif *vif, 5583 struct ieee80211_bss_conf *info, 5584 u32 changed) 5585 { 5586 struct ath10k *ar = hw->priv; 5587 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5588 struct cfg80211_chan_def def; 5589 u32 vdev_param, pdev_param, slottime, preamble; 5590 u16 bitrate, hw_value; 5591 u8 rate, basic_rate_idx; 5592 int rateidx, ret = 0, hw_rate_code; 5593 enum nl80211_band band; 5594 const struct ieee80211_supported_band *sband; 5595 5596 mutex_lock(&ar->conf_mutex); 5597 5598 if (changed & BSS_CHANGED_IBSS) 5599 ath10k_control_ibss(arvif, info, vif->addr); 5600 5601 if (changed & BSS_CHANGED_BEACON_INT) { 5602 arvif->beacon_interval = info->beacon_int; 5603 vdev_param = ar->wmi.vdev_param->beacon_interval; 5604 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5605 arvif->beacon_interval); 5606 ath10k_dbg(ar, ATH10K_DBG_MAC, 5607 "mac vdev %d beacon_interval %d\n", 5608 arvif->vdev_id, arvif->beacon_interval); 5609 5610 if (ret) 5611 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", 5612 arvif->vdev_id, ret); 5613 } 5614 5615 if (changed & BSS_CHANGED_BEACON) { 5616 ath10k_dbg(ar, ATH10K_DBG_MAC, 5617 "vdev %d set beacon tx mode to staggered\n", 5618 arvif->vdev_id); 5619 5620 pdev_param = 
ar->wmi.pdev_param->beacon_tx_mode; 5621 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 5622 WMI_BEACON_STAGGERED_MODE); 5623 if (ret) 5624 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", 5625 arvif->vdev_id, ret); 5626 5627 ret = ath10k_mac_setup_bcn_tmpl(arvif); 5628 if (ret) 5629 ath10k_warn(ar, "failed to update beacon template: %d\n", 5630 ret); 5631 5632 if (ieee80211_vif_is_mesh(vif)) { 5633 /* mesh doesn't use SSID but firmware needs it */ 5634 strncpy(arvif->u.ap.ssid, "mesh", 5635 sizeof(arvif->u.ap.ssid)); 5636 arvif->u.ap.ssid_len = 4; 5637 } 5638 } 5639 5640 if (changed & BSS_CHANGED_AP_PROBE_RESP) { 5641 ret = ath10k_mac_setup_prb_tmpl(arvif); 5642 if (ret) 5643 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", 5644 arvif->vdev_id, ret); 5645 } 5646 5647 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { 5648 arvif->dtim_period = info->dtim_period; 5649 5650 ath10k_dbg(ar, ATH10K_DBG_MAC, 5651 "mac vdev %d dtim_period %d\n", 5652 arvif->vdev_id, arvif->dtim_period); 5653 5654 vdev_param = ar->wmi.vdev_param->dtim_period; 5655 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5656 arvif->dtim_period); 5657 if (ret) 5658 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", 5659 arvif->vdev_id, ret); 5660 } 5661 5662 if (changed & BSS_CHANGED_SSID && 5663 vif->type == NL80211_IFTYPE_AP) { 5664 arvif->u.ap.ssid_len = info->ssid_len; 5665 if (info->ssid_len) 5666 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); 5667 arvif->u.ap.hidden_ssid = info->hidden_ssid; 5668 } 5669 5670 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) 5671 ether_addr_copy(arvif->bssid, info->bssid); 5672 5673 if (changed & BSS_CHANGED_FTM_RESPONDER && 5674 arvif->ftm_responder != info->ftm_responder && 5675 test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) { 5676 arvif->ftm_responder = info->ftm_responder; 5677 5678 vdev_param = ar->wmi.vdev_param->rtt_responder_role; 5679 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5680 arvif->ftm_responder); 5681 5682 ath10k_dbg(ar, ATH10K_DBG_MAC, 5683 "mac vdev %d ftm_responder %d:ret %d\n", 5684 arvif->vdev_id, arvif->ftm_responder, ret); 5685 } 5686 5687 if (changed & BSS_CHANGED_BEACON_ENABLED) 5688 ath10k_control_beaconing(arvif, info); 5689 5690 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 5691 arvif->use_cts_prot = info->use_cts_prot; 5692 5693 ret = ath10k_recalc_rtscts_prot(arvif); 5694 if (ret) 5695 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 5696 arvif->vdev_id, ret); 5697 5698 if (ath10k_mac_can_set_cts_prot(arvif)) { 5699 ret = ath10k_mac_set_cts_prot(arvif); 5700 if (ret) 5701 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 5702 arvif->vdev_id, ret); 5703 } 5704 } 5705 5706 if (changed & BSS_CHANGED_ERP_SLOT) { 5707 if (info->use_short_slot) 5708 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ 5709 5710 else 5711 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ 5712 5713 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", 5714 arvif->vdev_id, slottime); 5715 5716 vdev_param = ar->wmi.vdev_param->slot_time; 5717 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5718 slottime); 5719 if (ret) 5720 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", 5721 arvif->vdev_id, ret); 5722 } 5723 5724 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 5725 if (info->use_short_preamble) 5726 preamble = WMI_VDEV_PREAMBLE_SHORT; 5727 else 5728 preamble = WMI_VDEV_PREAMBLE_LONG; 5729 
5730 ath10k_dbg(ar, ATH10K_DBG_MAC, 5731 "mac vdev %d preamble %d\n", 5732 arvif->vdev_id, preamble); 5733 5734 vdev_param = ar->wmi.vdev_param->preamble; 5735 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5736 preamble); 5737 if (ret) 5738 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", 5739 arvif->vdev_id, ret); 5740 } 5741 5742 if (changed & BSS_CHANGED_ASSOC) { 5743 if (info->assoc) { 5744 /* Workaround: Make sure monitor vdev is not running 5745 * when associating to prevent some firmware revisions 5746 * (e.g. 10.1 and 10.2) from crashing. 5747 */ 5748 if (ar->monitor_started) 5749 ath10k_monitor_stop(ar); 5750 ath10k_bss_assoc(hw, vif, info); 5751 ath10k_monitor_recalc(ar); 5752 } else { 5753 ath10k_bss_disassoc(hw, vif); 5754 } 5755 } 5756 5757 if (changed & BSS_CHANGED_TXPOWER) { 5758 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", 5759 arvif->vdev_id, info->txpower); 5760 5761 arvif->txpower = info->txpower; 5762 ret = ath10k_mac_txpower_recalc(ar); 5763 if (ret) 5764 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5765 } 5766 5767 if (changed & BSS_CHANGED_PS) { 5768 arvif->ps = vif->bss_conf.ps; 5769 5770 ret = ath10k_config_ps(ar); 5771 if (ret) 5772 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", 5773 arvif->vdev_id, ret); 5774 } 5775 5776 if (changed & BSS_CHANGED_MCAST_RATE && 5777 !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) { 5778 band = def.chan->band; 5779 rateidx = vif->bss_conf.mcast_rate[band] - 1; 5780 5781 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 5782 rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX; 5783 5784 bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate; 5785 hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value; 5786 if (ath10k_mac_bitrate_is_cck(bitrate)) 5787 preamble = WMI_RATE_PREAMBLE_CCK; 5788 else 5789 preamble = WMI_RATE_PREAMBLE_OFDM; 5790 5791 rate = ATH10K_HW_RATECODE(hw_value, 0, preamble); 5792 5793 ath10k_dbg(ar, ATH10K_DBG_MAC, 5794 "mac vdev %d mcast_rate %x\n", 5795 arvif->vdev_id, rate); 5796 5797 vdev_param = ar->wmi.vdev_param->mcast_data_rate; 5798 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 5799 vdev_param, rate); 5800 if (ret) 5801 ath10k_warn(ar, 5802 "failed to set mcast rate on vdev %i: %d\n", 5803 arvif->vdev_id, ret); 5804 5805 vdev_param = ar->wmi.vdev_param->bcast_data_rate; 5806 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 5807 vdev_param, rate); 5808 if (ret) 5809 ath10k_warn(ar, 5810 "failed to set bcast rate on vdev %i: %d\n", 5811 arvif->vdev_id, ret); 5812 } 5813 5814 if (changed & BSS_CHANGED_BASIC_RATES) { 5815 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) { 5816 mutex_unlock(&ar->conf_mutex); 5817 return; 5818 } 5819 5820 sband = ar->hw->wiphy->bands[def.chan->band]; 5821 basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; 5822 bitrate = sband->bitrates[basic_rate_idx].bitrate; 5823 5824 hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate); 5825 if (hw_rate_code < 0) { 5826 ath10k_warn(ar, "bitrate not supported %d\n", bitrate); 5827 mutex_unlock(&ar->conf_mutex); 5828 return; 5829 } 5830 5831 vdev_param = ar->wmi.vdev_param->mgmt_rate; 5832 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5833 hw_rate_code); 5834 if (ret) 5835 ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret); 5836 } 5837 5838 mutex_unlock(&ar->conf_mutex); 5839 } 5840 5841 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value) 5842 { 5843 struct ath10k *ar = hw->priv; 5844 5845 /* This function should never be called if
setting the coverage class 5846 * is not supported on this hardware. 5847 */ 5848 if (!ar->hw_params.hw_ops->set_coverage_class) { 5849 WARN_ON_ONCE(1); 5850 return; 5851 } 5852 ar->hw_params.hw_ops->set_coverage_class(ar, value); 5853 } 5854 5855 struct ath10k_mac_tdls_iter_data { 5856 u32 num_tdls_stations; 5857 struct ieee80211_vif *curr_vif; 5858 }; 5859 5860 static void ath10k_mac_tdls_vif_stations_count_iter(void *data, 5861 struct ieee80211_sta *sta) 5862 { 5863 struct ath10k_mac_tdls_iter_data *iter_data = data; 5864 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5865 struct ieee80211_vif *sta_vif = arsta->arvif->vif; 5866 5867 if (sta->tdls && sta_vif == iter_data->curr_vif) 5868 iter_data->num_tdls_stations++; 5869 } 5870 5871 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, 5872 struct ieee80211_vif *vif) 5873 { 5874 struct ath10k_mac_tdls_iter_data data = {}; 5875 5876 data.curr_vif = vif; 5877 5878 ieee80211_iterate_stations_atomic(hw, 5879 ath10k_mac_tdls_vif_stations_count_iter, 5880 &data); 5881 return data.num_tdls_stations; 5882 } 5883 5884 static int ath10k_hw_scan(struct ieee80211_hw *hw, 5885 struct ieee80211_vif *vif, 5886 struct ieee80211_scan_request *hw_req) 5887 { 5888 struct ath10k *ar = hw->priv; 5889 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5890 struct cfg80211_scan_request *req = &hw_req->req; 5891 struct wmi_start_scan_arg arg; 5892 int ret = 0; 5893 int i; 5894 u32 scan_timeout; 5895 5896 mutex_lock(&ar->conf_mutex); 5897 5898 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 5899 ret = -EBUSY; 5900 goto exit; 5901 } 5902 5903 spin_lock_bh(&ar->data_lock); 5904 switch (ar->scan.state) { 5905 case ATH10K_SCAN_IDLE: 5906 reinit_completion(&ar->scan.started); 5907 reinit_completion(&ar->scan.completed); 5908 ar->scan.state = ATH10K_SCAN_STARTING; 5909 ar->scan.is_roc = false; 5910 ar->scan.vdev_id = arvif->vdev_id; 5911 ret = 0; 5912 break; 5913 case ATH10K_SCAN_STARTING: 5914 case ATH10K_SCAN_RUNNING: 5915 case ATH10K_SCAN_ABORTING: 5916 ret = -EBUSY; 5917 break; 5918 } 5919 spin_unlock_bh(&ar->data_lock); 5920 5921 if (ret) 5922 goto exit; 5923 5924 memset(&arg, 0, sizeof(arg)); 5925 ath10k_wmi_start_scan_init(ar, &arg); 5926 arg.vdev_id = arvif->vdev_id; 5927 arg.scan_id = ATH10K_SCAN_ID; 5928 5929 if (req->ie_len) { 5930 arg.ie_len = req->ie_len; 5931 memcpy(arg.ie, req->ie, arg.ie_len); 5932 } 5933 5934 if (req->n_ssids) { 5935 arg.n_ssids = req->n_ssids; 5936 for (i = 0; i < arg.n_ssids; i++) { 5937 arg.ssids[i].len = req->ssids[i].ssid_len; 5938 arg.ssids[i].ssid = req->ssids[i].ssid; 5939 } 5940 } else { 5941 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 5942 } 5943 5944 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { 5945 arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; 5946 ether_addr_copy(arg.mac_addr.addr, req->mac_addr); 5947 ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask); 5948 } 5949 5950 if (req->n_channels) { 5951 arg.n_channels = req->n_channels; 5952 for (i = 0; i < arg.n_channels; i++) 5953 arg.channels[i] = req->channels[i]->center_freq; 5954 } 5955 5956 /* if duration is set, default dwell times will be overwritten */ 5957 if (req->duration) { 5958 arg.dwell_time_active = req->duration; 5959 arg.dwell_time_passive = req->duration; 5960 arg.burst_duration_ms = req->duration; 5961 5962 scan_timeout = min_t(u32, arg.max_rest_time * 5963 (arg.n_channels - 1) + (req->duration + 5964 ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * 5965 arg.n_channels, arg.max_scan_time + 
200); 5966 5967 } else { 5968 /* Add a 200ms margin to account for event/command processing */ 5969 scan_timeout = arg.max_scan_time + 200; 5970 } 5971 5972 ret = ath10k_start_scan(ar, &arg); 5973 if (ret) { 5974 ath10k_warn(ar, "failed to start hw scan: %d\n", ret); 5975 spin_lock_bh(&ar->data_lock); 5976 ar->scan.state = ATH10K_SCAN_IDLE; 5977 spin_unlock_bh(&ar->data_lock); 5978 } 5979 5980 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 5981 msecs_to_jiffies(scan_timeout)); 5982 5983 exit: 5984 mutex_unlock(&ar->conf_mutex); 5985 return ret; 5986 } 5987 5988 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, 5989 struct ieee80211_vif *vif) 5990 { 5991 struct ath10k *ar = hw->priv; 5992 5993 mutex_lock(&ar->conf_mutex); 5994 ath10k_scan_abort(ar); 5995 mutex_unlock(&ar->conf_mutex); 5996 5997 cancel_delayed_work_sync(&ar->scan.timeout); 5998 } 5999 6000 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, 6001 struct ath10k_vif *arvif, 6002 enum set_key_cmd cmd, 6003 struct ieee80211_key_conf *key) 6004 { 6005 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; 6006 int ret; 6007 6008 /* 10.1 firmware branch requires default key index to be set to group 6009 * key index after installing it. Otherwise FW/HW Txes corrupted 6010 * frames with multi-vif APs. This is not required for main firmware 6011 * branch (e.g. 636). 6012 * 6013 * This is also needed for 636 fw for IBSS-RSN to work more reliably. 6014 * 6015 * FIXME: It remains unknown if this is required for multi-vif STA 6016 * interfaces on 10.1. 6017 */ 6018 6019 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 6020 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 6021 return; 6022 6023 if (key->cipher == WLAN_CIPHER_SUITE_WEP40) 6024 return; 6025 6026 if (key->cipher == WLAN_CIPHER_SUITE_WEP104) 6027 return; 6028 6029 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 6030 return; 6031 6032 if (cmd != SET_KEY) 6033 return; 6034 6035 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 6036 key->keyidx); 6037 if (ret) 6038 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", 6039 arvif->vdev_id, ret); 6040 } 6041 6042 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 6043 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 6044 struct ieee80211_key_conf *key) 6045 { 6046 struct ath10k *ar = hw->priv; 6047 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6048 struct ath10k_peer *peer; 6049 const u8 *peer_addr; 6050 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || 6051 key->cipher == WLAN_CIPHER_SUITE_WEP104; 6052 int ret = 0; 6053 int ret2; 6054 u32 flags = 0; 6055 u32 flags2; 6056 6057 /* this one needs to be done in software */ 6058 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 6059 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 6060 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || 6061 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) 6062 return 1; 6063 6064 if (arvif->nohwcrypt) 6065 return 1; 6066 6067 if (key->keyidx > WMI_MAX_KEY_INDEX) 6068 return -ENOSPC; 6069 6070 mutex_lock(&ar->conf_mutex); 6071 6072 if (sta) 6073 peer_addr = sta->addr; 6074 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 6075 peer_addr = vif->bss_conf.bssid; 6076 else 6077 peer_addr = vif->addr; 6078 6079 key->hw_key_idx = key->keyidx; 6080 6081 if (is_wep) { 6082 if (cmd == SET_KEY) 6083 arvif->wep_keys[key->keyidx] = key; 6084 else 6085 arvif->wep_keys[key->keyidx] = NULL; 6086 } 6087 6088 /* the peer should not disappear in mid-way (unless FW goes awry) since 6089 * we already 
hold conf_mutex. we just make sure it's there now. 6090 */ 6091 spin_lock_bh(&ar->data_lock); 6092 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 6093 spin_unlock_bh(&ar->data_lock); 6094 6095 if (!peer) { 6096 if (cmd == SET_KEY) { 6097 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", 6098 peer_addr); 6099 ret = -EOPNOTSUPP; 6100 goto exit; 6101 } else { 6102 /* if the peer doesn't exist there is no key to disable anymore */ 6103 goto exit; 6104 } 6105 } 6106 6107 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 6108 flags |= WMI_KEY_PAIRWISE; 6109 else 6110 flags |= WMI_KEY_GROUP; 6111 6112 if (is_wep) { 6113 if (cmd == DISABLE_KEY) 6114 ath10k_clear_vdev_key(arvif, key); 6115 6116 /* When WEP keys are uploaded it's possible that there are 6117 * stations associated already (e.g. when merging) without any 6118 * keys. Static WEP needs an explicit per-peer key upload. 6119 */ 6120 if (vif->type == NL80211_IFTYPE_ADHOC && 6121 cmd == SET_KEY) 6122 ath10k_mac_vif_update_wep_key(arvif, key); 6123 6124 /* 802.1x never sets the def_wep_key_idx so each set_key() 6125 * call changes default tx key. 6126 * 6127 * Static WEP sets def_wep_key_idx via .set_default_unicast_key 6128 * after the first set_key(). 6129 */ 6130 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1) 6131 flags |= WMI_KEY_TX_USAGE; 6132 } 6133 6134 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); 6135 if (ret) { 6136 WARN_ON(ret > 0); 6137 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", 6138 arvif->vdev_id, peer_addr, ret); 6139 goto exit; 6140 } 6141 6142 /* mac80211 sets static WEP keys as groupwise while firmware requires 6143 * them to be installed twice as both pairwise and groupwise. 6144 */ 6145 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) { 6146 flags2 = flags; 6147 flags2 &= ~WMI_KEY_GROUP; 6148 flags2 |= WMI_KEY_PAIRWISE; 6149 6150 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); 6151 if (ret) { 6152 WARN_ON(ret > 0); 6153 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", 6154 arvif->vdev_id, peer_addr, ret); 6155 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, 6156 peer_addr, flags); 6157 if (ret2) { 6158 WARN_ON(ret2 > 0); 6159 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", 6160 arvif->vdev_id, peer_addr, ret2); 6161 } 6162 goto exit; 6163 } 6164 } 6165 6166 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); 6167 6168 spin_lock_bh(&ar->data_lock); 6169 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 6170 if (peer && cmd == SET_KEY) 6171 peer->keys[key->keyidx] = key; 6172 else if (peer && cmd == DISABLE_KEY) 6173 peer->keys[key->keyidx] = NULL; 6174 else if (peer == NULL) 6175 /* impossible unless FW goes crazy */ 6176 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); 6177 spin_unlock_bh(&ar->data_lock); 6178 6179 if (sta && sta->tdls) 6180 ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6181 WMI_PEER_AUTHORIZE, 1); 6182 6183 exit: 6184 mutex_unlock(&ar->conf_mutex); 6185 return ret; 6186 } 6187 6188 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, 6189 struct ieee80211_vif *vif, 6190 int keyidx) 6191 { 6192 struct ath10k *ar = hw->priv; 6193 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6194 int ret; 6195 6196 mutex_lock(&arvif->ar->conf_mutex); 6197 6198 if (arvif->ar->state != ATH10K_STATE_ON) 6199 goto unlock; 6200 6201 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 6202 arvif->vdev_id, keyidx); 6203 6204 ret =
ath10k_wmi_vdev_set_param(arvif->ar, 6205 arvif->vdev_id, 6206 arvif->ar->wmi.vdev_param->def_keyid, 6207 keyidx); 6208 6209 if (ret) { 6210 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", 6211 arvif->vdev_id, 6212 ret); 6213 goto unlock; 6214 } 6215 6216 arvif->def_wep_key_idx = keyidx; 6217 6218 unlock: 6219 mutex_unlock(&arvif->ar->conf_mutex); 6220 } 6221 6222 static void ath10k_sta_rc_update_wk(struct work_struct *wk) 6223 { 6224 struct ath10k *ar; 6225 struct ath10k_vif *arvif; 6226 struct ath10k_sta *arsta; 6227 struct ieee80211_sta *sta; 6228 struct cfg80211_chan_def def; 6229 enum nl80211_band band; 6230 const u8 *ht_mcs_mask; 6231 const u16 *vht_mcs_mask; 6232 u32 changed, bw, nss, smps; 6233 int err; 6234 6235 arsta = container_of(wk, struct ath10k_sta, update_wk); 6236 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 6237 arvif = arsta->arvif; 6238 ar = arvif->ar; 6239 6240 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 6241 return; 6242 6243 band = def.chan->band; 6244 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 6245 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 6246 6247 spin_lock_bh(&ar->data_lock); 6248 6249 changed = arsta->changed; 6250 arsta->changed = 0; 6251 6252 bw = arsta->bw; 6253 nss = arsta->nss; 6254 smps = arsta->smps; 6255 6256 spin_unlock_bh(&ar->data_lock); 6257 6258 mutex_lock(&ar->conf_mutex); 6259 6260 nss = max_t(u32, 1, nss); 6261 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask), 6262 ath10k_mac_max_vht_nss(vht_mcs_mask))); 6263 6264 if (changed & IEEE80211_RC_BW_CHANGED) { 6265 enum wmi_phy_mode mode; 6266 6267 mode = chan_to_phymode(&def); 6268 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n", 6269 sta->addr, bw, mode); 6270 6271 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6272 WMI_PEER_PHYMODE, mode); 6273 if (err) { 6274 ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n", 6275 sta->addr, mode, err); 6276 goto exit; 6277 } 6278 6279 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6280 WMI_PEER_CHAN_WIDTH, bw); 6281 if (err) 6282 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", 6283 sta->addr, bw, err); 6284 } 6285 6286 if (changed & IEEE80211_RC_NSS_CHANGED) { 6287 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", 6288 sta->addr, nss); 6289 6290 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6291 WMI_PEER_NSS, nss); 6292 if (err) 6293 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", 6294 sta->addr, nss, err); 6295 } 6296 6297 if (changed & IEEE80211_RC_SMPS_CHANGED) { 6298 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", 6299 sta->addr, smps); 6300 6301 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6302 WMI_PEER_SMPS_STATE, smps); 6303 if (err) 6304 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", 6305 sta->addr, smps, err); 6306 } 6307 6308 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { 6309 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", 6310 sta->addr); 6311 6312 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 6313 if (err) 6314 ath10k_warn(ar, "failed to reassociate station: %pM\n", 6315 sta->addr); 6316 } 6317 6318 exit: 6319 mutex_unlock(&ar->conf_mutex); 6320 } 6321 6322 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif, 6323 struct ieee80211_sta *sta) 6324 { 6325 struct ath10k *ar = arvif->ar; 6326 6327 lockdep_assert_held(&ar->conf_mutex); 6328 6329 if (arvif->vdev_type 
== WMI_VDEV_TYPE_STA && !sta->tdls) 6330 return 0; 6331 6332 if (ar->num_stations >= ar->max_num_stations) 6333 return -ENOBUFS; 6334 6335 ar->num_stations++; 6336 6337 return 0; 6338 } 6339 6340 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif, 6341 struct ieee80211_sta *sta) 6342 { 6343 struct ath10k *ar = arvif->ar; 6344 6345 lockdep_assert_held(&ar->conf_mutex); 6346 6347 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 6348 return; 6349 6350 ar->num_stations--; 6351 } 6352 6353 static int ath10k_sta_state(struct ieee80211_hw *hw, 6354 struct ieee80211_vif *vif, 6355 struct ieee80211_sta *sta, 6356 enum ieee80211_sta_state old_state, 6357 enum ieee80211_sta_state new_state) 6358 { 6359 struct ath10k *ar = hw->priv; 6360 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6361 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6362 struct ath10k_peer *peer; 6363 int ret = 0; 6364 int i; 6365 6366 if (old_state == IEEE80211_STA_NOTEXIST && 6367 new_state == IEEE80211_STA_NONE) { 6368 memset(arsta, 0, sizeof(*arsta)); 6369 arsta->arvif = arvif; 6370 arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; 6371 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); 6372 6373 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6374 ath10k_mac_txq_init(sta->txq[i]); 6375 } 6376 6377 /* cancel must be done outside the mutex to avoid deadlock */ 6378 if ((old_state == IEEE80211_STA_NONE && 6379 new_state == IEEE80211_STA_NOTEXIST)) 6380 cancel_work_sync(&arsta->update_wk); 6381 6382 mutex_lock(&ar->conf_mutex); 6383 6384 if (old_state == IEEE80211_STA_NOTEXIST && 6385 new_state == IEEE80211_STA_NONE) { 6386 /* 6387 * New station addition. 6388 */ 6389 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT; 6390 u32 num_tdls_stations; 6391 6392 ath10k_dbg(ar, ATH10K_DBG_MAC, 6393 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", 6394 arvif->vdev_id, sta->addr, 6395 ar->num_stations + 1, ar->max_num_stations, 6396 ar->num_peers + 1, ar->max_num_peers); 6397 6398 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); 6399 6400 if (sta->tdls) { 6401 if (num_tdls_stations >= ar->max_num_tdls_vdevs) { 6402 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n", 6403 arvif->vdev_id, 6404 ar->max_num_tdls_vdevs); 6405 ret = -ELNRNG; 6406 goto exit; 6407 } 6408 peer_type = WMI_PEER_TYPE_TDLS; 6409 } 6410 6411 ret = ath10k_mac_inc_num_stations(arvif, sta); 6412 if (ret) { 6413 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", 6414 ar->max_num_stations); 6415 goto exit; 6416 } 6417 6418 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { 6419 arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), 6420 GFP_KERNEL); 6421 if (!arsta->tx_stats) { 6422 ret = -ENOMEM; 6423 goto exit; 6424 } 6425 } 6426 6427 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, 6428 sta->addr, peer_type); 6429 if (ret) { 6430 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", 6431 sta->addr, arvif->vdev_id, ret); 6432 ath10k_mac_dec_num_stations(arvif, sta); 6433 kfree(arsta->tx_stats); 6434 goto exit; 6435 } 6436 6437 spin_lock_bh(&ar->data_lock); 6438 6439 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 6440 if (!peer) { 6441 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 6442 vif->addr, arvif->vdev_id); 6443 spin_unlock_bh(&ar->data_lock); 6444 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6445 ath10k_mac_dec_num_stations(arvif, sta); 6446 kfree(arsta->tx_stats); 6447 ret = -ENOENT; 
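/* Everything set up above in this branch has been undone again: the peer was deleted, the station count dropped and the extended tx stats freed (kfree(NULL) is a no-op when extended tx stats were never allocated). */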
6448 goto exit; 6449 } 6450 6451 arsta->peer_id = find_first_bit(peer->peer_ids, 6452 ATH10K_MAX_NUM_PEER_IDS); 6453 6454 spin_unlock_bh(&ar->data_lock); 6455 6456 if (!sta->tdls) 6457 goto exit; 6458 6459 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6460 WMI_TDLS_ENABLE_ACTIVE); 6461 if (ret) { 6462 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6463 arvif->vdev_id, ret); 6464 ath10k_peer_delete(ar, arvif->vdev_id, 6465 sta->addr); 6466 ath10k_mac_dec_num_stations(arvif, sta); 6467 kfree(arsta->tx_stats); 6468 goto exit; 6469 } 6470 6471 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6472 WMI_TDLS_PEER_STATE_PEERING); 6473 if (ret) { 6474 ath10k_warn(ar, 6475 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n", 6476 sta->addr, arvif->vdev_id, ret); 6477 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6478 ath10k_mac_dec_num_stations(arvif, sta); 6479 kfree(arsta->tx_stats); 6480 6481 if (num_tdls_stations != 0) 6482 goto exit; 6483 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6484 WMI_TDLS_DISABLE); 6485 } 6486 } else if ((old_state == IEEE80211_STA_NONE && 6487 new_state == IEEE80211_STA_NOTEXIST)) { 6488 /* 6489 * Existing station deletion. 6490 */ 6491 ath10k_dbg(ar, ATH10K_DBG_MAC, 6492 "mac vdev %d peer delete %pM sta %pK (sta gone)\n", 6493 arvif->vdev_id, sta->addr, sta); 6494 6495 if (sta->tdls) { 6496 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, 6497 sta, 6498 WMI_TDLS_PEER_STATE_TEARDOWN); 6499 if (ret) 6500 ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", 6501 sta->addr, 6502 WMI_TDLS_PEER_STATE_TEARDOWN, ret); 6503 } 6504 6505 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6506 if (ret) 6507 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", 6508 sta->addr, arvif->vdev_id, ret); 6509 6510 ath10k_mac_dec_num_stations(arvif, sta); 6511 6512 spin_lock_bh(&ar->data_lock); 6513 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 6514 peer = ar->peer_map[i]; 6515 if (!peer) 6516 continue; 6517 6518 if (peer->sta == sta) { 6519 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n", 6520 sta->addr, peer, i, arvif->vdev_id); 6521 peer->sta = NULL; 6522 6523 /* Clean up the peer object as well since we 6524 * must have failed to do this above. 6525 */ 6526 list_del(&peer->list); 6527 ar->peer_map[i] = NULL; 6528 kfree(peer); 6529 ar->num_peers--; 6530 } 6531 } 6532 spin_unlock_bh(&ar->data_lock); 6533 6534 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { 6535 kfree(arsta->tx_stats); 6536 arsta->tx_stats = NULL; 6537 } 6538 6539 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6540 ath10k_mac_txq_unref(ar, sta->txq[i]); 6541 6542 if (!sta->tdls) 6543 goto exit; 6544 6545 if (ath10k_mac_tdls_vif_stations_count(hw, vif)) 6546 goto exit; 6547 6548 /* This was the last tdls peer in current vif */ 6549 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6550 WMI_TDLS_DISABLE); 6551 if (ret) { 6552 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6553 arvif->vdev_id, ret); 6554 } 6555 } else if (old_state == IEEE80211_STA_AUTH && 6556 new_state == IEEE80211_STA_ASSOC && 6557 (vif->type == NL80211_IFTYPE_AP || 6558 vif->type == NL80211_IFTYPE_MESH_POINT || 6559 vif->type == NL80211_IFTYPE_ADHOC)) { 6560 /* 6561 * New association. 
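* (Handled here only for AP, mesh and IBSS interfaces, per the vif->type check above; for these the driver performs the firmware-side peer association itself via ath10k_station_assoc().)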
6562 */ 6563 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", 6564 sta->addr); 6565 6566 ret = ath10k_station_assoc(ar, vif, sta, false); 6567 if (ret) 6568 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", 6569 sta->addr, arvif->vdev_id, ret); 6570 } else if (old_state == IEEE80211_STA_ASSOC && 6571 new_state == IEEE80211_STA_AUTHORIZED && 6572 sta->tdls) { 6573 /* 6574 * Tdls station authorized. 6575 */ 6576 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n", 6577 sta->addr); 6578 6579 ret = ath10k_station_assoc(ar, vif, sta, false); 6580 if (ret) { 6581 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n", 6582 sta->addr, arvif->vdev_id, ret); 6583 goto exit; 6584 } 6585 6586 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6587 WMI_TDLS_PEER_STATE_CONNECTED); 6588 if (ret) 6589 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n", 6590 sta->addr, arvif->vdev_id, ret); 6591 } else if (old_state == IEEE80211_STA_ASSOC && 6592 new_state == IEEE80211_STA_AUTH && 6593 (vif->type == NL80211_IFTYPE_AP || 6594 vif->type == NL80211_IFTYPE_MESH_POINT || 6595 vif->type == NL80211_IFTYPE_ADHOC)) { 6596 /* 6597 * Disassociation. 6598 */ 6599 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", 6600 sta->addr); 6601 6602 ret = ath10k_station_disassoc(ar, vif, sta); 6603 if (ret) 6604 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", 6605 sta->addr, arvif->vdev_id, ret); 6606 } 6607 exit: 6608 mutex_unlock(&ar->conf_mutex); 6609 return ret; 6610 } 6611 6612 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, 6613 u16 ac, bool enable) 6614 { 6615 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6616 struct wmi_sta_uapsd_auto_trig_arg arg = {}; 6617 u32 prio = 0, acc = 0; 6618 u32 value = 0; 6619 int ret = 0; 6620 6621 lockdep_assert_held(&ar->conf_mutex); 6622 6623 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 6624 return 0; 6625 6626 switch (ac) { 6627 case IEEE80211_AC_VO: 6628 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 6629 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 6630 prio = 7; 6631 acc = 3; 6632 break; 6633 case IEEE80211_AC_VI: 6634 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 6635 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 6636 prio = 5; 6637 acc = 2; 6638 break; 6639 case IEEE80211_AC_BE: 6640 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 6641 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 6642 prio = 2; 6643 acc = 1; 6644 break; 6645 case IEEE80211_AC_BK: 6646 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 6647 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 6648 prio = 0; 6649 acc = 0; 6650 break; 6651 } 6652 6653 if (enable) 6654 arvif->u.sta.uapsd |= value; 6655 else 6656 arvif->u.sta.uapsd &= ~value; 6657 6658 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6659 WMI_STA_PS_PARAM_UAPSD, 6660 arvif->u.sta.uapsd); 6661 if (ret) { 6662 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); 6663 goto exit; 6664 } 6665 6666 if (arvif->u.sta.uapsd) 6667 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 6668 else 6669 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 6670 6671 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6672 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 6673 value); 6674 if (ret) 6675 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); 6676 6677 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 6678 if (ret) { 6679 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 6680 arvif->vdev_id, ret); 6681 return ret; 6682 } 6683 6684 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 
6685 if (ret) { 6686 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 6687 arvif->vdev_id, ret); 6688 return ret; 6689 } 6690 6691 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || 6692 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { 6693 /* Only userspace can make an educated decision when to send 6694 * trigger frame. The following effectively disables u-UAPSD 6695 * autotrigger in firmware (which is enabled by default 6696 * provided the autotrigger service is available). 6697 */ 6698 6699 arg.wmm_ac = acc; 6700 arg.user_priority = prio; 6701 arg.service_interval = 0; 6702 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6703 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6704 6705 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, 6706 arvif->bssid, &arg, 1); 6707 if (ret) { 6708 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", 6709 ret); 6710 return ret; 6711 } 6712 } 6713 6714 exit: 6715 return ret; 6716 } 6717 6718 static int ath10k_conf_tx(struct ieee80211_hw *hw, 6719 struct ieee80211_vif *vif, u16 ac, 6720 const struct ieee80211_tx_queue_params *params) 6721 { 6722 struct ath10k *ar = hw->priv; 6723 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6724 struct wmi_wmm_params_arg *p = NULL; 6725 int ret; 6726 6727 mutex_lock(&ar->conf_mutex); 6728 6729 switch (ac) { 6730 case IEEE80211_AC_VO: 6731 p = &arvif->wmm_params.ac_vo; 6732 break; 6733 case IEEE80211_AC_VI: 6734 p = &arvif->wmm_params.ac_vi; 6735 break; 6736 case IEEE80211_AC_BE: 6737 p = &arvif->wmm_params.ac_be; 6738 break; 6739 case IEEE80211_AC_BK: 6740 p = &arvif->wmm_params.ac_bk; 6741 break; 6742 } 6743 6744 if (WARN_ON(!p)) { 6745 ret = -EINVAL; 6746 goto exit; 6747 } 6748 6749 p->cwmin = params->cw_min; 6750 p->cwmax = params->cw_max; 6751 p->aifs = params->aifs; 6752 6753 /* 6754 * The channel time duration programmed in the HW is in absolute 6755 * microseconds, while mac80211 gives the txop in units of 6756 * 32 microseconds. 6757 */ 6758 p->txop = params->txop * 32; 6759 6760 if (ar->wmi.ops->gen_vdev_wmm_conf) { 6761 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, 6762 &arvif->wmm_params); 6763 if (ret) { 6764 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", 6765 arvif->vdev_id, ret); 6766 goto exit; 6767 } 6768 } else { 6769 /* This won't work well with multi-interface cases but it's 6770 * better than nothing. 
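* (The WMM parameters are applied pdev-wide in this fallback, so the most recent conf_tx() call on any interface overwrites the settings for all of them.)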
6771 */ 6772 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); 6773 if (ret) { 6774 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); 6775 goto exit; 6776 } 6777 } 6778 6779 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 6780 if (ret) 6781 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); 6782 6783 exit: 6784 mutex_unlock(&ar->conf_mutex); 6785 return ret; 6786 } 6787 6788 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ) 6789 6790 static int ath10k_remain_on_channel(struct ieee80211_hw *hw, 6791 struct ieee80211_vif *vif, 6792 struct ieee80211_channel *chan, 6793 int duration, 6794 enum ieee80211_roc_type type) 6795 { 6796 struct ath10k *ar = hw->priv; 6797 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6798 struct wmi_start_scan_arg arg; 6799 int ret = 0; 6800 u32 scan_time_msec; 6801 6802 mutex_lock(&ar->conf_mutex); 6803 6804 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 6805 ret = -EBUSY; 6806 goto exit; 6807 } 6808 6809 spin_lock_bh(&ar->data_lock); 6810 switch (ar->scan.state) { 6811 case ATH10K_SCAN_IDLE: 6812 reinit_completion(&ar->scan.started); 6813 reinit_completion(&ar->scan.completed); 6814 reinit_completion(&ar->scan.on_channel); 6815 ar->scan.state = ATH10K_SCAN_STARTING; 6816 ar->scan.is_roc = true; 6817 ar->scan.vdev_id = arvif->vdev_id; 6818 ar->scan.roc_freq = chan->center_freq; 6819 ar->scan.roc_notify = true; 6820 ret = 0; 6821 break; 6822 case ATH10K_SCAN_STARTING: 6823 case ATH10K_SCAN_RUNNING: 6824 case ATH10K_SCAN_ABORTING: 6825 ret = -EBUSY; 6826 break; 6827 } 6828 spin_unlock_bh(&ar->data_lock); 6829 6830 if (ret) 6831 goto exit; 6832 6833 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; 6834 6835 memset(&arg, 0, sizeof(arg)); 6836 ath10k_wmi_start_scan_init(ar, &arg); 6837 arg.vdev_id = arvif->vdev_id; 6838 arg.scan_id = ATH10K_SCAN_ID; 6839 arg.n_channels = 1; 6840 arg.channels[0] = chan->center_freq; 6841 arg.dwell_time_active = scan_time_msec; 6842 arg.dwell_time_passive = scan_time_msec; 6843 arg.max_scan_time = scan_time_msec; 6844 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 6845 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; 6846 arg.burst_duration_ms = duration; 6847 6848 ret = ath10k_start_scan(ar, &arg); 6849 if (ret) { 6850 ath10k_warn(ar, "failed to start roc scan: %d\n", ret); 6851 spin_lock_bh(&ar->data_lock); 6852 ar->scan.state = ATH10K_SCAN_IDLE; 6853 spin_unlock_bh(&ar->data_lock); 6854 goto exit; 6855 } 6856 6857 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); 6858 if (ret == 0) { 6859 ath10k_warn(ar, "failed to switch to channel for roc scan\n"); 6860 6861 ret = ath10k_scan_stop(ar); 6862 if (ret) 6863 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 6864 6865 ret = -ETIMEDOUT; 6866 goto exit; 6867 } 6868 6869 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 6870 msecs_to_jiffies(duration)); 6871 6872 ret = 0; 6873 exit: 6874 mutex_unlock(&ar->conf_mutex); 6875 return ret; 6876 } 6877 6878 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) 6879 { 6880 struct ath10k *ar = hw->priv; 6881 6882 mutex_lock(&ar->conf_mutex); 6883 6884 spin_lock_bh(&ar->data_lock); 6885 ar->scan.roc_notify = false; 6886 spin_unlock_bh(&ar->data_lock); 6887 6888 ath10k_scan_abort(ar); 6889 6890 mutex_unlock(&ar->conf_mutex); 6891 6892 cancel_delayed_work_sync(&ar->scan.timeout); 6893 6894 return 0; 6895 } 6896 6897 /* 6898 * Both RTS and Fragmentation threshold are interface-specific 6899 * in ath10k, but device-specific in mac80211. 
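* ath10k_set_rts_threshold() below therefore fans the single device-wide value out to every vdev, while the fragmentation threshold is reported as unsupported altogether (see ath10k_mac_op_set_frag_threshold()).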
6900 */ 6901 6902 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 6903 { 6904 struct ath10k *ar = hw->priv; 6905 struct ath10k_vif *arvif; 6906 int ret = 0; 6907 6908 mutex_lock(&ar->conf_mutex); 6909 list_for_each_entry(arvif, &ar->arvifs, list) { 6910 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", 6911 arvif->vdev_id, value); 6912 6913 ret = ath10k_mac_set_rts(arvif, value); 6914 if (ret) { 6915 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 6916 arvif->vdev_id, ret); 6917 break; 6918 } 6919 } 6920 mutex_unlock(&ar->conf_mutex); 6921 6922 return ret; 6923 } 6924 6925 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) 6926 { 6927 /* Even though there's a WMI enum for fragmentation threshold no known 6928 * firmware actually implements it. Moreover it is not possible to leave 6929 * frame fragmentation to mac80211 because firmware clears the "more 6930 * fragments" bit in frame control making it impossible for remote 6931 * devices to reassemble frames. 6932 * 6933 * Hence implement a dummy callback just to say fragmentation isn't 6934 * supported. This effectively prevents mac80211 from doing frame 6935 * fragmentation in software. 6936 */ 6937 return -EOPNOTSUPP; 6938 } 6939 6940 void ath10k_mac_wait_tx_complete(struct ath10k *ar) 6941 { 6942 bool skip; 6943 long time_left; 6944 6945 /* mac80211 doesn't care if we really xmit queued frames or not; 6946 * we'll collect those frames either way if we stop/delete vdevs 6947 */ 6948 6949 if (ar->state == ATH10K_STATE_WEDGED) 6950 return; 6951 6952 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({ 6953 bool empty; 6954 6955 spin_lock_bh(&ar->htt.tx_lock); 6956 empty = (ar->htt.num_pending_tx == 0); 6957 spin_unlock_bh(&ar->htt.tx_lock); 6958 6959 skip = (ar->state == ATH10K_STATE_WEDGED) || 6960 test_bit(ATH10K_FLAG_CRASH_FLUSH, 6961 &ar->dev_flags); 6962 6963 (empty || skip); 6964 }), ATH10K_FLUSH_TIMEOUT_HZ); 6965 6966 if (time_left == 0 || skip) 6967 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n", 6968 skip, ar->state, time_left); 6969 } 6970 6971 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 6972 u32 queues, bool drop) 6973 { 6974 struct ath10k *ar = hw->priv; 6975 struct ath10k_vif *arvif; 6976 u32 bitmap; 6977 6978 if (drop) { 6979 if (vif && vif->type == NL80211_IFTYPE_STATION) { 6980 bitmap = ~(1 << WMI_MGMT_TID); 6981 list_for_each_entry(arvif, &ar->arvifs, list) { 6982 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 6983 ath10k_wmi_peer_flush(ar, arvif->vdev_id, 6984 arvif->bssid, bitmap); 6985 } 6986 } 6987 return; 6988 } 6989 6990 mutex_lock(&ar->conf_mutex); 6991 ath10k_mac_wait_tx_complete(ar); 6992 mutex_unlock(&ar->conf_mutex); 6993 } 6994 6995 /* TODO: Implement this function properly 6996 * For now it is needed to reply to Probe Requests in IBSS mode. 6997 * Probably we need this information from FW. 6998 */ 6999 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw) 7000 { 7001 return 1; 7002 } 7003 7004 static void ath10k_reconfig_complete(struct ieee80211_hw *hw, 7005 enum ieee80211_reconfig_type reconfig_type) 7006 { 7007 struct ath10k *ar = hw->priv; 7008 7009 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) 7010 return; 7011 7012 mutex_lock(&ar->conf_mutex); 7013 7014 /* If the device failed to restart it will be in a different state, e.g. 
7015 * ATH10K_STATE_WEDGED 7016 */ 7017 if (ar->state == ATH10K_STATE_RESTARTED) { 7018 ath10k_info(ar, "device successfully recovered\n"); 7019 ar->state = ATH10K_STATE_ON; 7020 ieee80211_wake_queues(ar->hw); 7021 } 7022 7023 mutex_unlock(&ar->conf_mutex); 7024 } 7025 7026 static void 7027 ath10k_mac_update_bss_chan_survey(struct ath10k *ar, 7028 struct ieee80211_channel *channel) 7029 { 7030 int ret; 7031 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; 7032 7033 lockdep_assert_held(&ar->conf_mutex); 7034 7035 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) || 7036 (ar->rx_channel != channel)) 7037 return; 7038 7039 if (ar->scan.state != ATH10K_SCAN_IDLE) { 7040 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n"); 7041 return; 7042 } 7043 7044 reinit_completion(&ar->bss_survey_done); 7045 7046 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type); 7047 if (ret) { 7048 ath10k_warn(ar, "failed to send pdev bss chan info request\n"); 7049 return; 7050 } 7051 7052 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); 7053 if (!ret) { 7054 ath10k_warn(ar, "bss channel survey timed out\n"); 7055 return; 7056 } 7057 } 7058 7059 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, 7060 struct survey_info *survey) 7061 { 7062 struct ath10k *ar = hw->priv; 7063 struct ieee80211_supported_band *sband; 7064 struct survey_info *ar_survey = &ar->survey[idx]; 7065 int ret = 0; 7066 7067 mutex_lock(&ar->conf_mutex); 7068 7069 sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; 7070 if (sband && idx >= sband->n_channels) { 7071 idx -= sband->n_channels; 7072 sband = NULL; 7073 } 7074 7075 if (!sband) 7076 sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; 7077 7078 if (!sband || idx >= sband->n_channels) { 7079 ret = -ENOENT; 7080 goto exit; 7081 } 7082 7083 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); 7084 7085 spin_lock_bh(&ar->data_lock); 7086 memcpy(survey, ar_survey, sizeof(*survey)); 7087 spin_unlock_bh(&ar->data_lock); 7088 7089 survey->channel = &sband->channels[idx]; 7090 7091 if (ar->rx_channel == survey->channel) 7092 survey->filled |= SURVEY_INFO_IN_USE; 7093 7094 exit: 7095 mutex_unlock(&ar->conf_mutex); 7096 return ret; 7097 } 7098 7099 static bool 7100 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, 7101 enum nl80211_band band, 7102 const struct cfg80211_bitrate_mask *mask) 7103 { 7104 int num_rates = 0; 7105 int i; 7106 7107 num_rates += hweight32(mask->control[band].legacy); 7108 7109 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 7110 num_rates += hweight8(mask->control[band].ht_mcs[i]); 7111 7112 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) 7113 num_rates += hweight16(mask->control[band].vht_mcs[i]); 7114 7115 return num_rates == 1; 7116 } 7117 7118 static bool 7119 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, 7120 enum nl80211_band band, 7121 const struct cfg80211_bitrate_mask *mask, 7122 int *nss) 7123 { 7124 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 7125 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 7126 u8 ht_nss_mask = 0; 7127 u8 vht_nss_mask = 0; 7128 int i; 7129 7130 if (mask->control[band].legacy) 7131 return false; 7132 7133 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 7134 if (mask->control[band].ht_mcs[i] == 0) 7135 continue; 7136 else if (mask->control[band].ht_mcs[i] == 7137 sband->ht_cap.mcs.rx_mask[i]) 7138 ht_nss_mask |= BIT(i); 7139 else 7140 return false; 7141 
} 7142 7143 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 7144 if (mask->control[band].vht_mcs[i] == 0) 7145 continue; 7146 else if (mask->control[band].vht_mcs[i] == 7147 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 7148 vht_nss_mask |= BIT(i); 7149 else 7150 return false; 7151 } 7152 7153 if (ht_nss_mask != vht_nss_mask) 7154 return false; 7155 7156 if (ht_nss_mask == 0) 7157 return false; 7158 7159 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 7160 return false; 7161 7162 *nss = fls(ht_nss_mask); 7163 7164 return true; 7165 } 7166 7167 static int 7168 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, 7169 enum nl80211_band band, 7170 const struct cfg80211_bitrate_mask *mask, 7171 u8 *rate, u8 *nss) 7172 { 7173 int rate_idx; 7174 int i; 7175 u16 bitrate; 7176 u8 preamble; 7177 u8 hw_rate; 7178 7179 if (hweight32(mask->control[band].legacy) == 1) { 7180 rate_idx = ffs(mask->control[band].legacy) - 1; 7181 7182 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 7183 rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX; 7184 7185 hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value; 7186 bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate; 7187 7188 if (ath10k_mac_bitrate_is_cck(bitrate)) 7189 preamble = WMI_RATE_PREAMBLE_CCK; 7190 else 7191 preamble = WMI_RATE_PREAMBLE_OFDM; 7192 7193 *nss = 1; 7194 *rate = preamble << 6 | 7195 (*nss - 1) << 4 | 7196 hw_rate << 0; 7197 7198 return 0; 7199 } 7200 7201 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 7202 if (hweight8(mask->control[band].ht_mcs[i]) == 1) { 7203 *nss = i + 1; 7204 *rate = WMI_RATE_PREAMBLE_HT << 6 | 7205 (*nss - 1) << 4 | 7206 (ffs(mask->control[band].ht_mcs[i]) - 1); 7207 7208 return 0; 7209 } 7210 } 7211 7212 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 7213 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 7214 *nss = i + 1; 7215 *rate = WMI_RATE_PREAMBLE_VHT << 6 | 7216 (*nss - 1) << 4 | 7217 (ffs(mask->control[band].vht_mcs[i]) - 1); 7218 7219 return 0; 7220 } 7221 } 7222 7223 return -EINVAL; 7224 } 7225 7226 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, 7227 u8 rate, u8 nss, u8 sgi, u8 ldpc) 7228 { 7229 struct ath10k *ar = arvif->ar; 7230 u32 vdev_param; 7231 int ret; 7232 7233 lockdep_assert_held(&ar->conf_mutex); 7234 7235 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n", 7236 arvif->vdev_id, rate, nss, sgi); 7237 7238 vdev_param = ar->wmi.vdev_param->fixed_rate; 7239 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate); 7240 if (ret) { 7241 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", 7242 rate, ret); 7243 return ret; 7244 } 7245 7246 vdev_param = ar->wmi.vdev_param->nss; 7247 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss); 7248 if (ret) { 7249 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret); 7250 return ret; 7251 } 7252 7253 vdev_param = ar->wmi.vdev_param->sgi; 7254 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi); 7255 if (ret) { 7256 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret); 7257 return ret; 7258 } 7259 7260 vdev_param = ar->wmi.vdev_param->ldpc; 7261 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc); 7262 if (ret) { 7263 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret); 7264 return ret; 7265 } 7266 7267 return 0; 7268 } 7269 7270 static bool 7271 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, 7272 enum nl80211_band band, 7273 const 
struct cfg80211_bitrate_mask *mask) 7274 { 7275 int i; 7276 u16 vht_mcs; 7277 7278 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible 7279 * to express all VHT MCS rate masks. Effectively only the following 7280 * ranges can be used: none, 0-7, 0-8 and 0-9. 7281 */ 7282 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 7283 vht_mcs = mask->control[band].vht_mcs[i]; 7284 7285 switch (vht_mcs) { 7286 case 0: 7287 case BIT(8) - 1: 7288 case BIT(9) - 1: 7289 case BIT(10) - 1: 7290 break; 7291 default: 7292 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n"); 7293 return false; 7294 } 7295 } 7296 7297 return true; 7298 } 7299 7300 static void ath10k_mac_set_bitrate_mask_iter(void *data, 7301 struct ieee80211_sta *sta) 7302 { 7303 struct ath10k_vif *arvif = data; 7304 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7305 struct ath10k *ar = arvif->ar; 7306 7307 if (arsta->arvif != arvif) 7308 return; 7309 7310 spin_lock_bh(&ar->data_lock); 7311 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 7312 spin_unlock_bh(&ar->data_lock); 7313 7314 ieee80211_queue_work(ar->hw, &arsta->update_wk); 7315 } 7316 7317 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 7318 struct ieee80211_vif *vif, 7319 const struct cfg80211_bitrate_mask *mask) 7320 { 7321 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7322 struct cfg80211_chan_def def; 7323 struct ath10k *ar = arvif->ar; 7324 enum nl80211_band band; 7325 const u8 *ht_mcs_mask; 7326 const u16 *vht_mcs_mask; 7327 u8 rate; 7328 u8 nss; 7329 u8 sgi; 7330 u8 ldpc; 7331 int single_nss; 7332 int ret; 7333 7334 if (ath10k_mac_vif_chan(vif, &def)) 7335 return -EPERM; 7336 7337 band = def.chan->band; 7338 ht_mcs_mask = mask->control[band].ht_mcs; 7339 vht_mcs_mask = mask->control[band].vht_mcs; 7340 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 7341 7342 sgi = mask->control[band].gi; 7343 if (sgi == NL80211_TXRATE_FORCE_LGI) 7344 return -EINVAL; 7345 7346 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) { 7347 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, 7348 &rate, &nss); 7349 if (ret) { 7350 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n", 7351 arvif->vdev_id, ret); 7352 return ret; 7353 } 7354 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask, 7355 &single_nss)) { 7356 rate = WMI_FIXED_RATE_NONE; 7357 nss = single_nss; 7358 } else { 7359 rate = WMI_FIXED_RATE_NONE; 7360 nss = min(ar->num_rf_chains, 7361 max(ath10k_mac_max_ht_nss(ht_mcs_mask), 7362 ath10k_mac_max_vht_nss(vht_mcs_mask))); 7363 7364 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask)) 7365 return -EINVAL; 7366 7367 mutex_lock(&ar->conf_mutex); 7368 7369 arvif->bitrate_mask = *mask; 7370 ieee80211_iterate_stations_atomic(ar->hw, 7371 ath10k_mac_set_bitrate_mask_iter, 7372 arvif); 7373 7374 mutex_unlock(&ar->conf_mutex); 7375 } 7376 7377 mutex_lock(&ar->conf_mutex); 7378 7379 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); 7380 if (ret) { 7381 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", 7382 arvif->vdev_id, ret); 7383 goto exit; 7384 } 7385 7386 exit: 7387 mutex_unlock(&ar->conf_mutex); 7388 7389 return ret; 7390 } 7391 7392 static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 7393 struct ieee80211_vif *vif, 7394 struct ieee80211_sta *sta, 7395 u32 changed) 7396 { 7397 struct ath10k *ar = hw->priv; 7398 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7399 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7400 
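/* Note: this callback may be invoked in atomic context, so only the new bw/nss/smps values are recorded under data_lock here; arsta->update_wk (ath10k_sta_rc_update_wk) later pushes them to firmware while holding conf_mutex. */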
struct ath10k_peer *peer; 7401 u32 bw, smps; 7402 7403 spin_lock_bh(&ar->data_lock); 7404 7405 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 7406 if (!peer) { 7407 spin_unlock_bh(&ar->data_lock); 7408 ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n", 7409 sta->addr, arvif->vdev_id); 7410 return; 7411 } 7412 7413 ath10k_dbg(ar, ATH10K_DBG_MAC, 7414 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", 7415 sta->addr, changed, sta->bandwidth, sta->rx_nss, 7416 sta->smps_mode); 7417 7418 if (changed & IEEE80211_RC_BW_CHANGED) { 7419 bw = WMI_PEER_CHWIDTH_20MHZ; 7420 7421 switch (sta->bandwidth) { 7422 case IEEE80211_STA_RX_BW_20: 7423 bw = WMI_PEER_CHWIDTH_20MHZ; 7424 break; 7425 case IEEE80211_STA_RX_BW_40: 7426 bw = WMI_PEER_CHWIDTH_40MHZ; 7427 break; 7428 case IEEE80211_STA_RX_BW_80: 7429 bw = WMI_PEER_CHWIDTH_80MHZ; 7430 break; 7431 case IEEE80211_STA_RX_BW_160: 7432 bw = WMI_PEER_CHWIDTH_160MHZ; 7433 break; 7434 default: 7435 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", 7436 sta->bandwidth, sta->addr); 7437 bw = WMI_PEER_CHWIDTH_20MHZ; 7438 break; 7439 } 7440 7441 arsta->bw = bw; 7442 } 7443 7444 if (changed & IEEE80211_RC_NSS_CHANGED) 7445 arsta->nss = sta->rx_nss; 7446 7447 if (changed & IEEE80211_RC_SMPS_CHANGED) { 7448 smps = WMI_PEER_SMPS_PS_NONE; 7449 7450 switch (sta->smps_mode) { 7451 case IEEE80211_SMPS_AUTOMATIC: 7452 case IEEE80211_SMPS_OFF: 7453 smps = WMI_PEER_SMPS_PS_NONE; 7454 break; 7455 case IEEE80211_SMPS_STATIC: 7456 smps = WMI_PEER_SMPS_STATIC; 7457 break; 7458 case IEEE80211_SMPS_DYNAMIC: 7459 smps = WMI_PEER_SMPS_DYNAMIC; 7460 break; 7461 case IEEE80211_SMPS_NUM_MODES: 7462 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", 7463 sta->smps_mode, sta->addr); 7464 smps = WMI_PEER_SMPS_PS_NONE; 7465 break; 7466 } 7467 7468 arsta->smps = smps; 7469 } 7470 7471 arsta->changed |= changed; 7472 7473 spin_unlock_bh(&ar->data_lock); 7474 7475 ieee80211_queue_work(hw, &arsta->update_wk); 7476 } 7477 7478 static void ath10k_offset_tsf(struct ieee80211_hw *hw, 7479 struct ieee80211_vif *vif, s64 tsf_offset) 7480 { 7481 struct ath10k *ar = hw->priv; 7482 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7483 u32 offset, vdev_param; 7484 int ret; 7485 7486 if (tsf_offset < 0) { 7487 vdev_param = ar->wmi.vdev_param->dec_tsf; 7488 offset = -tsf_offset; 7489 } else { 7490 vdev_param = ar->wmi.vdev_param->inc_tsf; 7491 offset = tsf_offset; 7492 } 7493 7494 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 7495 vdev_param, offset); 7496 7497 if (ret && ret != -EOPNOTSUPP) 7498 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n", 7499 offset, vdev_param, ret); 7500 } 7501 7502 static int ath10k_ampdu_action(struct ieee80211_hw *hw, 7503 struct ieee80211_vif *vif, 7504 struct ieee80211_ampdu_params *params) 7505 { 7506 struct ath10k *ar = hw->priv; 7507 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7508 struct ieee80211_sta *sta = params->sta; 7509 enum ieee80211_ampdu_mlme_action action = params->action; 7510 u16 tid = params->tid; 7511 7512 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", 7513 arvif->vdev_id, sta->addr, tid, action); 7514 7515 switch (action) { 7516 case IEEE80211_AMPDU_RX_START: 7517 case IEEE80211_AMPDU_RX_STOP: 7518 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session 7519 * creation/removal. Do we need to verify this? 
7520 */ 7521 return 0; 7522 case IEEE80211_AMPDU_TX_START: 7523 case IEEE80211_AMPDU_TX_STOP_CONT: 7524 case IEEE80211_AMPDU_TX_STOP_FLUSH: 7525 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 7526 case IEEE80211_AMPDU_TX_OPERATIONAL: 7527 /* Firmware offloads Tx aggregation entirely so deny mac80211 7528 * Tx aggregation requests. 7529 */ 7530 return -EOPNOTSUPP; 7531 } 7532 7533 return -EINVAL; 7534 } 7535 7536 static void 7537 ath10k_mac_update_rx_channel(struct ath10k *ar, 7538 struct ieee80211_chanctx_conf *ctx, 7539 struct ieee80211_vif_chanctx_switch *vifs, 7540 int n_vifs) 7541 { 7542 struct cfg80211_chan_def *def = NULL; 7543 7544 /* Both locks are required because ar->rx_channel is modified. This 7545 * allows readers to hold either lock. 7546 */ 7547 lockdep_assert_held(&ar->conf_mutex); 7548 lockdep_assert_held(&ar->data_lock); 7549 7550 WARN_ON(ctx && vifs); 7551 WARN_ON(vifs && !n_vifs); 7552 7553 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are 7554 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each 7555 * ppdu on Rx may reduce performance on low-end systems. It should be 7556 * possible to make tables/hashmaps to speed the lookup up (be wary of 7557 * cpu data cache lines though regarding sizes) but to keep the initial 7558 * implementation simple and less intrusive, fall back to the slow lookup 7559 * only for multi-channel cases. Single-channel cases will continue to 7560 * use the old channel derivation and thus performance should not be 7561 * affected much. 7562 */ 7563 rcu_read_lock(); 7564 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) { 7565 ieee80211_iter_chan_contexts_atomic(ar->hw, 7566 ath10k_mac_get_any_chandef_iter, 7567 &def); 7568 7569 if (vifs) 7570 def = &vifs[0].new_ctx->def; 7571 7572 ar->rx_channel = def->chan; 7573 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) || 7574 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) { 7575 /* During driver restart due to firmware assert, since mac80211 7576 * already has a valid channel context for the given radio, channel 7577 * context iteration returns num_chanctx > 0. So fix rx_channel 7578 * when restart is in progress. 7579 */ 7580 ar->rx_channel = ctx->def.chan; 7581 } else { 7582 ar->rx_channel = NULL; 7583 } 7584 rcu_read_unlock(); 7585 } 7586 7587 static void 7588 ath10k_mac_update_vif_chan(struct ath10k *ar, 7589 struct ieee80211_vif_chanctx_switch *vifs, 7590 int n_vifs) 7591 { 7592 struct ath10k_vif *arvif; 7593 int ret; 7594 int i; 7595 7596 lockdep_assert_held(&ar->conf_mutex); 7597 7598 /* First stop the monitor interface. Some FW versions crash if there's a 7599 * lone monitor interface. 7600 */ 7601 if (ar->monitor_started) 7602 ath10k_monitor_stop(ar); 7603 7604 for (i = 0; i < n_vifs; i++) { 7605 arvif = (void *)vifs[i].vif->drv_priv; 7606 7607 ath10k_dbg(ar, ATH10K_DBG_MAC, 7608 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n", 7609 arvif->vdev_id, 7610 vifs[i].old_ctx->def.chan->center_freq, 7611 vifs[i].new_ctx->def.chan->center_freq, 7612 vifs[i].old_ctx->def.width, 7613 vifs[i].new_ctx->def.width); 7614 7615 if (WARN_ON(!arvif->is_started)) 7616 continue; 7617 7618 if (WARN_ON(!arvif->is_up)) 7619 continue; 7620 7621 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7622 if (ret) { 7623 ath10k_warn(ar, "failed to down vdev %d: %d\n", 7624 arvif->vdev_id, ret); 7625 continue; 7626 } 7627 } 7628 7629 /* All relevant vdevs are downed and associated channel resources 7630 * should be available for the channel switch now. 
7631 */ 7632 7633 spin_lock_bh(&ar->data_lock); 7634 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs); 7635 spin_unlock_bh(&ar->data_lock); 7636 7637 for (i = 0; i < n_vifs; i++) { 7638 arvif = (void *)vifs[i].vif->drv_priv; 7639 7640 if (WARN_ON(!arvif->is_started)) 7641 continue; 7642 7643 if (WARN_ON(!arvif->is_up)) 7644 continue; 7645 7646 ret = ath10k_mac_setup_bcn_tmpl(arvif); 7647 if (ret) 7648 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 7649 ret); 7650 7651 ret = ath10k_mac_setup_prb_tmpl(arvif); 7652 if (ret) 7653 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 7654 ret); 7655 7656 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); 7657 if (ret) { 7658 ath10k_warn(ar, "failed to restart vdev %d: %d\n", 7659 arvif->vdev_id, ret); 7660 continue; 7661 } 7662 7663 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 7664 arvif->bssid); 7665 if (ret) { 7666 ath10k_warn(ar, "failed to bring vdev up %d: %d\n", 7667 arvif->vdev_id, ret); 7668 continue; 7669 } 7670 } 7671 7672 ath10k_monitor_recalc(ar); 7673 } 7674 7675 static int 7676 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, 7677 struct ieee80211_chanctx_conf *ctx) 7678 { 7679 struct ath10k *ar = hw->priv; 7680 7681 ath10k_dbg(ar, ATH10K_DBG_MAC, 7682 "mac chanctx add freq %hu width %d ptr %pK\n", 7683 ctx->def.chan->center_freq, ctx->def.width, ctx); 7684 7685 mutex_lock(&ar->conf_mutex); 7686 7687 spin_lock_bh(&ar->data_lock); 7688 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0); 7689 spin_unlock_bh(&ar->data_lock); 7690 7691 ath10k_recalc_radar_detection(ar); 7692 ath10k_monitor_recalc(ar); 7693 7694 mutex_unlock(&ar->conf_mutex); 7695 7696 return 0; 7697 } 7698 7699 static void 7700 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 7701 struct ieee80211_chanctx_conf *ctx) 7702 { 7703 struct ath10k *ar = hw->priv; 7704 7705 ath10k_dbg(ar, ATH10K_DBG_MAC, 7706 "mac chanctx remove freq %hu width %d ptr %pK\n", 7707 ctx->def.chan->center_freq, ctx->def.width, ctx); 7708 7709 mutex_lock(&ar->conf_mutex); 7710 7711 spin_lock_bh(&ar->data_lock); 7712 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0); 7713 spin_unlock_bh(&ar->data_lock); 7714 7715 ath10k_recalc_radar_detection(ar); 7716 ath10k_monitor_recalc(ar); 7717 7718 mutex_unlock(&ar->conf_mutex); 7719 } 7720 7721 struct ath10k_mac_change_chanctx_arg { 7722 struct ieee80211_chanctx_conf *ctx; 7723 struct ieee80211_vif_chanctx_switch *vifs; 7724 int n_vifs; 7725 int next_vif; 7726 }; 7727 7728 static void 7729 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, 7730 struct ieee80211_vif *vif) 7731 { 7732 struct ath10k_mac_change_chanctx_arg *arg = data; 7733 7734 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx) 7735 return; 7736 7737 arg->n_vifs++; 7738 } 7739 7740 static void 7741 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac, 7742 struct ieee80211_vif *vif) 7743 { 7744 struct ath10k_mac_change_chanctx_arg *arg = data; 7745 struct ieee80211_chanctx_conf *ctx; 7746 7747 ctx = rcu_access_pointer(vif->chanctx_conf); 7748 if (ctx != arg->ctx) 7749 return; 7750 7751 if (WARN_ON(arg->next_vif == arg->n_vifs)) 7752 return; 7753 7754 arg->vifs[arg->next_vif].vif = vif; 7755 arg->vifs[arg->next_vif].old_ctx = ctx; 7756 arg->vifs[arg->next_vif].new_ctx = ctx; 7757 arg->next_vif++; 7758 } 7759 7760 static void 7761 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, 7762 struct ieee80211_chanctx_conf *ctx, 7763 u32 changed) 7764 { 7765 struct ath10k *ar = hw->priv; 7766 struct ath10k_mac_change_chanctx_arg arg = 
{ .ctx = ctx }; 7767 7768 mutex_lock(&ar->conf_mutex); 7769 7770 ath10k_dbg(ar, ATH10K_DBG_MAC, 7771 "mac chanctx change freq %hu width %d ptr %pK changed %x\n", 7772 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 7773 7774 /* This shouldn't really happen because channel switching should use 7775 * switch_vif_chanctx(). 7776 */ 7777 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 7778 goto unlock; 7779 7780 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) { 7781 ieee80211_iterate_active_interfaces_atomic( 7782 hw, 7783 IEEE80211_IFACE_ITER_NORMAL, 7784 ath10k_mac_change_chanctx_cnt_iter, 7785 &arg); 7786 if (arg.n_vifs == 0) 7787 goto radar; 7788 7789 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), 7790 GFP_KERNEL); 7791 if (!arg.vifs) 7792 goto radar; 7793 7794 ieee80211_iterate_active_interfaces_atomic( 7795 hw, 7796 IEEE80211_IFACE_ITER_NORMAL, 7797 ath10k_mac_change_chanctx_fill_iter, 7798 &arg); 7799 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); 7800 kfree(arg.vifs); 7801 } 7802 7803 radar: 7804 ath10k_recalc_radar_detection(ar); 7805 7806 /* FIXME: How to configure Rx chains properly? */ 7807 7808 /* No other actions are actually necessary. Firmware maintains channel 7809 * definitions per vdev internally and there's no host-side channel 7810 * context abstraction to configure, e.g. channel width. 7811 */ 7812 7813 unlock: 7814 mutex_unlock(&ar->conf_mutex); 7815 } 7816 7817 static int 7818 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 7819 struct ieee80211_vif *vif, 7820 struct ieee80211_chanctx_conf *ctx) 7821 { 7822 struct ath10k *ar = hw->priv; 7823 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7824 int ret; 7825 7826 mutex_lock(&ar->conf_mutex); 7827 7828 ath10k_dbg(ar, ATH10K_DBG_MAC, 7829 "mac chanctx assign ptr %pK vdev_id %i\n", 7830 ctx, arvif->vdev_id); 7831 7832 if (WARN_ON(arvif->is_started)) { 7833 mutex_unlock(&ar->conf_mutex); 7834 return -EBUSY; 7835 } 7836 7837 ret = ath10k_vdev_start(arvif, &ctx->def); 7838 if (ret) { 7839 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n", 7840 arvif->vdev_id, vif->addr, 7841 ctx->def.chan->center_freq, ret); 7842 goto err; 7843 } 7844 7845 arvif->is_started = true; 7846 7847 ret = ath10k_mac_vif_setup_ps(arvif); 7848 if (ret) { 7849 ath10k_warn(ar, "failed to update vdev %i ps: %d\n", 7850 arvif->vdev_id, ret); 7851 goto err_stop; 7852 } 7853 7854 if (vif->type == NL80211_IFTYPE_MONITOR) { 7855 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); 7856 if (ret) { 7857 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n", 7858 arvif->vdev_id, ret); 7859 goto err_stop; 7860 } 7861 7862 arvif->is_up = true; 7863 } 7864 7865 if (ath10k_mac_can_set_cts_prot(arvif)) { 7866 ret = ath10k_mac_set_cts_prot(arvif); 7867 if (ret) 7868 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 7869 arvif->vdev_id, ret); 7870 } 7871 7872 if (ath10k_peer_stats_enabled(ar)) { 7873 ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS; 7874 ret = ath10k_wmi_pdev_pktlog_enable(ar, 7875 ar->pktlog_filter); 7876 if (ret) { 7877 ath10k_warn(ar, "failed to enable pktlog %d\n", ret); 7878 goto err_stop; 7879 } 7880 } 7881 7882 mutex_unlock(&ar->conf_mutex); 7883 return 0; 7884 7885 err_stop: 7886 ath10k_vdev_stop(arvif); 7887 arvif->is_started = false; 7888 ath10k_mac_vif_setup_ps(arvif); 7889 7890 err: 7891 mutex_unlock(&ar->conf_mutex); 7892 return ret; 7893 } 7894 7895 static void 7896 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 7897 struct ieee80211_vif *vif, 7898 
struct ieee80211_chanctx_conf *ctx) 7899 { 7900 struct ath10k *ar = hw->priv; 7901 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7902 int ret; 7903 7904 mutex_lock(&ar->conf_mutex); 7905 7906 ath10k_dbg(ar, ATH10K_DBG_MAC, 7907 "mac chanctx unassign ptr %pK vdev_id %i\n", 7908 ctx, arvif->vdev_id); 7909 7910 WARN_ON(!arvif->is_started); 7911 7912 if (vif->type == NL80211_IFTYPE_MONITOR) { 7913 WARN_ON(!arvif->is_up); 7914 7915 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7916 if (ret) 7917 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n", 7918 arvif->vdev_id, ret); 7919 7920 arvif->is_up = false; 7921 } 7922 7923 ret = ath10k_vdev_stop(arvif); 7924 if (ret) 7925 ath10k_warn(ar, "failed to stop vdev %i: %d\n", 7926 arvif->vdev_id, ret); 7927 7928 arvif->is_started = false; 7929 7930 mutex_unlock(&ar->conf_mutex); 7931 } 7932 7933 static int 7934 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 7935 struct ieee80211_vif_chanctx_switch *vifs, 7936 int n_vifs, 7937 enum ieee80211_chanctx_switch_mode mode) 7938 { 7939 struct ath10k *ar = hw->priv; 7940 7941 mutex_lock(&ar->conf_mutex); 7942 7943 ath10k_dbg(ar, ATH10K_DBG_MAC, 7944 "mac chanctx switch n_vifs %d mode %d\n", 7945 n_vifs, mode); 7946 ath10k_mac_update_vif_chan(ar, vifs, n_vifs); 7947 7948 mutex_unlock(&ar->conf_mutex); 7949 return 0; 7950 } 7951 7952 static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, 7953 struct ieee80211_vif *vif, 7954 struct ieee80211_sta *sta) 7955 { 7956 struct ath10k *ar; 7957 struct ath10k_peer *peer; 7958 7959 ar = hw->priv; 7960 7961 list_for_each_entry(peer, &ar->peers, list) 7962 if (peer->sta == sta) 7963 peer->removed = true; 7964 } 7965 7966 static void ath10k_sta_statistics(struct ieee80211_hw *hw, 7967 struct ieee80211_vif *vif, 7968 struct ieee80211_sta *sta, 7969 struct station_info *sinfo) 7970 { 7971 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7972 struct ath10k *ar = arsta->arvif->ar; 7973 7974 if (!ath10k_peer_stats_enabled(ar)) 7975 return; 7976 7977 sinfo->rx_duration = arsta->rx_duration; 7978 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); 7979 7980 if (!arsta->txrate.legacy && !arsta->txrate.nss) 7981 return; 7982 7983 if (arsta->txrate.legacy) { 7984 sinfo->txrate.legacy = arsta->txrate.legacy; 7985 } else { 7986 sinfo->txrate.mcs = arsta->txrate.mcs; 7987 sinfo->txrate.nss = arsta->txrate.nss; 7988 sinfo->txrate.bw = arsta->txrate.bw; 7989 } 7990 sinfo->txrate.flags = arsta->txrate.flags; 7991 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 7992 } 7993 7994 static const struct ieee80211_ops ath10k_ops = { 7995 .tx = ath10k_mac_op_tx, 7996 .wake_tx_queue = ath10k_mac_op_wake_tx_queue, 7997 .start = ath10k_start, 7998 .stop = ath10k_stop, 7999 .config = ath10k_config, 8000 .add_interface = ath10k_add_interface, 8001 .remove_interface = ath10k_remove_interface, 8002 .configure_filter = ath10k_configure_filter, 8003 .bss_info_changed = ath10k_bss_info_changed, 8004 .set_coverage_class = ath10k_mac_op_set_coverage_class, 8005 .hw_scan = ath10k_hw_scan, 8006 .cancel_hw_scan = ath10k_cancel_hw_scan, 8007 .set_key = ath10k_set_key, 8008 .set_default_unicast_key = ath10k_set_default_unicast_key, 8009 .sta_state = ath10k_sta_state, 8010 .conf_tx = ath10k_conf_tx, 8011 .remain_on_channel = ath10k_remain_on_channel, 8012 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, 8013 .set_rts_threshold = ath10k_set_rts_threshold, 8014 .set_frag_threshold = ath10k_mac_op_set_frag_threshold, 8015 .flush = ath10k_flush, 8016 
.tx_last_beacon = ath10k_tx_last_beacon, 8017 .set_antenna = ath10k_set_antenna, 8018 .get_antenna = ath10k_get_antenna, 8019 .reconfig_complete = ath10k_reconfig_complete, 8020 .get_survey = ath10k_get_survey, 8021 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, 8022 .sta_rc_update = ath10k_sta_rc_update, 8023 .offset_tsf = ath10k_offset_tsf, 8024 .ampdu_action = ath10k_ampdu_action, 8025 .get_et_sset_count = ath10k_debug_get_et_sset_count, 8026 .get_et_stats = ath10k_debug_get_et_stats, 8027 .get_et_strings = ath10k_debug_get_et_strings, 8028 .add_chanctx = ath10k_mac_op_add_chanctx, 8029 .remove_chanctx = ath10k_mac_op_remove_chanctx, 8030 .change_chanctx = ath10k_mac_op_change_chanctx, 8031 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, 8032 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, 8033 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, 8034 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, 8035 .sta_statistics = ath10k_sta_statistics, 8036 8037 CFG80211_TESTMODE_CMD(ath10k_tm_cmd) 8038 8039 #ifdef CONFIG_PM 8040 .suspend = ath10k_wow_op_suspend, 8041 .resume = ath10k_wow_op_resume, 8042 .set_wakeup = ath10k_wow_op_set_wakeup, 8043 #endif 8044 #ifdef CONFIG_MAC80211_DEBUGFS 8045 .sta_add_debugfs = ath10k_sta_add_debugfs, 8046 #endif 8047 }; 8048 8049 #define CHAN2G(_channel, _freq, _flags) { \ 8050 .band = NL80211_BAND_2GHZ, \ 8051 .hw_value = (_channel), \ 8052 .center_freq = (_freq), \ 8053 .flags = (_flags), \ 8054 .max_antenna_gain = 0, \ 8055 .max_power = 30, \ 8056 } 8057 8058 #define CHAN5G(_channel, _freq, _flags) { \ 8059 .band = NL80211_BAND_5GHZ, \ 8060 .hw_value = (_channel), \ 8061 .center_freq = (_freq), \ 8062 .flags = (_flags), \ 8063 .max_antenna_gain = 0, \ 8064 .max_power = 30, \ 8065 } 8066 8067 static const struct ieee80211_channel ath10k_2ghz_channels[] = { 8068 CHAN2G(1, 2412, 0), 8069 CHAN2G(2, 2417, 0), 8070 CHAN2G(3, 2422, 0), 8071 CHAN2G(4, 2427, 0), 8072 CHAN2G(5, 2432, 0), 8073 CHAN2G(6, 2437, 0), 8074 CHAN2G(7, 2442, 0), 8075 CHAN2G(8, 2447, 0), 8076 CHAN2G(9, 2452, 0), 8077 CHAN2G(10, 2457, 0), 8078 CHAN2G(11, 2462, 0), 8079 CHAN2G(12, 2467, 0), 8080 CHAN2G(13, 2472, 0), 8081 CHAN2G(14, 2484, 0), 8082 }; 8083 8084 static const struct ieee80211_channel ath10k_5ghz_channels[] = { 8085 CHAN5G(36, 5180, 0), 8086 CHAN5G(40, 5200, 0), 8087 CHAN5G(44, 5220, 0), 8088 CHAN5G(48, 5240, 0), 8089 CHAN5G(52, 5260, 0), 8090 CHAN5G(56, 5280, 0), 8091 CHAN5G(60, 5300, 0), 8092 CHAN5G(64, 5320, 0), 8093 CHAN5G(100, 5500, 0), 8094 CHAN5G(104, 5520, 0), 8095 CHAN5G(108, 5540, 0), 8096 CHAN5G(112, 5560, 0), 8097 CHAN5G(116, 5580, 0), 8098 CHAN5G(120, 5600, 0), 8099 CHAN5G(124, 5620, 0), 8100 CHAN5G(128, 5640, 0), 8101 CHAN5G(132, 5660, 0), 8102 CHAN5G(136, 5680, 0), 8103 CHAN5G(140, 5700, 0), 8104 CHAN5G(144, 5720, 0), 8105 CHAN5G(149, 5745, 0), 8106 CHAN5G(153, 5765, 0), 8107 CHAN5G(157, 5785, 0), 8108 CHAN5G(161, 5805, 0), 8109 CHAN5G(165, 5825, 0), 8110 CHAN5G(169, 5845, 0), 8111 CHAN5G(173, 5865, 0), 8112 /* If you add more, you may need to change ATH10K_MAX_5G_CHAN */ 8113 /* And you will definitely need to change ATH10K_NUM_CHANS in core.h */ 8114 }; 8115 8116 struct ath10k *ath10k_mac_create(size_t priv_size) 8117 { 8118 struct ieee80211_hw *hw; 8119 struct ieee80211_ops *ops; 8120 struct ath10k *ar; 8121 8122 ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); 8123 if (!ops) 8124 return NULL; 8125 8126 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops); 8127 if (!hw) { 8128 kfree(ops); 8129 return NULL; 

struct ath10k *ath10k_mac_create(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ieee80211_ops *ops;
	struct ath10k *ar;

	ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
	if (!ops)
		return NULL;

	hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
	if (!hw) {
		kfree(ops);
		return NULL;
	}

	ar = hw->priv;
	ar->hw = hw;
	ar->ops = ops;

	return ar;
}

void ath10k_mac_destroy(struct ath10k *ar)
{
	struct ieee80211_ops *ops = ar->ops;

	ieee80211_free_hw(ar->hw);
	kfree(ops);
}

static const struct ieee80211_iface_limit ath10k_if_limits[] = {
	{
		.max = 8,
		.types = BIT(NL80211_IFTYPE_STATION)
			| BIT(NL80211_IFTYPE_P2P_CLIENT)
	},
	{
		.max = 3,
		.types = BIT(NL80211_IFTYPE_P2P_GO)
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE)
	},
	{
		.max = 7,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
};

static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
	{
		.max = 8,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION)
	},
};

static const struct ieee80211_iface_combination ath10k_if_comb[] = {
	{
		.limits = ath10k_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_if_limits),
		.max_interfaces = 8,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
	},
};

static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
	{
		.limits = ath10k_10x_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
		.max_interfaces = 8,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 1,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80),
#endif
	},
};
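
/* Illustrative reading of the tables above (not additional driver logic):
 * the ath10k_if_comb entry allows at most 8 interfaces in total on a single
 * channel, further capped per type by ath10k_if_limits. Assuming the
 * hypothetical mixes below are otherwise acceptable to the firmware:
 *
 *	7 x AP + 1 x STATION   -> allowed (7 <= 7 AP, 1 <= 8 STA, 8 total)
 *	8 x STATION            -> allowed (8 <= 8 STA, 8 total)
 *	4 x P2P_GO             -> rejected (exceeds the 3 P2P_GO limit)
 *	2 x P2P_DEVICE         -> rejected (only 1 P2P_DEVICE allowed)
 *
 * beacon_int_infra_match additionally requires the beaconing interfaces in
 * the combination to share the same beacon interval.
 */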

static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_ADHOC),
	},
};

/* FIXME: This is not thoroughly tested. These combinations may over- or
 * underestimate hw/fw capabilities.
 */
static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
	{
		.limits = ath10k_tlv_if_limit,
		.num_different_channels = 1,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
	},
	{
		.limits = ath10k_tlv_if_limit_ibss,
		.num_different_channels = 1,
		.max_interfaces = 2,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
	},
};

static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
	{
		.limits = ath10k_tlv_if_limit,
		.num_different_channels = 1,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
	},
	{
		.limits = ath10k_tlv_qcs_if_limit,
		.num_different_channels = 2,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
	},
	{
		.limits = ath10k_tlv_if_limit_ibss,
		.num_different_channels = 1,
		.max_interfaces = 2,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
	},
};

static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 16,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
};

static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
	{
		.limits = ath10k_10_4_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
		.max_interfaces = 16,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 1,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80),
#endif
	},
};

static const struct
ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
	{
		.limits = ath10k_10_4_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
		.max_interfaces = 16,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 100,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80),
#endif
	},
};

static void ath10k_get_arvif_iter(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct ath10k_vif_iter *arvif_iter = data;
	struct ath10k_vif *arvif = (void *)vif->drv_priv;

	if (arvif->vdev_id == arvif_iter->vdev_id)
		arvif_iter->arvif = arvif;
}

struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif_iter arvif_iter;
	u32 flags;

	memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
	arvif_iter.vdev_id = vdev_id;

	flags = IEEE80211_IFACE_ITER_RESUME_ALL;
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   flags,
						   ath10k_get_arvif_iter,
						   &arvif_iter);
	if (!arvif_iter.arvif) {
		ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
		return NULL;
	}

	return arvif_iter.arvif;
}
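
/* Usage sketch (illustrative, not part of the driver): code that only has a
 * firmware vdev_id, e.g. a hypothetical WMI event handler, could resolve it
 * to the owning vif like this:
 *
 *	struct ath10k_vif *arvif = ath10k_get_arvif(ar, vdev_id);
 *
 *	if (!arvif)
 *		return;
 *	ieee80211_beacon_loss(arvif->vif);
 *
 * The lookup walks the active interfaces atomically, so it can be used from
 * contexts that must not sleep.
 */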

#define WRD_METHOD "WRDD"
#define WRDD_WIFI (0x07)

static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
{
	union acpi_object *mcc_pkg;
	union acpi_object *domain_type;
	union acpi_object *mcc_value;
	u32 i;

	if (wrdd->type != ACPI_TYPE_PACKAGE ||
	    wrdd->package.count < 2 ||
	    wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
	    wrdd->package.elements[0].integer.value != 0) {
		ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
		return 0;
	}

	for (i = 1; i < wrdd->package.count; ++i) {
		mcc_pkg = &wrdd->package.elements[i];

		if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
			continue;
		if (mcc_pkg->package.count < 2)
			continue;
		if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
		    mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
			continue;

		domain_type = &mcc_pkg->package.elements[0];
		if (domain_type->integer.value != WRDD_WIFI)
			continue;

		mcc_value = &mcc_pkg->package.elements[1];
		return mcc_value->integer.value;
	}
	return 0;
}

static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
{
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	u32 alpha2_code;
	char alpha2[3];

	root_handle = ACPI_HANDLE(ar->dev);
	if (!root_handle)
		return -EOPNOTSUPP;

	status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to get wrd method %d\n", status);
		return -EIO;
	}

	status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to call wrdd %d\n", status);
		return -EIO;
	}

	alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
	kfree(wrdd.pointer);
	if (!alpha2_code)
		return -EIO;

	alpha2[0] = (alpha2_code >> 8) & 0xff;
	alpha2[1] = (alpha2_code >> 0) & 0xff;
	alpha2[2] = '\0';

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);

	*rd = ath_regd_find_country_by_name(alpha2);
	if (*rd == 0xffff)
		return -EIO;

	*rd |= COUNTRY_ERD_FLAG;
	return 0;
}

static int ath10k_mac_init_rd(struct ath10k *ar)
{
	int ret;
	u16 rd;

	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
	if (ret) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "fallback to eeprom programmed regulatory settings\n");
		rd = ar->hw_eeprom_rd;
	}

	ar->ath_common.regulatory.current_rd = rd;
	return 0;
}
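
/* For reference, a sketch inferred from the parsing above (not quoted from a
 * specification): ath10k_mac_wrdd_get_mcc() expects the ACPI WRDD method to
 * return a package whose first element is a revision integer (0) and whose
 * following sub-packages pair a domain type with a country code, e.g.:
 *
 *	Package () {
 *		0,                           // revision
 *		Package () { 0x07, 0x4742 }, // WRDD_WIFI, hypothetical "GB"
 *	}
 *
 * The 16-bit value is split into two ASCII bytes (0x47 = 'G', 0x42 = 'B') to
 * form the alpha2 country string, which is then mapped to a regdomain via
 * ath_regd_find_country_by_name(); on any failure the EEPROM regdomain is
 * used instead.
 */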

int ath10k_mac_register(struct ath10k *ar)
{
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,

		/* Do not add hardware supported ciphers before this line.
		 * Allow software encryption for all chips. Don't forget to
		 * update n_cipher_suites below.
		 */
		WLAN_CIPHER_SUITE_AES_CMAC,
		WLAN_CIPHER_SUITE_BIP_CMAC_256,
		WLAN_CIPHER_SUITE_BIP_GMAC_128,
		WLAN_CIPHER_SUITE_BIP_GMAC_256,

		/* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
		 * and CCMP-256 in hardware.
		 */
		WLAN_CIPHER_SUITE_GCMP,
		WLAN_CIPHER_SUITE_GCMP_256,
		WLAN_CIPHER_SUITE_CCMP_256,
	};
	struct ieee80211_supported_band *band;
	void *channels;
	int ret;

	if (!is_valid_ether_addr(ar->mac_addr)) {
		ath10k_warn(ar, "invalid MAC address; choosing random\n");
		eth_random_addr(ar->mac_addr);
	}
	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);

	SET_IEEE80211_DEV(ar->hw, ar->dev);

	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
		     ATH10K_NUM_CHANS);

	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
		channels = kmemdup(ath10k_2ghz_channels,
				   sizeof(ath10k_2ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
		band->channels = channels;

		if (ar->hw_params.cck_rate_map_rev2) {
			band->n_bitrates = ath10k_g_rates_rev2_size;
			band->bitrates = ath10k_g_rates_rev2;
		} else {
			band->n_bitrates = ath10k_g_rates_size;
			band->bitrates = ath10k_g_rates;
		}

		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
	}

	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
		channels = kmemdup(ath10k_5ghz_channels,
				   sizeof(ath10k_5ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
		band->channels = channels;
		band->n_bitrates = ath10k_a_rates_size;
		band->bitrates = ath10k_a_rates;
		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
	}

	wiphy_read_of_freq_limits(ar->hw->wiphy);
	ath10k_mac_setup_ht_vht_cap(ar);

	ar->hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;

	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
		ar->hw->wiphy->interface_modes |=
			BIT(NL80211_IFTYPE_P2P_DEVICE) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO);

	ieee80211_hw_set(ar->hw, SIGNAL_DBM);

	if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
		      ar->running_fw->fw_file.fw_features)) {
		ieee80211_hw_set(ar->hw, SUPPORTS_PS);
		ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
	}

	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(ar->hw, AP_LINK_PS);
	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);

	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;

	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
	}

	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;

	if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
		ar->hw->wiphy->max_sched_scan_reqs = 1;
		ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
		ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
		ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
		ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
		ar->hw->wiphy->max_sched_scan_plan_interval =
			WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
		ar->hw->wiphy->max_sched_scan_plan_iterations =
			WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
	}

	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
	ar->hw->txq_data_size = sizeof(struct ath10k_txq);

	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;

	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;

		/* Firmware delivers WPS/P2P Probe Request frames to the driver
		 * so that userspace (e.g. wpa_supplicant/hostapd) can generate
		 * correct Probe Responses. This is more of a workaround than a
		 * real offload advertisement.
		 */
		ar->hw->wiphy->probe_resp_offload |=
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	}

	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
	    test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
		if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
			ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
	}

	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
		ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);

	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	ar->hw->wiphy->max_remain_on_channel_duration = 5000;

	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
				   NL80211_FEATURE_AP_SCAN;

	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;

	ret = ath10k_wow_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to init wow: %d\n", ret);
		goto err_free;
	}

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
	wiphy_ext_feature_set(ar->hw->wiphy,
			      NL80211_EXT_FEATURE_SET_SCAN_DWELL);

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
	    test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);

	if (ath10k_peer_stats_enabled(ar) ||
	    test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);

	if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map))
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);

	/* On low-latency (LL) hardware the queues are managed entirely by the
	 * firmware, so all we advertise to mac80211 is that we can handle the
	 * full set of queues.
	 */
	ar->hw->queues = IEEE80211_MAX_QUEUES;

	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
	 * something that vdev_ids can't reach so that we don't stop the queue
	 * accidentally.
	 */
	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
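
	/* Worked example (illustrative): assuming IEEE80211_MAX_QUEUES is 16,
	 * vdev_ids 0..14 map directly onto hw queue numbers and queue 15 is
	 * reserved for off-channel tx, so a hypothetical vdev_id of 3 uses hw
	 * queue 3 and stopping queue 15 never stalls a regular vdev.
	 */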

	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_if_comb);
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_tlv_qcs_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
		} else {
			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_if_comb);
		}
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
	case ATH10K_FW_WMI_OP_VERSION_10_2:
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10x_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10_4_if_comb);
		if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
			     ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_10_4_bcn_int_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb);
		}
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		WARN_ON(1);
		ret = -EINVAL;
		goto err_free;
	}

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ar->hw->netdev_features = NETIF_F_HW_CSUM;

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
		/* Init ath dfs pattern detector */
		ar->ath_common.debug_mask = ATH_DBG_DFS;
		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
							     NL80211_DFS_UNSET);

		if (!ar->dfs_detector)
			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
	}

	ret = ath10k_mac_init_rd(ar);
	if (ret) {
		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	/* Disable set_coverage_class for chipsets that do not support it. */
	if (!ar->hw_params.hw_ops->set_coverage_class)
		ar->ops->set_coverage_class = NULL;

	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
			    ath10k_reg_notifier);
	if (ret) {
		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
		goto err_dfs_detector_exit;
	}

	if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
		ar->hw->wiphy->features |=
			NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
	}

	ar->hw->wiphy->cipher_suites = cipher_suites;

	/* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
	 * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported
	 * from chip specific hw_param table.
	 */
	if (!ar->hw_params.n_cipher_suites ||
	    ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
		ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
			   ar->hw_params.n_cipher_suites);
		ar->hw_params.n_cipher_suites = 8;
	}
	ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;
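
	/* Worked example (illustrative): with the cipher_suites[] table at the
	 * top of this function, a chip whose hw_params set n_cipher_suites to
	 * 8 advertises only the entries up to and including
	 * WLAN_CIPHER_SUITE_BIP_GMAC_256, i.e. the software-capable set, while
	 * an entry with n_cipher_suites set to 11 would also expose GCMP,
	 * GCMP-256 and CCMP-256. A value of 0, or anything larger than the
	 * table, falls back to 8 in the check above.
	 */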

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ar->hw->weight_multiplier = ATH10K_AIRTIME_WEIGHT_MULTIPLIER;

	ret = ieee80211_register_hw(ar->hw);
	if (ret) {
		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	if (test_bit(WMI_SERVICE_PER_PACKET_SW_ENCRYPT, ar->wmi.svc_map)) {
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
		ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
	}

	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
		ret = regulatory_hint(ar->hw->wiphy,
				      ar->ath_common.regulatory.alpha2);
		if (ret)
			goto err_unregister;
	}

	return 0;

err_unregister:
	ieee80211_unregister_hw(ar->hw);

err_dfs_detector_exit:
	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

err_free:
	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
	return ret;
}

void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}
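
/* Usage sketch (illustrative, not part of this file): the core/bus code is
 * expected to pair these entry points roughly as follows during device
 * bring-up and tear-down:
 *
 *	ar = ath10k_mac_create(bus_priv_size);
 *	...
 *	ret = ath10k_mac_register(ar);	// once fw features/WMI services known
 *	...
 *	ath10k_mac_unregister(ar);
 *	ath10k_mac_destroy(ar);
 *
 * Registration depends on ar->wmi.svc_map and ar->hw_params having been
 * populated, since the advertised interface combinations and cipher suites
 * above are chosen from them.
 */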