1 // SPDX-License-Identifier: ISC 2 /* 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 6 */ 7 8 #include "mac.h" 9 10 #include <net/cfg80211.h> 11 #include <net/mac80211.h> 12 #include <linux/etherdevice.h> 13 #include <linux/acpi.h> 14 #include <linux/of.h> 15 16 #include "hif.h" 17 #include "core.h" 18 #include "debug.h" 19 #include "wmi.h" 20 #include "htt.h" 21 #include "txrx.h" 22 #include "testmode.h" 23 #include "wmi-tlv.h" 24 #include "wmi-ops.h" 25 #include "wow.h" 26 27 /*********/ 28 /* Rates */ 29 /*********/ 30 31 static struct ieee80211_rate ath10k_rates[] = { 32 { .bitrate = 10, 33 .hw_value = ATH10K_HW_RATE_CCK_LP_1M }, 34 { .bitrate = 20, 35 .hw_value = ATH10K_HW_RATE_CCK_LP_2M, 36 .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M, 37 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 38 { .bitrate = 55, 39 .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M, 40 .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M, 41 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 42 { .bitrate = 110, 43 .hw_value = ATH10K_HW_RATE_CCK_LP_11M, 44 .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M, 45 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 46 47 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, 48 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, 49 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, 50 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, 51 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, 52 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, 53 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, 54 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, 55 }; 56 57 static struct ieee80211_rate ath10k_rates_rev2[] = { 58 { .bitrate = 10, 59 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M }, 60 { .bitrate = 20, 61 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M, 62 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M, 63 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 64 { .bitrate = 55, 65 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M, 66 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M, 67 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 68 { .bitrate = 110, 69 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M, 70 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M, 71 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 72 73 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, 74 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, 75 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, 76 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, 77 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, 78 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, 79 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, 80 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, 81 }; 82 83 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4 84 85 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX) 86 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \ 87 ATH10K_MAC_FIRST_OFDM_RATE_IDX) 88 #define ath10k_g_rates (ath10k_rates + 0) 89 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) 90 91 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) 92 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) 93 94 #define ath10k_wmi_legacy_rates ath10k_rates 95 96 static bool ath10k_mac_bitrate_is_cck(int bitrate) 97 { 98 switch (bitrate) { 99 case 10: 100 case 20: 101 case 55: 102 case 110: 103 return true; 104 } 105 106 return false; 107 } 108 109 static u8 
ath10k_mac_bitrate_to_rate(int bitrate) 110 { 111 return DIV_ROUND_UP(bitrate, 5) | 112 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0); 113 } 114 115 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, 116 u8 hw_rate, bool cck) 117 { 118 const struct ieee80211_rate *rate; 119 int i; 120 121 for (i = 0; i < sband->n_bitrates; i++) { 122 rate = &sband->bitrates[i]; 123 124 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck) 125 continue; 126 127 if (rate->hw_value == hw_rate) 128 return i; 129 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && 130 rate->hw_value_short == hw_rate) 131 return i; 132 } 133 134 return 0; 135 } 136 137 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, 138 u32 bitrate) 139 { 140 int i; 141 142 for (i = 0; i < sband->n_bitrates; i++) 143 if (sband->bitrates[i].bitrate == bitrate) 144 return i; 145 146 return 0; 147 } 148 149 static int ath10k_mac_get_rate_hw_value(int bitrate) 150 { 151 int i; 152 u8 hw_value_prefix = 0; 153 154 if (ath10k_mac_bitrate_is_cck(bitrate)) 155 hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6; 156 157 for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) { 158 if (ath10k_rates[i].bitrate == bitrate) 159 return hw_value_prefix | ath10k_rates[i].hw_value; 160 } 161 162 return -EINVAL; 163 } 164 165 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) 166 { 167 switch ((mcs_map >> (2 * nss)) & 0x3) { 168 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; 169 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; 170 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; 171 } 172 return 0; 173 } 174 175 static u32 176 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 177 { 178 int nss; 179 180 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) 181 if (ht_mcs_mask[nss]) 182 return nss + 1; 183 184 return 1; 185 } 186 187 static u32 188 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 189 { 190 int nss; 191 192 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) 193 if (vht_mcs_mask[nss]) 194 return nss + 1; 195 196 return 1; 197 } 198 199 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val) 200 { 201 enum wmi_host_platform_type platform_type; 202 int ret; 203 204 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map)) 205 platform_type = WMI_HOST_PLATFORM_LOW_PERF; 206 else 207 platform_type = WMI_HOST_PLATFORM_HIGH_PERF; 208 209 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val); 210 211 if (ret && ret != -EOPNOTSUPP) { 212 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret); 213 return ret; 214 } 215 216 return 0; 217 } 218 219 /**********/ 220 /* Crypto */ 221 /**********/ 222 223 static int ath10k_send_key(struct ath10k_vif *arvif, 224 struct ieee80211_key_conf *key, 225 enum set_key_cmd cmd, 226 const u8 *macaddr, u32 flags) 227 { 228 struct ath10k *ar = arvif->ar; 229 struct wmi_vdev_install_key_arg arg = { 230 .vdev_id = arvif->vdev_id, 231 .key_idx = key->keyidx, 232 .key_len = key->keylen, 233 .key_data = key->key, 234 .key_flags = flags, 235 .macaddr = macaddr, 236 }; 237 238 lockdep_assert_held(&arvif->ar->conf_mutex); 239 240 switch (key->cipher) { 241 case WLAN_CIPHER_SUITE_CCMP: 242 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM]; 243 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; 244 break; 245 case WLAN_CIPHER_SUITE_TKIP: 246 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_TKIP]; 247 arg.key_txmic_len = 8; 248 arg.key_rxmic_len = 8; 249 break; 250 case 
WLAN_CIPHER_SUITE_WEP40: 251 case WLAN_CIPHER_SUITE_WEP104: 252 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_WEP]; 253 break; 254 case WLAN_CIPHER_SUITE_CCMP_256: 255 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM]; 256 break; 257 case WLAN_CIPHER_SUITE_GCMP: 258 case WLAN_CIPHER_SUITE_GCMP_256: 259 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_GCM]; 260 break; 261 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 262 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 263 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 264 case WLAN_CIPHER_SUITE_AES_CMAC: 265 WARN_ON(1); 266 return -EINVAL; 267 default: 268 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); 269 return -EOPNOTSUPP; 270 } 271 272 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 273 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 274 275 if (cmd == DISABLE_KEY) { 276 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE]; 277 arg.key_data = NULL; 278 } 279 280 return ath10k_wmi_vdev_install_key(arvif->ar, &arg); 281 } 282 283 static int ath10k_install_key(struct ath10k_vif *arvif, 284 struct ieee80211_key_conf *key, 285 enum set_key_cmd cmd, 286 const u8 *macaddr, u32 flags) 287 { 288 struct ath10k *ar = arvif->ar; 289 int ret; 290 unsigned long time_left; 291 292 lockdep_assert_held(&ar->conf_mutex); 293 294 reinit_completion(&ar->install_key_done); 295 296 if (arvif->nohwcrypt) 297 return 1; 298 299 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags); 300 if (ret) 301 return ret; 302 303 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ); 304 if (time_left == 0) 305 return -ETIMEDOUT; 306 307 return 0; 308 } 309 310 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, 311 const u8 *addr) 312 { 313 struct ath10k *ar = arvif->ar; 314 struct ath10k_peer *peer; 315 int ret; 316 int i; 317 u32 flags; 318 319 lockdep_assert_held(&ar->conf_mutex); 320 321 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP && 322 arvif->vif->type != NL80211_IFTYPE_ADHOC && 323 arvif->vif->type != NL80211_IFTYPE_MESH_POINT)) 324 return -EINVAL; 325 326 spin_lock_bh(&ar->data_lock); 327 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); 328 spin_unlock_bh(&ar->data_lock); 329 330 if (!peer) 331 return -ENOENT; 332 333 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { 334 if (arvif->wep_keys[i] == NULL) 335 continue; 336 337 switch (arvif->vif->type) { 338 case NL80211_IFTYPE_AP: 339 flags = WMI_KEY_PAIRWISE; 340 341 if (arvif->def_wep_key_idx == i) 342 flags |= WMI_KEY_TX_USAGE; 343 344 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 345 SET_KEY, addr, flags); 346 if (ret < 0) 347 return ret; 348 break; 349 case NL80211_IFTYPE_ADHOC: 350 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 351 SET_KEY, addr, 352 WMI_KEY_PAIRWISE); 353 if (ret < 0) 354 return ret; 355 356 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 357 SET_KEY, addr, WMI_KEY_GROUP); 358 if (ret < 0) 359 return ret; 360 break; 361 default: 362 WARN_ON(1); 363 return -EINVAL; 364 } 365 366 spin_lock_bh(&ar->data_lock); 367 peer->keys[i] = arvif->wep_keys[i]; 368 spin_unlock_bh(&ar->data_lock); 369 } 370 371 /* In some cases (notably with static WEP IBSS with multiple keys) 372 * multicast Tx becomes broken. Both pairwise and groupwise keys are 373 * installed already. Using WMI_KEY_TX_USAGE in different combinations 374 * didn't seem help. Using def_keyid vdev parameter seems to be 375 * effective so use that. 376 * 377 * FIXME: Revisit. Perhaps this can be done in a less hacky way. 
378 */ 379 if (arvif->vif->type != NL80211_IFTYPE_ADHOC) 380 return 0; 381 382 if (arvif->def_wep_key_idx == -1) 383 return 0; 384 385 ret = ath10k_wmi_vdev_set_param(arvif->ar, 386 arvif->vdev_id, 387 arvif->ar->wmi.vdev_param->def_keyid, 388 arvif->def_wep_key_idx); 389 if (ret) { 390 ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n", 391 arvif->vdev_id, ret); 392 return ret; 393 } 394 395 return 0; 396 } 397 398 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, 399 const u8 *addr) 400 { 401 struct ath10k *ar = arvif->ar; 402 struct ath10k_peer *peer; 403 int first_errno = 0; 404 int ret; 405 int i; 406 u32 flags = 0; 407 408 lockdep_assert_held(&ar->conf_mutex); 409 410 spin_lock_bh(&ar->data_lock); 411 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); 412 spin_unlock_bh(&ar->data_lock); 413 414 if (!peer) 415 return -ENOENT; 416 417 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 418 if (peer->keys[i] == NULL) 419 continue; 420 421 /* key flags are not required to delete the key */ 422 ret = ath10k_install_key(arvif, peer->keys[i], 423 DISABLE_KEY, addr, flags); 424 if (ret < 0 && first_errno == 0) 425 first_errno = ret; 426 427 if (ret < 0) 428 ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", 429 i, ret); 430 431 spin_lock_bh(&ar->data_lock); 432 peer->keys[i] = NULL; 433 spin_unlock_bh(&ar->data_lock); 434 } 435 436 return first_errno; 437 } 438 439 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, 440 u8 keyidx) 441 { 442 struct ath10k_peer *peer; 443 int i; 444 445 lockdep_assert_held(&ar->data_lock); 446 447 /* We don't know which vdev this peer belongs to, 448 * since WMI doesn't give us that information. 449 * 450 * FIXME: multi-bss needs to be handled. 451 */ 452 peer = ath10k_peer_find(ar, 0, addr); 453 if (!peer) 454 return false; 455 456 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 457 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx) 458 return true; 459 } 460 461 return false; 462 } 463 464 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, 465 struct ieee80211_key_conf *key) 466 { 467 struct ath10k *ar = arvif->ar; 468 struct ath10k_peer *peer; 469 u8 addr[ETH_ALEN]; 470 int first_errno = 0; 471 int ret; 472 int i; 473 u32 flags = 0; 474 475 lockdep_assert_held(&ar->conf_mutex); 476 477 for (;;) { 478 /* since ath10k_install_key we can't hold data_lock all the 479 * time, so we try to remove the keys incrementally 480 */ 481 spin_lock_bh(&ar->data_lock); 482 i = 0; 483 list_for_each_entry(peer, &ar->peers, list) { 484 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 485 if (peer->keys[i] == key) { 486 ether_addr_copy(addr, peer->addr); 487 peer->keys[i] = NULL; 488 break; 489 } 490 } 491 492 if (i < ARRAY_SIZE(peer->keys)) 493 break; 494 } 495 spin_unlock_bh(&ar->data_lock); 496 497 if (i == ARRAY_SIZE(peer->keys)) 498 break; 499 /* key flags are not required to delete the key */ 500 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags); 501 if (ret < 0 && first_errno == 0) 502 first_errno = ret; 503 504 if (ret) 505 ath10k_warn(ar, "failed to remove key for %pM: %d\n", 506 addr, ret); 507 } 508 509 return first_errno; 510 } 511 512 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif, 513 struct ieee80211_key_conf *key) 514 { 515 struct ath10k *ar = arvif->ar; 516 struct ath10k_peer *peer; 517 int ret; 518 519 lockdep_assert_held(&ar->conf_mutex); 520 521 list_for_each_entry(peer, &ar->peers, list) { 522 if (ether_addr_equal(peer->addr, arvif->vif->addr)) 523 continue; 524 525 if 
(ether_addr_equal(peer->addr, arvif->bssid)) 526 continue; 527 528 if (peer->keys[key->keyidx] == key) 529 continue; 530 531 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n", 532 arvif->vdev_id, key->keyidx); 533 534 ret = ath10k_install_peer_wep_keys(arvif, peer->addr); 535 if (ret) { 536 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n", 537 arvif->vdev_id, peer->addr, ret); 538 return ret; 539 } 540 } 541 542 return 0; 543 } 544 545 /*********************/ 546 /* General utilities */ 547 /*********************/ 548 549 static inline enum wmi_phy_mode 550 chan_to_phymode(const struct cfg80211_chan_def *chandef) 551 { 552 enum wmi_phy_mode phymode = MODE_UNKNOWN; 553 554 switch (chandef->chan->band) { 555 case NL80211_BAND_2GHZ: 556 switch (chandef->width) { 557 case NL80211_CHAN_WIDTH_20_NOHT: 558 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) 559 phymode = MODE_11B; 560 else 561 phymode = MODE_11G; 562 break; 563 case NL80211_CHAN_WIDTH_20: 564 phymode = MODE_11NG_HT20; 565 break; 566 case NL80211_CHAN_WIDTH_40: 567 phymode = MODE_11NG_HT40; 568 break; 569 case NL80211_CHAN_WIDTH_5: 570 case NL80211_CHAN_WIDTH_10: 571 case NL80211_CHAN_WIDTH_80: 572 case NL80211_CHAN_WIDTH_80P80: 573 case NL80211_CHAN_WIDTH_160: 574 phymode = MODE_UNKNOWN; 575 break; 576 } 577 break; 578 case NL80211_BAND_5GHZ: 579 switch (chandef->width) { 580 case NL80211_CHAN_WIDTH_20_NOHT: 581 phymode = MODE_11A; 582 break; 583 case NL80211_CHAN_WIDTH_20: 584 phymode = MODE_11NA_HT20; 585 break; 586 case NL80211_CHAN_WIDTH_40: 587 phymode = MODE_11NA_HT40; 588 break; 589 case NL80211_CHAN_WIDTH_80: 590 phymode = MODE_11AC_VHT80; 591 break; 592 case NL80211_CHAN_WIDTH_160: 593 phymode = MODE_11AC_VHT160; 594 break; 595 case NL80211_CHAN_WIDTH_80P80: 596 phymode = MODE_11AC_VHT80_80; 597 break; 598 case NL80211_CHAN_WIDTH_5: 599 case NL80211_CHAN_WIDTH_10: 600 phymode = MODE_UNKNOWN; 601 break; 602 } 603 break; 604 default: 605 break; 606 } 607 608 WARN_ON(phymode == MODE_UNKNOWN); 609 return phymode; 610 } 611 612 static u8 ath10k_parse_mpdudensity(u8 mpdudensity) 613 { 614 /* 615 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 616 * 0 for no restriction 617 * 1 for 1/4 us 618 * 2 for 1/2 us 619 * 3 for 1 us 620 * 4 for 2 us 621 * 5 for 4 us 622 * 6 for 8 us 623 * 7 for 16 us 624 */ 625 switch (mpdudensity) { 626 case 0: 627 return 0; 628 case 1: 629 case 2: 630 case 3: 631 /* Our lower layer calculations limit our precision to 632 * 1 microsecond 633 */ 634 return 1; 635 case 4: 636 return 2; 637 case 5: 638 return 4; 639 case 6: 640 return 8; 641 case 7: 642 return 16; 643 default: 644 return 0; 645 } 646 } 647 648 int ath10k_mac_vif_chan(struct ieee80211_vif *vif, 649 struct cfg80211_chan_def *def) 650 { 651 struct ieee80211_chanctx_conf *conf; 652 653 rcu_read_lock(); 654 conf = rcu_dereference(vif->chanctx_conf); 655 if (!conf) { 656 rcu_read_unlock(); 657 return -ENOENT; 658 } 659 660 *def = conf->def; 661 rcu_read_unlock(); 662 663 return 0; 664 } 665 666 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw, 667 struct ieee80211_chanctx_conf *conf, 668 void *data) 669 { 670 int *num = data; 671 672 (*num)++; 673 } 674 675 static int ath10k_mac_num_chanctxs(struct ath10k *ar) 676 { 677 int num = 0; 678 679 ieee80211_iter_chan_contexts_atomic(ar->hw, 680 ath10k_mac_num_chanctxs_iter, 681 &num); 682 683 return num; 684 } 685 686 static void 687 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, 688 struct 
ieee80211_chanctx_conf *conf, 689 void *data) 690 { 691 struct cfg80211_chan_def **def = data; 692 693 *def = &conf->def; 694 } 695 696 static void ath10k_wait_for_peer_delete_done(struct ath10k *ar, u32 vdev_id, 697 const u8 *addr) 698 { 699 unsigned long time_left; 700 int ret; 701 702 if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) { 703 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); 704 if (ret) { 705 ath10k_warn(ar, "failed wait for peer deleted"); 706 return; 707 } 708 709 time_left = wait_for_completion_timeout(&ar->peer_delete_done, 710 5 * HZ); 711 if (!time_left) 712 ath10k_warn(ar, "Timeout in receiving peer delete response\n"); 713 } 714 } 715 716 static int ath10k_peer_create(struct ath10k *ar, 717 struct ieee80211_vif *vif, 718 struct ieee80211_sta *sta, 719 u32 vdev_id, 720 const u8 *addr, 721 enum wmi_peer_type peer_type) 722 { 723 struct ath10k_vif *arvif; 724 struct ath10k_peer *peer; 725 int num_peers = 0; 726 int ret; 727 728 lockdep_assert_held(&ar->conf_mutex); 729 730 num_peers = ar->num_peers; 731 732 /* Each vdev consumes a peer entry as well */ 733 list_for_each_entry(arvif, &ar->arvifs, list) 734 num_peers++; 735 736 if (num_peers >= ar->max_num_peers) 737 return -ENOBUFS; 738 739 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); 740 if (ret) { 741 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", 742 addr, vdev_id, ret); 743 return ret; 744 } 745 746 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); 747 if (ret) { 748 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", 749 addr, vdev_id, ret); 750 return ret; 751 } 752 753 spin_lock_bh(&ar->data_lock); 754 755 peer = ath10k_peer_find(ar, vdev_id, addr); 756 if (!peer) { 757 spin_unlock_bh(&ar->data_lock); 758 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", 759 addr, vdev_id); 760 ath10k_wait_for_peer_delete_done(ar, vdev_id, addr); 761 return -ENOENT; 762 } 763 764 peer->vif = vif; 765 peer->sta = sta; 766 767 spin_unlock_bh(&ar->data_lock); 768 769 ar->num_peers++; 770 771 return 0; 772 } 773 774 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) 775 { 776 struct ath10k *ar = arvif->ar; 777 u32 param; 778 int ret; 779 780 param = ar->wmi.pdev_param->sta_kickout_th; 781 ret = ath10k_wmi_pdev_set_param(ar, param, 782 ATH10K_KICKOUT_THRESHOLD); 783 if (ret) { 784 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", 785 arvif->vdev_id, ret); 786 return ret; 787 } 788 789 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs; 790 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 791 ATH10K_KEEPALIVE_MIN_IDLE); 792 if (ret) { 793 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", 794 arvif->vdev_id, ret); 795 return ret; 796 } 797 798 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs; 799 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 800 ATH10K_KEEPALIVE_MAX_IDLE); 801 if (ret) { 802 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", 803 arvif->vdev_id, ret); 804 return ret; 805 } 806 807 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs; 808 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 809 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); 810 if (ret) { 811 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", 812 arvif->vdev_id, ret); 813 return ret; 814 } 815 816 return 0; 817 } 818 819 static int ath10k_mac_set_rts(struct ath10k_vif 
*arvif, u32 value) 820 { 821 struct ath10k *ar = arvif->ar; 822 u32 vdev_param; 823 824 vdev_param = ar->wmi.vdev_param->rts_threshold; 825 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); 826 } 827 828 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) 829 { 830 int ret; 831 832 lockdep_assert_held(&ar->conf_mutex); 833 834 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); 835 if (ret) 836 return ret; 837 838 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); 839 if (ret) 840 return ret; 841 842 if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) { 843 unsigned long time_left; 844 845 time_left = wait_for_completion_timeout 846 (&ar->peer_delete_done, 5 * HZ); 847 848 if (!time_left) { 849 ath10k_warn(ar, "Timeout in receiving peer delete response\n"); 850 return -ETIMEDOUT; 851 } 852 } 853 854 ar->num_peers--; 855 856 return 0; 857 } 858 859 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) 860 { 861 struct ath10k_peer *peer, *tmp; 862 int peer_id; 863 int i; 864 865 lockdep_assert_held(&ar->conf_mutex); 866 867 spin_lock_bh(&ar->data_lock); 868 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 869 if (peer->vdev_id != vdev_id) 870 continue; 871 872 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", 873 peer->addr, vdev_id); 874 875 for_each_set_bit(peer_id, peer->peer_ids, 876 ATH10K_MAX_NUM_PEER_IDS) { 877 ar->peer_map[peer_id] = NULL; 878 } 879 880 /* Double check that peer is properly un-referenced from 881 * the peer_map 882 */ 883 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 884 if (ar->peer_map[i] == peer) { 885 ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", 886 peer->addr, peer, i); 887 ar->peer_map[i] = NULL; 888 } 889 } 890 891 list_del(&peer->list); 892 kfree(peer); 893 ar->num_peers--; 894 } 895 spin_unlock_bh(&ar->data_lock); 896 } 897 898 static void ath10k_peer_cleanup_all(struct ath10k *ar) 899 { 900 struct ath10k_peer *peer, *tmp; 901 int i; 902 903 lockdep_assert_held(&ar->conf_mutex); 904 905 spin_lock_bh(&ar->data_lock); 906 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 907 list_del(&peer->list); 908 kfree(peer); 909 } 910 911 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) 912 ar->peer_map[i] = NULL; 913 914 spin_unlock_bh(&ar->data_lock); 915 916 ar->num_peers = 0; 917 ar->num_stations = 0; 918 } 919 920 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id, 921 struct ieee80211_sta *sta, 922 enum wmi_tdls_peer_state state) 923 { 924 int ret; 925 struct wmi_tdls_peer_update_cmd_arg arg = {}; 926 struct wmi_tdls_peer_capab_arg cap = {}; 927 struct wmi_channel_arg chan_arg = {}; 928 929 lockdep_assert_held(&ar->conf_mutex); 930 931 arg.vdev_id = vdev_id; 932 arg.peer_state = state; 933 ether_addr_copy(arg.addr, sta->addr); 934 935 cap.peer_max_sp = sta->max_sp; 936 cap.peer_uapsd_queues = sta->uapsd_queues; 937 938 if (state == WMI_TDLS_PEER_STATE_CONNECTED && 939 !sta->tdls_initiator) 940 cap.is_peer_responder = 1; 941 942 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg); 943 if (ret) { 944 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n", 945 arg.addr, vdev_id, ret); 946 return ret; 947 } 948 949 return 0; 950 } 951 952 /************************/ 953 /* Interface management */ 954 /************************/ 955 956 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) 957 { 958 struct ath10k *ar = arvif->ar; 959 960 lockdep_assert_held(&ar->data_lock); 961 962 if (!arvif->beacon) 963 
return; 964 965 if (!arvif->beacon_buf) 966 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, 967 arvif->beacon->len, DMA_TO_DEVICE); 968 969 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED && 970 arvif->beacon_state != ATH10K_BEACON_SENT)) 971 return; 972 973 dev_kfree_skb_any(arvif->beacon); 974 975 arvif->beacon = NULL; 976 arvif->beacon_state = ATH10K_BEACON_SCHEDULED; 977 } 978 979 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) 980 { 981 struct ath10k *ar = arvif->ar; 982 983 lockdep_assert_held(&ar->data_lock); 984 985 ath10k_mac_vif_beacon_free(arvif); 986 987 if (arvif->beacon_buf) { 988 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 989 arvif->beacon_buf, arvif->beacon_paddr); 990 arvif->beacon_buf = NULL; 991 } 992 } 993 994 static inline int ath10k_vdev_setup_sync(struct ath10k *ar) 995 { 996 unsigned long time_left; 997 998 lockdep_assert_held(&ar->conf_mutex); 999 1000 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) 1001 return -ESHUTDOWN; 1002 1003 time_left = wait_for_completion_timeout(&ar->vdev_setup_done, 1004 ATH10K_VDEV_SETUP_TIMEOUT_HZ); 1005 if (time_left == 0) 1006 return -ETIMEDOUT; 1007 1008 return ar->last_wmi_vdev_start_status; 1009 } 1010 1011 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) 1012 { 1013 struct cfg80211_chan_def *chandef = NULL; 1014 struct ieee80211_channel *channel = NULL; 1015 struct wmi_vdev_start_request_arg arg = {}; 1016 int ret = 0; 1017 1018 lockdep_assert_held(&ar->conf_mutex); 1019 1020 ieee80211_iter_chan_contexts_atomic(ar->hw, 1021 ath10k_mac_get_any_chandef_iter, 1022 &chandef); 1023 if (WARN_ON_ONCE(!chandef)) 1024 return -ENOENT; 1025 1026 channel = chandef->chan; 1027 1028 arg.vdev_id = vdev_id; 1029 arg.channel.freq = channel->center_freq; 1030 arg.channel.band_center_freq1 = chandef->center_freq1; 1031 arg.channel.band_center_freq2 = chandef->center_freq2; 1032 1033 /* TODO setup this dynamically, what in case we 1034 * don't have any vifs? 
1035 */ 1036 arg.channel.mode = chan_to_phymode(chandef); 1037 arg.channel.chan_radar = 1038 !!(channel->flags & IEEE80211_CHAN_RADAR); 1039 1040 arg.channel.min_power = 0; 1041 arg.channel.max_power = channel->max_power * 2; 1042 arg.channel.max_reg_power = channel->max_reg_power * 2; 1043 arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; 1044 1045 reinit_completion(&ar->vdev_setup_done); 1046 reinit_completion(&ar->vdev_delete_done); 1047 1048 ret = ath10k_wmi_vdev_start(ar, &arg); 1049 if (ret) { 1050 ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n", 1051 vdev_id, ret); 1052 return ret; 1053 } 1054 1055 ret = ath10k_vdev_setup_sync(ar); 1056 if (ret) { 1057 ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n", 1058 vdev_id, ret); 1059 return ret; 1060 } 1061 1062 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); 1063 if (ret) { 1064 ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n", 1065 vdev_id, ret); 1066 goto vdev_stop; 1067 } 1068 1069 ar->monitor_vdev_id = vdev_id; 1070 1071 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n", 1072 ar->monitor_vdev_id); 1073 return 0; 1074 1075 vdev_stop: 1076 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 1077 if (ret) 1078 ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n", 1079 ar->monitor_vdev_id, ret); 1080 1081 return ret; 1082 } 1083 1084 static int ath10k_monitor_vdev_stop(struct ath10k *ar) 1085 { 1086 int ret = 0; 1087 1088 lockdep_assert_held(&ar->conf_mutex); 1089 1090 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); 1091 if (ret) 1092 ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n", 1093 ar->monitor_vdev_id, ret); 1094 1095 reinit_completion(&ar->vdev_setup_done); 1096 reinit_completion(&ar->vdev_delete_done); 1097 1098 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 1099 if (ret) 1100 ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n", 1101 ar->monitor_vdev_id, ret); 1102 1103 ret = ath10k_vdev_setup_sync(ar); 1104 if (ret) 1105 ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n", 1106 ar->monitor_vdev_id, ret); 1107 1108 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", 1109 ar->monitor_vdev_id); 1110 return ret; 1111 } 1112 1113 static int ath10k_monitor_vdev_create(struct ath10k *ar) 1114 { 1115 int bit, ret = 0; 1116 1117 lockdep_assert_held(&ar->conf_mutex); 1118 1119 if (ar->free_vdev_map == 0) { 1120 ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n"); 1121 return -ENOMEM; 1122 } 1123 1124 bit = __ffs64(ar->free_vdev_map); 1125 1126 ar->monitor_vdev_id = bit; 1127 1128 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, 1129 WMI_VDEV_TYPE_MONITOR, 1130 0, ar->mac_addr); 1131 if (ret) { 1132 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n", 1133 ar->monitor_vdev_id, ret); 1134 return ret; 1135 } 1136 1137 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); 1138 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", 1139 ar->monitor_vdev_id); 1140 1141 return 0; 1142 } 1143 1144 static int ath10k_monitor_vdev_delete(struct ath10k *ar) 1145 { 1146 int ret = 0; 1147 1148 lockdep_assert_held(&ar->conf_mutex); 1149 1150 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); 1151 if (ret) { 1152 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n", 1153 ar->monitor_vdev_id, ret); 1154 return ret; 1155 } 1156 1157 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; 1158 1159 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac 
monitor vdev %d deleted\n", 1160 ar->monitor_vdev_id); 1161 return ret; 1162 } 1163 1164 static int ath10k_monitor_start(struct ath10k *ar) 1165 { 1166 int ret; 1167 1168 lockdep_assert_held(&ar->conf_mutex); 1169 1170 ret = ath10k_monitor_vdev_create(ar); 1171 if (ret) { 1172 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); 1173 return ret; 1174 } 1175 1176 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); 1177 if (ret) { 1178 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); 1179 ath10k_monitor_vdev_delete(ar); 1180 return ret; 1181 } 1182 1183 ar->monitor_started = true; 1184 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); 1185 1186 return 0; 1187 } 1188 1189 static int ath10k_monitor_stop(struct ath10k *ar) 1190 { 1191 int ret; 1192 1193 lockdep_assert_held(&ar->conf_mutex); 1194 1195 ret = ath10k_monitor_vdev_stop(ar); 1196 if (ret) { 1197 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); 1198 return ret; 1199 } 1200 1201 ret = ath10k_monitor_vdev_delete(ar); 1202 if (ret) { 1203 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); 1204 return ret; 1205 } 1206 1207 ar->monitor_started = false; 1208 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); 1209 1210 return 0; 1211 } 1212 1213 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar) 1214 { 1215 int num_ctx; 1216 1217 /* At least one chanctx is required to derive a channel to start 1218 * monitor vdev on. 1219 */ 1220 num_ctx = ath10k_mac_num_chanctxs(ar); 1221 if (num_ctx == 0) 1222 return false; 1223 1224 /* If there's already an existing special monitor interface then don't 1225 * bother creating another monitor vdev. 1226 */ 1227 if (ar->monitor_arvif) 1228 return false; 1229 1230 return ar->monitor || 1231 (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST, 1232 ar->running_fw->fw_file.fw_features) && 1233 (ar->filter_flags & FIF_OTHER_BSS)) || 1234 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1235 } 1236 1237 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar) 1238 { 1239 int num_ctx; 1240 1241 num_ctx = ath10k_mac_num_chanctxs(ar); 1242 1243 /* FIXME: Current interface combinations and cfg80211/mac80211 code 1244 * shouldn't allow this but make sure to prevent handling the following 1245 * case anyway since multi-channel DFS hasn't been tested at all. 1246 */ 1247 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1) 1248 return false; 1249 1250 return true; 1251 } 1252 1253 static int ath10k_monitor_recalc(struct ath10k *ar) 1254 { 1255 bool needed; 1256 bool allowed; 1257 int ret; 1258 1259 lockdep_assert_held(&ar->conf_mutex); 1260 1261 needed = ath10k_mac_monitor_vdev_is_needed(ar); 1262 allowed = ath10k_mac_monitor_vdev_is_allowed(ar); 1263 1264 ath10k_dbg(ar, ATH10K_DBG_MAC, 1265 "mac monitor recalc started? %d needed? %d allowed? 
%d\n", 1266 ar->monitor_started, needed, allowed); 1267 1268 if (WARN_ON(needed && !allowed)) { 1269 if (ar->monitor_started) { 1270 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n"); 1271 1272 ret = ath10k_monitor_stop(ar); 1273 if (ret) 1274 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", 1275 ret); 1276 /* not serious */ 1277 } 1278 1279 return -EPERM; 1280 } 1281 1282 if (needed == ar->monitor_started) 1283 return 0; 1284 1285 if (needed) 1286 return ath10k_monitor_start(ar); 1287 else 1288 return ath10k_monitor_stop(ar); 1289 } 1290 1291 static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif) 1292 { 1293 struct ath10k *ar = arvif->ar; 1294 1295 lockdep_assert_held(&ar->conf_mutex); 1296 1297 if (!arvif->is_started) { 1298 ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n"); 1299 return false; 1300 } 1301 1302 return true; 1303 } 1304 1305 static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif) 1306 { 1307 struct ath10k *ar = arvif->ar; 1308 u32 vdev_param; 1309 1310 lockdep_assert_held(&ar->conf_mutex); 1311 1312 vdev_param = ar->wmi.vdev_param->protection_mode; 1313 1314 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n", 1315 arvif->vdev_id, arvif->use_cts_prot); 1316 1317 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1318 arvif->use_cts_prot ? 1 : 0); 1319 } 1320 1321 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) 1322 { 1323 struct ath10k *ar = arvif->ar; 1324 u32 vdev_param, rts_cts = 0; 1325 1326 lockdep_assert_held(&ar->conf_mutex); 1327 1328 vdev_param = ar->wmi.vdev_param->enable_rtscts; 1329 1330 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET); 1331 1332 if (arvif->num_legacy_stations > 0) 1333 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES, 1334 WMI_RTSCTS_PROFILE); 1335 else 1336 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES, 1337 WMI_RTSCTS_PROFILE); 1338 1339 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n", 1340 arvif->vdev_id, rts_cts); 1341 1342 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1343 rts_cts); 1344 } 1345 1346 static int ath10k_start_cac(struct ath10k *ar) 1347 { 1348 int ret; 1349 1350 lockdep_assert_held(&ar->conf_mutex); 1351 1352 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1353 1354 ret = ath10k_monitor_recalc(ar); 1355 if (ret) { 1356 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); 1357 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1358 return ret; 1359 } 1360 1361 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", 1362 ar->monitor_vdev_id); 1363 1364 return 0; 1365 } 1366 1367 static int ath10k_stop_cac(struct ath10k *ar) 1368 { 1369 lockdep_assert_held(&ar->conf_mutex); 1370 1371 /* CAC is not running - do nothing */ 1372 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) 1373 return 0; 1374 1375 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1376 ath10k_monitor_stop(ar); 1377 1378 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); 1379 1380 return 0; 1381 } 1382 1383 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw, 1384 struct ieee80211_chanctx_conf *conf, 1385 void *data) 1386 { 1387 bool *ret = data; 1388 1389 if (!*ret && conf->radar_enabled) 1390 *ret = true; 1391 } 1392 1393 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar) 1394 { 1395 bool has_radar = false; 1396 1397 ieee80211_iter_chan_contexts_atomic(ar->hw, 1398 ath10k_mac_has_radar_iter, 1399 &has_radar); 1400 1401 return has_radar; 1402 } 1403 1404 static void 
ath10k_recalc_radar_detection(struct ath10k *ar) 1405 { 1406 int ret; 1407 1408 lockdep_assert_held(&ar->conf_mutex); 1409 1410 ath10k_stop_cac(ar); 1411 1412 if (!ath10k_mac_has_radar_enabled(ar)) 1413 return; 1414 1415 if (ar->num_started_vdevs > 0) 1416 return; 1417 1418 ret = ath10k_start_cac(ar); 1419 if (ret) { 1420 /* 1421 * Not possible to start CAC on current channel so starting 1422 * radiation is not allowed, make this channel DFS_UNAVAILABLE 1423 * by indicating that radar was detected. 1424 */ 1425 ath10k_warn(ar, "failed to start CAC: %d\n", ret); 1426 ieee80211_radar_detected(ar->hw); 1427 } 1428 } 1429 1430 static int ath10k_vdev_stop(struct ath10k_vif *arvif) 1431 { 1432 struct ath10k *ar = arvif->ar; 1433 int ret; 1434 1435 lockdep_assert_held(&ar->conf_mutex); 1436 1437 reinit_completion(&ar->vdev_setup_done); 1438 reinit_completion(&ar->vdev_delete_done); 1439 1440 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); 1441 if (ret) { 1442 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n", 1443 arvif->vdev_id, ret); 1444 return ret; 1445 } 1446 1447 ret = ath10k_vdev_setup_sync(ar); 1448 if (ret) { 1449 ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n", 1450 arvif->vdev_id, ret); 1451 return ret; 1452 } 1453 1454 WARN_ON(ar->num_started_vdevs == 0); 1455 1456 if (ar->num_started_vdevs != 0) { 1457 ar->num_started_vdevs--; 1458 ath10k_recalc_radar_detection(ar); 1459 } 1460 1461 return ret; 1462 } 1463 1464 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, 1465 const struct cfg80211_chan_def *chandef, 1466 bool restart) 1467 { 1468 struct ath10k *ar = arvif->ar; 1469 struct wmi_vdev_start_request_arg arg = {}; 1470 int ret = 0; 1471 1472 lockdep_assert_held(&ar->conf_mutex); 1473 1474 reinit_completion(&ar->vdev_setup_done); 1475 reinit_completion(&ar->vdev_delete_done); 1476 1477 arg.vdev_id = arvif->vdev_id; 1478 arg.dtim_period = arvif->dtim_period; 1479 arg.bcn_intval = arvif->beacon_interval; 1480 1481 arg.channel.freq = chandef->chan->center_freq; 1482 arg.channel.band_center_freq1 = chandef->center_freq1; 1483 arg.channel.band_center_freq2 = chandef->center_freq2; 1484 arg.channel.mode = chan_to_phymode(chandef); 1485 1486 arg.channel.min_power = 0; 1487 arg.channel.max_power = chandef->chan->max_power * 2; 1488 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; 1489 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; 1490 1491 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 1492 arg.ssid = arvif->u.ap.ssid; 1493 arg.ssid_len = arvif->u.ap.ssid_len; 1494 arg.hidden_ssid = arvif->u.ap.hidden_ssid; 1495 1496 /* For now allow DFS for AP mode */ 1497 arg.channel.chan_radar = 1498 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); 1499 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 1500 arg.ssid = arvif->vif->bss_conf.ssid; 1501 arg.ssid_len = arvif->vif->bss_conf.ssid_len; 1502 } 1503 1504 ath10k_dbg(ar, ATH10K_DBG_MAC, 1505 "mac vdev %d start center_freq %d phymode %s\n", 1506 arg.vdev_id, arg.channel.freq, 1507 ath10k_wmi_phymode_str(arg.channel.mode)); 1508 1509 if (restart) 1510 ret = ath10k_wmi_vdev_restart(ar, &arg); 1511 else 1512 ret = ath10k_wmi_vdev_start(ar, &arg); 1513 1514 if (ret) { 1515 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n", 1516 arg.vdev_id, ret); 1517 return ret; 1518 } 1519 1520 ret = ath10k_vdev_setup_sync(ar); 1521 if (ret) { 1522 ath10k_warn(ar, 1523 "failed to synchronize setup for vdev %i restart %d: %d\n", 1524 arg.vdev_id, restart, ret); 1525 return ret; 1526 } 1527 1528 
ar->num_started_vdevs++; 1529 ath10k_recalc_radar_detection(ar); 1530 1531 return ret; 1532 } 1533 1534 static int ath10k_vdev_start(struct ath10k_vif *arvif, 1535 const struct cfg80211_chan_def *def) 1536 { 1537 return ath10k_vdev_start_restart(arvif, def, false); 1538 } 1539 1540 static int ath10k_vdev_restart(struct ath10k_vif *arvif, 1541 const struct cfg80211_chan_def *def) 1542 { 1543 return ath10k_vdev_start_restart(arvif, def, true); 1544 } 1545 1546 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, 1547 struct sk_buff *bcn) 1548 { 1549 struct ath10k *ar = arvif->ar; 1550 struct ieee80211_mgmt *mgmt; 1551 const u8 *p2p_ie; 1552 int ret; 1553 1554 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p) 1555 return 0; 1556 1557 mgmt = (void *)bcn->data; 1558 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1559 mgmt->u.beacon.variable, 1560 bcn->len - (mgmt->u.beacon.variable - 1561 bcn->data)); 1562 if (!p2p_ie) 1563 return -ENOENT; 1564 1565 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); 1566 if (ret) { 1567 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", 1568 arvif->vdev_id, ret); 1569 return ret; 1570 } 1571 1572 return 0; 1573 } 1574 1575 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, 1576 u8 oui_type, size_t ie_offset) 1577 { 1578 size_t len; 1579 const u8 *next; 1580 const u8 *end; 1581 u8 *ie; 1582 1583 if (WARN_ON(skb->len < ie_offset)) 1584 return -EINVAL; 1585 1586 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, 1587 skb->data + ie_offset, 1588 skb->len - ie_offset); 1589 if (!ie) 1590 return -ENOENT; 1591 1592 len = ie[1] + 2; 1593 end = skb->data + skb->len; 1594 next = ie + len; 1595 1596 if (WARN_ON(next > end)) 1597 return -EINVAL; 1598 1599 memmove(ie, next, end - next); 1600 skb_trim(skb, skb->len - len); 1601 1602 return 0; 1603 } 1604 1605 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif) 1606 { 1607 struct ath10k *ar = arvif->ar; 1608 struct ieee80211_hw *hw = ar->hw; 1609 struct ieee80211_vif *vif = arvif->vif; 1610 struct ieee80211_mutable_offsets offs = {}; 1611 struct sk_buff *bcn; 1612 int ret; 1613 1614 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1615 return 0; 1616 1617 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 1618 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 1619 return 0; 1620 1621 bcn = ieee80211_beacon_get_template(hw, vif, &offs); 1622 if (!bcn) { 1623 ath10k_warn(ar, "failed to get beacon template from mac80211\n"); 1624 return -EPERM; 1625 } 1626 1627 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn); 1628 if (ret) { 1629 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); 1630 kfree_skb(bcn); 1631 return ret; 1632 } 1633 1634 /* P2P IE is inserted by firmware automatically (as configured above) 1635 * so remove it from the base beacon template to avoid duplicate P2P 1636 * IEs in beacon frames. 
1637 */ 1638 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1639 offsetof(struct ieee80211_mgmt, 1640 u.beacon.variable)); 1641 1642 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, 1643 0, NULL, 0); 1644 kfree_skb(bcn); 1645 1646 if (ret) { 1647 ath10k_warn(ar, "failed to submit beacon template command: %d\n", 1648 ret); 1649 return ret; 1650 } 1651 1652 return 0; 1653 } 1654 1655 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) 1656 { 1657 struct ath10k *ar = arvif->ar; 1658 struct ieee80211_hw *hw = ar->hw; 1659 struct ieee80211_vif *vif = arvif->vif; 1660 struct sk_buff *prb; 1661 int ret; 1662 1663 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1664 return 0; 1665 1666 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1667 return 0; 1668 1669 /* For mesh, probe response and beacon share the same template */ 1670 if (ieee80211_vif_is_mesh(vif)) 1671 return 0; 1672 1673 prb = ieee80211_proberesp_get(hw, vif); 1674 if (!prb) { 1675 ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); 1676 return -EPERM; 1677 } 1678 1679 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); 1680 kfree_skb(prb); 1681 1682 if (ret) { 1683 ath10k_warn(ar, "failed to submit probe resp template command: %d\n", 1684 ret); 1685 return ret; 1686 } 1687 1688 return 0; 1689 } 1690 1691 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif) 1692 { 1693 struct ath10k *ar = arvif->ar; 1694 struct cfg80211_chan_def def; 1695 int ret; 1696 1697 /* When originally vdev is started during assign_vif_chanctx() some 1698 * information is missing, notably SSID. Firmware revisions with beacon 1699 * offloading require the SSID to be provided during vdev (re)start to 1700 * handle hidden SSID properly. 1701 * 1702 * Vdev restart must be done after vdev has been both started and 1703 * upped. Otherwise some firmware revisions (at least 10.2) fail to 1704 * deliver vdev restart response event causing timeouts during vdev 1705 * syncing in ath10k. 1706 * 1707 * Note: The vdev down/up and template reinstallation could be skipped 1708 * since only wmi-tlv firmware are known to have beacon offload and 1709 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart 1710 * response delivery. It's probably more robust to keep it as is. 1711 */ 1712 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1713 return 0; 1714 1715 if (WARN_ON(!arvif->is_started)) 1716 return -EINVAL; 1717 1718 if (WARN_ON(!arvif->is_up)) 1719 return -EINVAL; 1720 1721 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 1722 return -EINVAL; 1723 1724 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1725 if (ret) { 1726 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n", 1727 arvif->vdev_id, ret); 1728 return ret; 1729 } 1730 1731 /* Vdev down reset beacon & presp templates. Reinstall them. Otherwise 1732 * firmware will crash upon vdev up. 
1733 */ 1734 1735 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1736 if (ret) { 1737 ath10k_warn(ar, "failed to update beacon template: %d\n", ret); 1738 return ret; 1739 } 1740 1741 ret = ath10k_mac_setup_prb_tmpl(arvif); 1742 if (ret) { 1743 ath10k_warn(ar, "failed to update presp template: %d\n", ret); 1744 return ret; 1745 } 1746 1747 ret = ath10k_vdev_restart(arvif, &def); 1748 if (ret) { 1749 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n", 1750 arvif->vdev_id, ret); 1751 return ret; 1752 } 1753 1754 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1755 arvif->bssid); 1756 if (ret) { 1757 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n", 1758 arvif->vdev_id, ret); 1759 return ret; 1760 } 1761 1762 return 0; 1763 } 1764 1765 static void ath10k_control_beaconing(struct ath10k_vif *arvif, 1766 struct ieee80211_bss_conf *info) 1767 { 1768 struct ath10k *ar = arvif->ar; 1769 int ret = 0; 1770 1771 lockdep_assert_held(&arvif->ar->conf_mutex); 1772 1773 if (!info->enable_beacon) { 1774 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1775 if (ret) 1776 ath10k_warn(ar, "failed to down vdev_id %i: %d\n", 1777 arvif->vdev_id, ret); 1778 1779 arvif->is_up = false; 1780 1781 spin_lock_bh(&arvif->ar->data_lock); 1782 ath10k_mac_vif_beacon_free(arvif); 1783 spin_unlock_bh(&arvif->ar->data_lock); 1784 1785 return; 1786 } 1787 1788 arvif->tx_seq_no = 0x1000; 1789 1790 arvif->aid = 0; 1791 ether_addr_copy(arvif->bssid, info->bssid); 1792 1793 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1794 arvif->bssid); 1795 if (ret) { 1796 ath10k_warn(ar, "failed to bring up vdev %d: %i\n", 1797 arvif->vdev_id, ret); 1798 return; 1799 } 1800 1801 arvif->is_up = true; 1802 1803 ret = ath10k_mac_vif_fix_hidden_ssid(arvif); 1804 if (ret) { 1805 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n", 1806 arvif->vdev_id, ret); 1807 return; 1808 } 1809 1810 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); 1811 } 1812 1813 static void ath10k_control_ibss(struct ath10k_vif *arvif, 1814 struct ieee80211_bss_conf *info, 1815 const u8 self_peer[ETH_ALEN]) 1816 { 1817 struct ath10k *ar = arvif->ar; 1818 u32 vdev_param; 1819 int ret = 0; 1820 1821 lockdep_assert_held(&arvif->ar->conf_mutex); 1822 1823 if (!info->ibss_joined) { 1824 if (is_zero_ether_addr(arvif->bssid)) 1825 return; 1826 1827 eth_zero_addr(arvif->bssid); 1828 1829 return; 1830 } 1831 1832 vdev_param = arvif->ar->wmi.vdev_param->atim_window; 1833 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, 1834 ATH10K_DEFAULT_ATIM); 1835 if (ret) 1836 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", 1837 arvif->vdev_id, ret); 1838 } 1839 1840 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif) 1841 { 1842 struct ath10k *ar = arvif->ar; 1843 u32 param; 1844 u32 value; 1845 int ret; 1846 1847 lockdep_assert_held(&arvif->ar->conf_mutex); 1848 1849 if (arvif->u.sta.uapsd) 1850 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER; 1851 else 1852 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; 1853 1854 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 1855 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); 1856 if (ret) { 1857 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n", 1858 value, arvif->vdev_id, ret); 1859 return ret; 1860 } 1861 1862 return 0; 1863 } 1864 1865 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) 1866 { 1867 struct ath10k *ar = arvif->ar; 1868 u32 param; 1869 u32 value; 1870 
int ret; 1871 1872 lockdep_assert_held(&arvif->ar->conf_mutex); 1873 1874 if (arvif->u.sta.uapsd) 1875 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD; 1876 else 1877 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; 1878 1879 param = WMI_STA_PS_PARAM_PSPOLL_COUNT; 1880 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 1881 param, value); 1882 if (ret) { 1883 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n", 1884 value, arvif->vdev_id, ret); 1885 return ret; 1886 } 1887 1888 return 0; 1889 } 1890 1891 static int ath10k_mac_num_vifs_started(struct ath10k *ar) 1892 { 1893 struct ath10k_vif *arvif; 1894 int num = 0; 1895 1896 lockdep_assert_held(&ar->conf_mutex); 1897 1898 list_for_each_entry(arvif, &ar->arvifs, list) 1899 if (arvif->is_started) 1900 num++; 1901 1902 return num; 1903 } 1904 1905 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) 1906 { 1907 struct ath10k *ar = arvif->ar; 1908 struct ieee80211_vif *vif = arvif->vif; 1909 struct ieee80211_conf *conf = &ar->hw->conf; 1910 enum wmi_sta_powersave_param param; 1911 enum wmi_sta_ps_mode psmode; 1912 int ret; 1913 int ps_timeout; 1914 bool enable_ps; 1915 1916 lockdep_assert_held(&arvif->ar->conf_mutex); 1917 1918 if (arvif->vif->type != NL80211_IFTYPE_STATION) 1919 return 0; 1920 1921 enable_ps = arvif->ps; 1922 1923 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && 1924 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, 1925 ar->running_fw->fw_file.fw_features)) { 1926 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", 1927 arvif->vdev_id); 1928 enable_ps = false; 1929 } 1930 1931 if (!arvif->is_started) { 1932 /* mac80211 can update vif powersave state while disconnected. 1933 * Firmware doesn't behave nicely and consumes more power than 1934 * necessary if PS is disabled on a non-started vdev. Hence 1935 * force-enable PS for non-running vdevs. 1936 */ 1937 psmode = WMI_STA_PS_MODE_ENABLED; 1938 } else if (enable_ps) { 1939 psmode = WMI_STA_PS_MODE_ENABLED; 1940 param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 1941 1942 ps_timeout = conf->dynamic_ps_timeout; 1943 if (ps_timeout == 0) { 1944 /* Firmware doesn't like 0 */ 1945 ps_timeout = ieee80211_tu_to_usec( 1946 vif->bss_conf.beacon_int) / 1000; 1947 } 1948 1949 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 1950 ps_timeout); 1951 if (ret) { 1952 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", 1953 arvif->vdev_id, ret); 1954 return ret; 1955 } 1956 } else { 1957 psmode = WMI_STA_PS_MODE_DISABLED; 1958 } 1959 1960 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", 1961 arvif->vdev_id, psmode ? "enable" : "disable"); 1962 1963 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); 1964 if (ret) { 1965 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", 1966 psmode, arvif->vdev_id, ret); 1967 return ret; 1968 } 1969 1970 return 0; 1971 } 1972 1973 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif) 1974 { 1975 struct ath10k *ar = arvif->ar; 1976 struct wmi_sta_keepalive_arg arg = {}; 1977 int ret; 1978 1979 lockdep_assert_held(&arvif->ar->conf_mutex); 1980 1981 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 1982 return 0; 1983 1984 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) 1985 return 0; 1986 1987 /* Some firmware revisions have a bug and ignore the `enabled` field. 1988 * Instead use the interval to disable the keepalive. 
1989 */ 1990 arg.vdev_id = arvif->vdev_id; 1991 arg.enabled = 1; 1992 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; 1993 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE; 1994 1995 ret = ath10k_wmi_sta_keepalive(ar, &arg); 1996 if (ret) { 1997 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", 1998 arvif->vdev_id, ret); 1999 return ret; 2000 } 2001 2002 return 0; 2003 } 2004 2005 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif) 2006 { 2007 struct ath10k *ar = arvif->ar; 2008 struct ieee80211_vif *vif = arvif->vif; 2009 int ret; 2010 2011 lockdep_assert_held(&arvif->ar->conf_mutex); 2012 2013 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))) 2014 return; 2015 2016 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 2017 return; 2018 2019 if (!vif->csa_active) 2020 return; 2021 2022 if (!arvif->is_up) 2023 return; 2024 2025 if (!ieee80211_csa_is_complete(vif)) { 2026 ieee80211_csa_update_counter(vif); 2027 2028 ret = ath10k_mac_setup_bcn_tmpl(arvif); 2029 if (ret) 2030 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 2031 ret); 2032 2033 ret = ath10k_mac_setup_prb_tmpl(arvif); 2034 if (ret) 2035 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 2036 ret); 2037 } else { 2038 ieee80211_csa_finish(vif); 2039 } 2040 } 2041 2042 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work) 2043 { 2044 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 2045 ap_csa_work); 2046 struct ath10k *ar = arvif->ar; 2047 2048 mutex_lock(&ar->conf_mutex); 2049 ath10k_mac_vif_ap_csa_count_down(arvif); 2050 mutex_unlock(&ar->conf_mutex); 2051 } 2052 2053 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, 2054 struct ieee80211_vif *vif) 2055 { 2056 struct sk_buff *skb = data; 2057 struct ieee80211_mgmt *mgmt = (void *)skb->data; 2058 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2059 2060 if (vif->type != NL80211_IFTYPE_STATION) 2061 return; 2062 2063 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) 2064 return; 2065 2066 cancel_delayed_work(&arvif->connection_loss_work); 2067 } 2068 2069 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb) 2070 { 2071 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2072 IEEE80211_IFACE_ITER_NORMAL, 2073 ath10k_mac_handle_beacon_iter, 2074 skb); 2075 } 2076 2077 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, 2078 struct ieee80211_vif *vif) 2079 { 2080 u32 *vdev_id = data; 2081 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2082 struct ath10k *ar = arvif->ar; 2083 struct ieee80211_hw *hw = ar->hw; 2084 2085 if (arvif->vdev_id != *vdev_id) 2086 return; 2087 2088 if (!arvif->is_up) 2089 return; 2090 2091 ieee80211_beacon_loss(vif); 2092 2093 /* Firmware doesn't report beacon loss events repeatedly. If AP probe 2094 * (done by mac80211) succeeds but beacons do not resume then it 2095 * doesn't make sense to continue operation. Queue connection loss work 2096 * which can be cancelled when beacon is received. 
2097 */ 2098 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, 2099 ATH10K_CONNECTION_LOSS_HZ); 2100 } 2101 2102 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id) 2103 { 2104 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2105 IEEE80211_IFACE_ITER_NORMAL, 2106 ath10k_mac_handle_beacon_miss_iter, 2107 &vdev_id); 2108 } 2109 2110 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work) 2111 { 2112 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 2113 connection_loss_work.work); 2114 struct ieee80211_vif *vif = arvif->vif; 2115 2116 if (!arvif->is_up) 2117 return; 2118 2119 ieee80211_connection_loss(vif); 2120 } 2121 2122 /**********************/ 2123 /* Station management */ 2124 /**********************/ 2125 2126 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, 2127 struct ieee80211_vif *vif) 2128 { 2129 /* Some firmware revisions have unstable STA powersave when listen 2130 * interval is set too high (e.g. 5). The symptoms are firmware doesn't 2131 * generate NullFunc frames properly even if buffered frames have been 2132 * indicated in Beacon TIM. Firmware would seldom wake up to pull 2133 * buffered frames. Often pinging the device from AP would simply fail. 2134 * 2135 * As a workaround set it to 1. 2136 */ 2137 if (vif->type == NL80211_IFTYPE_STATION) 2138 return 1; 2139 2140 return ar->hw->conf.listen_interval; 2141 } 2142 2143 static void ath10k_peer_assoc_h_basic(struct ath10k *ar, 2144 struct ieee80211_vif *vif, 2145 struct ieee80211_sta *sta, 2146 struct wmi_peer_assoc_complete_arg *arg) 2147 { 2148 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2149 u32 aid; 2150 2151 lockdep_assert_held(&ar->conf_mutex); 2152 2153 if (vif->type == NL80211_IFTYPE_STATION) 2154 aid = vif->bss_conf.aid; 2155 else 2156 aid = sta->aid; 2157 2158 ether_addr_copy(arg->addr, sta->addr); 2159 arg->vdev_id = arvif->vdev_id; 2160 arg->peer_aid = aid; 2161 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth; 2162 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); 2163 arg->peer_num_spatial_streams = 1; 2164 arg->peer_caps = vif->bss_conf.assoc_capability; 2165 } 2166 2167 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, 2168 struct ieee80211_vif *vif, 2169 struct ieee80211_sta *sta, 2170 struct wmi_peer_assoc_complete_arg *arg) 2171 { 2172 struct ieee80211_bss_conf *info = &vif->bss_conf; 2173 struct cfg80211_chan_def def; 2174 struct cfg80211_bss *bss; 2175 const u8 *rsnie = NULL; 2176 const u8 *wpaie = NULL; 2177 2178 lockdep_assert_held(&ar->conf_mutex); 2179 2180 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2181 return; 2182 2183 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, 2184 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); 2185 if (bss) { 2186 const struct cfg80211_bss_ies *ies; 2187 2188 rcu_read_lock(); 2189 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); 2190 2191 ies = rcu_dereference(bss->ies); 2192 2193 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 2194 WLAN_OUI_TYPE_MICROSOFT_WPA, 2195 ies->data, 2196 ies->len); 2197 rcu_read_unlock(); 2198 cfg80211_put_bss(ar->hw->wiphy, bss); 2199 } 2200 2201 /* FIXME: base on RSN IE/WPA IE is a correct idea? 
*/ 2202 if (rsnie || wpaie) { 2203 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); 2204 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; 2205 } 2206 2207 if (wpaie) { 2208 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); 2209 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; 2210 } 2211 2212 if (sta->mfp && 2213 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, 2214 ar->running_fw->fw_file.fw_features)) { 2215 arg->peer_flags |= ar->wmi.peer_flags->pmf; 2216 } 2217 } 2218 2219 static void ath10k_peer_assoc_h_rates(struct ath10k *ar, 2220 struct ieee80211_vif *vif, 2221 struct ieee80211_sta *sta, 2222 struct wmi_peer_assoc_complete_arg *arg) 2223 { 2224 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2225 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 2226 struct cfg80211_chan_def def; 2227 const struct ieee80211_supported_band *sband; 2228 const struct ieee80211_rate *rates; 2229 enum nl80211_band band; 2230 u32 ratemask; 2231 u8 rate; 2232 int i; 2233 2234 lockdep_assert_held(&ar->conf_mutex); 2235 2236 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2237 return; 2238 2239 band = def.chan->band; 2240 sband = ar->hw->wiphy->bands[band]; 2241 ratemask = sta->supp_rates[band]; 2242 ratemask &= arvif->bitrate_mask.control[band].legacy; 2243 rates = sband->bitrates; 2244 2245 rateset->num_rates = 0; 2246 2247 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { 2248 if (!(ratemask & 1)) 2249 continue; 2250 2251 rate = ath10k_mac_bitrate_to_rate(rates->bitrate); 2252 rateset->rates[rateset->num_rates] = rate; 2253 rateset->num_rates++; 2254 } 2255 } 2256 2257 static bool 2258 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 2259 { 2260 int nss; 2261 2262 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) 2263 if (ht_mcs_mask[nss]) 2264 return false; 2265 2266 return true; 2267 } 2268 2269 static bool 2270 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 2271 { 2272 int nss; 2273 2274 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) 2275 if (vht_mcs_mask[nss]) 2276 return false; 2277 2278 return true; 2279 } 2280 2281 static void ath10k_peer_assoc_h_ht(struct ath10k *ar, 2282 struct ieee80211_vif *vif, 2283 struct ieee80211_sta *sta, 2284 struct wmi_peer_assoc_complete_arg *arg) 2285 { 2286 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2287 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2288 struct cfg80211_chan_def def; 2289 enum nl80211_band band; 2290 const u8 *ht_mcs_mask; 2291 const u16 *vht_mcs_mask; 2292 int i, n; 2293 u8 max_nss; 2294 u32 stbc; 2295 2296 lockdep_assert_held(&ar->conf_mutex); 2297 2298 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2299 return; 2300 2301 if (!ht_cap->ht_supported) 2302 return; 2303 2304 band = def.chan->band; 2305 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2306 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2307 2308 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) && 2309 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2310 return; 2311 2312 arg->peer_flags |= ar->wmi.peer_flags->ht; 2313 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2314 ht_cap->ampdu_factor)) - 1; 2315 2316 arg->peer_mpdu_density = 2317 ath10k_parse_mpdudensity(ht_cap->ampdu_density); 2318 2319 arg->peer_ht_caps = ht_cap->cap; 2320 arg->peer_rate_caps |= WMI_RC_HT_FLAG; 2321 2322 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 2323 arg->peer_flags |= ar->wmi.peer_flags->ldbc; 2324 2325 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { 2326 
arg->peer_flags |= ar->wmi.peer_flags->bw40; 2327 arg->peer_rate_caps |= WMI_RC_CW40_FLAG; 2328 } 2329 2330 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { 2331 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) 2332 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2333 2334 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) 2335 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2336 } 2337 2338 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { 2339 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; 2340 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2341 } 2342 2343 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { 2344 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; 2345 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; 2346 stbc = stbc << WMI_RC_RX_STBC_FLAG_S; 2347 arg->peer_rate_caps |= stbc; 2348 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2349 } 2350 2351 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 2352 arg->peer_rate_caps |= WMI_RC_TS_FLAG; 2353 else if (ht_cap->mcs.rx_mask[1]) 2354 arg->peer_rate_caps |= WMI_RC_DS_FLAG; 2355 2356 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) 2357 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && 2358 (ht_mcs_mask[i / 8] & BIT(i % 8))) { 2359 max_nss = (i / 8) + 1; 2360 arg->peer_ht_rates.rates[n++] = i; 2361 } 2362 2363 /* 2364 * This is a workaround for HT-enabled STAs which break the spec 2365 * and have no HT capabilities RX mask (no HT RX MCS map). 2366 * 2367 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), 2368 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. 2369 * 2370 * Firmware asserts if such situation occurs. 2371 */ 2372 if (n == 0) { 2373 arg->peer_ht_rates.num_rates = 8; 2374 for (i = 0; i < arg->peer_ht_rates.num_rates; i++) 2375 arg->peer_ht_rates.rates[i] = i; 2376 } else { 2377 arg->peer_ht_rates.num_rates = n; 2378 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2379 } 2380 2381 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 2382 arg->addr, 2383 arg->peer_ht_rates.num_rates, 2384 arg->peer_num_spatial_streams); 2385 } 2386 2387 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, 2388 struct ath10k_vif *arvif, 2389 struct ieee80211_sta *sta) 2390 { 2391 u32 uapsd = 0; 2392 u32 max_sp = 0; 2393 int ret = 0; 2394 2395 lockdep_assert_held(&ar->conf_mutex); 2396 2397 if (sta->wme && sta->uapsd_queues) { 2398 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 2399 sta->uapsd_queues, sta->max_sp); 2400 2401 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2402 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 2403 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 2404 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2405 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | 2406 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; 2407 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2408 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | 2409 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; 2410 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2411 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | 2412 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; 2413 2414 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 2415 max_sp = sta->max_sp; 2416 2417 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2418 sta->addr, 2419 WMI_AP_PS_PEER_PARAM_UAPSD, 2420 uapsd); 2421 if (ret) { 2422 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", 2423 arvif->vdev_id, ret); 2424 return ret; 2425 } 2426 2427 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2428 sta->addr, 2429 
WMI_AP_PS_PEER_PARAM_MAX_SP, 2430 max_sp); 2431 if (ret) { 2432 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", 2433 arvif->vdev_id, ret); 2434 return ret; 2435 } 2436 2437 /* TODO setup this based on STA listen interval and 2438 * beacon interval. Currently we don't know 2439 * sta->listen_interval - mac80211 patch required. 2440 * Currently use 10 seconds 2441 */ 2442 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, 2443 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 2444 10); 2445 if (ret) { 2446 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", 2447 arvif->vdev_id, ret); 2448 return ret; 2449 } 2450 } 2451 2452 return 0; 2453 } 2454 2455 static u16 2456 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set, 2457 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) 2458 { 2459 int idx_limit; 2460 int nss; 2461 u16 mcs_map; 2462 u16 mcs; 2463 2464 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { 2465 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & 2466 vht_mcs_limit[nss]; 2467 2468 if (mcs_map) 2469 idx_limit = fls(mcs_map) - 1; 2470 else 2471 idx_limit = -1; 2472 2473 switch (idx_limit) { 2474 case 0: /* fall through */ 2475 case 1: /* fall through */ 2476 case 2: /* fall through */ 2477 case 3: /* fall through */ 2478 case 4: /* fall through */ 2479 case 5: /* fall through */ 2480 case 6: /* fall through */ 2481 default: 2482 /* see ath10k_mac_can_set_bitrate_mask() */ 2483 WARN_ON(1); 2484 /* fall through */ 2485 case -1: 2486 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; 2487 break; 2488 case 7: 2489 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; 2490 break; 2491 case 8: 2492 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; 2493 break; 2494 case 9: 2495 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; 2496 break; 2497 } 2498 2499 tx_mcs_set &= ~(0x3 << (nss * 2)); 2500 tx_mcs_set |= mcs << (nss * 2); 2501 } 2502 2503 return tx_mcs_set; 2504 } 2505 2506 static void ath10k_peer_assoc_h_vht(struct ath10k *ar, 2507 struct ieee80211_vif *vif, 2508 struct ieee80211_sta *sta, 2509 struct wmi_peer_assoc_complete_arg *arg) 2510 { 2511 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2512 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2513 struct cfg80211_chan_def def; 2514 enum nl80211_band band; 2515 const u16 *vht_mcs_mask; 2516 u8 ampdu_factor; 2517 u8 max_nss, vht_mcs; 2518 int i; 2519 2520 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2521 return; 2522 2523 if (!vht_cap->vht_supported) 2524 return; 2525 2526 band = def.chan->band; 2527 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2528 2529 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2530 return; 2531 2532 arg->peer_flags |= ar->wmi.peer_flags->vht; 2533 2534 if (def.chan->band == NL80211_BAND_2GHZ) 2535 arg->peer_flags |= ar->wmi.peer_flags->vht_2g; 2536 2537 arg->peer_vht_caps = vht_cap->cap; 2538 2539 ampdu_factor = (vht_cap->cap & 2540 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> 2541 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 2542 2543 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to 2544 * zero in VHT IE. Using it would result in degraded throughput. 2545 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep 2546 * it if VHT max_mpdu is smaller. 
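 *
 * Illustrative numbers (IEEE80211_HT_MAX_AMPDU_FACTOR is 13): a buggy AP
 * advertising a VHT A-MPDU length exponent of 0 would yield
 * 2^13 - 1 = 8191 bytes here, while an HT A-MPDU factor of 3 already gave
 * 2^16 - 1 = 65535 bytes above, so the max() below keeps the larger HT
 * value.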
2547 */ 2548 arg->peer_max_mpdu = max(arg->peer_max_mpdu, 2549 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2550 ampdu_factor)) - 1); 2551 2552 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2553 arg->peer_flags |= ar->wmi.peer_flags->bw80; 2554 2555 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) 2556 arg->peer_flags |= ar->wmi.peer_flags->bw160; 2557 2558 /* Calculate peer NSS capability from VHT capabilities if STA 2559 * supports VHT. 2560 */ 2561 for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) { 2562 vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> 2563 (2 * i) & 3; 2564 2565 if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) && 2566 vht_mcs_mask[i]) 2567 max_nss = i + 1; 2568 } 2569 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2570 arg->peer_vht_rates.rx_max_rate = 2571 __le16_to_cpu(vht_cap->vht_mcs.rx_highest); 2572 arg->peer_vht_rates.rx_mcs_set = 2573 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); 2574 arg->peer_vht_rates.tx_max_rate = 2575 __le16_to_cpu(vht_cap->vht_mcs.tx_highest); 2576 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit( 2577 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); 2578 2579 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", 2580 sta->addr, arg->peer_max_mpdu, arg->peer_flags); 2581 2582 if (arg->peer_vht_rates.rx_max_rate && 2583 (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) { 2584 switch (arg->peer_vht_rates.rx_max_rate) { 2585 case 1560: 2586 /* Must be 2x2 at 160Mhz is all it can do. */ 2587 arg->peer_bw_rxnss_override = 2; 2588 break; 2589 case 780: 2590 /* Can only do 1x1 at 160Mhz (Long Guard Interval) */ 2591 arg->peer_bw_rxnss_override = 1; 2592 break; 2593 } 2594 } 2595 } 2596 2597 static void ath10k_peer_assoc_h_qos(struct ath10k *ar, 2598 struct ieee80211_vif *vif, 2599 struct ieee80211_sta *sta, 2600 struct wmi_peer_assoc_complete_arg *arg) 2601 { 2602 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2603 2604 switch (arvif->vdev_type) { 2605 case WMI_VDEV_TYPE_AP: 2606 if (sta->wme) 2607 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2608 2609 if (sta->wme && sta->uapsd_queues) { 2610 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd; 2611 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; 2612 } 2613 break; 2614 case WMI_VDEV_TYPE_STA: 2615 if (sta->wme) 2616 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2617 break; 2618 case WMI_VDEV_TYPE_IBSS: 2619 if (sta->wme) 2620 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2621 break; 2622 default: 2623 break; 2624 } 2625 2626 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", 2627 sta->addr, !!(arg->peer_flags & 2628 arvif->ar->wmi.peer_flags->qos)); 2629 } 2630 2631 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) 2632 { 2633 return sta->supp_rates[NL80211_BAND_2GHZ] >> 2634 ATH10K_MAC_FIRST_OFDM_RATE_IDX; 2635 } 2636 2637 static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, 2638 struct ieee80211_sta *sta) 2639 { 2640 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { 2641 switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 2642 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 2643 return MODE_11AC_VHT160; 2644 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: 2645 return MODE_11AC_VHT80_80; 2646 default: 2647 /* not sure if this is a valid case? 
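 *
 * The STA reports 160 MHz RX bandwidth while its VHT capability field
 * advertises neither 160 MHz nor 160/80+80 MHz channel-width support
 * (or a reserved value), so fall back to plain VHT160 as the least
 * surprising choice.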
*/ 2648 return MODE_11AC_VHT160; 2649 } 2650 } 2651 2652 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2653 return MODE_11AC_VHT80; 2654 2655 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2656 return MODE_11AC_VHT40; 2657 2658 if (sta->bandwidth == IEEE80211_STA_RX_BW_20) 2659 return MODE_11AC_VHT20; 2660 2661 return MODE_UNKNOWN; 2662 } 2663 2664 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, 2665 struct ieee80211_vif *vif, 2666 struct ieee80211_sta *sta, 2667 struct wmi_peer_assoc_complete_arg *arg) 2668 { 2669 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2670 struct cfg80211_chan_def def; 2671 enum nl80211_band band; 2672 const u8 *ht_mcs_mask; 2673 const u16 *vht_mcs_mask; 2674 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2675 2676 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2677 return; 2678 2679 band = def.chan->band; 2680 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2681 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2682 2683 switch (band) { 2684 case NL80211_BAND_2GHZ: 2685 if (sta->vht_cap.vht_supported && 2686 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2687 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2688 phymode = MODE_11AC_VHT40; 2689 else 2690 phymode = MODE_11AC_VHT20; 2691 } else if (sta->ht_cap.ht_supported && 2692 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2693 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2694 phymode = MODE_11NG_HT40; 2695 else 2696 phymode = MODE_11NG_HT20; 2697 } else if (ath10k_mac_sta_has_ofdm_only(sta)) { 2698 phymode = MODE_11G; 2699 } else { 2700 phymode = MODE_11B; 2701 } 2702 2703 break; 2704 case NL80211_BAND_5GHZ: 2705 /* 2706 * Check VHT first. 2707 */ 2708 if (sta->vht_cap.vht_supported && 2709 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2710 phymode = ath10k_mac_get_phymode_vht(ar, sta); 2711 } else if (sta->ht_cap.ht_supported && 2712 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2713 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) 2714 phymode = MODE_11NA_HT40; 2715 else 2716 phymode = MODE_11NA_HT20; 2717 } else { 2718 phymode = MODE_11A; 2719 } 2720 2721 break; 2722 default: 2723 break; 2724 } 2725 2726 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", 2727 sta->addr, ath10k_wmi_phymode_str(phymode)); 2728 2729 arg->peer_phymode = phymode; 2730 WARN_ON(phymode == MODE_UNKNOWN); 2731 } 2732 2733 static int ath10k_peer_assoc_prepare(struct ath10k *ar, 2734 struct ieee80211_vif *vif, 2735 struct ieee80211_sta *sta, 2736 struct wmi_peer_assoc_complete_arg *arg) 2737 { 2738 lockdep_assert_held(&ar->conf_mutex); 2739 2740 memset(arg, 0, sizeof(*arg)); 2741 2742 ath10k_peer_assoc_h_basic(ar, vif, sta, arg); 2743 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); 2744 ath10k_peer_assoc_h_rates(ar, vif, sta, arg); 2745 ath10k_peer_assoc_h_ht(ar, vif, sta, arg); 2746 ath10k_peer_assoc_h_vht(ar, vif, sta, arg); 2747 ath10k_peer_assoc_h_qos(ar, vif, sta, arg); 2748 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); 2749 2750 return 0; 2751 } 2752 2753 static const u32 ath10k_smps_map[] = { 2754 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, 2755 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, 2756 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 2757 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 2758 }; 2759 2760 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, 2761 const u8 *addr, 2762 const struct ieee80211_sta_ht_cap *ht_cap) 2763 { 2764 int smps; 2765 2766 if (!ht_cap->ht_supported) 2767 return 0; 2768 2769 smps = ht_cap->cap & 
IEEE80211_HT_CAP_SM_PS; 2770 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2771 2772 if (smps >= ARRAY_SIZE(ath10k_smps_map)) 2773 return -EINVAL; 2774 2775 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, 2776 WMI_PEER_SMPS_STATE, 2777 ath10k_smps_map[smps]); 2778 } 2779 2780 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, 2781 struct ieee80211_vif *vif, 2782 struct ieee80211_sta_vht_cap vht_cap) 2783 { 2784 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2785 int ret; 2786 u32 param; 2787 u32 value; 2788 2789 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) 2790 return 0; 2791 2792 if (!(ar->vht_cap_info & 2793 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2794 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 2795 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2796 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) 2797 return 0; 2798 2799 param = ar->wmi.vdev_param->txbf; 2800 value = 0; 2801 2802 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) 2803 return 0; 2804 2805 /* The following logic is correct. If a remote STA advertises support 2806 * for being a beamformer then we should enable us being a beamformee. 2807 */ 2808 2809 if (ar->vht_cap_info & 2810 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2811 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 2812 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 2813 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2814 2815 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 2816 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; 2817 } 2818 2819 if (ar->vht_cap_info & 2820 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2821 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 2822 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 2823 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2824 2825 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 2826 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; 2827 } 2828 2829 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) 2830 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2831 2832 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) 2833 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2834 2835 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); 2836 if (ret) { 2837 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", 2838 value, ret); 2839 return ret; 2840 } 2841 2842 return 0; 2843 } 2844 2845 /* can be called only in mac80211 callbacks due to `key_count` usage */ 2846 static void ath10k_bss_assoc(struct ieee80211_hw *hw, 2847 struct ieee80211_vif *vif, 2848 struct ieee80211_bss_conf *bss_conf) 2849 { 2850 struct ath10k *ar = hw->priv; 2851 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2852 struct ieee80211_sta_ht_cap ht_cap; 2853 struct ieee80211_sta_vht_cap vht_cap; 2854 struct wmi_peer_assoc_complete_arg peer_arg; 2855 struct ieee80211_sta *ap_sta; 2856 int ret; 2857 2858 lockdep_assert_held(&ar->conf_mutex); 2859 2860 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2861 arvif->vdev_id, arvif->bssid, arvif->aid); 2862 2863 rcu_read_lock(); 2864 2865 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2866 if (!ap_sta) { 2867 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", 2868 bss_conf->bssid, arvif->vdev_id); 2869 rcu_read_unlock(); 2870 return; 2871 } 2872 2873 /* ap_sta must be accessed only within rcu section which must be left 2874 * before calling ath10k_setup_peer_smps() which might sleep. 
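 *
 * That is why ht_cap and vht_cap are copied by value while still under
 * rcu_read_lock(): the copies (and the peer assoc argument prepared
 * here) are what the sleeping WMI calls below operate on once the RCU
 * read side has been left.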
2875 */ 2876 ht_cap = ap_sta->ht_cap; 2877 vht_cap = ap_sta->vht_cap; 2878 2879 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); 2880 if (ret) { 2881 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", 2882 bss_conf->bssid, arvif->vdev_id, ret); 2883 rcu_read_unlock(); 2884 return; 2885 } 2886 2887 rcu_read_unlock(); 2888 2889 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2890 if (ret) { 2891 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", 2892 bss_conf->bssid, arvif->vdev_id, ret); 2893 return; 2894 } 2895 2896 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); 2897 if (ret) { 2898 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", 2899 arvif->vdev_id, ret); 2900 return; 2901 } 2902 2903 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2904 if (ret) { 2905 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", 2906 arvif->vdev_id, bss_conf->bssid, ret); 2907 return; 2908 } 2909 2910 ath10k_dbg(ar, ATH10K_DBG_MAC, 2911 "mac vdev %d up (associated) bssid %pM aid %d\n", 2912 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 2913 2914 WARN_ON(arvif->is_up); 2915 2916 arvif->aid = bss_conf->aid; 2917 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2918 2919 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2920 if (ret) { 2921 ath10k_warn(ar, "failed to set vdev %d up: %d\n", 2922 arvif->vdev_id, ret); 2923 return; 2924 } 2925 2926 arvif->is_up = true; 2927 2928 /* Workaround: Some firmware revisions (tested with qca6174 2929 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be 2930 * poked with peer param command. 2931 */ 2932 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, 2933 WMI_PEER_DUMMY_VAR, 1); 2934 if (ret) { 2935 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", 2936 arvif->bssid, arvif->vdev_id, ret); 2937 return; 2938 } 2939 } 2940 2941 static void ath10k_bss_disassoc(struct ieee80211_hw *hw, 2942 struct ieee80211_vif *vif) 2943 { 2944 struct ath10k *ar = hw->priv; 2945 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2946 struct ieee80211_sta_vht_cap vht_cap = {}; 2947 int ret; 2948 2949 lockdep_assert_held(&ar->conf_mutex); 2950 2951 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2952 arvif->vdev_id, arvif->bssid); 2953 2954 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 2955 if (ret) 2956 ath10k_warn(ar, "failed to down vdev %i: %d\n", 2957 arvif->vdev_id, ret); 2958 2959 arvif->def_wep_key_idx = -1; 2960 2961 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2962 if (ret) { 2963 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", 2964 arvif->vdev_id, ret); 2965 return; 2966 } 2967 2968 arvif->is_up = false; 2969 2970 cancel_delayed_work_sync(&arvif->connection_loss_work); 2971 } 2972 2973 static int ath10k_station_assoc(struct ath10k *ar, 2974 struct ieee80211_vif *vif, 2975 struct ieee80211_sta *sta, 2976 bool reassoc) 2977 { 2978 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2979 struct wmi_peer_assoc_complete_arg peer_arg; 2980 int ret = 0; 2981 2982 lockdep_assert_held(&ar->conf_mutex); 2983 2984 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); 2985 if (ret) { 2986 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", 2987 sta->addr, arvif->vdev_id, ret); 2988 return ret; 2989 } 2990 2991 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2992 if (ret) { 2993 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", 2994 
sta->addr, arvif->vdev_id, ret); 2995 return ret; 2996 } 2997 2998 /* Re-assoc is run only to update supported rates for given station. It 2999 * doesn't make much sense to reconfigure the peer completely. 3000 */ 3001 if (!reassoc) { 3002 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, 3003 &sta->ht_cap); 3004 if (ret) { 3005 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", 3006 arvif->vdev_id, ret); 3007 return ret; 3008 } 3009 3010 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 3011 if (ret) { 3012 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", 3013 sta->addr, arvif->vdev_id, ret); 3014 return ret; 3015 } 3016 3017 if (!sta->wme) { 3018 arvif->num_legacy_stations++; 3019 ret = ath10k_recalc_rtscts_prot(arvif); 3020 if (ret) { 3021 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 3022 arvif->vdev_id, ret); 3023 return ret; 3024 } 3025 } 3026 3027 /* Plumb cached keys only for static WEP */ 3028 if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) { 3029 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 3030 if (ret) { 3031 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", 3032 arvif->vdev_id, ret); 3033 return ret; 3034 } 3035 } 3036 } 3037 3038 return ret; 3039 } 3040 3041 static int ath10k_station_disassoc(struct ath10k *ar, 3042 struct ieee80211_vif *vif, 3043 struct ieee80211_sta *sta) 3044 { 3045 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3046 int ret = 0; 3047 3048 lockdep_assert_held(&ar->conf_mutex); 3049 3050 if (!sta->wme) { 3051 arvif->num_legacy_stations--; 3052 ret = ath10k_recalc_rtscts_prot(arvif); 3053 if (ret) { 3054 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 3055 arvif->vdev_id, ret); 3056 return ret; 3057 } 3058 } 3059 3060 ret = ath10k_clear_peer_keys(arvif, sta->addr); 3061 if (ret) { 3062 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", 3063 arvif->vdev_id, ret); 3064 return ret; 3065 } 3066 3067 return ret; 3068 } 3069 3070 /**************/ 3071 /* Regulatory */ 3072 /**************/ 3073 3074 static int ath10k_update_channel_list(struct ath10k *ar) 3075 { 3076 struct ieee80211_hw *hw = ar->hw; 3077 struct ieee80211_supported_band **bands; 3078 enum nl80211_band band; 3079 struct ieee80211_channel *channel; 3080 struct wmi_scan_chan_list_arg arg = {0}; 3081 struct wmi_channel_arg *ch; 3082 bool passive; 3083 int len; 3084 int ret; 3085 int i; 3086 3087 lockdep_assert_held(&ar->conf_mutex); 3088 3089 bands = hw->wiphy->bands; 3090 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3091 if (!bands[band]) 3092 continue; 3093 3094 for (i = 0; i < bands[band]->n_channels; i++) { 3095 if (bands[band]->channels[i].flags & 3096 IEEE80211_CHAN_DISABLED) 3097 continue; 3098 3099 arg.n_channels++; 3100 } 3101 } 3102 3103 len = sizeof(struct wmi_channel_arg) * arg.n_channels; 3104 arg.channels = kzalloc(len, GFP_KERNEL); 3105 if (!arg.channels) 3106 return -ENOMEM; 3107 3108 ch = arg.channels; 3109 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3110 if (!bands[band]) 3111 continue; 3112 3113 for (i = 0; i < bands[band]->n_channels; i++) { 3114 channel = &bands[band]->channels[i]; 3115 3116 if (channel->flags & IEEE80211_CHAN_DISABLED) 3117 continue; 3118 3119 ch->allow_ht = true; 3120 3121 /* FIXME: when should we really allow VHT? 
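 *
 * For now every enabled channel is advertised to the firmware as both
 * HT (allow_ht above) and VHT capable; what actually gets used is
 * constrained by the regulatory flags translated below (no-IR, HT40+,
 * radar/DFS, passive) and, presumably, by whatever is negotiated with
 * the peer later on.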
*/ 3122 ch->allow_vht = true; 3123 3124 ch->allow_ibss = 3125 !(channel->flags & IEEE80211_CHAN_NO_IR); 3126 3127 ch->ht40plus = 3128 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); 3129 3130 ch->chan_radar = 3131 !!(channel->flags & IEEE80211_CHAN_RADAR); 3132 3133 passive = channel->flags & IEEE80211_CHAN_NO_IR; 3134 ch->passive = passive; 3135 3136 /* the firmware is ignoring the "radar" flag of the 3137 * channel and is scanning actively using Probe Requests 3138 * on "Radar detection"/DFS channels which are not 3139 * marked as "available" 3140 */ 3141 ch->passive |= ch->chan_radar; 3142 3143 ch->freq = channel->center_freq; 3144 ch->band_center_freq1 = channel->center_freq; 3145 ch->min_power = 0; 3146 ch->max_power = channel->max_power * 2; 3147 ch->max_reg_power = channel->max_reg_power * 2; 3148 ch->max_antenna_gain = channel->max_antenna_gain * 2; 3149 ch->reg_class_id = 0; /* FIXME */ 3150 3151 /* FIXME: why use only legacy modes, why not any 3152 * HT/VHT modes? Would that even make any 3153 * difference? 3154 */ 3155 if (channel->band == NL80211_BAND_2GHZ) 3156 ch->mode = MODE_11G; 3157 else 3158 ch->mode = MODE_11A; 3159 3160 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) 3161 continue; 3162 3163 ath10k_dbg(ar, ATH10K_DBG_WMI, 3164 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", 3165 ch - arg.channels, arg.n_channels, 3166 ch->freq, ch->max_power, ch->max_reg_power, 3167 ch->max_antenna_gain, ch->mode); 3168 3169 ch++; 3170 } 3171 } 3172 3173 ret = ath10k_wmi_scan_chan_list(ar, &arg); 3174 kfree(arg.channels); 3175 3176 return ret; 3177 } 3178 3179 static enum wmi_dfs_region 3180 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) 3181 { 3182 switch (dfs_region) { 3183 case NL80211_DFS_UNSET: 3184 return WMI_UNINIT_DFS_DOMAIN; 3185 case NL80211_DFS_FCC: 3186 return WMI_FCC_DFS_DOMAIN; 3187 case NL80211_DFS_ETSI: 3188 return WMI_ETSI_DFS_DOMAIN; 3189 case NL80211_DFS_JP: 3190 return WMI_MKK4_DFS_DOMAIN; 3191 } 3192 return WMI_UNINIT_DFS_DOMAIN; 3193 } 3194 3195 static void ath10k_regd_update(struct ath10k *ar) 3196 { 3197 struct reg_dmn_pair_mapping *regpair; 3198 int ret; 3199 enum wmi_dfs_region wmi_dfs_reg; 3200 enum nl80211_dfs_regions nl_dfs_reg; 3201 3202 lockdep_assert_held(&ar->conf_mutex); 3203 3204 ret = ath10k_update_channel_list(ar); 3205 if (ret) 3206 ath10k_warn(ar, "failed to update channel list: %d\n", ret); 3207 3208 regpair = ar->ath_common.regulatory.regpair; 3209 3210 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3211 nl_dfs_reg = ar->dfs_detector->region; 3212 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); 3213 } else { 3214 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; 3215 } 3216 3217 /* Target allows setting up per-band regdomain but ath_common provides 3218 * a combined one only 3219 */ 3220 ret = ath10k_wmi_pdev_set_regdomain(ar, 3221 regpair->reg_domain, 3222 regpair->reg_domain, /* 2ghz */ 3223 regpair->reg_domain, /* 5ghz */ 3224 regpair->reg_2ghz_ctl, 3225 regpair->reg_5ghz_ctl, 3226 wmi_dfs_reg); 3227 if (ret) 3228 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); 3229 } 3230 3231 static void ath10k_mac_update_channel_list(struct ath10k *ar, 3232 struct ieee80211_supported_band *band) 3233 { 3234 int i; 3235 3236 if (ar->low_5ghz_chan && ar->high_5ghz_chan) { 3237 for (i = 0; i < band->n_channels; i++) { 3238 if (band->channels[i].center_freq < ar->low_5ghz_chan || 3239 band->channels[i].center_freq > ar->high_5ghz_chan) 3240 band->channels[i].flags |= 3241 IEEE80211_CHAN_DISABLED; 
3242 } 3243 } 3244 } 3245 3246 static void ath10k_reg_notifier(struct wiphy *wiphy, 3247 struct regulatory_request *request) 3248 { 3249 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 3250 struct ath10k *ar = hw->priv; 3251 bool result; 3252 3253 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); 3254 3255 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3256 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", 3257 request->dfs_region); 3258 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 3259 request->dfs_region); 3260 if (!result) 3261 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", 3262 request->dfs_region); 3263 } 3264 3265 mutex_lock(&ar->conf_mutex); 3266 if (ar->state == ATH10K_STATE_ON) 3267 ath10k_regd_update(ar); 3268 mutex_unlock(&ar->conf_mutex); 3269 3270 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 3271 ath10k_mac_update_channel_list(ar, 3272 ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); 3273 } 3274 3275 static void ath10k_stop_radar_confirmation(struct ath10k *ar) 3276 { 3277 spin_lock_bh(&ar->data_lock); 3278 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED; 3279 spin_unlock_bh(&ar->data_lock); 3280 3281 cancel_work_sync(&ar->radar_confirmation_work); 3282 } 3283 3284 /***************/ 3285 /* TX handlers */ 3286 /***************/ 3287 3288 enum ath10k_mac_tx_path { 3289 ATH10K_MAC_TX_HTT, 3290 ATH10K_MAC_TX_HTT_MGMT, 3291 ATH10K_MAC_TX_WMI_MGMT, 3292 ATH10K_MAC_TX_UNKNOWN, 3293 }; 3294 3295 void ath10k_mac_tx_lock(struct ath10k *ar, int reason) 3296 { 3297 lockdep_assert_held(&ar->htt.tx_lock); 3298 3299 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3300 ar->tx_paused |= BIT(reason); 3301 ieee80211_stop_queues(ar->hw); 3302 } 3303 3304 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, 3305 struct ieee80211_vif *vif) 3306 { 3307 struct ath10k *ar = data; 3308 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3309 3310 if (arvif->tx_paused) 3311 return; 3312 3313 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3314 } 3315 3316 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) 3317 { 3318 lockdep_assert_held(&ar->htt.tx_lock); 3319 3320 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3321 ar->tx_paused &= ~BIT(reason); 3322 3323 if (ar->tx_paused) 3324 return; 3325 3326 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3327 IEEE80211_IFACE_ITER_RESUME_ALL, 3328 ath10k_mac_tx_unlock_iter, 3329 ar); 3330 3331 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue); 3332 } 3333 3334 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason) 3335 { 3336 struct ath10k *ar = arvif->ar; 3337 3338 lockdep_assert_held(&ar->htt.tx_lock); 3339 3340 WARN_ON(reason >= BITS_PER_LONG); 3341 arvif->tx_paused |= BIT(reason); 3342 ieee80211_stop_queue(ar->hw, arvif->vdev_id); 3343 } 3344 3345 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason) 3346 { 3347 struct ath10k *ar = arvif->ar; 3348 3349 lockdep_assert_held(&ar->htt.tx_lock); 3350 3351 WARN_ON(reason >= BITS_PER_LONG); 3352 arvif->tx_paused &= ~BIT(reason); 3353 3354 if (ar->tx_paused) 3355 return; 3356 3357 if (arvif->tx_paused) 3358 return; 3359 3360 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3361 } 3362 3363 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, 3364 enum wmi_tlv_tx_pause_id pause_id, 3365 enum wmi_tlv_tx_pause_action action) 3366 { 3367 struct ath10k *ar = arvif->ar; 3368 3369 lockdep_assert_held(&ar->htt.tx_lock); 3370 3371 switch (action) { 3372 case 
WMI_TLV_TX_PAUSE_ACTION_STOP: 3373 ath10k_mac_vif_tx_lock(arvif, pause_id); 3374 break; 3375 case WMI_TLV_TX_PAUSE_ACTION_WAKE: 3376 ath10k_mac_vif_tx_unlock(arvif, pause_id); 3377 break; 3378 default: 3379 ath10k_dbg(ar, ATH10K_DBG_BOOT, 3380 "received unknown tx pause action %d on vdev %i, ignoring\n", 3381 action, arvif->vdev_id); 3382 break; 3383 } 3384 } 3385 3386 struct ath10k_mac_tx_pause { 3387 u32 vdev_id; 3388 enum wmi_tlv_tx_pause_id pause_id; 3389 enum wmi_tlv_tx_pause_action action; 3390 }; 3391 3392 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, 3393 struct ieee80211_vif *vif) 3394 { 3395 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3396 struct ath10k_mac_tx_pause *arg = data; 3397 3398 if (arvif->vdev_id != arg->vdev_id) 3399 return; 3400 3401 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action); 3402 } 3403 3404 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, 3405 enum wmi_tlv_tx_pause_id pause_id, 3406 enum wmi_tlv_tx_pause_action action) 3407 { 3408 struct ath10k_mac_tx_pause arg = { 3409 .vdev_id = vdev_id, 3410 .pause_id = pause_id, 3411 .action = action, 3412 }; 3413 3414 spin_lock_bh(&ar->htt.tx_lock); 3415 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3416 IEEE80211_IFACE_ITER_RESUME_ALL, 3417 ath10k_mac_handle_tx_pause_iter, 3418 &arg); 3419 spin_unlock_bh(&ar->htt.tx_lock); 3420 } 3421 3422 static enum ath10k_hw_txrx_mode 3423 ath10k_mac_tx_h_get_txmode(struct ath10k *ar, 3424 struct ieee80211_vif *vif, 3425 struct ieee80211_sta *sta, 3426 struct sk_buff *skb) 3427 { 3428 const struct ieee80211_hdr *hdr = (void *)skb->data; 3429 const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); 3430 __le16 fc = hdr->frame_control; 3431 3432 if (!vif || vif->type == NL80211_IFTYPE_MONITOR) 3433 return ATH10K_HW_TXRX_RAW; 3434 3435 if (ieee80211_is_mgmt(fc)) 3436 return ATH10K_HW_TXRX_MGMT; 3437 3438 /* Workaround: 3439 * 3440 * NullFunc frames are mostly used to ping if a client or AP are still 3441 * reachable and responsive. This implies tx status reports must be 3442 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can 3443 * come to a conclusion that the other end disappeared and tear down 3444 * BSS connection or it can never disconnect from BSS/client (which is 3445 * the case). 3446 * 3447 * Firmware with HTT older than 3.0 delivers incorrect tx status for 3448 * NullFunc frames to driver. However there's a HTT Mgmt Tx command 3449 * which seems to deliver correct tx reports for NullFunc frames. The 3450 * downside of using it is it ignores client powersave state so it can 3451 * end up disconnecting sleeping clients in AP mode. It should fix STA 3452 * mode though because AP don't sleep. 3453 */ 3454 if (ar->htt.target_version_major < 3 && 3455 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && 3456 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3457 ar->running_fw->fw_file.fw_features)) 3458 return ATH10K_HW_TXRX_MGMT; 3459 3460 /* Workaround: 3461 * 3462 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for 3463 * NativeWifi txmode - it selects AP key instead of peer key. It seems 3464 * to work with Ethernet txmode so use it. 3465 * 3466 * FIXME: Check if raw mode works with TDLS. 
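 *
 * Summary of the selection in this function: no vif / monitor -> RAW,
 * management frames -> MGMT, (QoS)NullFunc on pre-3.0 HTT firmware
 * without WMI mgmt tx -> MGMT, TDLS data -> ETHERNET, raw mode flag or
 * per-skb raw tx -> RAW, everything else -> NATIVE_WIFI.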
3467 */ 3468 if (ieee80211_is_data_present(fc) && sta && sta->tdls) 3469 return ATH10K_HW_TXRX_ETHERNET; 3470 3471 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) || 3472 skb_cb->flags & ATH10K_SKB_F_RAW_TX) 3473 return ATH10K_HW_TXRX_RAW; 3474 3475 return ATH10K_HW_TXRX_NATIVE_WIFI; 3476 } 3477 3478 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, 3479 struct sk_buff *skb) 3480 { 3481 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3482 const struct ieee80211_hdr *hdr = (void *)skb->data; 3483 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | 3484 IEEE80211_TX_CTL_INJECTED; 3485 3486 if (!ieee80211_has_protected(hdr->frame_control)) 3487 return false; 3488 3489 if ((info->flags & mask) == mask) 3490 return false; 3491 3492 if (vif) 3493 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; 3494 3495 return true; 3496 } 3497 3498 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS 3499 * Control in the header. 3500 */ 3501 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) 3502 { 3503 struct ieee80211_hdr *hdr = (void *)skb->data; 3504 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3505 u8 *qos_ctl; 3506 3507 if (!ieee80211_is_data_qos(hdr->frame_control)) 3508 return; 3509 3510 qos_ctl = ieee80211_get_qos_ctl(hdr); 3511 memmove(skb->data + IEEE80211_QOS_CTL_LEN, 3512 skb->data, (void *)qos_ctl - (void *)skb->data); 3513 skb_pull(skb, IEEE80211_QOS_CTL_LEN); 3514 3515 /* Some firmware revisions don't handle sending QoS NullFunc well. 3516 * These frames are mainly used for CQM purposes so it doesn't really 3517 * matter whether QoS NullFunc or NullFunc are sent. 3518 */ 3519 hdr = (void *)skb->data; 3520 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) 3521 cb->flags &= ~ATH10K_SKB_F_QOS; 3522 3523 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 3524 } 3525 3526 static void ath10k_tx_h_8023(struct sk_buff *skb) 3527 { 3528 struct ieee80211_hdr *hdr; 3529 struct rfc1042_hdr *rfc1042; 3530 struct ethhdr *eth; 3531 size_t hdrlen; 3532 u8 da[ETH_ALEN]; 3533 u8 sa[ETH_ALEN]; 3534 __be16 type; 3535 3536 hdr = (void *)skb->data; 3537 hdrlen = ieee80211_hdrlen(hdr->frame_control); 3538 rfc1042 = (void *)skb->data + hdrlen; 3539 3540 ether_addr_copy(da, ieee80211_get_DA(hdr)); 3541 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 3542 type = rfc1042->snap_type; 3543 3544 skb_pull(skb, hdrlen + sizeof(*rfc1042)); 3545 skb_push(skb, sizeof(*eth)); 3546 3547 eth = (void *)skb->data; 3548 ether_addr_copy(eth->h_dest, da); 3549 ether_addr_copy(eth->h_source, sa); 3550 eth->h_proto = type; 3551 } 3552 3553 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, 3554 struct ieee80211_vif *vif, 3555 struct sk_buff *skb) 3556 { 3557 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3558 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3559 3560 /* This is case only for P2P_GO */ 3561 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 3562 return; 3563 3564 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { 3565 spin_lock_bh(&ar->data_lock); 3566 if (arvif->u.ap.noa_data) 3567 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, 3568 GFP_ATOMIC)) 3569 skb_put_data(skb, arvif->u.ap.noa_data, 3570 arvif->u.ap.noa_len); 3571 spin_unlock_bh(&ar->data_lock); 3572 } 3573 } 3574 3575 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, 3576 struct ieee80211_vif *vif, 3577 struct ieee80211_txq *txq, 3578 struct sk_buff *skb, u16 airtime) 3579 { 3580 struct ieee80211_hdr *hdr = (void *)skb->data; 
3581 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3582 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3583 bool is_data = ieee80211_is_data(hdr->frame_control) || 3584 ieee80211_is_data_qos(hdr->frame_control); 3585 3586 cb->flags = 0; 3587 if (!ath10k_tx_h_use_hwcrypto(vif, skb)) 3588 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3589 3590 if (ieee80211_is_mgmt(hdr->frame_control)) 3591 cb->flags |= ATH10K_SKB_F_MGMT; 3592 3593 if (ieee80211_is_data_qos(hdr->frame_control)) 3594 cb->flags |= ATH10K_SKB_F_QOS; 3595 3596 /* Data frames encrypted in software will be posted to firmware 3597 * with tx encap mode set to RAW. Ex: Multicast traffic generated 3598 * for a specific VLAN group will always be encrypted in software. 3599 */ 3600 if (is_data && ieee80211_has_protected(hdr->frame_control) && 3601 !info->control.hw_key) { 3602 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3603 cb->flags |= ATH10K_SKB_F_RAW_TX; 3604 } 3605 3606 cb->vif = vif; 3607 cb->txq = txq; 3608 cb->airtime_est = airtime; 3609 } 3610 3611 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) 3612 { 3613 /* FIXME: Not really sure since when the behaviour changed. At some 3614 * point new firmware stopped requiring creation of peer entries for 3615 * offchannel tx (and actually creating them causes issues with wmi-htc 3616 * tx credit replenishment and reliability). Assuming it's at least 3.4 3617 * because that's when the `freq` was introduced to TX_FRM HTT command. 3618 */ 3619 return (ar->htt.target_version_major >= 3 && 3620 ar->htt.target_version_minor >= 4 && 3621 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); 3622 } 3623 3624 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) 3625 { 3626 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 3627 int ret = 0; 3628 3629 spin_lock_bh(&ar->data_lock); 3630 3631 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { 3632 ath10k_warn(ar, "wmi mgmt tx queue is full\n"); 3633 ret = -ENOSPC; 3634 goto unlock; 3635 } 3636 3637 __skb_queue_tail(q, skb); 3638 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 3639 3640 unlock: 3641 spin_unlock_bh(&ar->data_lock); 3642 3643 return ret; 3644 } 3645 3646 static enum ath10k_mac_tx_path 3647 ath10k_mac_tx_h_get_txpath(struct ath10k *ar, 3648 struct sk_buff *skb, 3649 enum ath10k_hw_txrx_mode txmode) 3650 { 3651 switch (txmode) { 3652 case ATH10K_HW_TXRX_RAW: 3653 case ATH10K_HW_TXRX_NATIVE_WIFI: 3654 case ATH10K_HW_TXRX_ETHERNET: 3655 return ATH10K_MAC_TX_HTT; 3656 case ATH10K_HW_TXRX_MGMT: 3657 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3658 ar->running_fw->fw_file.fw_features) || 3659 test_bit(WMI_SERVICE_MGMT_TX_WMI, 3660 ar->wmi.svc_map)) 3661 return ATH10K_MAC_TX_WMI_MGMT; 3662 else if (ar->htt.target_version_major >= 3) 3663 return ATH10K_MAC_TX_HTT; 3664 else 3665 return ATH10K_MAC_TX_HTT_MGMT; 3666 } 3667 3668 return ATH10K_MAC_TX_UNKNOWN; 3669 } 3670 3671 static int ath10k_mac_tx_submit(struct ath10k *ar, 3672 enum ath10k_hw_txrx_mode txmode, 3673 enum ath10k_mac_tx_path txpath, 3674 struct sk_buff *skb) 3675 { 3676 struct ath10k_htt *htt = &ar->htt; 3677 int ret = -EINVAL; 3678 3679 switch (txpath) { 3680 case ATH10K_MAC_TX_HTT: 3681 ret = ath10k_htt_tx(htt, txmode, skb); 3682 break; 3683 case ATH10K_MAC_TX_HTT_MGMT: 3684 ret = ath10k_htt_mgmt_tx(htt, skb); 3685 break; 3686 case ATH10K_MAC_TX_WMI_MGMT: 3687 ret = ath10k_mac_tx_wmi_mgmt(ar, skb); 3688 break; 3689 case ATH10K_MAC_TX_UNKNOWN: 3690 WARN_ON_ONCE(1); 3691 ret = -EINVAL; 3692 break; 3693 } 3694 3695 if (ret) { 
3696 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", 3697 ret); 3698 ieee80211_free_txskb(ar->hw, skb); 3699 } 3700 3701 return ret; 3702 } 3703 3704 /* This function consumes the sk_buff regardless of return value as far as 3705 * caller is concerned so no freeing is necessary afterwards. 3706 */ 3707 static int ath10k_mac_tx(struct ath10k *ar, 3708 struct ieee80211_vif *vif, 3709 enum ath10k_hw_txrx_mode txmode, 3710 enum ath10k_mac_tx_path txpath, 3711 struct sk_buff *skb) 3712 { 3713 struct ieee80211_hw *hw = ar->hw; 3714 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3715 const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); 3716 int ret; 3717 3718 /* We should disable CCK RATE due to P2P */ 3719 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 3720 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); 3721 3722 switch (txmode) { 3723 case ATH10K_HW_TXRX_MGMT: 3724 case ATH10K_HW_TXRX_NATIVE_WIFI: 3725 ath10k_tx_h_nwifi(hw, skb); 3726 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); 3727 ath10k_tx_h_seq_no(vif, skb); 3728 break; 3729 case ATH10K_HW_TXRX_ETHERNET: 3730 ath10k_tx_h_8023(skb); 3731 break; 3732 case ATH10K_HW_TXRX_RAW: 3733 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) && 3734 !(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) { 3735 WARN_ON_ONCE(1); 3736 ieee80211_free_txskb(hw, skb); 3737 return -ENOTSUPP; 3738 } 3739 } 3740 3741 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 3742 if (!ath10k_mac_tx_frm_has_freq(ar)) { 3743 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n", 3744 skb); 3745 3746 skb_queue_tail(&ar->offchan_tx_queue, skb); 3747 ieee80211_queue_work(hw, &ar->offchan_tx_work); 3748 return 0; 3749 } 3750 } 3751 3752 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb); 3753 if (ret) { 3754 ath10k_warn(ar, "failed to submit frame: %d\n", ret); 3755 return ret; 3756 } 3757 3758 return 0; 3759 } 3760 3761 void ath10k_offchan_tx_purge(struct ath10k *ar) 3762 { 3763 struct sk_buff *skb; 3764 3765 for (;;) { 3766 skb = skb_dequeue(&ar->offchan_tx_queue); 3767 if (!skb) 3768 break; 3769 3770 ieee80211_free_txskb(ar->hw, skb); 3771 } 3772 } 3773 3774 void ath10k_offchan_tx_work(struct work_struct *work) 3775 { 3776 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); 3777 struct ath10k_peer *peer; 3778 struct ath10k_vif *arvif; 3779 enum ath10k_hw_txrx_mode txmode; 3780 enum ath10k_mac_tx_path txpath; 3781 struct ieee80211_hdr *hdr; 3782 struct ieee80211_vif *vif; 3783 struct ieee80211_sta *sta; 3784 struct sk_buff *skb; 3785 const u8 *peer_addr; 3786 int vdev_id; 3787 int ret; 3788 unsigned long time_left; 3789 bool tmp_peer_created = false; 3790 3791 /* FW requirement: We must create a peer before FW will send out 3792 * an offchannel frame. Otherwise the frame will be stuck and 3793 * never transmitted. We delete the peer upon tx completion. 3794 * It is unlikely that a peer for offchannel tx will already be 3795 * present. However it may be in some rare cases so account for that. 3796 * Otherwise we might remove a legitimate peer and break stuff. 
3797 */ 3798 3799 for (;;) { 3800 skb = skb_dequeue(&ar->offchan_tx_queue); 3801 if (!skb) 3802 break; 3803 3804 mutex_lock(&ar->conf_mutex); 3805 3806 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n", 3807 skb); 3808 3809 hdr = (struct ieee80211_hdr *)skb->data; 3810 peer_addr = ieee80211_get_DA(hdr); 3811 3812 spin_lock_bh(&ar->data_lock); 3813 vdev_id = ar->scan.vdev_id; 3814 peer = ath10k_peer_find(ar, vdev_id, peer_addr); 3815 spin_unlock_bh(&ar->data_lock); 3816 3817 if (peer) 3818 /* FIXME: should this use ath10k_warn()? */ 3819 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", 3820 peer_addr, vdev_id); 3821 3822 if (!peer) { 3823 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, 3824 peer_addr, 3825 WMI_PEER_TYPE_DEFAULT); 3826 if (ret) 3827 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", 3828 peer_addr, vdev_id, ret); 3829 tmp_peer_created = (ret == 0); 3830 } 3831 3832 spin_lock_bh(&ar->data_lock); 3833 reinit_completion(&ar->offchan_tx_completed); 3834 ar->offchan_tx_skb = skb; 3835 spin_unlock_bh(&ar->data_lock); 3836 3837 /* It's safe to access vif and sta - conf_mutex guarantees that 3838 * sta_state() and remove_interface() are locked exclusively 3839 * out wrt to this offchannel worker. 3840 */ 3841 arvif = ath10k_get_arvif(ar, vdev_id); 3842 if (arvif) { 3843 vif = arvif->vif; 3844 sta = ieee80211_find_sta(vif, peer_addr); 3845 } else { 3846 vif = NULL; 3847 sta = NULL; 3848 } 3849 3850 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3851 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3852 3853 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3854 if (ret) { 3855 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", 3856 ret); 3857 /* not serious */ 3858 } 3859 3860 time_left = 3861 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); 3862 if (time_left == 0) 3863 ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n", 3864 skb); 3865 3866 if (!peer && tmp_peer_created) { 3867 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); 3868 if (ret) 3869 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", 3870 peer_addr, vdev_id, ret); 3871 } 3872 3873 mutex_unlock(&ar->conf_mutex); 3874 } 3875 } 3876 3877 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) 3878 { 3879 struct sk_buff *skb; 3880 3881 for (;;) { 3882 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3883 if (!skb) 3884 break; 3885 3886 ieee80211_free_txskb(ar->hw, skb); 3887 } 3888 } 3889 3890 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) 3891 { 3892 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); 3893 struct sk_buff *skb; 3894 dma_addr_t paddr; 3895 int ret; 3896 3897 for (;;) { 3898 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3899 if (!skb) 3900 break; 3901 3902 if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, 3903 ar->running_fw->fw_file.fw_features)) { 3904 paddr = dma_map_single(ar->dev, skb->data, 3905 skb->len, DMA_TO_DEVICE); 3906 if (!paddr) 3907 continue; 3908 ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr); 3909 if (ret) { 3910 ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", 3911 ret); 3912 dma_unmap_single(ar->dev, paddr, skb->len, 3913 DMA_TO_DEVICE); 3914 ieee80211_free_txskb(ar->hw, skb); 3915 } 3916 } else { 3917 ret = ath10k_wmi_mgmt_tx(ar, skb); 3918 if (ret) { 3919 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", 3920 ret); 3921 ieee80211_free_txskb(ar->hw, skb); 3922 } 3923 } 3924 } 3925 } 3926 3927 static void 
ath10k_mac_txq_init(struct ieee80211_txq *txq) 3928 { 3929 struct ath10k_txq *artxq; 3930 3931 if (!txq) 3932 return; 3933 3934 artxq = (void *)txq->drv_priv; 3935 INIT_LIST_HEAD(&artxq->list); 3936 } 3937 3938 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) 3939 { 3940 struct ath10k_skb_cb *cb; 3941 struct sk_buff *msdu; 3942 int msdu_id; 3943 3944 if (!txq) 3945 return; 3946 3947 spin_lock_bh(&ar->htt.tx_lock); 3948 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { 3949 cb = ATH10K_SKB_CB(msdu); 3950 if (cb->txq == txq) 3951 cb->txq = NULL; 3952 } 3953 spin_unlock_bh(&ar->htt.tx_lock); 3954 } 3955 3956 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, 3957 u16 peer_id, 3958 u8 tid) 3959 { 3960 struct ath10k_peer *peer; 3961 3962 lockdep_assert_held(&ar->data_lock); 3963 3964 peer = ar->peer_map[peer_id]; 3965 if (!peer) 3966 return NULL; 3967 3968 if (peer->removed) 3969 return NULL; 3970 3971 if (peer->sta) 3972 return peer->sta->txq[tid]; 3973 else if (peer->vif) 3974 return peer->vif->txq; 3975 else 3976 return NULL; 3977 } 3978 3979 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, 3980 struct ieee80211_txq *txq) 3981 { 3982 struct ath10k *ar = hw->priv; 3983 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3984 3985 /* No need to get locks */ 3986 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) 3987 return true; 3988 3989 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) 3990 return true; 3991 3992 if (artxq->num_fw_queued < artxq->num_push_allowed) 3993 return true; 3994 3995 return false; 3996 } 3997 3998 /* Return estimated airtime in microsecond, which is calculated using last 3999 * reported TX rate. This is just a rough estimation because host driver has no 4000 * knowledge of the actual transmit rate, retries or aggregation. If actual 4001 * airtime can be reported by firmware, then delta between estimated and actual 4002 * airtime can be adjusted from deficit. 4003 */ 4004 #define IEEE80211_ATF_OVERHEAD 100 /* IFS + some slot time */ 4005 #define IEEE80211_ATF_OVERHEAD_IFS 16 /* IFS only */ 4006 static u16 ath10k_mac_update_airtime(struct ath10k *ar, 4007 struct ieee80211_txq *txq, 4008 struct sk_buff *skb) 4009 { 4010 struct ath10k_sta *arsta; 4011 u32 pktlen; 4012 u16 airtime = 0; 4013 4014 if (!txq || !txq->sta) 4015 return airtime; 4016 4017 if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map)) 4018 return airtime; 4019 4020 spin_lock_bh(&ar->data_lock); 4021 arsta = (struct ath10k_sta *)txq->sta->drv_priv; 4022 4023 pktlen = skb->len + 38; /* Assume MAC header 30, SNAP 8 for most case */ 4024 if (arsta->last_tx_bitrate) { 4025 /* airtime in us, last_tx_bitrate in 100kbps */ 4026 airtime = (pktlen * 8 * (1000 / 100)) 4027 / arsta->last_tx_bitrate; 4028 /* overhead for media access time and IFS */ 4029 airtime += IEEE80211_ATF_OVERHEAD_IFS; 4030 } else { 4031 /* This is mostly for throttle excessive BC/MC frames, and the 4032 * airtime/rate doesn't need be exact. Airtime of BC/MC frames 4033 * in 2G get some discount, which helps prevent very low rate 4034 * frames from being blocked for too long. 
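 *
 * Rough example with made-up numbers: for a 1500 byte MSDU
 * (pktlen = 1538 with the assumed header overhead) and
 * last_tx_bitrate = 1300 (130 Mbps in 100 kbps units) the branch above
 * yields 1538 * 8 * 10 / 1300 ~= 94 us plus 16 us of IFS overhead; the
 * 6 Mbps fallback below gives 1538 * 8 * 10 / 60 ~= 2050 us plus 100 us
 * of overhead.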
4035 */ 4036 airtime = (pktlen * 8 * (1000 / 100)) / 60; /* 6M */ 4037 airtime += IEEE80211_ATF_OVERHEAD; 4038 } 4039 spin_unlock_bh(&ar->data_lock); 4040 4041 return airtime; 4042 } 4043 4044 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, 4045 struct ieee80211_txq *txq) 4046 { 4047 struct ath10k *ar = hw->priv; 4048 struct ath10k_htt *htt = &ar->htt; 4049 struct ath10k_txq *artxq = (void *)txq->drv_priv; 4050 struct ieee80211_vif *vif = txq->vif; 4051 struct ieee80211_sta *sta = txq->sta; 4052 enum ath10k_hw_txrx_mode txmode; 4053 enum ath10k_mac_tx_path txpath; 4054 struct sk_buff *skb; 4055 struct ieee80211_hdr *hdr; 4056 size_t skb_len; 4057 bool is_mgmt, is_presp; 4058 int ret; 4059 u16 airtime; 4060 4061 spin_lock_bh(&ar->htt.tx_lock); 4062 ret = ath10k_htt_tx_inc_pending(htt); 4063 spin_unlock_bh(&ar->htt.tx_lock); 4064 4065 if (ret) 4066 return ret; 4067 4068 skb = ieee80211_tx_dequeue(hw, txq); 4069 if (!skb) { 4070 spin_lock_bh(&ar->htt.tx_lock); 4071 ath10k_htt_tx_dec_pending(htt); 4072 spin_unlock_bh(&ar->htt.tx_lock); 4073 4074 return -ENOENT; 4075 } 4076 4077 airtime = ath10k_mac_update_airtime(ar, txq, skb); 4078 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime); 4079 4080 skb_len = skb->len; 4081 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4082 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4083 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4084 4085 if (is_mgmt) { 4086 hdr = (struct ieee80211_hdr *)skb->data; 4087 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4088 4089 spin_lock_bh(&ar->htt.tx_lock); 4090 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4091 4092 if (ret) { 4093 ath10k_htt_tx_dec_pending(htt); 4094 spin_unlock_bh(&ar->htt.tx_lock); 4095 return ret; 4096 } 4097 spin_unlock_bh(&ar->htt.tx_lock); 4098 } 4099 4100 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 4101 if (unlikely(ret)) { 4102 ath10k_warn(ar, "failed to push frame: %d\n", ret); 4103 4104 spin_lock_bh(&ar->htt.tx_lock); 4105 ath10k_htt_tx_dec_pending(htt); 4106 if (is_mgmt) 4107 ath10k_htt_tx_mgmt_dec_pending(htt); 4108 spin_unlock_bh(&ar->htt.tx_lock); 4109 4110 return ret; 4111 } 4112 4113 spin_lock_bh(&ar->htt.tx_lock); 4114 artxq->num_fw_queued++; 4115 spin_unlock_bh(&ar->htt.tx_lock); 4116 4117 return skb_len; 4118 } 4119 4120 static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac) 4121 { 4122 struct ieee80211_txq *txq; 4123 int ret = 0; 4124 4125 ieee80211_txq_schedule_start(hw, ac); 4126 while ((txq = ieee80211_next_txq(hw, ac))) { 4127 while (ath10k_mac_tx_can_push(hw, txq)) { 4128 ret = ath10k_mac_tx_push_txq(hw, txq); 4129 if (ret < 0) 4130 break; 4131 } 4132 ieee80211_return_txq(hw, txq, false); 4133 ath10k_htt_tx_txq_update(hw, txq); 4134 if (ret == -EBUSY) 4135 break; 4136 } 4137 ieee80211_txq_schedule_end(hw, ac); 4138 4139 return ret; 4140 } 4141 4142 void ath10k_mac_tx_push_pending(struct ath10k *ar) 4143 { 4144 struct ieee80211_hw *hw = ar->hw; 4145 u32 ac; 4146 4147 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH) 4148 return; 4149 4150 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) 4151 return; 4152 4153 rcu_read_lock(); 4154 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 4155 if (ath10k_mac_schedule_txq(hw, ac) == -EBUSY) 4156 break; 4157 } 4158 rcu_read_unlock(); 4159 } 4160 EXPORT_SYMBOL(ath10k_mac_tx_push_pending); 4161 4162 /************/ 4163 /* Scanning */ 4164 /************/ 4165 4166 void __ath10k_scan_finish(struct ath10k *ar) 4167 { 4168 lockdep_assert_held(&ar->data_lock); 4169 
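	/* Scan state normally moves STARTING -> RUNNING -> (optionally
	 * ABORTING) -> IDLE. For a completed or aborted host scan mac80211
	 * is notified via ieee80211_scan_completed(); for remain-on-channel
	 * the expiry is reported instead (if roc_notify is set). Both paths
	 * then fall through to the common cleanup shared with the STARTING
	 * case.
	 */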
4170 switch (ar->scan.state) { 4171 case ATH10K_SCAN_IDLE: 4172 break; 4173 case ATH10K_SCAN_RUNNING: 4174 case ATH10K_SCAN_ABORTING: 4175 if (!ar->scan.is_roc) { 4176 struct cfg80211_scan_info info = { 4177 .aborted = (ar->scan.state == 4178 ATH10K_SCAN_ABORTING), 4179 }; 4180 4181 ieee80211_scan_completed(ar->hw, &info); 4182 } else if (ar->scan.roc_notify) { 4183 ieee80211_remain_on_channel_expired(ar->hw); 4184 } 4185 /* fall through */ 4186 case ATH10K_SCAN_STARTING: 4187 ar->scan.state = ATH10K_SCAN_IDLE; 4188 ar->scan_channel = NULL; 4189 ar->scan.roc_freq = 0; 4190 ath10k_offchan_tx_purge(ar); 4191 cancel_delayed_work(&ar->scan.timeout); 4192 complete(&ar->scan.completed); 4193 break; 4194 } 4195 } 4196 4197 void ath10k_scan_finish(struct ath10k *ar) 4198 { 4199 spin_lock_bh(&ar->data_lock); 4200 __ath10k_scan_finish(ar); 4201 spin_unlock_bh(&ar->data_lock); 4202 } 4203 4204 static int ath10k_scan_stop(struct ath10k *ar) 4205 { 4206 struct wmi_stop_scan_arg arg = { 4207 .req_id = 1, /* FIXME */ 4208 .req_type = WMI_SCAN_STOP_ONE, 4209 .u.scan_id = ATH10K_SCAN_ID, 4210 }; 4211 int ret; 4212 4213 lockdep_assert_held(&ar->conf_mutex); 4214 4215 ret = ath10k_wmi_stop_scan(ar, &arg); 4216 if (ret) { 4217 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); 4218 goto out; 4219 } 4220 4221 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); 4222 if (ret == 0) { 4223 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); 4224 ret = -ETIMEDOUT; 4225 } else if (ret > 0) { 4226 ret = 0; 4227 } 4228 4229 out: 4230 /* Scan state should be updated upon scan completion but in case 4231 * firmware fails to deliver the event (for whatever reason) it is 4232 * desired to clean up scan state anyway. Firmware may have just 4233 * dropped the scan completion event delivery due to transport pipe 4234 * being overflown with data and/or it can recover on its own before 4235 * next scan request is submitted. 4236 */ 4237 spin_lock_bh(&ar->data_lock); 4238 if (ar->scan.state != ATH10K_SCAN_IDLE) 4239 __ath10k_scan_finish(ar); 4240 spin_unlock_bh(&ar->data_lock); 4241 4242 return ret; 4243 } 4244 4245 static void ath10k_scan_abort(struct ath10k *ar) 4246 { 4247 int ret; 4248 4249 lockdep_assert_held(&ar->conf_mutex); 4250 4251 spin_lock_bh(&ar->data_lock); 4252 4253 switch (ar->scan.state) { 4254 case ATH10K_SCAN_IDLE: 4255 /* This can happen if timeout worker kicked in and called 4256 * abortion while scan completion was being processed. 
4257 */ 4258 break; 4259 case ATH10K_SCAN_STARTING: 4260 case ATH10K_SCAN_ABORTING: 4261 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", 4262 ath10k_scan_state_str(ar->scan.state), 4263 ar->scan.state); 4264 break; 4265 case ATH10K_SCAN_RUNNING: 4266 ar->scan.state = ATH10K_SCAN_ABORTING; 4267 spin_unlock_bh(&ar->data_lock); 4268 4269 ret = ath10k_scan_stop(ar); 4270 if (ret) 4271 ath10k_warn(ar, "failed to abort scan: %d\n", ret); 4272 4273 spin_lock_bh(&ar->data_lock); 4274 break; 4275 } 4276 4277 spin_unlock_bh(&ar->data_lock); 4278 } 4279 4280 void ath10k_scan_timeout_work(struct work_struct *work) 4281 { 4282 struct ath10k *ar = container_of(work, struct ath10k, 4283 scan.timeout.work); 4284 4285 mutex_lock(&ar->conf_mutex); 4286 ath10k_scan_abort(ar); 4287 mutex_unlock(&ar->conf_mutex); 4288 } 4289 4290 static int ath10k_start_scan(struct ath10k *ar, 4291 const struct wmi_start_scan_arg *arg) 4292 { 4293 int ret; 4294 4295 lockdep_assert_held(&ar->conf_mutex); 4296 4297 ret = ath10k_wmi_start_scan(ar, arg); 4298 if (ret) 4299 return ret; 4300 4301 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); 4302 if (ret == 0) { 4303 ret = ath10k_scan_stop(ar); 4304 if (ret) 4305 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 4306 4307 return -ETIMEDOUT; 4308 } 4309 4310 /* If we failed to start the scan, return error code at 4311 * this point. This is probably due to some issue in the 4312 * firmware, but no need to wedge the driver due to that... 4313 */ 4314 spin_lock_bh(&ar->data_lock); 4315 if (ar->scan.state == ATH10K_SCAN_IDLE) { 4316 spin_unlock_bh(&ar->data_lock); 4317 return -EINVAL; 4318 } 4319 spin_unlock_bh(&ar->data_lock); 4320 4321 return 0; 4322 } 4323 4324 /**********************/ 4325 /* mac80211 callbacks */ 4326 /**********************/ 4327 4328 static void ath10k_mac_op_tx(struct ieee80211_hw *hw, 4329 struct ieee80211_tx_control *control, 4330 struct sk_buff *skb) 4331 { 4332 struct ath10k *ar = hw->priv; 4333 struct ath10k_htt *htt = &ar->htt; 4334 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 4335 struct ieee80211_vif *vif = info->control.vif; 4336 struct ieee80211_sta *sta = control->sta; 4337 struct ieee80211_txq *txq = NULL; 4338 struct ieee80211_hdr *hdr = (void *)skb->data; 4339 enum ath10k_hw_txrx_mode txmode; 4340 enum ath10k_mac_tx_path txpath; 4341 bool is_htt; 4342 bool is_mgmt; 4343 bool is_presp; 4344 int ret; 4345 u16 airtime; 4346 4347 airtime = ath10k_mac_update_airtime(ar, txq, skb); 4348 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime); 4349 4350 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4351 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4352 is_htt = (txpath == ATH10K_MAC_TX_HTT || 4353 txpath == ATH10K_MAC_TX_HTT_MGMT); 4354 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4355 4356 if (is_htt) { 4357 spin_lock_bh(&ar->htt.tx_lock); 4358 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4359 4360 ret = ath10k_htt_tx_inc_pending(htt); 4361 if (ret) { 4362 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", 4363 ret); 4364 spin_unlock_bh(&ar->htt.tx_lock); 4365 ieee80211_free_txskb(ar->hw, skb); 4366 return; 4367 } 4368 4369 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4370 if (ret) { 4371 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", 4372 ret); 4373 ath10k_htt_tx_dec_pending(htt); 4374 spin_unlock_bh(&ar->htt.tx_lock); 4375 ieee80211_free_txskb(ar->hw, skb); 4376 return; 4377 } 4378 
spin_unlock_bh(&ar->htt.tx_lock); 4379 } 4380 4381 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 4382 if (ret) { 4383 ath10k_warn(ar, "failed to transmit frame: %d\n", ret); 4384 if (is_htt) { 4385 spin_lock_bh(&ar->htt.tx_lock); 4386 ath10k_htt_tx_dec_pending(htt); 4387 if (is_mgmt) 4388 ath10k_htt_tx_mgmt_dec_pending(htt); 4389 spin_unlock_bh(&ar->htt.tx_lock); 4390 } 4391 return; 4392 } 4393 } 4394 4395 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, 4396 struct ieee80211_txq *txq) 4397 { 4398 struct ath10k *ar = hw->priv; 4399 int ret; 4400 u8 ac; 4401 4402 ath10k_htt_tx_txq_update(hw, txq); 4403 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH) 4404 return; 4405 4406 ac = txq->ac; 4407 ieee80211_txq_schedule_start(hw, ac); 4408 txq = ieee80211_next_txq(hw, ac); 4409 if (!txq) 4410 goto out; 4411 4412 while (ath10k_mac_tx_can_push(hw, txq)) { 4413 ret = ath10k_mac_tx_push_txq(hw, txq); 4414 if (ret < 0) 4415 break; 4416 } 4417 ieee80211_return_txq(hw, txq, false); 4418 ath10k_htt_tx_txq_update(hw, txq); 4419 out: 4420 ieee80211_txq_schedule_end(hw, ac); 4421 } 4422 4423 /* Must not be called with conf_mutex held as workers can use that also. */ 4424 void ath10k_drain_tx(struct ath10k *ar) 4425 { 4426 /* make sure rcu-protected mac80211 tx path itself is drained */ 4427 synchronize_net(); 4428 4429 ath10k_offchan_tx_purge(ar); 4430 ath10k_mgmt_over_wmi_tx_purge(ar); 4431 4432 cancel_work_sync(&ar->offchan_tx_work); 4433 cancel_work_sync(&ar->wmi_mgmt_tx_work); 4434 } 4435 4436 void ath10k_halt(struct ath10k *ar) 4437 { 4438 struct ath10k_vif *arvif; 4439 4440 lockdep_assert_held(&ar->conf_mutex); 4441 4442 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 4443 ar->filter_flags = 0; 4444 ar->monitor = false; 4445 ar->monitor_arvif = NULL; 4446 4447 if (ar->monitor_started) 4448 ath10k_monitor_stop(ar); 4449 4450 ar->monitor_started = false; 4451 ar->tx_paused = 0; 4452 4453 ath10k_scan_finish(ar); 4454 ath10k_peer_cleanup_all(ar); 4455 ath10k_stop_radar_confirmation(ar); 4456 ath10k_core_stop(ar); 4457 ath10k_hif_power_down(ar); 4458 4459 spin_lock_bh(&ar->data_lock); 4460 list_for_each_entry(arvif, &ar->arvifs, list) 4461 ath10k_mac_vif_beacon_cleanup(arvif); 4462 spin_unlock_bh(&ar->data_lock); 4463 } 4464 4465 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 4466 { 4467 struct ath10k *ar = hw->priv; 4468 4469 mutex_lock(&ar->conf_mutex); 4470 4471 *tx_ant = ar->cfg_tx_chainmask; 4472 *rx_ant = ar->cfg_rx_chainmask; 4473 4474 mutex_unlock(&ar->conf_mutex); 4475 4476 return 0; 4477 } 4478 4479 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) 4480 { 4481 /* It is not clear that allowing gaps in chainmask 4482 * is helpful. Probably it will not do what user 4483 * is hoping for, so warn in that case. 4484 */ 4485 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) 4486 return; 4487 4488 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n", 4489 dbg, cm); 4490 } 4491 4492 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar) 4493 { 4494 int nsts = ar->vht_cap_info; 4495 4496 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4497 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4498 4499 /* If firmware does not deliver to host number of space-time 4500 * streams supported, assume it support up to 4 BF STS and return 4501 * the value for VHT CAP: nsts-1) 4502 */ 4503 if (nsts == 0) 4504 return 3; 4505 4506 return nsts; 4507 } 4508 4509 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar) 4510 { 4511 int sound_dim = ar->vht_cap_info; 4512 4513 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4514 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4515 4516 /* If the sounding dimension is not advertised by the firmware, 4517 * let's use a default value of 1 4518 */ 4519 if (sound_dim == 0) 4520 return 1; 4521 4522 return sound_dim; 4523 } 4524 4525 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) 4526 { 4527 struct ieee80211_sta_vht_cap vht_cap = {0}; 4528 struct ath10k_hw_params *hw = &ar->hw_params; 4529 u16 mcs_map; 4530 u32 val; 4531 int i; 4532 4533 vht_cap.vht_supported = 1; 4534 vht_cap.cap = ar->vht_cap_info; 4535 4536 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4537 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 4538 val = ath10k_mac_get_vht_cap_bf_sts(ar); 4539 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4540 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4541 4542 vht_cap.cap |= val; 4543 } 4544 4545 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4546 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 4547 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4548 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4549 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4550 4551 vht_cap.cap |= val; 4552 } 4553 4554 /* Currently the firmware seems to be buggy, don't enable 80+80 4555 * mode until that's resolved. 4556 */ 4557 if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) && 4558 (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0) 4559 vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; 4560 4561 mcs_map = 0; 4562 for (i = 0; i < 8; i++) { 4563 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) 4564 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4565 else 4566 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4567 } 4568 4569 if (ar->cfg_tx_chainmask <= 1) 4570 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; 4571 4572 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); 4573 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); 4574 4575 /* If we are supporting 160Mhz or 80+80, then the NIC may be able to do 4576 * a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz. Give 4577 * user-space a clue if that is the case. 
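* (rx_highest/tx_highest are expressed in Mbps; a non-zero value here
* reflects the reduced 160/80+80 capability reported by hw_params.)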
4578 */ 4579 if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) && 4580 (hw->vht160_mcs_rx_highest != 0 || 4581 hw->vht160_mcs_tx_highest != 0)) { 4582 vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest); 4583 vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest); 4584 } 4585 4586 return vht_cap; 4587 } 4588 4589 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) 4590 { 4591 int i; 4592 struct ieee80211_sta_ht_cap ht_cap = {0}; 4593 4594 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) 4595 return ht_cap; 4596 4597 ht_cap.ht_supported = 1; 4598 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4599 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 4600 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 4601 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 4602 ht_cap.cap |= 4603 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; 4604 4605 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) 4606 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 4607 4608 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) 4609 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 4610 4611 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { 4612 u32 smps; 4613 4614 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 4615 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 4616 4617 ht_cap.cap |= smps; 4618 } 4619 4620 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) 4621 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 4622 4623 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { 4624 u32 stbc; 4625 4626 stbc = ar->ht_cap_info; 4627 stbc &= WMI_HT_CAP_RX_STBC; 4628 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 4629 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 4630 stbc &= IEEE80211_HT_CAP_RX_STBC; 4631 4632 ht_cap.cap |= stbc; 4633 } 4634 4635 if (ar->ht_cap_info & WMI_HT_CAP_LDPC || (ar->ht_cap_info & 4636 WMI_HT_CAP_RX_LDPC && (ar->ht_cap_info & WMI_HT_CAP_TX_LDPC))) 4637 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 4638 4639 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) 4640 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 4641 4642 /* max AMSDU is implicitly taken from vht_cap_info */ 4643 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 4644 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 4645 4646 for (i = 0; i < ar->num_rf_chains; i++) { 4647 if (ar->cfg_rx_chainmask & BIT(i)) 4648 ht_cap.mcs.rx_mask[i] = 0xFF; 4649 } 4650 4651 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 4652 4653 return ht_cap; 4654 } 4655 4656 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) 4657 { 4658 struct ieee80211_supported_band *band; 4659 struct ieee80211_sta_vht_cap vht_cap; 4660 struct ieee80211_sta_ht_cap ht_cap; 4661 4662 ht_cap = ath10k_get_ht_cap(ar); 4663 vht_cap = ath10k_create_vht_cap(ar); 4664 4665 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 4666 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 4667 band->ht_cap = ht_cap; 4668 } 4669 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 4670 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 4671 band->ht_cap = ht_cap; 4672 band->vht_cap = vht_cap; 4673 } 4674 } 4675 4676 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) 4677 { 4678 int ret; 4679 4680 lockdep_assert_held(&ar->conf_mutex); 4681 4682 ath10k_check_chain_mask(ar, tx_ant, "tx"); 4683 ath10k_check_chain_mask(ar, rx_ant, "rx"); 4684 4685 ar->cfg_tx_chainmask = tx_ant; 4686 ar->cfg_rx_chainmask = rx_ant; 4687 4688 if ((ar->state != ATH10K_STATE_ON) && 4689 (ar->state != ATH10K_STATE_RESTARTED)) 4690 return 0; 4691 4692 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, 4693 
tx_ant); 4694 if (ret) { 4695 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n", 4696 ret, tx_ant); 4697 return ret; 4698 } 4699 4700 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, 4701 rx_ant); 4702 if (ret) { 4703 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", 4704 ret, rx_ant); 4705 return ret; 4706 } 4707 4708 /* Reload HT/VHT capability */ 4709 ath10k_mac_setup_ht_vht_cap(ar); 4710 4711 return 0; 4712 } 4713 4714 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 4715 { 4716 struct ath10k *ar = hw->priv; 4717 int ret; 4718 4719 mutex_lock(&ar->conf_mutex); 4720 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); 4721 mutex_unlock(&ar->conf_mutex); 4722 return ret; 4723 } 4724 4725 static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar, 4726 struct wmi_bb_timing_cfg_arg *bb_timing) 4727 { 4728 struct device_node *node; 4729 const char *fem_name; 4730 int ret; 4731 4732 node = ar->dev->of_node; 4733 if (!node) 4734 return -ENOENT; 4735 4736 ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name); 4737 if (ret) 4738 return -ENOENT; 4739 4740 /* 4741 * If external Front End module used in hardware, then default base band timing 4742 * parameter cannot be used since they were fine tuned for reference hardware, 4743 * so choosing different value suitable for that external FEM. 4744 */ 4745 if (!strcmp("microsemi-lx5586", fem_name)) { 4746 bb_timing->bb_tx_timing = 0x00; 4747 bb_timing->bb_xpa_timing = 0x0101; 4748 } else { 4749 return -ENOENT; 4750 } 4751 4752 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot bb_tx_timing 0x%x bb_xpa_timing 0x%x\n", 4753 bb_timing->bb_tx_timing, bb_timing->bb_xpa_timing); 4754 return 0; 4755 } 4756 4757 static int ath10k_start(struct ieee80211_hw *hw) 4758 { 4759 struct ath10k *ar = hw->priv; 4760 u32 param; 4761 int ret = 0; 4762 struct wmi_bb_timing_cfg_arg bb_timing = {0}; 4763 4764 /* 4765 * This makes sense only when restarting hw. It is harmless to call 4766 * unconditionally. This is necessary to make sure no HTT/WMI tx 4767 * commands will be submitted while restarting. 
4768 */ 4769 ath10k_drain_tx(ar); 4770 4771 mutex_lock(&ar->conf_mutex); 4772 4773 switch (ar->state) { 4774 case ATH10K_STATE_OFF: 4775 ar->state = ATH10K_STATE_ON; 4776 break; 4777 case ATH10K_STATE_RESTARTING: 4778 ar->state = ATH10K_STATE_RESTARTED; 4779 break; 4780 case ATH10K_STATE_ON: 4781 case ATH10K_STATE_RESTARTED: 4782 case ATH10K_STATE_WEDGED: 4783 WARN_ON(1); 4784 ret = -EINVAL; 4785 goto err; 4786 case ATH10K_STATE_UTF: 4787 ret = -EBUSY; 4788 goto err; 4789 } 4790 4791 ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL); 4792 if (ret) { 4793 ath10k_err(ar, "Could not init hif: %d\n", ret); 4794 goto err_off; 4795 } 4796 4797 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, 4798 &ar->normal_mode_fw); 4799 if (ret) { 4800 ath10k_err(ar, "Could not init core: %d\n", ret); 4801 goto err_power_down; 4802 } 4803 4804 param = ar->wmi.pdev_param->pmf_qos; 4805 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4806 if (ret) { 4807 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); 4808 goto err_core_stop; 4809 } 4810 4811 param = ar->wmi.pdev_param->dynamic_bw; 4812 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4813 if (ret) { 4814 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); 4815 goto err_core_stop; 4816 } 4817 4818 if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) { 4819 ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr); 4820 if (ret) { 4821 ath10k_err(ar, "failed to set prob req oui: %i\n", ret); 4822 goto err_core_stop; 4823 } 4824 } 4825 4826 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { 4827 ret = ath10k_wmi_adaptive_qcs(ar, true); 4828 if (ret) { 4829 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n", 4830 ret); 4831 goto err_core_stop; 4832 } 4833 } 4834 4835 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { 4836 param = ar->wmi.pdev_param->burst_enable; 4837 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4838 if (ret) { 4839 ath10k_warn(ar, "failed to disable burst: %d\n", ret); 4840 goto err_core_stop; 4841 } 4842 } 4843 4844 param = ar->wmi.pdev_param->idle_ps_config; 4845 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4846 if (ret && ret != -EOPNOTSUPP) { 4847 ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret); 4848 goto err_core_stop; 4849 } 4850 4851 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); 4852 4853 /* 4854 * By default FW set ARP frames ac to voice (6). In that case ARP 4855 * exchange is not working properly for UAPSD enabled AP. ARP requests 4856 * which arrives with access category 0 are processed by network stack 4857 * and send back with access category 0, but FW changes access category 4858 * to 6. Set ARP frames access category to best effort (0) solves 4859 * this problem. 
4860 */ 4861 4862 param = ar->wmi.pdev_param->arp_ac_override; 4863 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4864 if (ret) { 4865 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", 4866 ret); 4867 goto err_core_stop; 4868 } 4869 4870 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, 4871 ar->running_fw->fw_file.fw_features)) { 4872 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, 4873 WMI_CCA_DETECT_LEVEL_AUTO, 4874 WMI_CCA_DETECT_MARGIN_AUTO); 4875 if (ret) { 4876 ath10k_warn(ar, "failed to enable adaptive cca: %d\n", 4877 ret); 4878 goto err_core_stop; 4879 } 4880 } 4881 4882 param = ar->wmi.pdev_param->ani_enable; 4883 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4884 if (ret) { 4885 ath10k_warn(ar, "failed to enable ani by default: %d\n", 4886 ret); 4887 goto err_core_stop; 4888 } 4889 4890 ar->ani_enabled = true; 4891 4892 if (ath10k_peer_stats_enabled(ar)) { 4893 param = ar->wmi.pdev_param->peer_stats_update_period; 4894 ret = ath10k_wmi_pdev_set_param(ar, param, 4895 PEER_DEFAULT_STATS_UPDATE_PERIOD); 4896 if (ret) { 4897 ath10k_warn(ar, 4898 "failed to set peer stats period : %d\n", 4899 ret); 4900 goto err_core_stop; 4901 } 4902 } 4903 4904 param = ar->wmi.pdev_param->enable_btcoex; 4905 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && 4906 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, 4907 ar->running_fw->fw_file.fw_features)) { 4908 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4909 if (ret) { 4910 ath10k_warn(ar, 4911 "failed to set btcoex param: %d\n", ret); 4912 goto err_core_stop; 4913 } 4914 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 4915 } 4916 4917 if (test_bit(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, ar->wmi.svc_map)) { 4918 ret = __ath10k_fetch_bb_timing_dt(ar, &bb_timing); 4919 if (!ret) { 4920 ret = ath10k_wmi_pdev_bb_timing(ar, &bb_timing); 4921 if (ret) { 4922 ath10k_warn(ar, 4923 "failed to set bb timings: %d\n", 4924 ret); 4925 goto err_core_stop; 4926 } 4927 } 4928 } 4929 4930 ar->num_started_vdevs = 0; 4931 ath10k_regd_update(ar); 4932 4933 ath10k_spectral_start(ar); 4934 ath10k_thermal_set_throttling(ar); 4935 4936 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE; 4937 4938 mutex_unlock(&ar->conf_mutex); 4939 return 0; 4940 4941 err_core_stop: 4942 ath10k_core_stop(ar); 4943 4944 err_power_down: 4945 ath10k_hif_power_down(ar); 4946 4947 err_off: 4948 ar->state = ATH10K_STATE_OFF; 4949 4950 err: 4951 mutex_unlock(&ar->conf_mutex); 4952 return ret; 4953 } 4954 4955 static void ath10k_stop(struct ieee80211_hw *hw) 4956 { 4957 struct ath10k *ar = hw->priv; 4958 4959 ath10k_drain_tx(ar); 4960 4961 mutex_lock(&ar->conf_mutex); 4962 if (ar->state != ATH10K_STATE_OFF) { 4963 ath10k_halt(ar); 4964 ar->state = ATH10K_STATE_OFF; 4965 } 4966 mutex_unlock(&ar->conf_mutex); 4967 4968 cancel_work_sync(&ar->set_coverage_class_work); 4969 cancel_delayed_work_sync(&ar->scan.timeout); 4970 cancel_work_sync(&ar->restart_work); 4971 } 4972 4973 static int ath10k_config_ps(struct ath10k *ar) 4974 { 4975 struct ath10k_vif *arvif; 4976 int ret = 0; 4977 4978 lockdep_assert_held(&ar->conf_mutex); 4979 4980 list_for_each_entry(arvif, &ar->arvifs, list) { 4981 ret = ath10k_mac_vif_setup_ps(arvif); 4982 if (ret) { 4983 ath10k_warn(ar, "failed to setup powersave: %d\n", ret); 4984 break; 4985 } 4986 } 4987 4988 return ret; 4989 } 4990 4991 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) 4992 { 4993 int ret; 4994 u32 param; 4995 4996 lockdep_assert_held(&ar->conf_mutex); 4997 4998 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", 
txpower); 4999 5000 param = ar->wmi.pdev_param->txpower_limit2g; 5001 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 5002 if (ret) { 5003 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", 5004 txpower, ret); 5005 return ret; 5006 } 5007 5008 param = ar->wmi.pdev_param->txpower_limit5g; 5009 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 5010 if (ret) { 5011 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", 5012 txpower, ret); 5013 return ret; 5014 } 5015 5016 return 0; 5017 } 5018 5019 static int ath10k_mac_txpower_recalc(struct ath10k *ar) 5020 { 5021 struct ath10k_vif *arvif; 5022 int ret, txpower = -1; 5023 5024 lockdep_assert_held(&ar->conf_mutex); 5025 5026 list_for_each_entry(arvif, &ar->arvifs, list) { 5027 if (arvif->txpower <= 0) 5028 continue; 5029 5030 if (txpower == -1) 5031 txpower = arvif->txpower; 5032 else 5033 txpower = min(txpower, arvif->txpower); 5034 } 5035 5036 if (txpower == -1) 5037 return 0; 5038 5039 ret = ath10k_mac_txpower_setup(ar, txpower); 5040 if (ret) { 5041 ath10k_warn(ar, "failed to setup tx power %d: %d\n", 5042 txpower, ret); 5043 return ret; 5044 } 5045 5046 return 0; 5047 } 5048 5049 static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 5050 { 5051 struct ath10k *ar = hw->priv; 5052 struct ieee80211_conf *conf = &hw->conf; 5053 int ret = 0; 5054 5055 mutex_lock(&ar->conf_mutex); 5056 5057 if (changed & IEEE80211_CONF_CHANGE_PS) 5058 ath10k_config_ps(ar); 5059 5060 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 5061 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; 5062 ret = ath10k_monitor_recalc(ar); 5063 if (ret) 5064 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5065 } 5066 5067 mutex_unlock(&ar->conf_mutex); 5068 return ret; 5069 } 5070 5071 static u32 get_nss_from_chainmask(u16 chain_mask) 5072 { 5073 if ((chain_mask & 0xf) == 0xf) 5074 return 4; 5075 else if ((chain_mask & 0x7) == 0x7) 5076 return 3; 5077 else if ((chain_mask & 0x3) == 0x3) 5078 return 2; 5079 return 1; 5080 } 5081 5082 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) 5083 { 5084 u32 value = 0; 5085 struct ath10k *ar = arvif->ar; 5086 int nsts; 5087 int sound_dim; 5088 5089 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) 5090 return 0; 5091 5092 nsts = ath10k_mac_get_vht_cap_bf_sts(ar); 5093 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 5094 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) 5095 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 5096 5097 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 5098 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 5099 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) 5100 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); 5101 5102 if (!value) 5103 return 0; 5104 5105 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 5106 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 5107 5108 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 5109 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | 5110 WMI_VDEV_PARAM_TXBF_SU_TX_BFER); 5111 5112 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 5113 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 5114 5115 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 5116 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | 5117 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); 5118 5119 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 5120 ar->wmi.vdev_param->txbf, value); 5121 } 5122 5123 /* 5124 * TODO: 5125 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, 5126 * because we will send mgmt frames 
without CCK. This requirement 5127 * for P2P_FIND/GO_NEG should be handled by checking CCK flag 5128 * in the TX packet. 5129 */ 5130 static int ath10k_add_interface(struct ieee80211_hw *hw, 5131 struct ieee80211_vif *vif) 5132 { 5133 struct ath10k *ar = hw->priv; 5134 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5135 struct ath10k_peer *peer; 5136 enum wmi_sta_powersave_param param; 5137 int ret = 0; 5138 u32 value; 5139 int bit; 5140 int i; 5141 u32 vdev_param; 5142 5143 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 5144 5145 mutex_lock(&ar->conf_mutex); 5146 5147 memset(arvif, 0, sizeof(*arvif)); 5148 ath10k_mac_txq_init(vif->txq); 5149 5150 arvif->ar = ar; 5151 arvif->vif = vif; 5152 5153 INIT_LIST_HEAD(&arvif->list); 5154 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work); 5155 INIT_DELAYED_WORK(&arvif->connection_loss_work, 5156 ath10k_mac_vif_sta_connection_loss_work); 5157 5158 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 5159 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 5160 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 5161 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 5162 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 5163 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 5164 } 5165 5166 if (ar->num_peers >= ar->max_num_peers) { 5167 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); 5168 ret = -ENOBUFS; 5169 goto err; 5170 } 5171 5172 if (ar->free_vdev_map == 0) { 5173 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); 5174 ret = -EBUSY; 5175 goto err; 5176 } 5177 bit = __ffs64(ar->free_vdev_map); 5178 5179 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", 5180 bit, ar->free_vdev_map); 5181 5182 arvif->vdev_id = bit; 5183 arvif->vdev_subtype = 5184 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); 5185 5186 switch (vif->type) { 5187 case NL80211_IFTYPE_P2P_DEVICE: 5188 arvif->vdev_type = WMI_VDEV_TYPE_STA; 5189 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5190 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); 5191 break; 5192 case NL80211_IFTYPE_UNSPECIFIED: 5193 case NL80211_IFTYPE_STATION: 5194 arvif->vdev_type = WMI_VDEV_TYPE_STA; 5195 if (vif->p2p) 5196 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5197 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); 5198 break; 5199 case NL80211_IFTYPE_ADHOC: 5200 arvif->vdev_type = WMI_VDEV_TYPE_IBSS; 5201 break; 5202 case NL80211_IFTYPE_MESH_POINT: 5203 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { 5204 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5205 (ar, WMI_VDEV_SUBTYPE_MESH_11S); 5206 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 5207 ret = -EINVAL; 5208 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); 5209 goto err; 5210 } 5211 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5212 break; 5213 case NL80211_IFTYPE_AP: 5214 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5215 5216 if (vif->p2p) 5217 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5218 (ar, WMI_VDEV_SUBTYPE_P2P_GO); 5219 break; 5220 case NL80211_IFTYPE_MONITOR: 5221 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 5222 break; 5223 default: 5224 WARN_ON(1); 5225 break; 5226 } 5227 5228 /* Using vdev_id as queue number will make it very easy to do per-vif 5229 * tx queue locking. This shouldn't wrap due to interface combinations 5230 * but do a modulo for correctness sake and prevent using offchannel tx 5231 * queues for regular vif tx. 
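* E.g. with IEEE80211_MAX_QUEUES of 16, vdev_id 3 maps cab_queue and all
* hw_queue[] entries to queue 3, leaving the last queue for offchannel tx.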
5232 */ 5233 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5234 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 5235 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5236 5237 /* Some firmware revisions don't wait for beacon tx completion before 5238 * sending another SWBA event. This could lead to hardware using old 5239 * (freed) beacon data in some cases, e.g. tx credit starvation 5240 * combined with missed TBTT. This is very very rare. 5241 * 5242 * On non-IOMMU-enabled hosts this could be a possible security issue 5243 * because hw could beacon some random data on the air. On 5244 * IOMMU-enabled hosts DMAR faults would occur in most cases and target 5245 * device would crash. 5246 * 5247 * Since there are no beacon tx completions (implicit nor explicit) 5248 * propagated to host the only workaround for this is to allocate a 5249 * DMA-coherent buffer for a lifetime of a vif and use it for all 5250 * beacon tx commands. Worst case for this approach is some beacons may 5251 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. 5252 */ 5253 if (vif->type == NL80211_IFTYPE_ADHOC || 5254 vif->type == NL80211_IFTYPE_MESH_POINT || 5255 vif->type == NL80211_IFTYPE_AP) { 5256 arvif->beacon_buf = dma_alloc_coherent(ar->dev, 5257 IEEE80211_MAX_FRAME_LEN, 5258 &arvif->beacon_paddr, 5259 GFP_ATOMIC); 5260 if (!arvif->beacon_buf) { 5261 ret = -ENOMEM; 5262 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 5263 ret); 5264 goto err; 5265 } 5266 } 5267 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)) 5268 arvif->nohwcrypt = true; 5269 5270 if (arvif->nohwcrypt && 5271 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { ret = -EINVAL; 5272 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); 5273 goto err; 5274 } 5275 5276 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", 5277 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, 5278 arvif->beacon_buf ? "single-buf" : "per-skb"); 5279 5280 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 5281 arvif->vdev_subtype, vif->addr); 5282 if (ret) { 5283 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", 5284 arvif->vdev_id, ret); 5285 goto err; 5286 } 5287 5288 if (test_bit(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, 5289 ar->wmi.svc_map)) { 5290 vdev_param = ar->wmi.vdev_param->disable_4addr_src_lrn; 5291 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5292 WMI_VDEV_DISABLE_4_ADDR_SRC_LRN); 5293 if (ret && ret != -EOPNOTSUPP) { 5294 ath10k_warn(ar, "failed to disable 4addr src lrn vdev %i: %d\n", 5295 arvif->vdev_id, ret); 5296 } 5297 } 5298 5299 ar->free_vdev_map &= ~(1LL << arvif->vdev_id); 5300 spin_lock_bh(&ar->data_lock); 5301 list_add(&arvif->list, &ar->arvifs); 5302 spin_unlock_bh(&ar->data_lock); 5303 5304 /* It makes no sense to have firmware do keepalives. mac80211 already 5305 * takes care of this with idle connection polling. 5306 */ 5307 ret = ath10k_mac_vif_disable_keepalive(arvif); 5308 if (ret) { 5309 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", 5310 arvif->vdev_id, ret); 5311 goto err_vdev_delete; 5312 } 5313 5314 arvif->def_wep_key_idx = -1; 5315 5316 vdev_param = ar->wmi.vdev_param->tx_encap_type; 5317 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5318 ATH10K_HW_TXRX_NATIVE_WIFI); 5319 /* 10.X firmware does not support this VDEV parameter.
Do not warn */ 5320 if (ret && ret != -EOPNOTSUPP) { 5321 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", 5322 arvif->vdev_id, ret); 5323 goto err_vdev_delete; 5324 } 5325 5326 /* Configuring number of spatial stream for monitor interface is causing 5327 * target assert in qca9888 and qca6174. 5328 */ 5329 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { 5330 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); 5331 5332 vdev_param = ar->wmi.vdev_param->nss; 5333 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5334 nss); 5335 if (ret) { 5336 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", 5337 arvif->vdev_id, ar->cfg_tx_chainmask, nss, 5338 ret); 5339 goto err_vdev_delete; 5340 } 5341 } 5342 5343 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5344 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5345 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, 5346 vif->addr, WMI_PEER_TYPE_DEFAULT); 5347 if (ret) { 5348 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", 5349 arvif->vdev_id, ret); 5350 goto err_vdev_delete; 5351 } 5352 5353 spin_lock_bh(&ar->data_lock); 5354 5355 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); 5356 if (!peer) { 5357 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 5358 vif->addr, arvif->vdev_id); 5359 spin_unlock_bh(&ar->data_lock); 5360 ret = -ENOENT; 5361 goto err_peer_delete; 5362 } 5363 5364 arvif->peer_id = find_first_bit(peer->peer_ids, 5365 ATH10K_MAX_NUM_PEER_IDS); 5366 5367 spin_unlock_bh(&ar->data_lock); 5368 } else { 5369 arvif->peer_id = HTT_INVALID_PEERID; 5370 } 5371 5372 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 5373 ret = ath10k_mac_set_kickout(arvif); 5374 if (ret) { 5375 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", 5376 arvif->vdev_id, ret); 5377 goto err_peer_delete; 5378 } 5379 } 5380 5381 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 5382 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 5383 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 5384 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5385 param, value); 5386 if (ret) { 5387 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", 5388 arvif->vdev_id, ret); 5389 goto err_peer_delete; 5390 } 5391 5392 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 5393 if (ret) { 5394 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 5395 arvif->vdev_id, ret); 5396 goto err_peer_delete; 5397 } 5398 5399 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 5400 if (ret) { 5401 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 5402 arvif->vdev_id, ret); 5403 goto err_peer_delete; 5404 } 5405 } 5406 5407 ret = ath10k_mac_set_txbf_conf(arvif); 5408 if (ret) { 5409 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", 5410 arvif->vdev_id, ret); 5411 goto err_peer_delete; 5412 } 5413 5414 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 5415 if (ret) { 5416 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 5417 arvif->vdev_id, ret); 5418 goto err_peer_delete; 5419 } 5420 5421 arvif->txpower = vif->bss_conf.txpower; 5422 ret = ath10k_mac_txpower_recalc(ar); 5423 if (ret) { 5424 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5425 goto err_peer_delete; 5426 } 5427 5428 if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) { 5429 vdev_param = ar->wmi.vdev_param->rtt_responder_role; 5430 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5431 arvif->ftm_responder); 5432 5433 /* It is harmless to 
not set FTM role. Do not warn */ 5434 if (ret && ret != -EOPNOTSUPP) 5435 ath10k_warn(ar, "failed to set vdev %i FTM Responder: %d\n", 5436 arvif->vdev_id, ret); 5437 } 5438 5439 if (vif->type == NL80211_IFTYPE_MONITOR) { 5440 ar->monitor_arvif = arvif; 5441 ret = ath10k_monitor_recalc(ar); 5442 if (ret) { 5443 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5444 goto err_peer_delete; 5445 } 5446 } 5447 5448 spin_lock_bh(&ar->htt.tx_lock); 5449 if (!ar->tx_paused) 5450 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 5451 spin_unlock_bh(&ar->htt.tx_lock); 5452 5453 mutex_unlock(&ar->conf_mutex); 5454 return 0; 5455 5456 err_peer_delete: 5457 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5458 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5459 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); 5460 ath10k_wait_for_peer_delete_done(ar, arvif->vdev_id, 5461 vif->addr); 5462 } 5463 5464 err_vdev_delete: 5465 ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5466 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5467 spin_lock_bh(&ar->data_lock); 5468 list_del(&arvif->list); 5469 spin_unlock_bh(&ar->data_lock); 5470 5471 err: 5472 if (arvif->beacon_buf) { 5473 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 5474 arvif->beacon_buf, arvif->beacon_paddr); 5475 arvif->beacon_buf = NULL; 5476 } 5477 5478 mutex_unlock(&ar->conf_mutex); 5479 5480 return ret; 5481 } 5482 5483 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif) 5484 { 5485 int i; 5486 5487 for (i = 0; i < BITS_PER_LONG; i++) 5488 ath10k_mac_vif_tx_unlock(arvif, i); 5489 } 5490 5491 static void ath10k_remove_interface(struct ieee80211_hw *hw, 5492 struct ieee80211_vif *vif) 5493 { 5494 struct ath10k *ar = hw->priv; 5495 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5496 struct ath10k_peer *peer; 5497 unsigned long time_left; 5498 int ret; 5499 int i; 5500 5501 cancel_work_sync(&arvif->ap_csa_work); 5502 cancel_delayed_work_sync(&arvif->connection_loss_work); 5503 5504 mutex_lock(&ar->conf_mutex); 5505 5506 spin_lock_bh(&ar->data_lock); 5507 ath10k_mac_vif_beacon_cleanup(arvif); 5508 spin_unlock_bh(&ar->data_lock); 5509 5510 ret = ath10k_spectral_vif_stop(arvif); 5511 if (ret) 5512 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", 5513 arvif->vdev_id, ret); 5514 5515 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5516 spin_lock_bh(&ar->data_lock); 5517 list_del(&arvif->list); 5518 spin_unlock_bh(&ar->data_lock); 5519 5520 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5521 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5522 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, 5523 vif->addr); 5524 if (ret) 5525 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", 5526 arvif->vdev_id, ret); 5527 5528 ath10k_wait_for_peer_delete_done(ar, arvif->vdev_id, 5529 vif->addr); 5530 kfree(arvif->u.ap.noa_data); 5531 } 5532 5533 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", 5534 arvif->vdev_id); 5535 5536 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5537 if (ret) 5538 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", 5539 arvif->vdev_id, ret); 5540 5541 if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) { 5542 time_left = wait_for_completion_timeout(&ar->vdev_delete_done, 5543 ATH10K_VDEV_DELETE_TIMEOUT_HZ); 5544 if (time_left == 0) { 5545 ath10k_warn(ar, "Timeout in receiving vdev delete response\n"); 5546 goto out; 5547 } 5548 } 5549 5550 /* Some firmware revisions don't notify host about self-peer removal 5551 * until after associated vdev is deleted. 
5552 */ 5553 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5554 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5555 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, 5556 vif->addr); 5557 if (ret) 5558 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", 5559 arvif->vdev_id, ret); 5560 5561 spin_lock_bh(&ar->data_lock); 5562 ar->num_peers--; 5563 spin_unlock_bh(&ar->data_lock); 5564 } 5565 5566 spin_lock_bh(&ar->data_lock); 5567 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 5568 peer = ar->peer_map[i]; 5569 if (!peer) 5570 continue; 5571 5572 if (peer->vif == vif) { 5573 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", 5574 vif->addr, arvif->vdev_id); 5575 peer->vif = NULL; 5576 } 5577 } 5578 spin_unlock_bh(&ar->data_lock); 5579 5580 ath10k_peer_cleanup(ar, arvif->vdev_id); 5581 ath10k_mac_txq_unref(ar, vif->txq); 5582 5583 if (vif->type == NL80211_IFTYPE_MONITOR) { 5584 ar->monitor_arvif = NULL; 5585 ret = ath10k_monitor_recalc(ar); 5586 if (ret) 5587 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5588 } 5589 5590 ret = ath10k_mac_txpower_recalc(ar); 5591 if (ret) 5592 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5593 5594 spin_lock_bh(&ar->htt.tx_lock); 5595 ath10k_mac_vif_tx_unlock_all(arvif); 5596 spin_unlock_bh(&ar->htt.tx_lock); 5597 5598 ath10k_mac_txq_unref(ar, vif->txq); 5599 5600 out: 5601 mutex_unlock(&ar->conf_mutex); 5602 } 5603 5604 /* 5605 * FIXME: Has to be verified. 5606 */ 5607 #define SUPPORTED_FILTERS \ 5608 (FIF_ALLMULTI | \ 5609 FIF_CONTROL | \ 5610 FIF_PSPOLL | \ 5611 FIF_OTHER_BSS | \ 5612 FIF_BCN_PRBRESP_PROMISC | \ 5613 FIF_PROBE_REQ | \ 5614 FIF_FCSFAIL) 5615 5616 static void ath10k_configure_filter(struct ieee80211_hw *hw, 5617 unsigned int changed_flags, 5618 unsigned int *total_flags, 5619 u64 multicast) 5620 { 5621 struct ath10k *ar = hw->priv; 5622 int ret; 5623 5624 mutex_lock(&ar->conf_mutex); 5625 5626 changed_flags &= SUPPORTED_FILTERS; 5627 *total_flags &= SUPPORTED_FILTERS; 5628 ar->filter_flags = *total_flags; 5629 5630 ret = ath10k_monitor_recalc(ar); 5631 if (ret) 5632 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5633 5634 mutex_unlock(&ar->conf_mutex); 5635 } 5636 5637 static void ath10k_bss_info_changed(struct ieee80211_hw *hw, 5638 struct ieee80211_vif *vif, 5639 struct ieee80211_bss_conf *info, 5640 u32 changed) 5641 { 5642 struct ath10k *ar = hw->priv; 5643 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5644 struct cfg80211_chan_def def; 5645 u32 vdev_param, pdev_param, slottime, preamble; 5646 u16 bitrate, hw_value; 5647 u8 rate, basic_rate_idx, rateidx; 5648 int ret = 0, hw_rate_code, mcast_rate; 5649 enum nl80211_band band; 5650 const struct ieee80211_supported_band *sband; 5651 5652 mutex_lock(&ar->conf_mutex); 5653 5654 if (changed & BSS_CHANGED_IBSS) 5655 ath10k_control_ibss(arvif, info, vif->addr); 5656 5657 if (changed & BSS_CHANGED_BEACON_INT) { 5658 arvif->beacon_interval = info->beacon_int; 5659 vdev_param = ar->wmi.vdev_param->beacon_interval; 5660 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5661 arvif->beacon_interval); 5662 ath10k_dbg(ar, ATH10K_DBG_MAC, 5663 "mac vdev %d beacon_interval %d\n", 5664 arvif->vdev_id, arvif->beacon_interval); 5665 5666 if (ret) 5667 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", 5668 arvif->vdev_id, ret); 5669 } 5670 5671 if (changed & BSS_CHANGED_BEACON) { 5672 ath10k_dbg(ar, ATH10K_DBG_MAC, 5673 "vdev %d set beacon tx mode to staggered\n", 5674 arvif->vdev_id); 5675 5676 
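/* Staggered mode sends each vdev's beacon at its own TBTT rather than
 * bursting all beacons back to back.
 */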
pdev_param = ar->wmi.pdev_param->beacon_tx_mode; 5677 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 5678 WMI_BEACON_STAGGERED_MODE); 5679 if (ret) 5680 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", 5681 arvif->vdev_id, ret); 5682 5683 ret = ath10k_mac_setup_bcn_tmpl(arvif); 5684 if (ret) 5685 ath10k_warn(ar, "failed to update beacon template: %d\n", 5686 ret); 5687 5688 if (ieee80211_vif_is_mesh(vif)) { 5689 /* mesh doesn't use SSID but firmware needs it */ 5690 strncpy(arvif->u.ap.ssid, "mesh", 5691 sizeof(arvif->u.ap.ssid)); 5692 arvif->u.ap.ssid_len = 4; 5693 } 5694 } 5695 5696 if (changed & BSS_CHANGED_AP_PROBE_RESP) { 5697 ret = ath10k_mac_setup_prb_tmpl(arvif); 5698 if (ret) 5699 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", 5700 arvif->vdev_id, ret); 5701 } 5702 5703 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { 5704 arvif->dtim_period = info->dtim_period; 5705 5706 ath10k_dbg(ar, ATH10K_DBG_MAC, 5707 "mac vdev %d dtim_period %d\n", 5708 arvif->vdev_id, arvif->dtim_period); 5709 5710 vdev_param = ar->wmi.vdev_param->dtim_period; 5711 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5712 arvif->dtim_period); 5713 if (ret) 5714 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", 5715 arvif->vdev_id, ret); 5716 } 5717 5718 if (changed & BSS_CHANGED_SSID && 5719 vif->type == NL80211_IFTYPE_AP) { 5720 arvif->u.ap.ssid_len = info->ssid_len; 5721 if (info->ssid_len) 5722 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); 5723 arvif->u.ap.hidden_ssid = info->hidden_ssid; 5724 } 5725 5726 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) 5727 ether_addr_copy(arvif->bssid, info->bssid); 5728 5729 if (changed & BSS_CHANGED_FTM_RESPONDER && 5730 arvif->ftm_responder != info->ftm_responder && 5731 test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) { 5732 arvif->ftm_responder = info->ftm_responder; 5733 5734 vdev_param = ar->wmi.vdev_param->rtt_responder_role; 5735 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5736 arvif->ftm_responder); 5737 5738 ath10k_dbg(ar, ATH10K_DBG_MAC, 5739 "mac vdev %d ftm_responder %d:ret %d\n", 5740 arvif->vdev_id, arvif->ftm_responder, ret); 5741 } 5742 5743 if (changed & BSS_CHANGED_BEACON_ENABLED) 5744 ath10k_control_beaconing(arvif, info); 5745 5746 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 5747 arvif->use_cts_prot = info->use_cts_prot; 5748 5749 ret = ath10k_recalc_rtscts_prot(arvif); 5750 if (ret) 5751 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 5752 arvif->vdev_id, ret); 5753 5754 if (ath10k_mac_can_set_cts_prot(arvif)) { 5755 ret = ath10k_mac_set_cts_prot(arvif); 5756 if (ret) 5757 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 5758 arvif->vdev_id, ret); 5759 } 5760 } 5761 5762 if (changed & BSS_CHANGED_ERP_SLOT) { 5763 if (info->use_short_slot) 5764 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ 5765 5766 else 5767 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ 5768 5769 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", 5770 arvif->vdev_id, slottime); 5771 5772 vdev_param = ar->wmi.vdev_param->slot_time; 5773 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5774 slottime); 5775 if (ret) 5776 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", 5777 arvif->vdev_id, ret); 5778 } 5779 5780 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 5781 if (info->use_short_preamble) 5782 preamble = WMI_VDEV_PREAMBLE_SHORT; 5783 else 5784 preamble = 
WMI_VDEV_PREAMBLE_LONG; 5785 5786 ath10k_dbg(ar, ATH10K_DBG_MAC, 5787 "mac vdev %d preamble %d\n", 5788 arvif->vdev_id, preamble); 5789 5790 vdev_param = ar->wmi.vdev_param->preamble; 5791 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5792 preamble); 5793 if (ret) 5794 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", 5795 arvif->vdev_id, ret); 5796 } 5797 5798 if (changed & BSS_CHANGED_ASSOC) { 5799 if (info->assoc) { 5800 /* Workaround: Make sure monitor vdev is not running 5801 * when associating to prevent some firmware revisions 5802 * (e.g. 10.1 and 10.2) from crashing. 5803 */ 5804 if (ar->monitor_started) 5805 ath10k_monitor_stop(ar); 5806 ath10k_bss_assoc(hw, vif, info); 5807 ath10k_monitor_recalc(ar); 5808 } else { 5809 ath10k_bss_disassoc(hw, vif); 5810 } 5811 } 5812 5813 if (changed & BSS_CHANGED_TXPOWER) { 5814 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", 5815 arvif->vdev_id, info->txpower); 5816 5817 arvif->txpower = info->txpower; 5818 ret = ath10k_mac_txpower_recalc(ar); 5819 if (ret) 5820 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5821 } 5822 5823 if (changed & BSS_CHANGED_PS) { 5824 arvif->ps = vif->bss_conf.ps; 5825 5826 ret = ath10k_config_ps(ar); 5827 if (ret) 5828 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", 5829 arvif->vdev_id, ret); 5830 } 5831 5832 if (changed & BSS_CHANGED_MCAST_RATE && 5833 !ath10k_mac_vif_chan(arvif->vif, &def)) { 5834 band = def.chan->band; 5835 mcast_rate = vif->bss_conf.mcast_rate[band]; 5836 if (mcast_rate > 0) 5837 rateidx = mcast_rate - 1; 5838 else 5839 rateidx = ffs(vif->bss_conf.basic_rates) - 1; 5840 5841 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 5842 rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX; 5843 5844 bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate; 5845 hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value; 5846 if (ath10k_mac_bitrate_is_cck(bitrate)) 5847 preamble = WMI_RATE_PREAMBLE_CCK; 5848 else 5849 preamble = WMI_RATE_PREAMBLE_OFDM; 5850 5851 rate = ATH10K_HW_RATECODE(hw_value, 0, preamble); 5852 5853 ath10k_dbg(ar, ATH10K_DBG_MAC, 5854 "mac vdev %d mcast_rate %x\n", 5855 arvif->vdev_id, rate); 5856 5857 vdev_param = ar->wmi.vdev_param->mcast_data_rate; 5858 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 5859 vdev_param, rate); 5860 if (ret) 5861 ath10k_warn(ar, 5862 "failed to set mcast rate on vdev %i: %d\n", 5863 arvif->vdev_id, ret); 5864 5865 vdev_param = ar->wmi.vdev_param->bcast_data_rate; 5866 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 5867 vdev_param, rate); 5868 if (ret) 5869 ath10k_warn(ar, 5870 "failed to set bcast rate on vdev %i: %d\n", 5871 arvif->vdev_id, ret); 5872 } 5873 5874 if (changed & BSS_CHANGED_BASIC_RATES) { 5875 if (ath10k_mac_vif_chan(vif, &def)) { 5876 mutex_unlock(&ar->conf_mutex); 5877 return; 5878 } 5879 5880 sband = ar->hw->wiphy->bands[def.chan->band]; 5881 basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; 5882 bitrate = sband->bitrates[basic_rate_idx].bitrate; 5883 5884 hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate); 5885 if (hw_rate_code < 0) { 5886 ath10k_warn(ar, "bitrate not supported %d\n", bitrate); 5887 mutex_unlock(&ar->conf_mutex); 5888 return; 5889 } 5890 5891 vdev_param = ar->wmi.vdev_param->mgmt_rate; 5892 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5893 hw_rate_code); 5894 if (ret) 5895 ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret); 5896 } 5897 5898 mutex_unlock(&ar->conf_mutex); 5899 } 5900 5901 static void ath10k_mac_op_set_coverage_class(struct
ieee80211_hw *hw, s16 value) 5902 { 5903 struct ath10k *ar = hw->priv; 5904 5905 /* This function should never be called if setting the coverage class 5906 * is not supported on this hardware. 5907 */ 5908 if (!ar->hw_params.hw_ops->set_coverage_class) { 5909 WARN_ON_ONCE(1); 5910 return; 5911 } 5912 ar->hw_params.hw_ops->set_coverage_class(ar, value); 5913 } 5914 5915 struct ath10k_mac_tdls_iter_data { 5916 u32 num_tdls_stations; 5917 struct ieee80211_vif *curr_vif; 5918 }; 5919 5920 static void ath10k_mac_tdls_vif_stations_count_iter(void *data, 5921 struct ieee80211_sta *sta) 5922 { 5923 struct ath10k_mac_tdls_iter_data *iter_data = data; 5924 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5925 struct ieee80211_vif *sta_vif = arsta->arvif->vif; 5926 5927 if (sta->tdls && sta_vif == iter_data->curr_vif) 5928 iter_data->num_tdls_stations++; 5929 } 5930 5931 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, 5932 struct ieee80211_vif *vif) 5933 { 5934 struct ath10k_mac_tdls_iter_data data = {}; 5935 5936 data.curr_vif = vif; 5937 5938 ieee80211_iterate_stations_atomic(hw, 5939 ath10k_mac_tdls_vif_stations_count_iter, 5940 &data); 5941 return data.num_tdls_stations; 5942 } 5943 5944 static int ath10k_hw_scan(struct ieee80211_hw *hw, 5945 struct ieee80211_vif *vif, 5946 struct ieee80211_scan_request *hw_req) 5947 { 5948 struct ath10k *ar = hw->priv; 5949 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5950 struct cfg80211_scan_request *req = &hw_req->req; 5951 struct wmi_start_scan_arg arg; 5952 int ret = 0; 5953 int i; 5954 u32 scan_timeout; 5955 5956 mutex_lock(&ar->conf_mutex); 5957 5958 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 5959 ret = -EBUSY; 5960 goto exit; 5961 } 5962 5963 spin_lock_bh(&ar->data_lock); 5964 switch (ar->scan.state) { 5965 case ATH10K_SCAN_IDLE: 5966 reinit_completion(&ar->scan.started); 5967 reinit_completion(&ar->scan.completed); 5968 ar->scan.state = ATH10K_SCAN_STARTING; 5969 ar->scan.is_roc = false; 5970 ar->scan.vdev_id = arvif->vdev_id; 5971 ret = 0; 5972 break; 5973 case ATH10K_SCAN_STARTING: 5974 case ATH10K_SCAN_RUNNING: 5975 case ATH10K_SCAN_ABORTING: 5976 ret = -EBUSY; 5977 break; 5978 } 5979 spin_unlock_bh(&ar->data_lock); 5980 5981 if (ret) 5982 goto exit; 5983 5984 memset(&arg, 0, sizeof(arg)); 5985 ath10k_wmi_start_scan_init(ar, &arg); 5986 arg.vdev_id = arvif->vdev_id; 5987 arg.scan_id = ATH10K_SCAN_ID; 5988 5989 if (req->ie_len) { 5990 arg.ie_len = req->ie_len; 5991 memcpy(arg.ie, req->ie, arg.ie_len); 5992 } 5993 5994 if (req->n_ssids) { 5995 arg.n_ssids = req->n_ssids; 5996 for (i = 0; i < arg.n_ssids; i++) { 5997 arg.ssids[i].len = req->ssids[i].ssid_len; 5998 arg.ssids[i].ssid = req->ssids[i].ssid; 5999 } 6000 } else { 6001 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 6002 } 6003 6004 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { 6005 arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; 6006 ether_addr_copy(arg.mac_addr.addr, req->mac_addr); 6007 ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask); 6008 } 6009 6010 if (req->n_channels) { 6011 arg.n_channels = req->n_channels; 6012 for (i = 0; i < arg.n_channels; i++) 6013 arg.channels[i] = req->channels[i]->center_freq; 6014 } 6015 6016 /* if duration is set, default dwell times will be overwritten */ 6017 if (req->duration) { 6018 arg.dwell_time_active = req->duration; 6019 arg.dwell_time_passive = req->duration; 6020 arg.burst_duration_ms = req->duration; 6021 6022 scan_timeout = min_t(u32, arg.max_rest_time * 6023 
(arg.n_channels - 1) + (req->duration + 6024 ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * 6025 arg.n_channels, arg.max_scan_time + 200); 6026 6027 } else { 6028 /* Add a 200ms margin to account for event/command processing */ 6029 scan_timeout = arg.max_scan_time + 200; 6030 } 6031 6032 ret = ath10k_start_scan(ar, &arg); 6033 if (ret) { 6034 ath10k_warn(ar, "failed to start hw scan: %d\n", ret); 6035 spin_lock_bh(&ar->data_lock); 6036 ar->scan.state = ATH10K_SCAN_IDLE; 6037 spin_unlock_bh(&ar->data_lock); 6038 } 6039 6040 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 6041 msecs_to_jiffies(scan_timeout)); 6042 6043 exit: 6044 mutex_unlock(&ar->conf_mutex); 6045 return ret; 6046 } 6047 6048 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, 6049 struct ieee80211_vif *vif) 6050 { 6051 struct ath10k *ar = hw->priv; 6052 6053 mutex_lock(&ar->conf_mutex); 6054 ath10k_scan_abort(ar); 6055 mutex_unlock(&ar->conf_mutex); 6056 6057 cancel_delayed_work_sync(&ar->scan.timeout); 6058 } 6059 6060 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, 6061 struct ath10k_vif *arvif, 6062 enum set_key_cmd cmd, 6063 struct ieee80211_key_conf *key) 6064 { 6065 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; 6066 int ret; 6067 6068 /* 10.1 firmware branch requires default key index to be set to group 6069 * key index after installing it. Otherwise FW/HW Txes corrupted 6070 * frames with multi-vif APs. This is not required for main firmware 6071 * branch (e.g. 636). 6072 * 6073 * This is also needed for 636 fw for IBSS-RSN to work more reliably. 6074 * 6075 * FIXME: It remains unknown if this is required for multi-vif STA 6076 * interfaces on 10.1. 6077 */ 6078 6079 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 6080 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 6081 return; 6082 6083 if (key->cipher == WLAN_CIPHER_SUITE_WEP40) 6084 return; 6085 6086 if (key->cipher == WLAN_CIPHER_SUITE_WEP104) 6087 return; 6088 6089 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 6090 return; 6091 6092 if (cmd != SET_KEY) 6093 return; 6094 6095 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 6096 key->keyidx); 6097 if (ret) 6098 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", 6099 arvif->vdev_id, ret); 6100 } 6101 6102 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 6103 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 6104 struct ieee80211_key_conf *key) 6105 { 6106 struct ath10k *ar = hw->priv; 6107 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6108 struct ath10k_peer *peer; 6109 const u8 *peer_addr; 6110 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || 6111 key->cipher == WLAN_CIPHER_SUITE_WEP104; 6112 int ret = 0; 6113 int ret2; 6114 u32 flags = 0; 6115 u32 flags2; 6116 6117 /* this one needs to be done in software */ 6118 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 6119 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 6120 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || 6121 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) 6122 return 1; 6123 6124 if (arvif->nohwcrypt) 6125 return 1; 6126 6127 if (key->keyidx > WMI_MAX_KEY_INDEX) 6128 return -ENOSPC; 6129 6130 mutex_lock(&ar->conf_mutex); 6131 6132 if (sta) 6133 peer_addr = sta->addr; 6134 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 6135 peer_addr = vif->bss_conf.bssid; 6136 else 6137 peer_addr = vif->addr; 6138 6139 key->hw_key_idx = key->keyidx; 6140 6141 if (is_wep) { 6142 if (cmd == SET_KEY) 6143 arvif->wep_keys[key->keyidx] = key; 6144 else 6145 
arvif->wep_keys[key->keyidx] = NULL; 6146 } 6147 6148 /* the peer should not disappear midway (unless FW goes awry) since 6149 * we already hold conf_mutex. We just make sure it's there now. 6150 */ 6151 spin_lock_bh(&ar->data_lock); 6152 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 6153 spin_unlock_bh(&ar->data_lock); 6154 6155 if (!peer) { 6156 if (cmd == SET_KEY) { 6157 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", 6158 peer_addr); 6159 ret = -EOPNOTSUPP; 6160 goto exit; 6161 } else { 6162 /* if the peer doesn't exist there is no key to disable anymore */ 6163 goto exit; 6164 } 6165 } 6166 6167 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 6168 flags |= WMI_KEY_PAIRWISE; 6169 else 6170 flags |= WMI_KEY_GROUP; 6171 6172 if (is_wep) { 6173 if (cmd == DISABLE_KEY) 6174 ath10k_clear_vdev_key(arvif, key); 6175 6176 /* When WEP keys are uploaded it's possible that there are 6177 * stations associated already (e.g. when merging) without any 6178 * keys. Static WEP needs an explicit per-peer key upload. 6179 */ 6180 if (vif->type == NL80211_IFTYPE_ADHOC && 6181 cmd == SET_KEY) 6182 ath10k_mac_vif_update_wep_key(arvif, key); 6183 6184 /* 802.1x never sets the def_wep_key_idx so each set_key() 6185 * call changes the default tx key. 6186 * 6187 * Static WEP sets def_wep_key_idx via .set_default_unicast_key 6188 * after the first set_key(). 6189 */ 6190 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1) 6191 flags |= WMI_KEY_TX_USAGE; 6192 } 6193 6194 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); 6195 if (ret) { 6196 WARN_ON(ret > 0); 6197 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", 6198 arvif->vdev_id, peer_addr, ret); 6199 goto exit; 6200 } 6201 6202 /* mac80211 sets static WEP keys as groupwise while firmware requires 6203 * them to be installed twice as both pairwise and groupwise.
6204 */ 6205 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) { 6206 flags2 = flags; 6207 flags2 &= ~WMI_KEY_GROUP; 6208 flags2 |= WMI_KEY_PAIRWISE; 6209 6210 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); 6211 if (ret) { 6212 WARN_ON(ret > 0); 6213 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", 6214 arvif->vdev_id, peer_addr, ret); 6215 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, 6216 peer_addr, flags); 6217 if (ret2) { 6218 WARN_ON(ret2 > 0); 6219 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", 6220 arvif->vdev_id, peer_addr, ret2); 6221 } 6222 goto exit; 6223 } 6224 } 6225 6226 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); 6227 6228 spin_lock_bh(&ar->data_lock); 6229 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 6230 if (peer && cmd == SET_KEY) 6231 peer->keys[key->keyidx] = key; 6232 else if (peer && cmd == DISABLE_KEY) 6233 peer->keys[key->keyidx] = NULL; 6234 else if (peer == NULL) 6235 /* impossible unless FW goes crazy */ 6236 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); 6237 spin_unlock_bh(&ar->data_lock); 6238 6239 if (sta && sta->tdls) 6240 ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6241 WMI_PEER_AUTHORIZE, 1); 6242 6243 exit: 6244 mutex_unlock(&ar->conf_mutex); 6245 return ret; 6246 } 6247 6248 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, 6249 struct ieee80211_vif *vif, 6250 int keyidx) 6251 { 6252 struct ath10k *ar = hw->priv; 6253 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6254 int ret; 6255 6256 mutex_lock(&arvif->ar->conf_mutex); 6257 6258 if (arvif->ar->state != ATH10K_STATE_ON) 6259 goto unlock; 6260 6261 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 6262 arvif->vdev_id, keyidx); 6263 6264 ret = ath10k_wmi_vdev_set_param(arvif->ar, 6265 arvif->vdev_id, 6266 arvif->ar->wmi.vdev_param->def_keyid, 6267 keyidx); 6268 6269 if (ret) { 6270 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", 6271 arvif->vdev_id, 6272 ret); 6273 goto unlock; 6274 } 6275 6276 arvif->def_wep_key_idx = keyidx; 6277 6278 unlock: 6279 mutex_unlock(&arvif->ar->conf_mutex); 6280 } 6281 6282 static void ath10k_sta_rc_update_wk(struct work_struct *wk) 6283 { 6284 struct ath10k *ar; 6285 struct ath10k_vif *arvif; 6286 struct ath10k_sta *arsta; 6287 struct ieee80211_sta *sta; 6288 struct cfg80211_chan_def def; 6289 enum nl80211_band band; 6290 const u8 *ht_mcs_mask; 6291 const u16 *vht_mcs_mask; 6292 u32 changed, bw, nss, smps; 6293 int err; 6294 6295 arsta = container_of(wk, struct ath10k_sta, update_wk); 6296 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 6297 arvif = arsta->arvif; 6298 ar = arvif->ar; 6299 6300 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 6301 return; 6302 6303 band = def.chan->band; 6304 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 6305 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 6306 6307 spin_lock_bh(&ar->data_lock); 6308 6309 changed = arsta->changed; 6310 arsta->changed = 0; 6311 6312 bw = arsta->bw; 6313 nss = arsta->nss; 6314 smps = arsta->smps; 6315 6316 spin_unlock_bh(&ar->data_lock); 6317 6318 mutex_lock(&ar->conf_mutex); 6319 6320 nss = max_t(u32, 1, nss); 6321 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask), 6322 ath10k_mac_max_vht_nss(vht_mcs_mask))); 6323 6324 if (changed & IEEE80211_RC_BW_CHANGED) { 6325 enum wmi_phy_mode mode; 6326 6327 mode = chan_to_phymode(&def); 6328 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw 
%d phymode %d\n", 6329 sta->addr, bw, mode); 6330 6331 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6332 WMI_PEER_PHYMODE, mode); 6333 if (err) { 6334 ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n", 6335 sta->addr, mode, err); 6336 goto exit; 6337 } 6338 6339 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6340 WMI_PEER_CHAN_WIDTH, bw); 6341 if (err) 6342 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", 6343 sta->addr, bw, err); 6344 } 6345 6346 if (changed & IEEE80211_RC_NSS_CHANGED) { 6347 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", 6348 sta->addr, nss); 6349 6350 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6351 WMI_PEER_NSS, nss); 6352 if (err) 6353 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", 6354 sta->addr, nss, err); 6355 } 6356 6357 if (changed & IEEE80211_RC_SMPS_CHANGED) { 6358 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", 6359 sta->addr, smps); 6360 6361 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6362 WMI_PEER_SMPS_STATE, smps); 6363 if (err) 6364 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", 6365 sta->addr, smps, err); 6366 } 6367 6368 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { 6369 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", 6370 sta->addr); 6371 6372 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 6373 if (err) 6374 ath10k_warn(ar, "failed to reassociate station: %pM\n", 6375 sta->addr); 6376 } 6377 6378 exit: 6379 mutex_unlock(&ar->conf_mutex); 6380 } 6381 6382 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif, 6383 struct ieee80211_sta *sta) 6384 { 6385 struct ath10k *ar = arvif->ar; 6386 6387 lockdep_assert_held(&ar->conf_mutex); 6388 6389 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 6390 return 0; 6391 6392 if (ar->num_stations >= ar->max_num_stations) 6393 return -ENOBUFS; 6394 6395 ar->num_stations++; 6396 6397 return 0; 6398 } 6399 6400 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif, 6401 struct ieee80211_sta *sta) 6402 { 6403 struct ath10k *ar = arvif->ar; 6404 6405 lockdep_assert_held(&ar->conf_mutex); 6406 6407 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 6408 return; 6409 6410 ar->num_stations--; 6411 } 6412 6413 static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw, 6414 struct ieee80211_vif *vif, 6415 struct ieee80211_sta *sta) 6416 { 6417 struct ath10k *ar = hw->priv; 6418 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6419 int ret = 0; 6420 s16 txpwr; 6421 6422 if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) { 6423 txpwr = 0; 6424 } else { 6425 txpwr = sta->txpwr.power; 6426 if (!txpwr) 6427 return -EINVAL; 6428 } 6429 6430 if (txpwr > ATH10K_TX_POWER_MAX_VAL || txpwr < ATH10K_TX_POWER_MIN_VAL) 6431 return -EINVAL; 6432 6433 mutex_lock(&ar->conf_mutex); 6434 6435 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6436 WMI_PEER_USE_FIXED_PWR, txpwr); 6437 if (ret) { 6438 ath10k_warn(ar, "failed to set tx power for station ret: %d\n", 6439 ret); 6440 goto out; 6441 } 6442 6443 out: 6444 mutex_unlock(&ar->conf_mutex); 6445 return ret; 6446 } 6447 6448 static int ath10k_sta_state(struct ieee80211_hw *hw, 6449 struct ieee80211_vif *vif, 6450 struct ieee80211_sta *sta, 6451 enum ieee80211_sta_state old_state, 6452 enum ieee80211_sta_state new_state) 6453 { 6454 struct ath10k *ar = hw->priv; 6455 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6456 struct ath10k_sta *arsta = (struct ath10k_sta 
*)sta->drv_priv; 6457 struct ath10k_peer *peer; 6458 int ret = 0; 6459 int i; 6460 6461 if (old_state == IEEE80211_STA_NOTEXIST && 6462 new_state == IEEE80211_STA_NONE) { 6463 memset(arsta, 0, sizeof(*arsta)); 6464 arsta->arvif = arvif; 6465 arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; 6466 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); 6467 6468 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6469 ath10k_mac_txq_init(sta->txq[i]); 6470 } 6471 6472 /* cancel must be done outside the mutex to avoid deadlock */ 6473 if ((old_state == IEEE80211_STA_NONE && 6474 new_state == IEEE80211_STA_NOTEXIST)) 6475 cancel_work_sync(&arsta->update_wk); 6476 6477 mutex_lock(&ar->conf_mutex); 6478 6479 if (old_state == IEEE80211_STA_NOTEXIST && 6480 new_state == IEEE80211_STA_NONE) { 6481 /* 6482 * New station addition. 6483 */ 6484 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT; 6485 u32 num_tdls_stations; 6486 6487 ath10k_dbg(ar, ATH10K_DBG_MAC, 6488 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", 6489 arvif->vdev_id, sta->addr, 6490 ar->num_stations + 1, ar->max_num_stations, 6491 ar->num_peers + 1, ar->max_num_peers); 6492 6493 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); 6494 6495 if (sta->tdls) { 6496 if (num_tdls_stations >= ar->max_num_tdls_vdevs) { 6497 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n", 6498 arvif->vdev_id, 6499 ar->max_num_tdls_vdevs); 6500 ret = -ELNRNG; 6501 goto exit; 6502 } 6503 peer_type = WMI_PEER_TYPE_TDLS; 6504 } 6505 6506 ret = ath10k_mac_inc_num_stations(arvif, sta); 6507 if (ret) { 6508 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", 6509 ar->max_num_stations); 6510 goto exit; 6511 } 6512 6513 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { 6514 arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), 6515 GFP_KERNEL); 6516 if (!arsta->tx_stats) { 6517 ret = -ENOMEM; 6518 goto exit; 6519 } 6520 } 6521 6522 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, 6523 sta->addr, peer_type); 6524 if (ret) { 6525 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", 6526 sta->addr, arvif->vdev_id, ret); 6527 ath10k_mac_dec_num_stations(arvif, sta); 6528 kfree(arsta->tx_stats); 6529 goto exit; 6530 } 6531 6532 spin_lock_bh(&ar->data_lock); 6533 6534 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 6535 if (!peer) { 6536 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 6537 vif->addr, arvif->vdev_id); 6538 spin_unlock_bh(&ar->data_lock); 6539 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6540 ath10k_mac_dec_num_stations(arvif, sta); 6541 kfree(arsta->tx_stats); 6542 ret = -ENOENT; 6543 goto exit; 6544 } 6545 6546 arsta->peer_id = find_first_bit(peer->peer_ids, 6547 ATH10K_MAX_NUM_PEER_IDS); 6548 6549 spin_unlock_bh(&ar->data_lock); 6550 6551 if (!sta->tdls) 6552 goto exit; 6553 6554 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6555 WMI_TDLS_ENABLE_ACTIVE); 6556 if (ret) { 6557 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6558 arvif->vdev_id, ret); 6559 ath10k_peer_delete(ar, arvif->vdev_id, 6560 sta->addr); 6561 ath10k_mac_dec_num_stations(arvif, sta); 6562 kfree(arsta->tx_stats); 6563 goto exit; 6564 } 6565 6566 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6567 WMI_TDLS_PEER_STATE_PEERING); 6568 if (ret) { 6569 ath10k_warn(ar, 6570 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n", 6571 sta->addr, arvif->vdev_id, ret); 6572 ath10k_peer_delete(ar, 
arvif->vdev_id, sta->addr); 6573 ath10k_mac_dec_num_stations(arvif, sta); 6574 kfree(arsta->tx_stats); 6575 6576 if (num_tdls_stations != 0) 6577 goto exit; 6578 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6579 WMI_TDLS_DISABLE); 6580 } 6581 } else if ((old_state == IEEE80211_STA_NONE && 6582 new_state == IEEE80211_STA_NOTEXIST)) { 6583 /* 6584 * Existing station deletion. 6585 */ 6586 ath10k_dbg(ar, ATH10K_DBG_MAC, 6587 "mac vdev %d peer delete %pM sta %pK (sta gone)\n", 6588 arvif->vdev_id, sta->addr, sta); 6589 6590 if (sta->tdls) { 6591 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, 6592 sta, 6593 WMI_TDLS_PEER_STATE_TEARDOWN); 6594 if (ret) 6595 ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", 6596 sta->addr, 6597 WMI_TDLS_PEER_STATE_TEARDOWN, ret); 6598 } 6599 6600 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6601 if (ret) 6602 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", 6603 sta->addr, arvif->vdev_id, ret); 6604 6605 ath10k_mac_dec_num_stations(arvif, sta); 6606 6607 spin_lock_bh(&ar->data_lock); 6608 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 6609 peer = ar->peer_map[i]; 6610 if (!peer) 6611 continue; 6612 6613 if (peer->sta == sta) { 6614 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n", 6615 sta->addr, peer, i, arvif->vdev_id); 6616 peer->sta = NULL; 6617 6618 /* Clean up the peer object as well since we 6619 * must have failed to do this above. 6620 */ 6621 list_del(&peer->list); 6622 ar->peer_map[i] = NULL; 6623 kfree(peer); 6624 ar->num_peers--; 6625 } 6626 } 6627 spin_unlock_bh(&ar->data_lock); 6628 6629 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { 6630 kfree(arsta->tx_stats); 6631 arsta->tx_stats = NULL; 6632 } 6633 6634 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6635 ath10k_mac_txq_unref(ar, sta->txq[i]); 6636 6637 if (!sta->tdls) 6638 goto exit; 6639 6640 if (ath10k_mac_tdls_vif_stations_count(hw, vif)) 6641 goto exit; 6642 6643 /* This was the last tdls peer in current vif */ 6644 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6645 WMI_TDLS_DISABLE); 6646 if (ret) { 6647 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6648 arvif->vdev_id, ret); 6649 } 6650 } else if (old_state == IEEE80211_STA_AUTH && 6651 new_state == IEEE80211_STA_ASSOC && 6652 (vif->type == NL80211_IFTYPE_AP || 6653 vif->type == NL80211_IFTYPE_MESH_POINT || 6654 vif->type == NL80211_IFTYPE_ADHOC)) { 6655 /* 6656 * New association. 6657 */ 6658 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", 6659 sta->addr); 6660 6661 ret = ath10k_station_assoc(ar, vif, sta, false); 6662 if (ret) 6663 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", 6664 sta->addr, arvif->vdev_id, ret); 6665 } else if (old_state == IEEE80211_STA_ASSOC && 6666 new_state == IEEE80211_STA_AUTHORIZED && 6667 sta->tdls) { 6668 /* 6669 * Tdls station authorized. 
6670 */ 6671 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n", 6672 sta->addr); 6673 6674 ret = ath10k_station_assoc(ar, vif, sta, false); 6675 if (ret) { 6676 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n", 6677 sta->addr, arvif->vdev_id, ret); 6678 goto exit; 6679 } 6680 6681 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6682 WMI_TDLS_PEER_STATE_CONNECTED); 6683 if (ret) 6684 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n", 6685 sta->addr, arvif->vdev_id, ret); 6686 } else if (old_state == IEEE80211_STA_ASSOC && 6687 new_state == IEEE80211_STA_AUTH && 6688 (vif->type == NL80211_IFTYPE_AP || 6689 vif->type == NL80211_IFTYPE_MESH_POINT || 6690 vif->type == NL80211_IFTYPE_ADHOC)) { 6691 /* 6692 * Disassociation. 6693 */ 6694 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", 6695 sta->addr); 6696 6697 ret = ath10k_station_disassoc(ar, vif, sta); 6698 if (ret) 6699 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", 6700 sta->addr, arvif->vdev_id, ret); 6701 } 6702 exit: 6703 mutex_unlock(&ar->conf_mutex); 6704 return ret; 6705 } 6706 6707 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, 6708 u16 ac, bool enable) 6709 { 6710 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6711 struct wmi_sta_uapsd_auto_trig_arg arg = {}; 6712 u32 prio = 0, acc = 0; 6713 u32 value = 0; 6714 int ret = 0; 6715 6716 lockdep_assert_held(&ar->conf_mutex); 6717 6718 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 6719 return 0; 6720 6721 switch (ac) { 6722 case IEEE80211_AC_VO: 6723 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 6724 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 6725 prio = 7; 6726 acc = 3; 6727 break; 6728 case IEEE80211_AC_VI: 6729 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 6730 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 6731 prio = 5; 6732 acc = 2; 6733 break; 6734 case IEEE80211_AC_BE: 6735 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 6736 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 6737 prio = 2; 6738 acc = 1; 6739 break; 6740 case IEEE80211_AC_BK: 6741 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 6742 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 6743 prio = 0; 6744 acc = 0; 6745 break; 6746 } 6747 6748 if (enable) 6749 arvif->u.sta.uapsd |= value; 6750 else 6751 arvif->u.sta.uapsd &= ~value; 6752 6753 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6754 WMI_STA_PS_PARAM_UAPSD, 6755 arvif->u.sta.uapsd); 6756 if (ret) { 6757 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); 6758 goto exit; 6759 } 6760 6761 if (arvif->u.sta.uapsd) 6762 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 6763 else 6764 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 6765 6766 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6767 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 6768 value); 6769 if (ret) 6770 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); 6771 6772 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 6773 if (ret) { 6774 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 6775 arvif->vdev_id, ret); 6776 return ret; 6777 } 6778 6779 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 6780 if (ret) { 6781 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 6782 arvif->vdev_id, ret); 6783 return ret; 6784 } 6785 6786 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || 6787 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { 6788 /* Only userspace can make an educated decision when to send 6789 * trigger frame. 
The following effectively disables u-UAPSD 6790 * autotrigger in firmware (which is enabled by default 6791 * provided the autotrigger service is available). 6792 */ 6793 6794 arg.wmm_ac = acc; 6795 arg.user_priority = prio; 6796 arg.service_interval = 0; 6797 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6798 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6799 6800 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, 6801 arvif->bssid, &arg, 1); 6802 if (ret) { 6803 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", 6804 ret); 6805 return ret; 6806 } 6807 } 6808 6809 exit: 6810 return ret; 6811 } 6812 6813 static int ath10k_conf_tx(struct ieee80211_hw *hw, 6814 struct ieee80211_vif *vif, u16 ac, 6815 const struct ieee80211_tx_queue_params *params) 6816 { 6817 struct ath10k *ar = hw->priv; 6818 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6819 struct wmi_wmm_params_arg *p = NULL; 6820 int ret; 6821 6822 mutex_lock(&ar->conf_mutex); 6823 6824 switch (ac) { 6825 case IEEE80211_AC_VO: 6826 p = &arvif->wmm_params.ac_vo; 6827 break; 6828 case IEEE80211_AC_VI: 6829 p = &arvif->wmm_params.ac_vi; 6830 break; 6831 case IEEE80211_AC_BE: 6832 p = &arvif->wmm_params.ac_be; 6833 break; 6834 case IEEE80211_AC_BK: 6835 p = &arvif->wmm_params.ac_bk; 6836 break; 6837 } 6838 6839 if (WARN_ON(!p)) { 6840 ret = -EINVAL; 6841 goto exit; 6842 } 6843 6844 p->cwmin = params->cw_min; 6845 p->cwmax = params->cw_max; 6846 p->aifs = params->aifs; 6847 6848 /* 6849 * The channel time duration programmed in the HW is in absolute 6850 * microseconds, while mac80211 gives the txop in units of 6851 * 32 microseconds. 6852 */ 6853 p->txop = params->txop * 32; 6854 6855 if (ar->wmi.ops->gen_vdev_wmm_conf) { 6856 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, 6857 &arvif->wmm_params); 6858 if (ret) { 6859 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", 6860 arvif->vdev_id, ret); 6861 goto exit; 6862 } 6863 } else { 6864 /* This won't work well with multi-interface cases but it's 6865 * better than nothing. 
6866 */ 6867 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); 6868 if (ret) { 6869 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); 6870 goto exit; 6871 } 6872 } 6873 6874 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 6875 if (ret) 6876 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); 6877 6878 exit: 6879 mutex_unlock(&ar->conf_mutex); 6880 return ret; 6881 } 6882 6883 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ) 6884 6885 static int ath10k_remain_on_channel(struct ieee80211_hw *hw, 6886 struct ieee80211_vif *vif, 6887 struct ieee80211_channel *chan, 6888 int duration, 6889 enum ieee80211_roc_type type) 6890 { 6891 struct ath10k *ar = hw->priv; 6892 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6893 struct wmi_start_scan_arg arg; 6894 int ret = 0; 6895 u32 scan_time_msec; 6896 6897 mutex_lock(&ar->conf_mutex); 6898 6899 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 6900 ret = -EBUSY; 6901 goto exit; 6902 } 6903 6904 spin_lock_bh(&ar->data_lock); 6905 switch (ar->scan.state) { 6906 case ATH10K_SCAN_IDLE: 6907 reinit_completion(&ar->scan.started); 6908 reinit_completion(&ar->scan.completed); 6909 reinit_completion(&ar->scan.on_channel); 6910 ar->scan.state = ATH10K_SCAN_STARTING; 6911 ar->scan.is_roc = true; 6912 ar->scan.vdev_id = arvif->vdev_id; 6913 ar->scan.roc_freq = chan->center_freq; 6914 ar->scan.roc_notify = true; 6915 ret = 0; 6916 break; 6917 case ATH10K_SCAN_STARTING: 6918 case ATH10K_SCAN_RUNNING: 6919 case ATH10K_SCAN_ABORTING: 6920 ret = -EBUSY; 6921 break; 6922 } 6923 spin_unlock_bh(&ar->data_lock); 6924 6925 if (ret) 6926 goto exit; 6927 6928 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; 6929 6930 memset(&arg, 0, sizeof(arg)); 6931 ath10k_wmi_start_scan_init(ar, &arg); 6932 arg.vdev_id = arvif->vdev_id; 6933 arg.scan_id = ATH10K_SCAN_ID; 6934 arg.n_channels = 1; 6935 arg.channels[0] = chan->center_freq; 6936 arg.dwell_time_active = scan_time_msec; 6937 arg.dwell_time_passive = scan_time_msec; 6938 arg.max_scan_time = scan_time_msec; 6939 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 6940 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; 6941 arg.burst_duration_ms = duration; 6942 6943 ret = ath10k_start_scan(ar, &arg); 6944 if (ret) { 6945 ath10k_warn(ar, "failed to start roc scan: %d\n", ret); 6946 spin_lock_bh(&ar->data_lock); 6947 ar->scan.state = ATH10K_SCAN_IDLE; 6948 spin_unlock_bh(&ar->data_lock); 6949 goto exit; 6950 } 6951 6952 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); 6953 if (ret == 0) { 6954 ath10k_warn(ar, "failed to switch to channel for roc scan\n"); 6955 6956 ret = ath10k_scan_stop(ar); 6957 if (ret) 6958 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 6959 6960 ret = -ETIMEDOUT; 6961 goto exit; 6962 } 6963 6964 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 6965 msecs_to_jiffies(duration)); 6966 6967 ret = 0; 6968 exit: 6969 mutex_unlock(&ar->conf_mutex); 6970 return ret; 6971 } 6972 6973 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) 6974 { 6975 struct ath10k *ar = hw->priv; 6976 6977 mutex_lock(&ar->conf_mutex); 6978 6979 spin_lock_bh(&ar->data_lock); 6980 ar->scan.roc_notify = false; 6981 spin_unlock_bh(&ar->data_lock); 6982 6983 ath10k_scan_abort(ar); 6984 6985 mutex_unlock(&ar->conf_mutex); 6986 6987 cancel_delayed_work_sync(&ar->scan.timeout); 6988 6989 return 0; 6990 } 6991 6992 /* 6993 * Both RTS and Fragmentation threshold are interface-specific 6994 * in ath10k, but device-specific in mac80211. 
6995 */ 6996 6997 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 6998 { 6999 struct ath10k *ar = hw->priv; 7000 struct ath10k_vif *arvif; 7001 int ret = 0; 7002 7003 mutex_lock(&ar->conf_mutex); 7004 list_for_each_entry(arvif, &ar->arvifs, list) { 7005 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", 7006 arvif->vdev_id, value); 7007 7008 ret = ath10k_mac_set_rts(arvif, value); 7009 if (ret) { 7010 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 7011 arvif->vdev_id, ret); 7012 break; 7013 } 7014 } 7015 mutex_unlock(&ar->conf_mutex); 7016 7017 return ret; 7018 } 7019 7020 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) 7021 { 7022 /* Even though there's a WMI enum for fragmentation threshold no known 7023 * firmware actually implements it. Moreover it is not possible to leave 7024 * frame fragmentation to mac80211 because firmware clears the "more 7025 * fragments" bit in frame control making it impossible for remote 7026 * devices to reassemble frames. 7027 * 7028 * Hence implement a dummy callback just to say fragmentation isn't 7029 * supported. This effectively prevents mac80211 from doing frame 7030 * fragmentation in software. 7031 */ 7032 return -EOPNOTSUPP; 7033 } 7034 7035 void ath10k_mac_wait_tx_complete(struct ath10k *ar) 7036 { 7037 bool skip; 7038 long time_left; 7039 7040 /* mac80211 doesn't care if we really xmit queued frames or not; 7041 * we'll collect those frames either way if we stop/delete vdevs 7042 */ 7043 7044 if (ar->state == ATH10K_STATE_WEDGED) 7045 return; 7046 7047 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({ 7048 bool empty; 7049 7050 spin_lock_bh(&ar->htt.tx_lock); 7051 empty = (ar->htt.num_pending_tx == 0); 7052 spin_unlock_bh(&ar->htt.tx_lock); 7053 7054 skip = (ar->state == ATH10K_STATE_WEDGED) || 7055 test_bit(ATH10K_FLAG_CRASH_FLUSH, 7056 &ar->dev_flags); 7057 7058 (empty || skip); 7059 }), ATH10K_FLUSH_TIMEOUT_HZ); 7060 7061 if (time_left == 0 || skip) 7062 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n", 7063 skip, ar->state, time_left); 7064 } 7065 7066 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 7067 u32 queues, bool drop) 7068 { 7069 struct ath10k *ar = hw->priv; 7070 struct ath10k_vif *arvif; 7071 u32 bitmap; 7072 7073 if (drop) { 7074 if (vif && vif->type == NL80211_IFTYPE_STATION) { 7075 bitmap = ~(1 << WMI_MGMT_TID); 7076 list_for_each_entry(arvif, &ar->arvifs, list) { 7077 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 7078 ath10k_wmi_peer_flush(ar, arvif->vdev_id, 7079 arvif->bssid, bitmap); 7080 } 7081 } 7082 return; 7083 } 7084 7085 mutex_lock(&ar->conf_mutex); 7086 ath10k_mac_wait_tx_complete(ar); 7087 mutex_unlock(&ar->conf_mutex); 7088 } 7089 7090 /* TODO: Implement this function properly 7091 * For now it is needed to reply to Probe Requests in IBSS mode. 7092 * Probably we need this information from FW. 7093 */ 7094 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw) 7095 { 7096 return 1; 7097 } 7098 7099 static void ath10k_reconfig_complete(struct ieee80211_hw *hw, 7100 enum ieee80211_reconfig_type reconfig_type) 7101 { 7102 struct ath10k *ar = hw->priv; 7103 7104 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) 7105 return; 7106 7107 mutex_lock(&ar->conf_mutex); 7108 7109 /* If the device failed to restart it will be in a different state, e.g.
7110 * ATH10K_STATE_WEDGED 7111 */ 7112 if (ar->state == ATH10K_STATE_RESTARTED) { 7113 ath10k_info(ar, "device successfully recovered\n"); 7114 ar->state = ATH10K_STATE_ON; 7115 ieee80211_wake_queues(ar->hw); 7116 } 7117 7118 mutex_unlock(&ar->conf_mutex); 7119 } 7120 7121 static void 7122 ath10k_mac_update_bss_chan_survey(struct ath10k *ar, 7123 struct ieee80211_channel *channel) 7124 { 7125 int ret; 7126 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; 7127 7128 lockdep_assert_held(&ar->conf_mutex); 7129 7130 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) || 7131 (ar->rx_channel != channel)) 7132 return; 7133 7134 if (ar->scan.state != ATH10K_SCAN_IDLE) { 7135 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n"); 7136 return; 7137 } 7138 7139 reinit_completion(&ar->bss_survey_done); 7140 7141 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type); 7142 if (ret) { 7143 ath10k_warn(ar, "failed to send pdev bss chan info request\n"); 7144 return; 7145 } 7146 7147 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); 7148 if (!ret) { 7149 ath10k_warn(ar, "bss channel survey timed out\n"); 7150 return; 7151 } 7152 } 7153 7154 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, 7155 struct survey_info *survey) 7156 { 7157 struct ath10k *ar = hw->priv; 7158 struct ieee80211_supported_band *sband; 7159 struct survey_info *ar_survey = &ar->survey[idx]; 7160 int ret = 0; 7161 7162 mutex_lock(&ar->conf_mutex); 7163 7164 sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; 7165 if (sband && idx >= sband->n_channels) { 7166 idx -= sband->n_channels; 7167 sband = NULL; 7168 } 7169 7170 if (!sband) 7171 sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; 7172 7173 if (!sband || idx >= sband->n_channels) { 7174 ret = -ENOENT; 7175 goto exit; 7176 } 7177 7178 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); 7179 7180 spin_lock_bh(&ar->data_lock); 7181 memcpy(survey, ar_survey, sizeof(*survey)); 7182 spin_unlock_bh(&ar->data_lock); 7183 7184 survey->channel = &sband->channels[idx]; 7185 7186 if (ar->rx_channel == survey->channel) 7187 survey->filled |= SURVEY_INFO_IN_USE; 7188 7189 exit: 7190 mutex_unlock(&ar->conf_mutex); 7191 return ret; 7192 } 7193 7194 static bool 7195 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, 7196 enum nl80211_band band, 7197 const struct cfg80211_bitrate_mask *mask, 7198 int *vht_num_rates) 7199 { 7200 int num_rates = 0; 7201 int i, tmp; 7202 7203 num_rates += hweight32(mask->control[band].legacy); 7204 7205 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 7206 num_rates += hweight8(mask->control[band].ht_mcs[i]); 7207 7208 *vht_num_rates = 0; 7209 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 7210 tmp = hweight16(mask->control[band].vht_mcs[i]); 7211 num_rates += tmp; 7212 *vht_num_rates += tmp; 7213 } 7214 7215 return num_rates == 1; 7216 } 7217 7218 static bool 7219 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, 7220 enum nl80211_band band, 7221 const struct cfg80211_bitrate_mask *mask, 7222 int *nss) 7223 { 7224 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 7225 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 7226 u8 ht_nss_mask = 0; 7227 u8 vht_nss_mask = 0; 7228 int i; 7229 7230 if (mask->control[band].legacy) 7231 return false; 7232 7233 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 7234 if (mask->control[band].ht_mcs[i] == 0) 7235 continue; 7236 else if 
(mask->control[band].ht_mcs[i] == 7237 sband->ht_cap.mcs.rx_mask[i]) 7238 ht_nss_mask |= BIT(i); 7239 else 7240 return false; 7241 } 7242 7243 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 7244 if (mask->control[band].vht_mcs[i] == 0) 7245 continue; 7246 else if (mask->control[band].vht_mcs[i] == 7247 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 7248 vht_nss_mask |= BIT(i); 7249 else 7250 return false; 7251 } 7252 7253 if (ht_nss_mask != vht_nss_mask) 7254 return false; 7255 7256 if (ht_nss_mask == 0) 7257 return false; 7258 7259 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 7260 return false; 7261 7262 *nss = fls(ht_nss_mask); 7263 7264 return true; 7265 } 7266 7267 static int 7268 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, 7269 enum nl80211_band band, 7270 const struct cfg80211_bitrate_mask *mask, 7271 u8 *rate, u8 *nss, bool vht_only) 7272 { 7273 int rate_idx; 7274 int i; 7275 u16 bitrate; 7276 u8 preamble; 7277 u8 hw_rate; 7278 7279 if (vht_only) 7280 goto next; 7281 7282 if (hweight32(mask->control[band].legacy) == 1) { 7283 rate_idx = ffs(mask->control[band].legacy) - 1; 7284 7285 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 7286 rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX; 7287 7288 hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value; 7289 bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate; 7290 7291 if (ath10k_mac_bitrate_is_cck(bitrate)) 7292 preamble = WMI_RATE_PREAMBLE_CCK; 7293 else 7294 preamble = WMI_RATE_PREAMBLE_OFDM; 7295 7296 *nss = 1; 7297 *rate = preamble << 6 | 7298 (*nss - 1) << 4 | 7299 hw_rate << 0; 7300 7301 return 0; 7302 } 7303 7304 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 7305 if (hweight8(mask->control[band].ht_mcs[i]) == 1) { 7306 *nss = i + 1; 7307 *rate = WMI_RATE_PREAMBLE_HT << 6 | 7308 (*nss - 1) << 4 | 7309 (ffs(mask->control[band].ht_mcs[i]) - 1); 7310 7311 return 0; 7312 } 7313 } 7314 7315 next: 7316 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 7317 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 7318 *nss = i + 1; 7319 *rate = WMI_RATE_PREAMBLE_VHT << 6 | 7320 (*nss - 1) << 4 | 7321 (ffs(mask->control[band].vht_mcs[i]) - 1); 7322 7323 return 0; 7324 } 7325 } 7326 7327 return -EINVAL; 7328 } 7329 7330 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, 7331 u8 rate, u8 nss, u8 sgi, u8 ldpc) 7332 { 7333 struct ath10k *ar = arvif->ar; 7334 u32 vdev_param; 7335 int ret; 7336 7337 lockdep_assert_held(&ar->conf_mutex); 7338 7339 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n", 7340 arvif->vdev_id, rate, nss, sgi); 7341 7342 vdev_param = ar->wmi.vdev_param->fixed_rate; 7343 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate); 7344 if (ret) { 7345 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", 7346 rate, ret); 7347 return ret; 7348 } 7349 7350 vdev_param = ar->wmi.vdev_param->nss; 7351 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss); 7352 if (ret) { 7353 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret); 7354 return ret; 7355 } 7356 7357 vdev_param = ar->wmi.vdev_param->sgi; 7358 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi); 7359 if (ret) { 7360 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret); 7361 return ret; 7362 } 7363 7364 vdev_param = ar->wmi.vdev_param->ldpc; 7365 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc); 7366 if (ret) { 7367 ath10k_warn(ar, "failed to set ldpc 
param %d: %d\n", ldpc, ret); 7368 return ret; 7369 } 7370 7371 return 0; 7372 } 7373 7374 static bool 7375 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, 7376 enum nl80211_band band, 7377 const struct cfg80211_bitrate_mask *mask, 7378 bool allow_pfr) 7379 { 7380 int i; 7381 u16 vht_mcs; 7382 7383 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible 7384 * to express all VHT MCS rate masks. Effectively only the following 7385 * ranges can be used: none, 0-7, 0-8 and 0-9. 7386 */ 7387 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 7388 vht_mcs = mask->control[band].vht_mcs[i]; 7389 7390 switch (vht_mcs) { 7391 case 0: 7392 case BIT(8) - 1: 7393 case BIT(9) - 1: 7394 case BIT(10) - 1: 7395 break; 7396 default: 7397 if (!allow_pfr) 7398 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n"); 7399 return false; 7400 } 7401 } 7402 7403 return true; 7404 } 7405 7406 static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar, 7407 struct ath10k_vif *arvif, 7408 struct ieee80211_sta *sta) 7409 { 7410 int err; 7411 u8 rate = arvif->vht_pfr; 7412 7413 /* skip non-VHT and multiple-rate peers */ 7414 if (!sta->vht_cap.vht_supported || arvif->vht_num_rates != 1) 7415 return false; 7416 7417 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 7418 WMI_PEER_PARAM_FIXED_RATE, rate); 7419 if (err) 7420 ath10k_warn(ar, "failed to enable STA %pM peer fixed rate: %d\n", 7421 sta->addr, err); 7422 7423 return true; 7424 } 7425 7426 static void ath10k_mac_set_bitrate_mask_iter(void *data, 7427 struct ieee80211_sta *sta) 7428 { 7429 struct ath10k_vif *arvif = data; 7430 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7431 struct ath10k *ar = arvif->ar; 7432 7433 if (arsta->arvif != arvif) 7434 return; 7435 7436 if (ath10k_mac_set_vht_bitrate_mask_fixup(ar, arvif, sta)) 7437 return; 7438 7439 spin_lock_bh(&ar->data_lock); 7440 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 7441 spin_unlock_bh(&ar->data_lock); 7442 7443 ieee80211_queue_work(ar->hw, &arsta->update_wk); 7444 } 7445 7446 static void ath10k_mac_clr_bitrate_mask_iter(void *data, 7447 struct ieee80211_sta *sta) 7448 { 7449 struct ath10k_vif *arvif = data; 7450 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7451 struct ath10k *ar = arvif->ar; 7452 int err; 7453 7454 /* clear vht peers only */ 7455 if (arsta->arvif != arvif || !sta->vht_cap.vht_supported) 7456 return; 7457 7458 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 7459 WMI_PEER_PARAM_FIXED_RATE, 7460 WMI_FIXED_RATE_NONE); 7461 if (err) 7462 ath10k_warn(ar, "failed to clear STA %pM peer fixed rate: %d\n", 7463 sta->addr, err); 7464 } 7465 7466 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 7467 struct ieee80211_vif *vif, 7468 const struct cfg80211_bitrate_mask *mask) 7469 { 7470 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7471 struct cfg80211_chan_def def; 7472 struct ath10k *ar = arvif->ar; 7473 enum nl80211_band band; 7474 const u8 *ht_mcs_mask; 7475 const u16 *vht_mcs_mask; 7476 u8 rate; 7477 u8 nss; 7478 u8 sgi; 7479 u8 ldpc; 7480 int single_nss; 7481 int ret; 7482 int vht_num_rates, allow_pfr; 7483 u8 vht_pfr; 7484 bool update_bitrate_mask = true; 7485 7486 if (ath10k_mac_vif_chan(vif, &def)) 7487 return -EPERM; 7488 7489 band = def.chan->band; 7490 ht_mcs_mask = mask->control[band].ht_mcs; 7491 vht_mcs_mask = mask->control[band].vht_mcs; 7492 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 7493 7494 sgi = mask->control[band].gi; 7495 if (sgi ==
NL80211_TXRATE_FORCE_LGI) 7496 return -EINVAL; 7497 7498 allow_pfr = test_bit(ATH10K_FW_FEATURE_PEER_FIXED_RATE, 7499 ar->normal_mode_fw.fw_file.fw_features); 7500 if (allow_pfr) { 7501 mutex_lock(&ar->conf_mutex); 7502 ieee80211_iterate_stations_atomic(ar->hw, 7503 ath10k_mac_clr_bitrate_mask_iter, 7504 arvif); 7505 mutex_unlock(&ar->conf_mutex); 7506 } 7507 7508 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask, 7509 &vht_num_rates)) { 7510 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, 7511 &rate, &nss, 7512 false); 7513 if (ret) { 7514 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n", 7515 arvif->vdev_id, ret); 7516 return ret; 7517 } 7518 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask, 7519 &single_nss)) { 7520 rate = WMI_FIXED_RATE_NONE; 7521 nss = single_nss; 7522 } else { 7523 rate = WMI_FIXED_RATE_NONE; 7524 nss = min(ar->num_rf_chains, 7525 max(ath10k_mac_max_ht_nss(ht_mcs_mask), 7526 ath10k_mac_max_vht_nss(vht_mcs_mask))); 7527 7528 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask, 7529 allow_pfr)) { 7530 u8 vht_nss; 7531 7532 if (!allow_pfr || vht_num_rates != 1) 7533 return -EINVAL; 7534 7535 /* If we reach here, the firmware supports peer fixed rate and 7536 * there is a single VHT rate; don't update the vif bitrate_mask, 7537 * as the rate applies only to a specific peer. 7538 */ 7539 ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, 7540 &vht_pfr, 7541 &vht_nss, 7542 true); 7543 update_bitrate_mask = false; 7544 } else { 7545 vht_pfr = 0; 7546 } 7547 7548 mutex_lock(&ar->conf_mutex); 7549 7550 if (update_bitrate_mask) 7551 arvif->bitrate_mask = *mask; 7552 arvif->vht_num_rates = vht_num_rates; 7553 arvif->vht_pfr = vht_pfr; 7554 ieee80211_iterate_stations_atomic(ar->hw, 7555 ath10k_mac_set_bitrate_mask_iter, 7556 arvif); 7557 7558 mutex_unlock(&ar->conf_mutex); 7559 } 7560 7561 mutex_lock(&ar->conf_mutex); 7562 7563 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); 7564 if (ret) { 7565 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", 7566 arvif->vdev_id, ret); 7567 goto exit; 7568 } 7569 7570 exit: 7571 mutex_unlock(&ar->conf_mutex); 7572 7573 return ret; 7574 } 7575 7576 static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 7577 struct ieee80211_vif *vif, 7578 struct ieee80211_sta *sta, 7579 u32 changed) 7580 { 7581 struct ath10k *ar = hw->priv; 7582 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7583 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7584 struct ath10k_peer *peer; 7585 u32 bw, smps; 7586 7587 spin_lock_bh(&ar->data_lock); 7588 7589 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 7590 if (!peer) { 7591 spin_unlock_bh(&ar->data_lock); 7592 ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n", 7593 sta->addr, arvif->vdev_id); 7594 return; 7595 } 7596 7597 ath10k_dbg(ar, ATH10K_DBG_MAC, 7598 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", 7599 sta->addr, changed, sta->bandwidth, sta->rx_nss, 7600 sta->smps_mode); 7601 7602 if (changed & IEEE80211_RC_BW_CHANGED) { 7603 bw = WMI_PEER_CHWIDTH_20MHZ; 7604 7605 switch (sta->bandwidth) { 7606 case IEEE80211_STA_RX_BW_20: 7607 bw = WMI_PEER_CHWIDTH_20MHZ; 7608 break; 7609 case IEEE80211_STA_RX_BW_40: 7610 bw = WMI_PEER_CHWIDTH_40MHZ; 7611 break; 7612 case IEEE80211_STA_RX_BW_80: 7613 bw = WMI_PEER_CHWIDTH_80MHZ; 7614 break; 7615 case IEEE80211_STA_RX_BW_160: 7616 bw = WMI_PEER_CHWIDTH_160MHZ; 7617 break; 7618 default: 7619 ath10k_warn(ar, "Invalid bandwidth %d in rc update for
%pM\n", 7620 sta->bandwidth, sta->addr); 7621 bw = WMI_PEER_CHWIDTH_20MHZ; 7622 break; 7623 } 7624 7625 arsta->bw = bw; 7626 } 7627 7628 if (changed & IEEE80211_RC_NSS_CHANGED) 7629 arsta->nss = sta->rx_nss; 7630 7631 if (changed & IEEE80211_RC_SMPS_CHANGED) { 7632 smps = WMI_PEER_SMPS_PS_NONE; 7633 7634 switch (sta->smps_mode) { 7635 case IEEE80211_SMPS_AUTOMATIC: 7636 case IEEE80211_SMPS_OFF: 7637 smps = WMI_PEER_SMPS_PS_NONE; 7638 break; 7639 case IEEE80211_SMPS_STATIC: 7640 smps = WMI_PEER_SMPS_STATIC; 7641 break; 7642 case IEEE80211_SMPS_DYNAMIC: 7643 smps = WMI_PEER_SMPS_DYNAMIC; 7644 break; 7645 case IEEE80211_SMPS_NUM_MODES: 7646 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", 7647 sta->smps_mode, sta->addr); 7648 smps = WMI_PEER_SMPS_PS_NONE; 7649 break; 7650 } 7651 7652 arsta->smps = smps; 7653 } 7654 7655 arsta->changed |= changed; 7656 7657 spin_unlock_bh(&ar->data_lock); 7658 7659 ieee80211_queue_work(hw, &arsta->update_wk); 7660 } 7661 7662 static void ath10k_offset_tsf(struct ieee80211_hw *hw, 7663 struct ieee80211_vif *vif, s64 tsf_offset) 7664 { 7665 struct ath10k *ar = hw->priv; 7666 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7667 u32 offset, vdev_param; 7668 int ret; 7669 7670 if (tsf_offset < 0) { 7671 vdev_param = ar->wmi.vdev_param->dec_tsf; 7672 offset = -tsf_offset; 7673 } else { 7674 vdev_param = ar->wmi.vdev_param->inc_tsf; 7675 offset = tsf_offset; 7676 } 7677 7678 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 7679 vdev_param, offset); 7680 7681 if (ret && ret != -EOPNOTSUPP) 7682 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n", 7683 offset, vdev_param, ret); 7684 } 7685 7686 static int ath10k_ampdu_action(struct ieee80211_hw *hw, 7687 struct ieee80211_vif *vif, 7688 struct ieee80211_ampdu_params *params) 7689 { 7690 struct ath10k *ar = hw->priv; 7691 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7692 struct ieee80211_sta *sta = params->sta; 7693 enum ieee80211_ampdu_mlme_action action = params->action; 7694 u16 tid = params->tid; 7695 7696 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", 7697 arvif->vdev_id, sta->addr, tid, action); 7698 7699 switch (action) { 7700 case IEEE80211_AMPDU_RX_START: 7701 case IEEE80211_AMPDU_RX_STOP: 7702 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session 7703 * creation/removal. Do we need to verify this? 7704 */ 7705 return 0; 7706 case IEEE80211_AMPDU_TX_START: 7707 case IEEE80211_AMPDU_TX_STOP_CONT: 7708 case IEEE80211_AMPDU_TX_STOP_FLUSH: 7709 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 7710 case IEEE80211_AMPDU_TX_OPERATIONAL: 7711 /* Firmware offloads Tx aggregation entirely so deny mac80211 7712 * Tx aggregation requests. 7713 */ 7714 return -EOPNOTSUPP; 7715 } 7716 7717 return -EINVAL; 7718 } 7719 7720 static void 7721 ath10k_mac_update_rx_channel(struct ath10k *ar, 7722 struct ieee80211_chanctx_conf *ctx, 7723 struct ieee80211_vif_chanctx_switch *vifs, 7724 int n_vifs) 7725 { 7726 struct cfg80211_chan_def *def = NULL; 7727 7728 /* Both locks are required because ar->rx_channel is modified. This 7729 * allows readers to hold either lock. 7730 */ 7731 lockdep_assert_held(&ar->conf_mutex); 7732 lockdep_assert_held(&ar->data_lock); 7733 7734 WARN_ON(ctx && vifs); 7735 WARN_ON(vifs && !n_vifs); 7736 7737 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are 7738 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each 7739 * ppdu on Rx may reduce performance on low-end systems. 
It should be 7740 * possible to make tables/hashmaps to speed the lookup up (be wary of 7741 * CPU data cache lines though regarding sizes) but to keep the initial 7742 * implementation simple and less intrusive fall back to the slow lookup 7743 * only for multi-channel cases. Single-channel cases will keep using 7744 * the old channel derivation and thus performance should not be 7745 * affected much. 7746 */ 7747 rcu_read_lock(); 7748 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) { 7749 ieee80211_iter_chan_contexts_atomic(ar->hw, 7750 ath10k_mac_get_any_chandef_iter, 7751 &def); 7752 7753 if (vifs) 7754 def = &vifs[0].new_ctx->def; 7755 7756 ar->rx_channel = def->chan; 7757 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) || 7758 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) { 7759 /* During driver restart due to firmware assert, since mac80211 7760 * already has a valid channel context for the given radio, channel 7761 * context iteration returns num_chanctx > 0. So fix rx_channel 7762 * when restart is in progress. 7763 */ 7764 ar->rx_channel = ctx->def.chan; 7765 } else { 7766 ar->rx_channel = NULL; 7767 } 7768 rcu_read_unlock(); 7769 } 7770 7771 static void 7772 ath10k_mac_update_vif_chan(struct ath10k *ar, 7773 struct ieee80211_vif_chanctx_switch *vifs, 7774 int n_vifs) 7775 { 7776 struct ath10k_vif *arvif; 7777 int ret; 7778 int i; 7779 7780 lockdep_assert_held(&ar->conf_mutex); 7781 7782 /* First stop monitor interface. Some FW versions crash if there's a 7783 * lone monitor interface. 7784 */ 7785 if (ar->monitor_started) 7786 ath10k_monitor_stop(ar); 7787 7788 for (i = 0; i < n_vifs; i++) { 7789 arvif = (void *)vifs[i].vif->drv_priv; 7790 7791 ath10k_dbg(ar, ATH10K_DBG_MAC, 7792 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n", 7793 arvif->vdev_id, 7794 vifs[i].old_ctx->def.chan->center_freq, 7795 vifs[i].new_ctx->def.chan->center_freq, 7796 vifs[i].old_ctx->def.width, 7797 vifs[i].new_ctx->def.width); 7798 7799 if (WARN_ON(!arvif->is_started)) 7800 continue; 7801 7802 if (WARN_ON(!arvif->is_up)) 7803 continue; 7804 7805 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7806 if (ret) { 7807 ath10k_warn(ar, "failed to down vdev %d: %d\n", 7808 arvif->vdev_id, ret); 7809 continue; 7810 } 7811 } 7812 7813 /* All relevant vdevs are downed and associated channel resources 7814 * should be available for the channel switch now.
7815 */ 7816 7817 spin_lock_bh(&ar->data_lock); 7818 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs); 7819 spin_unlock_bh(&ar->data_lock); 7820 7821 for (i = 0; i < n_vifs; i++) { 7822 arvif = (void *)vifs[i].vif->drv_priv; 7823 7824 if (WARN_ON(!arvif->is_started)) 7825 continue; 7826 7827 if (WARN_ON(!arvif->is_up)) 7828 continue; 7829 7830 ret = ath10k_mac_setup_bcn_tmpl(arvif); 7831 if (ret) 7832 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 7833 ret); 7834 7835 ret = ath10k_mac_setup_prb_tmpl(arvif); 7836 if (ret) 7837 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 7838 ret); 7839 7840 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); 7841 if (ret) { 7842 ath10k_warn(ar, "failed to restart vdev %d: %d\n", 7843 arvif->vdev_id, ret); 7844 continue; 7845 } 7846 7847 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 7848 arvif->bssid); 7849 if (ret) { 7850 ath10k_warn(ar, "failed to bring vdev up %d: %d\n", 7851 arvif->vdev_id, ret); 7852 continue; 7853 } 7854 } 7855 7856 ath10k_monitor_recalc(ar); 7857 } 7858 7859 static int 7860 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, 7861 struct ieee80211_chanctx_conf *ctx) 7862 { 7863 struct ath10k *ar = hw->priv; 7864 7865 ath10k_dbg(ar, ATH10K_DBG_MAC, 7866 "mac chanctx add freq %hu width %d ptr %pK\n", 7867 ctx->def.chan->center_freq, ctx->def.width, ctx); 7868 7869 mutex_lock(&ar->conf_mutex); 7870 7871 spin_lock_bh(&ar->data_lock); 7872 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0); 7873 spin_unlock_bh(&ar->data_lock); 7874 7875 ath10k_recalc_radar_detection(ar); 7876 ath10k_monitor_recalc(ar); 7877 7878 mutex_unlock(&ar->conf_mutex); 7879 7880 return 0; 7881 } 7882 7883 static void 7884 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 7885 struct ieee80211_chanctx_conf *ctx) 7886 { 7887 struct ath10k *ar = hw->priv; 7888 7889 ath10k_dbg(ar, ATH10K_DBG_MAC, 7890 "mac chanctx remove freq %hu width %d ptr %pK\n", 7891 ctx->def.chan->center_freq, ctx->def.width, ctx); 7892 7893 mutex_lock(&ar->conf_mutex); 7894 7895 spin_lock_bh(&ar->data_lock); 7896 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0); 7897 spin_unlock_bh(&ar->data_lock); 7898 7899 ath10k_recalc_radar_detection(ar); 7900 ath10k_monitor_recalc(ar); 7901 7902 mutex_unlock(&ar->conf_mutex); 7903 } 7904 7905 struct ath10k_mac_change_chanctx_arg { 7906 struct ieee80211_chanctx_conf *ctx; 7907 struct ieee80211_vif_chanctx_switch *vifs; 7908 int n_vifs; 7909 int next_vif; 7910 }; 7911 7912 static void 7913 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, 7914 struct ieee80211_vif *vif) 7915 { 7916 struct ath10k_mac_change_chanctx_arg *arg = data; 7917 7918 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx) 7919 return; 7920 7921 arg->n_vifs++; 7922 } 7923 7924 static void 7925 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac, 7926 struct ieee80211_vif *vif) 7927 { 7928 struct ath10k_mac_change_chanctx_arg *arg = data; 7929 struct ieee80211_chanctx_conf *ctx; 7930 7931 ctx = rcu_access_pointer(vif->chanctx_conf); 7932 if (ctx != arg->ctx) 7933 return; 7934 7935 if (WARN_ON(arg->next_vif == arg->n_vifs)) 7936 return; 7937 7938 arg->vifs[arg->next_vif].vif = vif; 7939 arg->vifs[arg->next_vif].old_ctx = ctx; 7940 arg->vifs[arg->next_vif].new_ctx = ctx; 7941 arg->next_vif++; 7942 } 7943 7944 static void 7945 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, 7946 struct ieee80211_chanctx_conf *ctx, 7947 u32 changed) 7948 { 7949 struct ath10k *ar = hw->priv; 7950 struct ath10k_mac_change_chanctx_arg arg = 
{ .ctx = ctx }; 7951 7952 mutex_lock(&ar->conf_mutex); 7953 7954 ath10k_dbg(ar, ATH10K_DBG_MAC, 7955 "mac chanctx change freq %hu width %d ptr %pK changed %x\n", 7956 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 7957 7958 /* This shouldn't really happen because channel switching should use 7959 * switch_vif_chanctx(). 7960 */ 7961 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 7962 goto unlock; 7963 7964 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) { 7965 ieee80211_iterate_active_interfaces_atomic( 7966 hw, 7967 IEEE80211_IFACE_ITER_NORMAL, 7968 ath10k_mac_change_chanctx_cnt_iter, 7969 &arg); 7970 if (arg.n_vifs == 0) 7971 goto radar; 7972 7973 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), 7974 GFP_KERNEL); 7975 if (!arg.vifs) 7976 goto radar; 7977 7978 ieee80211_iterate_active_interfaces_atomic( 7979 hw, 7980 IEEE80211_IFACE_ITER_NORMAL, 7981 ath10k_mac_change_chanctx_fill_iter, 7982 &arg); 7983 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); 7984 kfree(arg.vifs); 7985 } 7986 7987 radar: 7988 ath10k_recalc_radar_detection(ar); 7989 7990 /* FIXME: How to configure Rx chains properly? */ 7991 7992 /* No other actions are actually necessary. Firmware maintains channel 7993 * definitions per vdev internally and there's no host-side channel 7994 * context abstraction to configure, e.g. channel width. 7995 */ 7996 7997 unlock: 7998 mutex_unlock(&ar->conf_mutex); 7999 } 8000 8001 static int 8002 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 8003 struct ieee80211_vif *vif, 8004 struct ieee80211_chanctx_conf *ctx) 8005 { 8006 struct ath10k *ar = hw->priv; 8007 struct ath10k_vif *arvif = (void *)vif->drv_priv; 8008 int ret; 8009 8010 mutex_lock(&ar->conf_mutex); 8011 8012 ath10k_dbg(ar, ATH10K_DBG_MAC, 8013 "mac chanctx assign ptr %pK vdev_id %i\n", 8014 ctx, arvif->vdev_id); 8015 8016 if (WARN_ON(arvif->is_started)) { 8017 mutex_unlock(&ar->conf_mutex); 8018 return -EBUSY; 8019 } 8020 8021 ret = ath10k_vdev_start(arvif, &ctx->def); 8022 if (ret) { 8023 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n", 8024 arvif->vdev_id, vif->addr, 8025 ctx->def.chan->center_freq, ret); 8026 goto err; 8027 } 8028 8029 arvif->is_started = true; 8030 8031 ret = ath10k_mac_vif_setup_ps(arvif); 8032 if (ret) { 8033 ath10k_warn(ar, "failed to update vdev %i ps: %d\n", 8034 arvif->vdev_id, ret); 8035 goto err_stop; 8036 } 8037 8038 if (vif->type == NL80211_IFTYPE_MONITOR) { 8039 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); 8040 if (ret) { 8041 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n", 8042 arvif->vdev_id, ret); 8043 goto err_stop; 8044 } 8045 8046 arvif->is_up = true; 8047 } 8048 8049 if (ath10k_mac_can_set_cts_prot(arvif)) { 8050 ret = ath10k_mac_set_cts_prot(arvif); 8051 if (ret) 8052 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 8053 arvif->vdev_id, ret); 8054 } 8055 8056 if (ath10k_peer_stats_enabled(ar) && 8057 ar->hw_params.tx_stats_over_pktlog) { 8058 ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS; 8059 ret = ath10k_wmi_pdev_pktlog_enable(ar, 8060 ar->pktlog_filter); 8061 if (ret) { 8062 ath10k_warn(ar, "failed to enable pktlog %d\n", ret); 8063 goto err_stop; 8064 } 8065 } 8066 8067 mutex_unlock(&ar->conf_mutex); 8068 return 0; 8069 8070 err_stop: 8071 ath10k_vdev_stop(arvif); 8072 arvif->is_started = false; 8073 ath10k_mac_vif_setup_ps(arvif); 8074 8075 err: 8076 mutex_unlock(&ar->conf_mutex); 8077 return ret; 8078 } 8079 8080 static void 8081 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw 
*hw, 8082 struct ieee80211_vif *vif, 8083 struct ieee80211_chanctx_conf *ctx) 8084 { 8085 struct ath10k *ar = hw->priv; 8086 struct ath10k_vif *arvif = (void *)vif->drv_priv; 8087 int ret; 8088 8089 mutex_lock(&ar->conf_mutex); 8090 8091 ath10k_dbg(ar, ATH10K_DBG_MAC, 8092 "mac chanctx unassign ptr %pK vdev_id %i\n", 8093 ctx, arvif->vdev_id); 8094 8095 WARN_ON(!arvif->is_started); 8096 8097 if (vif->type == NL80211_IFTYPE_MONITOR) { 8098 WARN_ON(!arvif->is_up); 8099 8100 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 8101 if (ret) 8102 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n", 8103 arvif->vdev_id, ret); 8104 8105 arvif->is_up = false; 8106 } 8107 8108 ret = ath10k_vdev_stop(arvif); 8109 if (ret) 8110 ath10k_warn(ar, "failed to stop vdev %i: %d\n", 8111 arvif->vdev_id, ret); 8112 8113 arvif->is_started = false; 8114 8115 mutex_unlock(&ar->conf_mutex); 8116 } 8117 8118 static int 8119 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 8120 struct ieee80211_vif_chanctx_switch *vifs, 8121 int n_vifs, 8122 enum ieee80211_chanctx_switch_mode mode) 8123 { 8124 struct ath10k *ar = hw->priv; 8125 8126 mutex_lock(&ar->conf_mutex); 8127 8128 ath10k_dbg(ar, ATH10K_DBG_MAC, 8129 "mac chanctx switch n_vifs %d mode %d\n", 8130 n_vifs, mode); 8131 ath10k_mac_update_vif_chan(ar, vifs, n_vifs); 8132 8133 mutex_unlock(&ar->conf_mutex); 8134 return 0; 8135 } 8136 8137 static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, 8138 struct ieee80211_vif *vif, 8139 struct ieee80211_sta *sta) 8140 { 8141 struct ath10k *ar; 8142 struct ath10k_peer *peer; 8143 8144 ar = hw->priv; 8145 8146 list_for_each_entry(peer, &ar->peers, list) 8147 if (peer->sta == sta) 8148 peer->removed = true; 8149 } 8150 8151 static void ath10k_sta_statistics(struct ieee80211_hw *hw, 8152 struct ieee80211_vif *vif, 8153 struct ieee80211_sta *sta, 8154 struct station_info *sinfo) 8155 { 8156 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 8157 struct ath10k *ar = arsta->arvif->ar; 8158 8159 if (!ath10k_peer_stats_enabled(ar)) 8160 return; 8161 8162 sinfo->rx_duration = arsta->rx_duration; 8163 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); 8164 8165 if (!arsta->txrate.legacy && !arsta->txrate.nss) 8166 return; 8167 8168 if (arsta->txrate.legacy) { 8169 sinfo->txrate.legacy = arsta->txrate.legacy; 8170 } else { 8171 sinfo->txrate.mcs = arsta->txrate.mcs; 8172 sinfo->txrate.nss = arsta->txrate.nss; 8173 sinfo->txrate.bw = arsta->txrate.bw; 8174 } 8175 sinfo->txrate.flags = arsta->txrate.flags; 8176 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 8177 } 8178 8179 static const struct ieee80211_ops ath10k_ops = { 8180 .tx = ath10k_mac_op_tx, 8181 .wake_tx_queue = ath10k_mac_op_wake_tx_queue, 8182 .start = ath10k_start, 8183 .stop = ath10k_stop, 8184 .config = ath10k_config, 8185 .add_interface = ath10k_add_interface, 8186 .remove_interface = ath10k_remove_interface, 8187 .configure_filter = ath10k_configure_filter, 8188 .bss_info_changed = ath10k_bss_info_changed, 8189 .set_coverage_class = ath10k_mac_op_set_coverage_class, 8190 .hw_scan = ath10k_hw_scan, 8191 .cancel_hw_scan = ath10k_cancel_hw_scan, 8192 .set_key = ath10k_set_key, 8193 .set_default_unicast_key = ath10k_set_default_unicast_key, 8194 .sta_state = ath10k_sta_state, 8195 .sta_set_txpwr = ath10k_sta_set_txpwr, 8196 .conf_tx = ath10k_conf_tx, 8197 .remain_on_channel = ath10k_remain_on_channel, 8198 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, 8199 .set_rts_threshold = ath10k_set_rts_threshold, 8200 
.set_frag_threshold = ath10k_mac_op_set_frag_threshold, 8201 .flush = ath10k_flush, 8202 .tx_last_beacon = ath10k_tx_last_beacon, 8203 .set_antenna = ath10k_set_antenna, 8204 .get_antenna = ath10k_get_antenna, 8205 .reconfig_complete = ath10k_reconfig_complete, 8206 .get_survey = ath10k_get_survey, 8207 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, 8208 .sta_rc_update = ath10k_sta_rc_update, 8209 .offset_tsf = ath10k_offset_tsf, 8210 .ampdu_action = ath10k_ampdu_action, 8211 .get_et_sset_count = ath10k_debug_get_et_sset_count, 8212 .get_et_stats = ath10k_debug_get_et_stats, 8213 .get_et_strings = ath10k_debug_get_et_strings, 8214 .add_chanctx = ath10k_mac_op_add_chanctx, 8215 .remove_chanctx = ath10k_mac_op_remove_chanctx, 8216 .change_chanctx = ath10k_mac_op_change_chanctx, 8217 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, 8218 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, 8219 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, 8220 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, 8221 .sta_statistics = ath10k_sta_statistics, 8222 8223 CFG80211_TESTMODE_CMD(ath10k_tm_cmd) 8224 8225 #ifdef CONFIG_PM 8226 .suspend = ath10k_wow_op_suspend, 8227 .resume = ath10k_wow_op_resume, 8228 .set_wakeup = ath10k_wow_op_set_wakeup, 8229 #endif 8230 #ifdef CONFIG_MAC80211_DEBUGFS 8231 .sta_add_debugfs = ath10k_sta_add_debugfs, 8232 #endif 8233 }; 8234 8235 #define CHAN2G(_channel, _freq, _flags) { \ 8236 .band = NL80211_BAND_2GHZ, \ 8237 .hw_value = (_channel), \ 8238 .center_freq = (_freq), \ 8239 .flags = (_flags), \ 8240 .max_antenna_gain = 0, \ 8241 .max_power = 30, \ 8242 } 8243 8244 #define CHAN5G(_channel, _freq, _flags) { \ 8245 .band = NL80211_BAND_5GHZ, \ 8246 .hw_value = (_channel), \ 8247 .center_freq = (_freq), \ 8248 .flags = (_flags), \ 8249 .max_antenna_gain = 0, \ 8250 .max_power = 30, \ 8251 } 8252 8253 static const struct ieee80211_channel ath10k_2ghz_channels[] = { 8254 CHAN2G(1, 2412, 0), 8255 CHAN2G(2, 2417, 0), 8256 CHAN2G(3, 2422, 0), 8257 CHAN2G(4, 2427, 0), 8258 CHAN2G(5, 2432, 0), 8259 CHAN2G(6, 2437, 0), 8260 CHAN2G(7, 2442, 0), 8261 CHAN2G(8, 2447, 0), 8262 CHAN2G(9, 2452, 0), 8263 CHAN2G(10, 2457, 0), 8264 CHAN2G(11, 2462, 0), 8265 CHAN2G(12, 2467, 0), 8266 CHAN2G(13, 2472, 0), 8267 CHAN2G(14, 2484, 0), 8268 }; 8269 8270 static const struct ieee80211_channel ath10k_5ghz_channels[] = { 8271 CHAN5G(36, 5180, 0), 8272 CHAN5G(40, 5200, 0), 8273 CHAN5G(44, 5220, 0), 8274 CHAN5G(48, 5240, 0), 8275 CHAN5G(52, 5260, 0), 8276 CHAN5G(56, 5280, 0), 8277 CHAN5G(60, 5300, 0), 8278 CHAN5G(64, 5320, 0), 8279 CHAN5G(100, 5500, 0), 8280 CHAN5G(104, 5520, 0), 8281 CHAN5G(108, 5540, 0), 8282 CHAN5G(112, 5560, 0), 8283 CHAN5G(116, 5580, 0), 8284 CHAN5G(120, 5600, 0), 8285 CHAN5G(124, 5620, 0), 8286 CHAN5G(128, 5640, 0), 8287 CHAN5G(132, 5660, 0), 8288 CHAN5G(136, 5680, 0), 8289 CHAN5G(140, 5700, 0), 8290 CHAN5G(144, 5720, 0), 8291 CHAN5G(149, 5745, 0), 8292 CHAN5G(153, 5765, 0), 8293 CHAN5G(157, 5785, 0), 8294 CHAN5G(161, 5805, 0), 8295 CHAN5G(165, 5825, 0), 8296 CHAN5G(169, 5845, 0), 8297 CHAN5G(173, 5865, 0), 8298 /* If you add more, you may need to change ATH10K_MAX_5G_CHAN */ 8299 /* And you will definitely need to change ATH10K_NUM_CHANS in core.h */ 8300 }; 8301 8302 struct ath10k *ath10k_mac_create(size_t priv_size) 8303 { 8304 struct ieee80211_hw *hw; 8305 struct ieee80211_ops *ops; 8306 struct ath10k *ar; 8307 8308 ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); 8309 if (!ops) 8310 return NULL; 8311 8312 hw = 
ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
	if (!hw) {
		kfree(ops);
		return NULL;
	}

	ar = hw->priv;
	ar->hw = hw;
	ar->ops = ops;

	return ar;
}

void ath10k_mac_destroy(struct ath10k *ar)
{
	struct ieee80211_ops *ops = ar->ops;

	ieee80211_free_hw(ar->hw);
	kfree(ops);
}

static const struct ieee80211_iface_limit ath10k_if_limits[] = {
	{
		.max = 8,
		.types = BIT(NL80211_IFTYPE_STATION)
			| BIT(NL80211_IFTYPE_P2P_CLIENT)
	},
	{
		.max = 3,
		.types = BIT(NL80211_IFTYPE_P2P_GO)
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE)
	},
	{
		.max = 7,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
};

static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
	{
		.max = 8,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION)
	},
};

static const struct ieee80211_iface_combination ath10k_if_comb[] = {
	{
		.limits = ath10k_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_if_limits),
		.max_interfaces = 8,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
	},
};

static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
	{
		.limits = ath10k_10x_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
		.max_interfaces = 8,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 1,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80),
#endif
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_ADHOC),
	},
};

/* FIXME: This is not thoroughly tested. These combinations may over- or
 * underestimate hw/fw capabilities.
8453 */ 8454 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { 8455 { 8456 .limits = ath10k_tlv_if_limit, 8457 .num_different_channels = 1, 8458 .max_interfaces = 4, 8459 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 8460 }, 8461 { 8462 .limits = ath10k_tlv_if_limit_ibss, 8463 .num_different_channels = 1, 8464 .max_interfaces = 2, 8465 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 8466 }, 8467 }; 8468 8469 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { 8470 { 8471 .limits = ath10k_tlv_if_limit, 8472 .num_different_channels = 1, 8473 .max_interfaces = 4, 8474 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 8475 }, 8476 { 8477 .limits = ath10k_tlv_qcs_if_limit, 8478 .num_different_channels = 2, 8479 .max_interfaces = 4, 8480 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit), 8481 }, 8482 { 8483 .limits = ath10k_tlv_if_limit_ibss, 8484 .num_different_channels = 1, 8485 .max_interfaces = 2, 8486 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 8487 }, 8488 }; 8489 8490 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = { 8491 { 8492 .max = 1, 8493 .types = BIT(NL80211_IFTYPE_STATION), 8494 }, 8495 { 8496 .max = 16, 8497 .types = BIT(NL80211_IFTYPE_AP) 8498 #ifdef CONFIG_MAC80211_MESH 8499 | BIT(NL80211_IFTYPE_MESH_POINT) 8500 #endif 8501 }, 8502 }; 8503 8504 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { 8505 { 8506 .limits = ath10k_10_4_if_limits, 8507 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), 8508 .max_interfaces = 16, 8509 .num_different_channels = 1, 8510 .beacon_int_infra_match = true, 8511 .beacon_int_min_gcd = 1, 8512 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 8513 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 8514 BIT(NL80211_CHAN_WIDTH_20) | 8515 BIT(NL80211_CHAN_WIDTH_40) | 8516 BIT(NL80211_CHAN_WIDTH_80), 8517 #endif 8518 }, 8519 }; 8520 8521 static const struct 8522 ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = { 8523 { 8524 .limits = ath10k_10_4_if_limits, 8525 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), 8526 .max_interfaces = 16, 8527 .num_different_channels = 1, 8528 .beacon_int_infra_match = true, 8529 .beacon_int_min_gcd = 100, 8530 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 8531 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 8532 BIT(NL80211_CHAN_WIDTH_20) | 8533 BIT(NL80211_CHAN_WIDTH_40) | 8534 BIT(NL80211_CHAN_WIDTH_80), 8535 #endif 8536 }, 8537 }; 8538 8539 static void ath10k_get_arvif_iter(void *data, u8 *mac, 8540 struct ieee80211_vif *vif) 8541 { 8542 struct ath10k_vif_iter *arvif_iter = data; 8543 struct ath10k_vif *arvif = (void *)vif->drv_priv; 8544 8545 if (arvif->vdev_id == arvif_iter->vdev_id) 8546 arvif_iter->arvif = arvif; 8547 } 8548 8549 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) 8550 { 8551 struct ath10k_vif_iter arvif_iter; 8552 u32 flags; 8553 8554 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); 8555 arvif_iter.vdev_id = vdev_id; 8556 8557 flags = IEEE80211_IFACE_ITER_RESUME_ALL; 8558 ieee80211_iterate_active_interfaces_atomic(ar->hw, 8559 flags, 8560 ath10k_get_arvif_iter, 8561 &arvif_iter); 8562 if (!arvif_iter.arvif) { 8563 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); 8564 return NULL; 8565 } 8566 8567 return arvif_iter.arvif; 8568 } 8569 8570 #define WRD_METHOD "WRDD" 8571 #define WRDD_WIFI (0x07) 8572 8573 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd) 8574 { 8575 union acpi_object *mcc_pkg; 8576 union acpi_object *domain_type; 8577 union acpi_object *mcc_value; 8578 
u32 i;

	if (wrdd->type != ACPI_TYPE_PACKAGE ||
	    wrdd->package.count < 2 ||
	    wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
	    wrdd->package.elements[0].integer.value != 0) {
		ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
		return 0;
	}

	for (i = 1; i < wrdd->package.count; ++i) {
		mcc_pkg = &wrdd->package.elements[i];

		if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
			continue;
		if (mcc_pkg->package.count < 2)
			continue;
		if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
		    mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
			continue;

		domain_type = &mcc_pkg->package.elements[0];
		if (domain_type->integer.value != WRDD_WIFI)
			continue;

		mcc_value = &mcc_pkg->package.elements[1];
		return mcc_value->integer.value;
	}
	return 0;
}

static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
{
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	u32 alpha2_code;
	char alpha2[3];

	root_handle = ACPI_HANDLE(ar->dev);
	if (!root_handle)
		return -EOPNOTSUPP;

	status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to get wrd method %d\n", status);
		return -EIO;
	}

	status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to call wrdd %d\n", status);
		return -EIO;
	}

	alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
	kfree(wrdd.pointer);
	if (!alpha2_code)
		return -EIO;

	alpha2[0] = (alpha2_code >> 8) & 0xff;
	alpha2[1] = (alpha2_code >> 0) & 0xff;
	alpha2[2] = '\0';

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);

	*rd = ath_regd_find_country_by_name(alpha2);
	if (*rd == 0xffff)
		return -EIO;

	*rd |= COUNTRY_ERD_FLAG;
	return 0;
}

static int ath10k_mac_init_rd(struct ath10k *ar)
{
	int ret;
	u16 rd;

	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
	if (ret) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "fallback to eeprom programmed regulatory settings\n");
		rd = ar->hw_eeprom_rd;
	}

	ar->ath_common.regulatory.current_rd = rd;
	return 0;
}

int ath10k_mac_register(struct ath10k *ar)
{
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,

		/* Do not add hardware supported ciphers before this line.
		 * Allow software encryption for all chips. Don't forget to
		 * update n_cipher_suites below.
		 */
		WLAN_CIPHER_SUITE_AES_CMAC,
		WLAN_CIPHER_SUITE_BIP_CMAC_256,
		WLAN_CIPHER_SUITE_BIP_GMAC_128,
		WLAN_CIPHER_SUITE_BIP_GMAC_256,

		/* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
		 * and CCMP-256 in hardware.
8691 */ 8692 WLAN_CIPHER_SUITE_GCMP, 8693 WLAN_CIPHER_SUITE_GCMP_256, 8694 WLAN_CIPHER_SUITE_CCMP_256, 8695 }; 8696 struct ieee80211_supported_band *band; 8697 void *channels; 8698 int ret; 8699 8700 if (!is_valid_ether_addr(ar->mac_addr)) { 8701 ath10k_warn(ar, "invalid MAC address; choosing random\n"); 8702 eth_random_addr(ar->mac_addr); 8703 } 8704 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); 8705 8706 SET_IEEE80211_DEV(ar->hw, ar->dev); 8707 8708 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) + 8709 ARRAY_SIZE(ath10k_5ghz_channels)) != 8710 ATH10K_NUM_CHANS); 8711 8712 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 8713 channels = kmemdup(ath10k_2ghz_channels, 8714 sizeof(ath10k_2ghz_channels), 8715 GFP_KERNEL); 8716 if (!channels) { 8717 ret = -ENOMEM; 8718 goto err_free; 8719 } 8720 8721 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 8722 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); 8723 band->channels = channels; 8724 8725 if (ar->hw_params.cck_rate_map_rev2) { 8726 band->n_bitrates = ath10k_g_rates_rev2_size; 8727 band->bitrates = ath10k_g_rates_rev2; 8728 } else { 8729 band->n_bitrates = ath10k_g_rates_size; 8730 band->bitrates = ath10k_g_rates; 8731 } 8732 8733 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; 8734 } 8735 8736 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 8737 channels = kmemdup(ath10k_5ghz_channels, 8738 sizeof(ath10k_5ghz_channels), 8739 GFP_KERNEL); 8740 if (!channels) { 8741 ret = -ENOMEM; 8742 goto err_free; 8743 } 8744 8745 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 8746 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels); 8747 band->channels = channels; 8748 band->n_bitrates = ath10k_a_rates_size; 8749 band->bitrates = ath10k_a_rates; 8750 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; 8751 } 8752 8753 wiphy_read_of_freq_limits(ar->hw->wiphy); 8754 ath10k_mac_setup_ht_vht_cap(ar); 8755 8756 ar->hw->wiphy->interface_modes = 8757 BIT(NL80211_IFTYPE_STATION) | 8758 BIT(NL80211_IFTYPE_AP) | 8759 BIT(NL80211_IFTYPE_MESH_POINT); 8760 8761 ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask; 8762 ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask; 8763 8764 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features)) 8765 ar->hw->wiphy->interface_modes |= 8766 BIT(NL80211_IFTYPE_P2P_DEVICE) | 8767 BIT(NL80211_IFTYPE_P2P_CLIENT) | 8768 BIT(NL80211_IFTYPE_P2P_GO); 8769 8770 ieee80211_hw_set(ar->hw, SIGNAL_DBM); 8771 8772 if (!test_bit(ATH10K_FW_FEATURE_NO_PS, 8773 ar->running_fw->fw_file.fw_features)) { 8774 ieee80211_hw_set(ar->hw, SUPPORTS_PS); 8775 ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); 8776 } 8777 8778 ieee80211_hw_set(ar->hw, MFP_CAPABLE); 8779 ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS); 8780 ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL); 8781 ieee80211_hw_set(ar->hw, AP_LINK_PS); 8782 ieee80211_hw_set(ar->hw, SPECTRUM_MGMT); 8783 ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); 8784 ieee80211_hw_set(ar->hw, CONNECTION_MONITOR); 8785 ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK); 8786 ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF); 8787 ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA); 8788 ieee80211_hw_set(ar->hw, QUEUE_CONTROL); 8789 ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG); 8790 ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK); 8791 8792 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 8793 ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); 8794 8795 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; 8796 ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 8797 8798 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) 
8799 ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; 8800 8801 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) { 8802 ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION); 8803 ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW); 8804 } 8805 8806 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; 8807 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; 8808 8809 if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) { 8810 ar->hw->wiphy->max_sched_scan_reqs = 1; 8811 ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; 8812 ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; 8813 ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; 8814 ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS; 8815 ar->hw->wiphy->max_sched_scan_plan_interval = 8816 WMI_PNO_MAX_SCHED_SCAN_PLAN_INT; 8817 ar->hw->wiphy->max_sched_scan_plan_iterations = 8818 WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS; 8819 } 8820 8821 ar->hw->vif_data_size = sizeof(struct ath10k_vif); 8822 ar->hw->sta_data_size = sizeof(struct ath10k_sta); 8823 ar->hw->txq_data_size = sizeof(struct ath10k_txq); 8824 8825 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; 8826 8827 if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) { 8828 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; 8829 8830 /* Firmware delivers WPS/P2P Probe Requests frames to driver so 8831 * that userspace (e.g. wpa_supplicant/hostapd) can generate 8832 * correct Probe Responses. This is more of a hack advert.. 8833 */ 8834 ar->hw->wiphy->probe_resp_offload |= 8835 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 8836 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 8837 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; 8838 } 8839 8840 if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) || 8841 test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) { 8842 ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 8843 if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map)) 8844 ieee80211_hw_set(ar->hw, TDLS_WIDER_BW); 8845 } 8846 8847 if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map)) 8848 ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA); 8849 8850 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 8851 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 8852 ar->hw->wiphy->max_remain_on_channel_duration = 5000; 8853 8854 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 8855 ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | 8856 NL80211_FEATURE_AP_SCAN; 8857 8858 ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; 8859 8860 ret = ath10k_wow_init(ar); 8861 if (ret) { 8862 ath10k_warn(ar, "failed to init wow: %d\n", ret); 8863 goto err_free; 8864 } 8865 8866 wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); 8867 wiphy_ext_feature_set(ar->hw->wiphy, 8868 NL80211_EXT_FEATURE_SET_SCAN_DWELL); 8869 8870 if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) || 8871 test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map)) 8872 wiphy_ext_feature_set(ar->hw->wiphy, 8873 NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); 8874 8875 if (ath10k_peer_stats_enabled(ar) || 8876 test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map)) 8877 wiphy_ext_feature_set(ar->hw->wiphy, 8878 NL80211_EXT_FEATURE_AIRTIME_FAIRNESS); 8879 8880 if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) 8881 wiphy_ext_feature_set(ar->hw->wiphy, 8882 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); 8883 8884 if (test_bit(WMI_SERVICE_TX_PWR_PER_PEER, ar->wmi.svc_map)) 8885 
wiphy_ext_feature_set(ar->hw->wiphy, 8886 NL80211_EXT_FEATURE_STA_TX_PWR); 8887 /* 8888 * on LL hardware queues are managed entirely by the FW 8889 * so we only advertise to mac we can do the queues thing 8890 */ 8891 ar->hw->queues = IEEE80211_MAX_QUEUES; 8892 8893 /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is 8894 * something that vdev_ids can't reach so that we don't stop the queue 8895 * accidentally. 8896 */ 8897 ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1; 8898 8899 switch (ar->running_fw->fw_file.wmi_op_version) { 8900 case ATH10K_FW_WMI_OP_VERSION_MAIN: 8901 ar->hw->wiphy->iface_combinations = ath10k_if_comb; 8902 ar->hw->wiphy->n_iface_combinations = 8903 ARRAY_SIZE(ath10k_if_comb); 8904 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); 8905 break; 8906 case ATH10K_FW_WMI_OP_VERSION_TLV: 8907 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { 8908 ar->hw->wiphy->iface_combinations = 8909 ath10k_tlv_qcs_if_comb; 8910 ar->hw->wiphy->n_iface_combinations = 8911 ARRAY_SIZE(ath10k_tlv_qcs_if_comb); 8912 } else { 8913 ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb; 8914 ar->hw->wiphy->n_iface_combinations = 8915 ARRAY_SIZE(ath10k_tlv_if_comb); 8916 } 8917 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); 8918 break; 8919 case ATH10K_FW_WMI_OP_VERSION_10_1: 8920 case ATH10K_FW_WMI_OP_VERSION_10_2: 8921 case ATH10K_FW_WMI_OP_VERSION_10_2_4: 8922 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb; 8923 ar->hw->wiphy->n_iface_combinations = 8924 ARRAY_SIZE(ath10k_10x_if_comb); 8925 break; 8926 case ATH10K_FW_WMI_OP_VERSION_10_4: 8927 ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb; 8928 ar->hw->wiphy->n_iface_combinations = 8929 ARRAY_SIZE(ath10k_10_4_if_comb); 8930 if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, 8931 ar->wmi.svc_map)) { 8932 ar->hw->wiphy->iface_combinations = 8933 ath10k_10_4_bcn_int_if_comb; 8934 ar->hw->wiphy->n_iface_combinations = 8935 ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb); 8936 } 8937 break; 8938 case ATH10K_FW_WMI_OP_VERSION_UNSET: 8939 case ATH10K_FW_WMI_OP_VERSION_MAX: 8940 WARN_ON(1); 8941 ret = -EINVAL; 8942 goto err_free; 8943 } 8944 8945 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 8946 ar->hw->netdev_features = NETIF_F_HW_CSUM; 8947 8948 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { 8949 /* Init ath dfs pattern detector */ 8950 ar->ath_common.debug_mask = ATH_DBG_DFS; 8951 ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common, 8952 NL80211_DFS_UNSET); 8953 8954 if (!ar->dfs_detector) 8955 ath10k_warn(ar, "failed to initialise DFS pattern detector\n"); 8956 } 8957 8958 ret = ath10k_mac_init_rd(ar); 8959 if (ret) { 8960 ath10k_err(ar, "failed to derive regdom: %d\n", ret); 8961 goto err_dfs_detector_exit; 8962 } 8963 8964 /* Disable set_coverage_class for chipsets that do not support it. */ 8965 if (!ar->hw_params.hw_ops->set_coverage_class) 8966 ar->ops->set_coverage_class = NULL; 8967 8968 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, 8969 ath10k_reg_notifier); 8970 if (ret) { 8971 ath10k_err(ar, "failed to initialise regulatory: %i\n", ret); 8972 goto err_dfs_detector_exit; 8973 } 8974 8975 if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) { 8976 ar->hw->wiphy->features |= 8977 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; 8978 } 8979 8980 ar->hw->wiphy->cipher_suites = cipher_suites; 8981 8982 /* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128 8983 * and GCMP-256 ciphers in hardware. 
 * Fetch number of ciphers supported
	 * from chip specific hw_param table.
	 */
	if (!ar->hw_params.n_cipher_suites ||
	    ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
		ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
			   ar->hw_params.n_cipher_suites);
		ar->hw_params.n_cipher_suites = 8;
	}
	ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ar->hw->weight_multiplier = ATH10K_AIRTIME_WEIGHT_MULTIPLIER;

	ret = ieee80211_register_hw(ar->hw);
	if (ret) {
		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	if (test_bit(WMI_SERVICE_PER_PACKET_SW_ENCRYPT, ar->wmi.svc_map)) {
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
		ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
	}

	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
		ret = regulatory_hint(ar->hw->wiphy,
				      ar->ath_common.regulatory.alpha2);
		if (ret)
			goto err_unregister;
	}

	return 0;

err_unregister:
	ieee80211_unregister_hw(ar->hw);

err_dfs_detector_exit:
	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

err_free:
	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
	return ret;
}

void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}
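
/* The block below is an illustrative sketch only; it is not part of the
 * driver proper and is not compiled in. It assumes the usual pairing of the
 * entry points defined in this file: ath10k_mac_create()/ath10k_mac_destroy()
 * for allocation and ath10k_mac_register()/ath10k_mac_unregister() for
 * mac80211 registration, roughly in the order the core code is expected to
 * drive them. The function name and the bus_priv_size parameter are
 * hypothetical; the real call sites live in core.c and carry more elaborate
 * error handling.
 */
#if 0
static int ath10k_mac_lifecycle_sketch(size_t bus_priv_size)
{
	struct ath10k *ar;
	int ret;

	/* Allocate ieee80211_hw plus bus-private data and the ops copy. */
	ar = ath10k_mac_create(bus_priv_size);
	if (!ar)
		return -ENOMEM;

	/* ... bus, core and firmware bring-up would happen here ... */

	/* Register bands, ops and wiphy features with mac80211. */
	ret = ath10k_mac_register(ar);
	if (ret) {
		ath10k_mac_destroy(ar);
		return ret;
	}

	/* ... normal operation ... */

	/* Teardown mirrors the setup order. */
	ath10k_mac_unregister(ar);
	ath10k_mac_destroy(ar);
	return 0;
}
#endif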