1 /* 2 * Copyright (c) 2005-2011 Atheros Communications Inc. 3 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 4 * Copyright (c) 2018, The Linux Foundation. All rights reserved. 5 * 6 * Permission to use, copy, modify, and/or distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "mac.h" 20 21 #include <net/mac80211.h> 22 #include <linux/etherdevice.h> 23 #include <linux/acpi.h> 24 25 #include "hif.h" 26 #include "core.h" 27 #include "debug.h" 28 #include "wmi.h" 29 #include "htt.h" 30 #include "txrx.h" 31 #include "testmode.h" 32 #include "wmi.h" 33 #include "wmi-tlv.h" 34 #include "wmi-ops.h" 35 #include "wow.h" 36 37 /*********/ 38 /* Rates */ 39 /*********/ 40 41 static struct ieee80211_rate ath10k_rates[] = { 42 { .bitrate = 10, 43 .hw_value = ATH10K_HW_RATE_CCK_LP_1M }, 44 { .bitrate = 20, 45 .hw_value = ATH10K_HW_RATE_CCK_LP_2M, 46 .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M, 47 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 48 { .bitrate = 55, 49 .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M, 50 .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M, 51 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 52 { .bitrate = 110, 53 .hw_value = ATH10K_HW_RATE_CCK_LP_11M, 54 .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M, 55 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 56 57 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, 58 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, 59 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, 60 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, 61 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, 62 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, 63 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, 64 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, 65 }; 66 67 static struct ieee80211_rate ath10k_rates_rev2[] = { 68 { .bitrate = 10, 69 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M }, 70 { .bitrate = 20, 71 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M, 72 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M, 73 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 74 { .bitrate = 55, 75 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M, 76 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M, 77 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 78 { .bitrate = 110, 79 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M, 80 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M, 81 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 82 83 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, 84 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, 85 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, 86 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, 87 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, 88 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, 89 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, 90 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, 91 }; 92 93 #define 
ATH10K_MAC_FIRST_OFDM_RATE_IDX 4 94 95 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX) 96 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \ 97 ATH10K_MAC_FIRST_OFDM_RATE_IDX) 98 #define ath10k_g_rates (ath10k_rates + 0) 99 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) 100 101 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) 102 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) 103 104 static bool ath10k_mac_bitrate_is_cck(int bitrate) 105 { 106 switch (bitrate) { 107 case 10: 108 case 20: 109 case 55: 110 case 110: 111 return true; 112 } 113 114 return false; 115 } 116 117 static u8 ath10k_mac_bitrate_to_rate(int bitrate) 118 { 119 return DIV_ROUND_UP(bitrate, 5) | 120 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0); 121 } 122 123 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, 124 u8 hw_rate, bool cck) 125 { 126 const struct ieee80211_rate *rate; 127 int i; 128 129 for (i = 0; i < sband->n_bitrates; i++) { 130 rate = &sband->bitrates[i]; 131 132 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck) 133 continue; 134 135 if (rate->hw_value == hw_rate) 136 return i; 137 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && 138 rate->hw_value_short == hw_rate) 139 return i; 140 } 141 142 return 0; 143 } 144 145 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, 146 u32 bitrate) 147 { 148 int i; 149 150 for (i = 0; i < sband->n_bitrates; i++) 151 if (sband->bitrates[i].bitrate == bitrate) 152 return i; 153 154 return 0; 155 } 156 157 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) 158 { 159 switch ((mcs_map >> (2 * nss)) & 0x3) { 160 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; 161 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; 162 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; 163 } 164 return 0; 165 } 166 167 static u32 168 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 169 { 170 int nss; 171 172 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) 173 if (ht_mcs_mask[nss]) 174 return nss + 1; 175 176 return 1; 177 } 178 179 static u32 180 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 181 { 182 int nss; 183 184 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) 185 if (vht_mcs_mask[nss]) 186 return nss + 1; 187 188 return 1; 189 } 190 191 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val) 192 { 193 enum wmi_host_platform_type platform_type; 194 int ret; 195 196 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map)) 197 platform_type = WMI_HOST_PLATFORM_LOW_PERF; 198 else 199 platform_type = WMI_HOST_PLATFORM_HIGH_PERF; 200 201 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val); 202 203 if (ret && ret != -EOPNOTSUPP) { 204 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret); 205 return ret; 206 } 207 208 return 0; 209 } 210 211 /**********/ 212 /* Crypto */ 213 /**********/ 214 215 static int ath10k_send_key(struct ath10k_vif *arvif, 216 struct ieee80211_key_conf *key, 217 enum set_key_cmd cmd, 218 const u8 *macaddr, u32 flags) 219 { 220 struct ath10k *ar = arvif->ar; 221 struct wmi_vdev_install_key_arg arg = { 222 .vdev_id = arvif->vdev_id, 223 .key_idx = key->keyidx, 224 .key_len = key->keylen, 225 .key_data = key->key, 226 .key_flags = flags, 227 .macaddr = macaddr, 228 }; 229 230 lockdep_assert_held(&arvif->ar->conf_mutex); 231 232 switch (key->cipher) { 233 case WLAN_CIPHER_SUITE_CCMP: 234 arg.key_cipher = WMI_CIPHER_AES_CCM; 235 key->flags |= 
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; 236 break; 237 case WLAN_CIPHER_SUITE_TKIP: 238 arg.key_cipher = WMI_CIPHER_TKIP; 239 arg.key_txmic_len = 8; 240 arg.key_rxmic_len = 8; 241 break; 242 case WLAN_CIPHER_SUITE_WEP40: 243 case WLAN_CIPHER_SUITE_WEP104: 244 arg.key_cipher = WMI_CIPHER_WEP; 245 break; 246 case WLAN_CIPHER_SUITE_CCMP_256: 247 arg.key_cipher = WMI_CIPHER_AES_CCM; 248 break; 249 case WLAN_CIPHER_SUITE_GCMP: 250 case WLAN_CIPHER_SUITE_GCMP_256: 251 arg.key_cipher = WMI_CIPHER_AES_GCM; 252 break; 253 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 254 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 255 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 256 case WLAN_CIPHER_SUITE_AES_CMAC: 257 WARN_ON(1); 258 return -EINVAL; 259 default: 260 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); 261 return -EOPNOTSUPP; 262 } 263 264 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 265 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 266 267 if (cmd == DISABLE_KEY) { 268 arg.key_cipher = WMI_CIPHER_NONE; 269 arg.key_data = NULL; 270 } 271 272 return ath10k_wmi_vdev_install_key(arvif->ar, &arg); 273 } 274 275 static int ath10k_install_key(struct ath10k_vif *arvif, 276 struct ieee80211_key_conf *key, 277 enum set_key_cmd cmd, 278 const u8 *macaddr, u32 flags) 279 { 280 struct ath10k *ar = arvif->ar; 281 int ret; 282 unsigned long time_left; 283 284 lockdep_assert_held(&ar->conf_mutex); 285 286 reinit_completion(&ar->install_key_done); 287 288 if (arvif->nohwcrypt) 289 return 1; 290 291 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags); 292 if (ret) 293 return ret; 294 295 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ); 296 if (time_left == 0) 297 return -ETIMEDOUT; 298 299 return 0; 300 } 301 302 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, 303 const u8 *addr) 304 { 305 struct ath10k *ar = arvif->ar; 306 struct ath10k_peer *peer; 307 int ret; 308 int i; 309 u32 flags; 310 311 lockdep_assert_held(&ar->conf_mutex); 312 313 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP && 314 arvif->vif->type != NL80211_IFTYPE_ADHOC && 315 arvif->vif->type != NL80211_IFTYPE_MESH_POINT)) 316 return -EINVAL; 317 318 spin_lock_bh(&ar->data_lock); 319 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); 320 spin_unlock_bh(&ar->data_lock); 321 322 if (!peer) 323 return -ENOENT; 324 325 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { 326 if (arvif->wep_keys[i] == NULL) 327 continue; 328 329 switch (arvif->vif->type) { 330 case NL80211_IFTYPE_AP: 331 flags = WMI_KEY_PAIRWISE; 332 333 if (arvif->def_wep_key_idx == i) 334 flags |= WMI_KEY_TX_USAGE; 335 336 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 337 SET_KEY, addr, flags); 338 if (ret < 0) 339 return ret; 340 break; 341 case NL80211_IFTYPE_ADHOC: 342 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 343 SET_KEY, addr, 344 WMI_KEY_PAIRWISE); 345 if (ret < 0) 346 return ret; 347 348 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 349 SET_KEY, addr, WMI_KEY_GROUP); 350 if (ret < 0) 351 return ret; 352 break; 353 default: 354 WARN_ON(1); 355 return -EINVAL; 356 } 357 358 spin_lock_bh(&ar->data_lock); 359 peer->keys[i] = arvif->wep_keys[i]; 360 spin_unlock_bh(&ar->data_lock); 361 } 362 363 /* In some cases (notably with static WEP IBSS with multiple keys) 364 * multicast Tx becomes broken. Both pairwise and groupwise keys are 365 * installed already. Using WMI_KEY_TX_USAGE in different combinations 366 * didn't seem help. Using def_keyid vdev parameter seems to be 367 * effective so use that. 368 * 369 * FIXME: Revisit. 
Perhaps this can be done in a less hacky way. 370 */ 371 if (arvif->vif->type != NL80211_IFTYPE_ADHOC) 372 return 0; 373 374 if (arvif->def_wep_key_idx == -1) 375 return 0; 376 377 ret = ath10k_wmi_vdev_set_param(arvif->ar, 378 arvif->vdev_id, 379 arvif->ar->wmi.vdev_param->def_keyid, 380 arvif->def_wep_key_idx); 381 if (ret) { 382 ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n", 383 arvif->vdev_id, ret); 384 return ret; 385 } 386 387 return 0; 388 } 389 390 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, 391 const u8 *addr) 392 { 393 struct ath10k *ar = arvif->ar; 394 struct ath10k_peer *peer; 395 int first_errno = 0; 396 int ret; 397 int i; 398 u32 flags = 0; 399 400 lockdep_assert_held(&ar->conf_mutex); 401 402 spin_lock_bh(&ar->data_lock); 403 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); 404 spin_unlock_bh(&ar->data_lock); 405 406 if (!peer) 407 return -ENOENT; 408 409 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 410 if (peer->keys[i] == NULL) 411 continue; 412 413 /* key flags are not required to delete the key */ 414 ret = ath10k_install_key(arvif, peer->keys[i], 415 DISABLE_KEY, addr, flags); 416 if (ret < 0 && first_errno == 0) 417 first_errno = ret; 418 419 if (ret < 0) 420 ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", 421 i, ret); 422 423 spin_lock_bh(&ar->data_lock); 424 peer->keys[i] = NULL; 425 spin_unlock_bh(&ar->data_lock); 426 } 427 428 return first_errno; 429 } 430 431 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, 432 u8 keyidx) 433 { 434 struct ath10k_peer *peer; 435 int i; 436 437 lockdep_assert_held(&ar->data_lock); 438 439 /* We don't know which vdev this peer belongs to, 440 * since WMI doesn't give us that information. 441 * 442 * FIXME: multi-bss needs to be handled. 
443 */ 444 peer = ath10k_peer_find(ar, 0, addr); 445 if (!peer) 446 return false; 447 448 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 449 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx) 450 return true; 451 } 452 453 return false; 454 } 455 456 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, 457 struct ieee80211_key_conf *key) 458 { 459 struct ath10k *ar = arvif->ar; 460 struct ath10k_peer *peer; 461 u8 addr[ETH_ALEN]; 462 int first_errno = 0; 463 int ret; 464 int i; 465 u32 flags = 0; 466 467 lockdep_assert_held(&ar->conf_mutex); 468 469 for (;;) { 470 /* since ath10k_install_key we can't hold data_lock all the 471 * time, so we try to remove the keys incrementally 472 */ 473 spin_lock_bh(&ar->data_lock); 474 i = 0; 475 list_for_each_entry(peer, &ar->peers, list) { 476 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 477 if (peer->keys[i] == key) { 478 ether_addr_copy(addr, peer->addr); 479 peer->keys[i] = NULL; 480 break; 481 } 482 } 483 484 if (i < ARRAY_SIZE(peer->keys)) 485 break; 486 } 487 spin_unlock_bh(&ar->data_lock); 488 489 if (i == ARRAY_SIZE(peer->keys)) 490 break; 491 /* key flags are not required to delete the key */ 492 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags); 493 if (ret < 0 && first_errno == 0) 494 first_errno = ret; 495 496 if (ret) 497 ath10k_warn(ar, "failed to remove key for %pM: %d\n", 498 addr, ret); 499 } 500 501 return first_errno; 502 } 503 504 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif, 505 struct ieee80211_key_conf *key) 506 { 507 struct ath10k *ar = arvif->ar; 508 struct ath10k_peer *peer; 509 int ret; 510 511 lockdep_assert_held(&ar->conf_mutex); 512 513 list_for_each_entry(peer, &ar->peers, list) { 514 if (ether_addr_equal(peer->addr, arvif->vif->addr)) 515 continue; 516 517 if (ether_addr_equal(peer->addr, arvif->bssid)) 518 continue; 519 520 if (peer->keys[key->keyidx] == key) 521 continue; 522 523 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n", 524 arvif->vdev_id, key->keyidx); 525 526 ret = ath10k_install_peer_wep_keys(arvif, peer->addr); 527 if (ret) { 528 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n", 529 arvif->vdev_id, peer->addr, ret); 530 return ret; 531 } 532 } 533 534 return 0; 535 } 536 537 /*********************/ 538 /* General utilities */ 539 /*********************/ 540 541 static inline enum wmi_phy_mode 542 chan_to_phymode(const struct cfg80211_chan_def *chandef) 543 { 544 enum wmi_phy_mode phymode = MODE_UNKNOWN; 545 546 switch (chandef->chan->band) { 547 case NL80211_BAND_2GHZ: 548 switch (chandef->width) { 549 case NL80211_CHAN_WIDTH_20_NOHT: 550 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) 551 phymode = MODE_11B; 552 else 553 phymode = MODE_11G; 554 break; 555 case NL80211_CHAN_WIDTH_20: 556 phymode = MODE_11NG_HT20; 557 break; 558 case NL80211_CHAN_WIDTH_40: 559 phymode = MODE_11NG_HT40; 560 break; 561 case NL80211_CHAN_WIDTH_5: 562 case NL80211_CHAN_WIDTH_10: 563 case NL80211_CHAN_WIDTH_80: 564 case NL80211_CHAN_WIDTH_80P80: 565 case NL80211_CHAN_WIDTH_160: 566 phymode = MODE_UNKNOWN; 567 break; 568 } 569 break; 570 case NL80211_BAND_5GHZ: 571 switch (chandef->width) { 572 case NL80211_CHAN_WIDTH_20_NOHT: 573 phymode = MODE_11A; 574 break; 575 case NL80211_CHAN_WIDTH_20: 576 phymode = MODE_11NA_HT20; 577 break; 578 case NL80211_CHAN_WIDTH_40: 579 phymode = MODE_11NA_HT40; 580 break; 581 case NL80211_CHAN_WIDTH_80: 582 phymode = MODE_11AC_VHT80; 583 break; 584 case NL80211_CHAN_WIDTH_160: 585 phymode = 
MODE_11AC_VHT160; 586 break; 587 case NL80211_CHAN_WIDTH_80P80: 588 phymode = MODE_11AC_VHT80_80; 589 break; 590 case NL80211_CHAN_WIDTH_5: 591 case NL80211_CHAN_WIDTH_10: 592 phymode = MODE_UNKNOWN; 593 break; 594 } 595 break; 596 default: 597 break; 598 } 599 600 WARN_ON(phymode == MODE_UNKNOWN); 601 return phymode; 602 } 603 604 static u8 ath10k_parse_mpdudensity(u8 mpdudensity) 605 { 606 /* 607 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 608 * 0 for no restriction 609 * 1 for 1/4 us 610 * 2 for 1/2 us 611 * 3 for 1 us 612 * 4 for 2 us 613 * 5 for 4 us 614 * 6 for 8 us 615 * 7 for 16 us 616 */ 617 switch (mpdudensity) { 618 case 0: 619 return 0; 620 case 1: 621 case 2: 622 case 3: 623 /* Our lower layer calculations limit our precision to 624 * 1 microsecond 625 */ 626 return 1; 627 case 4: 628 return 2; 629 case 5: 630 return 4; 631 case 6: 632 return 8; 633 case 7: 634 return 16; 635 default: 636 return 0; 637 } 638 } 639 640 int ath10k_mac_vif_chan(struct ieee80211_vif *vif, 641 struct cfg80211_chan_def *def) 642 { 643 struct ieee80211_chanctx_conf *conf; 644 645 rcu_read_lock(); 646 conf = rcu_dereference(vif->chanctx_conf); 647 if (!conf) { 648 rcu_read_unlock(); 649 return -ENOENT; 650 } 651 652 *def = conf->def; 653 rcu_read_unlock(); 654 655 return 0; 656 } 657 658 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw, 659 struct ieee80211_chanctx_conf *conf, 660 void *data) 661 { 662 int *num = data; 663 664 (*num)++; 665 } 666 667 static int ath10k_mac_num_chanctxs(struct ath10k *ar) 668 { 669 int num = 0; 670 671 ieee80211_iter_chan_contexts_atomic(ar->hw, 672 ath10k_mac_num_chanctxs_iter, 673 &num); 674 675 return num; 676 } 677 678 static void 679 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, 680 struct ieee80211_chanctx_conf *conf, 681 void *data) 682 { 683 struct cfg80211_chan_def **def = data; 684 685 *def = &conf->def; 686 } 687 688 static int ath10k_peer_create(struct ath10k *ar, 689 struct ieee80211_vif *vif, 690 struct ieee80211_sta *sta, 691 u32 vdev_id, 692 const u8 *addr, 693 enum wmi_peer_type peer_type) 694 { 695 struct ath10k_vif *arvif; 696 struct ath10k_peer *peer; 697 int num_peers = 0; 698 int ret; 699 700 lockdep_assert_held(&ar->conf_mutex); 701 702 num_peers = ar->num_peers; 703 704 /* Each vdev consumes a peer entry as well */ 705 list_for_each_entry(arvif, &ar->arvifs, list) 706 num_peers++; 707 708 if (num_peers >= ar->max_num_peers) 709 return -ENOBUFS; 710 711 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); 712 if (ret) { 713 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", 714 addr, vdev_id, ret); 715 return ret; 716 } 717 718 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); 719 if (ret) { 720 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", 721 addr, vdev_id, ret); 722 return ret; 723 } 724 725 spin_lock_bh(&ar->data_lock); 726 727 peer = ath10k_peer_find(ar, vdev_id, addr); 728 if (!peer) { 729 spin_unlock_bh(&ar->data_lock); 730 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", 731 addr, vdev_id); 732 ath10k_wmi_peer_delete(ar, vdev_id, addr); 733 return -ENOENT; 734 } 735 736 peer->vif = vif; 737 peer->sta = sta; 738 739 spin_unlock_bh(&ar->data_lock); 740 741 ar->num_peers++; 742 743 return 0; 744 } 745 746 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) 747 { 748 struct ath10k *ar = arvif->ar; 749 u32 param; 750 int ret; 751 752 param = ar->wmi.pdev_param->sta_kickout_th; 753 ret = 
ath10k_wmi_pdev_set_param(ar, param, 754 ATH10K_KICKOUT_THRESHOLD); 755 if (ret) { 756 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", 757 arvif->vdev_id, ret); 758 return ret; 759 } 760 761 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs; 762 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 763 ATH10K_KEEPALIVE_MIN_IDLE); 764 if (ret) { 765 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", 766 arvif->vdev_id, ret); 767 return ret; 768 } 769 770 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs; 771 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 772 ATH10K_KEEPALIVE_MAX_IDLE); 773 if (ret) { 774 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", 775 arvif->vdev_id, ret); 776 return ret; 777 } 778 779 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs; 780 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 781 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); 782 if (ret) { 783 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", 784 arvif->vdev_id, ret); 785 return ret; 786 } 787 788 return 0; 789 } 790 791 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) 792 { 793 struct ath10k *ar = arvif->ar; 794 u32 vdev_param; 795 796 vdev_param = ar->wmi.vdev_param->rts_threshold; 797 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); 798 } 799 800 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) 801 { 802 int ret; 803 804 lockdep_assert_held(&ar->conf_mutex); 805 806 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); 807 if (ret) 808 return ret; 809 810 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); 811 if (ret) 812 return ret; 813 814 ar->num_peers--; 815 816 return 0; 817 } 818 819 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) 820 { 821 struct ath10k_peer *peer, *tmp; 822 int peer_id; 823 int i; 824 825 lockdep_assert_held(&ar->conf_mutex); 826 827 spin_lock_bh(&ar->data_lock); 828 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 829 if (peer->vdev_id != vdev_id) 830 continue; 831 832 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", 833 peer->addr, vdev_id); 834 835 for_each_set_bit(peer_id, peer->peer_ids, 836 ATH10K_MAX_NUM_PEER_IDS) { 837 ar->peer_map[peer_id] = NULL; 838 } 839 840 /* Double check that peer is properly un-referenced from 841 * the peer_map 842 */ 843 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 844 if (ar->peer_map[i] == peer) { 845 ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", 846 peer->addr, peer, i); 847 ar->peer_map[i] = NULL; 848 } 849 } 850 851 list_del(&peer->list); 852 kfree(peer); 853 ar->num_peers--; 854 } 855 spin_unlock_bh(&ar->data_lock); 856 } 857 858 static void ath10k_peer_cleanup_all(struct ath10k *ar) 859 { 860 struct ath10k_peer *peer, *tmp; 861 int i; 862 863 lockdep_assert_held(&ar->conf_mutex); 864 865 spin_lock_bh(&ar->data_lock); 866 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 867 list_del(&peer->list); 868 kfree(peer); 869 } 870 871 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) 872 ar->peer_map[i] = NULL; 873 874 spin_unlock_bh(&ar->data_lock); 875 876 ar->num_peers = 0; 877 ar->num_stations = 0; 878 } 879 880 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id, 881 struct ieee80211_sta *sta, 882 enum wmi_tdls_peer_state state) 883 { 884 int ret; 885 struct wmi_tdls_peer_update_cmd_arg arg = {}; 886 
struct wmi_tdls_peer_capab_arg cap = {}; 887 struct wmi_channel_arg chan_arg = {}; 888 889 lockdep_assert_held(&ar->conf_mutex); 890 891 arg.vdev_id = vdev_id; 892 arg.peer_state = state; 893 ether_addr_copy(arg.addr, sta->addr); 894 895 cap.peer_max_sp = sta->max_sp; 896 cap.peer_uapsd_queues = sta->uapsd_queues; 897 898 if (state == WMI_TDLS_PEER_STATE_CONNECTED && 899 !sta->tdls_initiator) 900 cap.is_peer_responder = 1; 901 902 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg); 903 if (ret) { 904 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n", 905 arg.addr, vdev_id, ret); 906 return ret; 907 } 908 909 return 0; 910 } 911 912 /************************/ 913 /* Interface management */ 914 /************************/ 915 916 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) 917 { 918 struct ath10k *ar = arvif->ar; 919 920 lockdep_assert_held(&ar->data_lock); 921 922 if (!arvif->beacon) 923 return; 924 925 if (!arvif->beacon_buf) 926 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, 927 arvif->beacon->len, DMA_TO_DEVICE); 928 929 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED && 930 arvif->beacon_state != ATH10K_BEACON_SENT)) 931 return; 932 933 dev_kfree_skb_any(arvif->beacon); 934 935 arvif->beacon = NULL; 936 arvif->beacon_state = ATH10K_BEACON_SCHEDULED; 937 } 938 939 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) 940 { 941 struct ath10k *ar = arvif->ar; 942 943 lockdep_assert_held(&ar->data_lock); 944 945 ath10k_mac_vif_beacon_free(arvif); 946 947 if (arvif->beacon_buf) { 948 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 949 arvif->beacon_buf, arvif->beacon_paddr); 950 arvif->beacon_buf = NULL; 951 } 952 } 953 954 static inline int ath10k_vdev_setup_sync(struct ath10k *ar) 955 { 956 unsigned long time_left; 957 958 lockdep_assert_held(&ar->conf_mutex); 959 960 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) 961 return -ESHUTDOWN; 962 963 time_left = wait_for_completion_timeout(&ar->vdev_setup_done, 964 ATH10K_VDEV_SETUP_TIMEOUT_HZ); 965 if (time_left == 0) 966 return -ETIMEDOUT; 967 968 return 0; 969 } 970 971 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) 972 { 973 struct cfg80211_chan_def *chandef = NULL; 974 struct ieee80211_channel *channel = NULL; 975 struct wmi_vdev_start_request_arg arg = {}; 976 int ret = 0; 977 978 lockdep_assert_held(&ar->conf_mutex); 979 980 ieee80211_iter_chan_contexts_atomic(ar->hw, 981 ath10k_mac_get_any_chandef_iter, 982 &chandef); 983 if (WARN_ON_ONCE(!chandef)) 984 return -ENOENT; 985 986 channel = chandef->chan; 987 988 arg.vdev_id = vdev_id; 989 arg.channel.freq = channel->center_freq; 990 arg.channel.band_center_freq1 = chandef->center_freq1; 991 arg.channel.band_center_freq2 = chandef->center_freq2; 992 993 /* TODO setup this dynamically, what in case we 994 * don't have any vifs? 
995 */ 996 arg.channel.mode = chan_to_phymode(chandef); 997 arg.channel.chan_radar = 998 !!(channel->flags & IEEE80211_CHAN_RADAR); 999 1000 arg.channel.min_power = 0; 1001 arg.channel.max_power = channel->max_power * 2; 1002 arg.channel.max_reg_power = channel->max_reg_power * 2; 1003 arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; 1004 1005 reinit_completion(&ar->vdev_setup_done); 1006 1007 ret = ath10k_wmi_vdev_start(ar, &arg); 1008 if (ret) { 1009 ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n", 1010 vdev_id, ret); 1011 return ret; 1012 } 1013 1014 ret = ath10k_vdev_setup_sync(ar); 1015 if (ret) { 1016 ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n", 1017 vdev_id, ret); 1018 return ret; 1019 } 1020 1021 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); 1022 if (ret) { 1023 ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n", 1024 vdev_id, ret); 1025 goto vdev_stop; 1026 } 1027 1028 ar->monitor_vdev_id = vdev_id; 1029 1030 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n", 1031 ar->monitor_vdev_id); 1032 return 0; 1033 1034 vdev_stop: 1035 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 1036 if (ret) 1037 ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n", 1038 ar->monitor_vdev_id, ret); 1039 1040 return ret; 1041 } 1042 1043 static int ath10k_monitor_vdev_stop(struct ath10k *ar) 1044 { 1045 int ret = 0; 1046 1047 lockdep_assert_held(&ar->conf_mutex); 1048 1049 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); 1050 if (ret) 1051 ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n", 1052 ar->monitor_vdev_id, ret); 1053 1054 reinit_completion(&ar->vdev_setup_done); 1055 1056 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 1057 if (ret) 1058 ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n", 1059 ar->monitor_vdev_id, ret); 1060 1061 ret = ath10k_vdev_setup_sync(ar); 1062 if (ret) 1063 ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n", 1064 ar->monitor_vdev_id, ret); 1065 1066 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", 1067 ar->monitor_vdev_id); 1068 return ret; 1069 } 1070 1071 static int ath10k_monitor_vdev_create(struct ath10k *ar) 1072 { 1073 int bit, ret = 0; 1074 1075 lockdep_assert_held(&ar->conf_mutex); 1076 1077 if (ar->free_vdev_map == 0) { 1078 ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n"); 1079 return -ENOMEM; 1080 } 1081 1082 bit = __ffs64(ar->free_vdev_map); 1083 1084 ar->monitor_vdev_id = bit; 1085 1086 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, 1087 WMI_VDEV_TYPE_MONITOR, 1088 0, ar->mac_addr); 1089 if (ret) { 1090 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n", 1091 ar->monitor_vdev_id, ret); 1092 return ret; 1093 } 1094 1095 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); 1096 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", 1097 ar->monitor_vdev_id); 1098 1099 return 0; 1100 } 1101 1102 static int ath10k_monitor_vdev_delete(struct ath10k *ar) 1103 { 1104 int ret = 0; 1105 1106 lockdep_assert_held(&ar->conf_mutex); 1107 1108 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); 1109 if (ret) { 1110 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n", 1111 ar->monitor_vdev_id, ret); 1112 return ret; 1113 } 1114 1115 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; 1116 1117 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", 1118 ar->monitor_vdev_id); 1119 return ret; 1120 } 1121 1122 static int 
ath10k_monitor_start(struct ath10k *ar) 1123 { 1124 int ret; 1125 1126 lockdep_assert_held(&ar->conf_mutex); 1127 1128 ret = ath10k_monitor_vdev_create(ar); 1129 if (ret) { 1130 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); 1131 return ret; 1132 } 1133 1134 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); 1135 if (ret) { 1136 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); 1137 ath10k_monitor_vdev_delete(ar); 1138 return ret; 1139 } 1140 1141 ar->monitor_started = true; 1142 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); 1143 1144 return 0; 1145 } 1146 1147 static int ath10k_monitor_stop(struct ath10k *ar) 1148 { 1149 int ret; 1150 1151 lockdep_assert_held(&ar->conf_mutex); 1152 1153 ret = ath10k_monitor_vdev_stop(ar); 1154 if (ret) { 1155 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); 1156 return ret; 1157 } 1158 1159 ret = ath10k_monitor_vdev_delete(ar); 1160 if (ret) { 1161 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); 1162 return ret; 1163 } 1164 1165 ar->monitor_started = false; 1166 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); 1167 1168 return 0; 1169 } 1170 1171 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar) 1172 { 1173 int num_ctx; 1174 1175 /* At least one chanctx is required to derive a channel to start 1176 * monitor vdev on. 1177 */ 1178 num_ctx = ath10k_mac_num_chanctxs(ar); 1179 if (num_ctx == 0) 1180 return false; 1181 1182 /* If there's already an existing special monitor interface then don't 1183 * bother creating another monitor vdev. 1184 */ 1185 if (ar->monitor_arvif) 1186 return false; 1187 1188 return ar->monitor || 1189 (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST, 1190 ar->running_fw->fw_file.fw_features) && 1191 (ar->filter_flags & FIF_OTHER_BSS)) || 1192 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1193 } 1194 1195 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar) 1196 { 1197 int num_ctx; 1198 1199 num_ctx = ath10k_mac_num_chanctxs(ar); 1200 1201 /* FIXME: Current interface combinations and cfg80211/mac80211 code 1202 * shouldn't allow this but make sure to prevent handling the following 1203 * case anyway since multi-channel DFS hasn't been tested at all. 1204 */ 1205 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1) 1206 return false; 1207 1208 return true; 1209 } 1210 1211 static int ath10k_monitor_recalc(struct ath10k *ar) 1212 { 1213 bool needed; 1214 bool allowed; 1215 int ret; 1216 1217 lockdep_assert_held(&ar->conf_mutex); 1218 1219 needed = ath10k_mac_monitor_vdev_is_needed(ar); 1220 allowed = ath10k_mac_monitor_vdev_is_allowed(ar); 1221 1222 ath10k_dbg(ar, ATH10K_DBG_MAC, 1223 "mac monitor recalc started? %d needed? %d allowed? 
%d\n", 1224 ar->monitor_started, needed, allowed); 1225 1226 if (WARN_ON(needed && !allowed)) { 1227 if (ar->monitor_started) { 1228 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n"); 1229 1230 ret = ath10k_monitor_stop(ar); 1231 if (ret) 1232 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", 1233 ret); 1234 /* not serious */ 1235 } 1236 1237 return -EPERM; 1238 } 1239 1240 if (needed == ar->monitor_started) 1241 return 0; 1242 1243 if (needed) 1244 return ath10k_monitor_start(ar); 1245 else 1246 return ath10k_monitor_stop(ar); 1247 } 1248 1249 static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif) 1250 { 1251 struct ath10k *ar = arvif->ar; 1252 1253 lockdep_assert_held(&ar->conf_mutex); 1254 1255 if (!arvif->is_started) { 1256 ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n"); 1257 return false; 1258 } 1259 1260 return true; 1261 } 1262 1263 static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif) 1264 { 1265 struct ath10k *ar = arvif->ar; 1266 u32 vdev_param; 1267 1268 lockdep_assert_held(&ar->conf_mutex); 1269 1270 vdev_param = ar->wmi.vdev_param->protection_mode; 1271 1272 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n", 1273 arvif->vdev_id, arvif->use_cts_prot); 1274 1275 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1276 arvif->use_cts_prot ? 1 : 0); 1277 } 1278 1279 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) 1280 { 1281 struct ath10k *ar = arvif->ar; 1282 u32 vdev_param, rts_cts = 0; 1283 1284 lockdep_assert_held(&ar->conf_mutex); 1285 1286 vdev_param = ar->wmi.vdev_param->enable_rtscts; 1287 1288 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET); 1289 1290 if (arvif->num_legacy_stations > 0) 1291 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES, 1292 WMI_RTSCTS_PROFILE); 1293 else 1294 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES, 1295 WMI_RTSCTS_PROFILE); 1296 1297 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n", 1298 arvif->vdev_id, rts_cts); 1299 1300 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1301 rts_cts); 1302 } 1303 1304 static int ath10k_start_cac(struct ath10k *ar) 1305 { 1306 int ret; 1307 1308 lockdep_assert_held(&ar->conf_mutex); 1309 1310 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1311 1312 ret = ath10k_monitor_recalc(ar); 1313 if (ret) { 1314 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); 1315 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1316 return ret; 1317 } 1318 1319 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", 1320 ar->monitor_vdev_id); 1321 1322 return 0; 1323 } 1324 1325 static int ath10k_stop_cac(struct ath10k *ar) 1326 { 1327 lockdep_assert_held(&ar->conf_mutex); 1328 1329 /* CAC is not running - do nothing */ 1330 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) 1331 return 0; 1332 1333 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1334 ath10k_monitor_stop(ar); 1335 1336 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); 1337 1338 return 0; 1339 } 1340 1341 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw, 1342 struct ieee80211_chanctx_conf *conf, 1343 void *data) 1344 { 1345 bool *ret = data; 1346 1347 if (!*ret && conf->radar_enabled) 1348 *ret = true; 1349 } 1350 1351 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar) 1352 { 1353 bool has_radar = false; 1354 1355 ieee80211_iter_chan_contexts_atomic(ar->hw, 1356 ath10k_mac_has_radar_iter, 1357 &has_radar); 1358 1359 return has_radar; 1360 } 1361 1362 static void 
ath10k_recalc_radar_detection(struct ath10k *ar) 1363 { 1364 int ret; 1365 1366 lockdep_assert_held(&ar->conf_mutex); 1367 1368 ath10k_stop_cac(ar); 1369 1370 if (!ath10k_mac_has_radar_enabled(ar)) 1371 return; 1372 1373 if (ar->num_started_vdevs > 0) 1374 return; 1375 1376 ret = ath10k_start_cac(ar); 1377 if (ret) { 1378 /* 1379 * Not possible to start CAC on current channel so starting 1380 * radiation is not allowed, make this channel DFS_UNAVAILABLE 1381 * by indicating that radar was detected. 1382 */ 1383 ath10k_warn(ar, "failed to start CAC: %d\n", ret); 1384 ieee80211_radar_detected(ar->hw); 1385 } 1386 } 1387 1388 static int ath10k_vdev_stop(struct ath10k_vif *arvif) 1389 { 1390 struct ath10k *ar = arvif->ar; 1391 int ret; 1392 1393 lockdep_assert_held(&ar->conf_mutex); 1394 1395 reinit_completion(&ar->vdev_setup_done); 1396 1397 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); 1398 if (ret) { 1399 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n", 1400 arvif->vdev_id, ret); 1401 return ret; 1402 } 1403 1404 ret = ath10k_vdev_setup_sync(ar); 1405 if (ret) { 1406 ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n", 1407 arvif->vdev_id, ret); 1408 return ret; 1409 } 1410 1411 WARN_ON(ar->num_started_vdevs == 0); 1412 1413 if (ar->num_started_vdevs != 0) { 1414 ar->num_started_vdevs--; 1415 ath10k_recalc_radar_detection(ar); 1416 } 1417 1418 return ret; 1419 } 1420 1421 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, 1422 const struct cfg80211_chan_def *chandef, 1423 bool restart) 1424 { 1425 struct ath10k *ar = arvif->ar; 1426 struct wmi_vdev_start_request_arg arg = {}; 1427 int ret = 0; 1428 1429 lockdep_assert_held(&ar->conf_mutex); 1430 1431 reinit_completion(&ar->vdev_setup_done); 1432 1433 arg.vdev_id = arvif->vdev_id; 1434 arg.dtim_period = arvif->dtim_period; 1435 arg.bcn_intval = arvif->beacon_interval; 1436 1437 arg.channel.freq = chandef->chan->center_freq; 1438 arg.channel.band_center_freq1 = chandef->center_freq1; 1439 arg.channel.band_center_freq2 = chandef->center_freq2; 1440 arg.channel.mode = chan_to_phymode(chandef); 1441 1442 arg.channel.min_power = 0; 1443 arg.channel.max_power = chandef->chan->max_power * 2; 1444 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; 1445 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; 1446 1447 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 1448 arg.ssid = arvif->u.ap.ssid; 1449 arg.ssid_len = arvif->u.ap.ssid_len; 1450 arg.hidden_ssid = arvif->u.ap.hidden_ssid; 1451 1452 /* For now allow DFS for AP mode */ 1453 arg.channel.chan_radar = 1454 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); 1455 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 1456 arg.ssid = arvif->vif->bss_conf.ssid; 1457 arg.ssid_len = arvif->vif->bss_conf.ssid_len; 1458 } 1459 1460 ath10k_dbg(ar, ATH10K_DBG_MAC, 1461 "mac vdev %d start center_freq %d phymode %s\n", 1462 arg.vdev_id, arg.channel.freq, 1463 ath10k_wmi_phymode_str(arg.channel.mode)); 1464 1465 if (restart) 1466 ret = ath10k_wmi_vdev_restart(ar, &arg); 1467 else 1468 ret = ath10k_wmi_vdev_start(ar, &arg); 1469 1470 if (ret) { 1471 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n", 1472 arg.vdev_id, ret); 1473 return ret; 1474 } 1475 1476 ret = ath10k_vdev_setup_sync(ar); 1477 if (ret) { 1478 ath10k_warn(ar, 1479 "failed to synchronize setup for vdev %i restart %d: %d\n", 1480 arg.vdev_id, restart, ret); 1481 return ret; 1482 } 1483 1484 ar->num_started_vdevs++; 1485 ath10k_recalc_radar_detection(ar); 1486 1487 return ret; 1488 } 1489 1490 
static int ath10k_vdev_start(struct ath10k_vif *arvif, 1491 const struct cfg80211_chan_def *def) 1492 { 1493 return ath10k_vdev_start_restart(arvif, def, false); 1494 } 1495 1496 static int ath10k_vdev_restart(struct ath10k_vif *arvif, 1497 const struct cfg80211_chan_def *def) 1498 { 1499 return ath10k_vdev_start_restart(arvif, def, true); 1500 } 1501 1502 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, 1503 struct sk_buff *bcn) 1504 { 1505 struct ath10k *ar = arvif->ar; 1506 struct ieee80211_mgmt *mgmt; 1507 const u8 *p2p_ie; 1508 int ret; 1509 1510 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p) 1511 return 0; 1512 1513 mgmt = (void *)bcn->data; 1514 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1515 mgmt->u.beacon.variable, 1516 bcn->len - (mgmt->u.beacon.variable - 1517 bcn->data)); 1518 if (!p2p_ie) 1519 return -ENOENT; 1520 1521 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); 1522 if (ret) { 1523 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", 1524 arvif->vdev_id, ret); 1525 return ret; 1526 } 1527 1528 return 0; 1529 } 1530 1531 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, 1532 u8 oui_type, size_t ie_offset) 1533 { 1534 size_t len; 1535 const u8 *next; 1536 const u8 *end; 1537 u8 *ie; 1538 1539 if (WARN_ON(skb->len < ie_offset)) 1540 return -EINVAL; 1541 1542 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, 1543 skb->data + ie_offset, 1544 skb->len - ie_offset); 1545 if (!ie) 1546 return -ENOENT; 1547 1548 len = ie[1] + 2; 1549 end = skb->data + skb->len; 1550 next = ie + len; 1551 1552 if (WARN_ON(next > end)) 1553 return -EINVAL; 1554 1555 memmove(ie, next, end - next); 1556 skb_trim(skb, skb->len - len); 1557 1558 return 0; 1559 } 1560 1561 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif) 1562 { 1563 struct ath10k *ar = arvif->ar; 1564 struct ieee80211_hw *hw = ar->hw; 1565 struct ieee80211_vif *vif = arvif->vif; 1566 struct ieee80211_mutable_offsets offs = {}; 1567 struct sk_buff *bcn; 1568 int ret; 1569 1570 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1571 return 0; 1572 1573 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 1574 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 1575 return 0; 1576 1577 bcn = ieee80211_beacon_get_template(hw, vif, &offs); 1578 if (!bcn) { 1579 ath10k_warn(ar, "failed to get beacon template from mac80211\n"); 1580 return -EPERM; 1581 } 1582 1583 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn); 1584 if (ret) { 1585 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); 1586 kfree_skb(bcn); 1587 return ret; 1588 } 1589 1590 /* P2P IE is inserted by firmware automatically (as configured above) 1591 * so remove it from the base beacon template to avoid duplicate P2P 1592 * IEs in beacon frames. 
1593 */ 1594 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1595 offsetof(struct ieee80211_mgmt, 1596 u.beacon.variable)); 1597 1598 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, 1599 0, NULL, 0); 1600 kfree_skb(bcn); 1601 1602 if (ret) { 1603 ath10k_warn(ar, "failed to submit beacon template command: %d\n", 1604 ret); 1605 return ret; 1606 } 1607 1608 return 0; 1609 } 1610 1611 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) 1612 { 1613 struct ath10k *ar = arvif->ar; 1614 struct ieee80211_hw *hw = ar->hw; 1615 struct ieee80211_vif *vif = arvif->vif; 1616 struct sk_buff *prb; 1617 int ret; 1618 1619 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1620 return 0; 1621 1622 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1623 return 0; 1624 1625 prb = ieee80211_proberesp_get(hw, vif); 1626 if (!prb) { 1627 ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); 1628 return -EPERM; 1629 } 1630 1631 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); 1632 kfree_skb(prb); 1633 1634 if (ret) { 1635 ath10k_warn(ar, "failed to submit probe resp template command: %d\n", 1636 ret); 1637 return ret; 1638 } 1639 1640 return 0; 1641 } 1642 1643 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif) 1644 { 1645 struct ath10k *ar = arvif->ar; 1646 struct cfg80211_chan_def def; 1647 int ret; 1648 1649 /* When originally vdev is started during assign_vif_chanctx() some 1650 * information is missing, notably SSID. Firmware revisions with beacon 1651 * offloading require the SSID to be provided during vdev (re)start to 1652 * handle hidden SSID properly. 1653 * 1654 * Vdev restart must be done after vdev has been both started and 1655 * upped. Otherwise some firmware revisions (at least 10.2) fail to 1656 * deliver vdev restart response event causing timeouts during vdev 1657 * syncing in ath10k. 1658 * 1659 * Note: The vdev down/up and template reinstallation could be skipped 1660 * since only wmi-tlv firmware are known to have beacon offload and 1661 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart 1662 * response delivery. It's probably more robust to keep it as is. 1663 */ 1664 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1665 return 0; 1666 1667 if (WARN_ON(!arvif->is_started)) 1668 return -EINVAL; 1669 1670 if (WARN_ON(!arvif->is_up)) 1671 return -EINVAL; 1672 1673 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 1674 return -EINVAL; 1675 1676 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1677 if (ret) { 1678 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n", 1679 arvif->vdev_id, ret); 1680 return ret; 1681 } 1682 1683 /* Vdev down reset beacon & presp templates. Reinstall them. Otherwise 1684 * firmware will crash upon vdev up. 
1685 */ 1686 1687 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1688 if (ret) { 1689 ath10k_warn(ar, "failed to update beacon template: %d\n", ret); 1690 return ret; 1691 } 1692 1693 ret = ath10k_mac_setup_prb_tmpl(arvif); 1694 if (ret) { 1695 ath10k_warn(ar, "failed to update presp template: %d\n", ret); 1696 return ret; 1697 } 1698 1699 ret = ath10k_vdev_restart(arvif, &def); 1700 if (ret) { 1701 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n", 1702 arvif->vdev_id, ret); 1703 return ret; 1704 } 1705 1706 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1707 arvif->bssid); 1708 if (ret) { 1709 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n", 1710 arvif->vdev_id, ret); 1711 return ret; 1712 } 1713 1714 return 0; 1715 } 1716 1717 static void ath10k_control_beaconing(struct ath10k_vif *arvif, 1718 struct ieee80211_bss_conf *info) 1719 { 1720 struct ath10k *ar = arvif->ar; 1721 int ret = 0; 1722 1723 lockdep_assert_held(&arvif->ar->conf_mutex); 1724 1725 if (!info->enable_beacon) { 1726 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1727 if (ret) 1728 ath10k_warn(ar, "failed to down vdev_id %i: %d\n", 1729 arvif->vdev_id, ret); 1730 1731 arvif->is_up = false; 1732 1733 spin_lock_bh(&arvif->ar->data_lock); 1734 ath10k_mac_vif_beacon_free(arvif); 1735 spin_unlock_bh(&arvif->ar->data_lock); 1736 1737 return; 1738 } 1739 1740 arvif->tx_seq_no = 0x1000; 1741 1742 arvif->aid = 0; 1743 ether_addr_copy(arvif->bssid, info->bssid); 1744 1745 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1746 arvif->bssid); 1747 if (ret) { 1748 ath10k_warn(ar, "failed to bring up vdev %d: %i\n", 1749 arvif->vdev_id, ret); 1750 return; 1751 } 1752 1753 arvif->is_up = true; 1754 1755 ret = ath10k_mac_vif_fix_hidden_ssid(arvif); 1756 if (ret) { 1757 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n", 1758 arvif->vdev_id, ret); 1759 return; 1760 } 1761 1762 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); 1763 } 1764 1765 static void ath10k_control_ibss(struct ath10k_vif *arvif, 1766 struct ieee80211_bss_conf *info, 1767 const u8 self_peer[ETH_ALEN]) 1768 { 1769 struct ath10k *ar = arvif->ar; 1770 u32 vdev_param; 1771 int ret = 0; 1772 1773 lockdep_assert_held(&arvif->ar->conf_mutex); 1774 1775 if (!info->ibss_joined) { 1776 if (is_zero_ether_addr(arvif->bssid)) 1777 return; 1778 1779 eth_zero_addr(arvif->bssid); 1780 1781 return; 1782 } 1783 1784 vdev_param = arvif->ar->wmi.vdev_param->atim_window; 1785 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, 1786 ATH10K_DEFAULT_ATIM); 1787 if (ret) 1788 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", 1789 arvif->vdev_id, ret); 1790 } 1791 1792 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif) 1793 { 1794 struct ath10k *ar = arvif->ar; 1795 u32 param; 1796 u32 value; 1797 int ret; 1798 1799 lockdep_assert_held(&arvif->ar->conf_mutex); 1800 1801 if (arvif->u.sta.uapsd) 1802 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER; 1803 else 1804 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; 1805 1806 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 1807 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); 1808 if (ret) { 1809 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n", 1810 value, arvif->vdev_id, ret); 1811 return ret; 1812 } 1813 1814 return 0; 1815 } 1816 1817 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) 1818 { 1819 struct ath10k *ar = arvif->ar; 1820 u32 param; 1821 u32 value; 1822 
int ret; 1823 1824 lockdep_assert_held(&arvif->ar->conf_mutex); 1825 1826 if (arvif->u.sta.uapsd) 1827 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD; 1828 else 1829 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; 1830 1831 param = WMI_STA_PS_PARAM_PSPOLL_COUNT; 1832 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 1833 param, value); 1834 if (ret) { 1835 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n", 1836 value, arvif->vdev_id, ret); 1837 return ret; 1838 } 1839 1840 return 0; 1841 } 1842 1843 static int ath10k_mac_num_vifs_started(struct ath10k *ar) 1844 { 1845 struct ath10k_vif *arvif; 1846 int num = 0; 1847 1848 lockdep_assert_held(&ar->conf_mutex); 1849 1850 list_for_each_entry(arvif, &ar->arvifs, list) 1851 if (arvif->is_started) 1852 num++; 1853 1854 return num; 1855 } 1856 1857 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) 1858 { 1859 struct ath10k *ar = arvif->ar; 1860 struct ieee80211_vif *vif = arvif->vif; 1861 struct ieee80211_conf *conf = &ar->hw->conf; 1862 enum wmi_sta_powersave_param param; 1863 enum wmi_sta_ps_mode psmode; 1864 int ret; 1865 int ps_timeout; 1866 bool enable_ps; 1867 1868 lockdep_assert_held(&arvif->ar->conf_mutex); 1869 1870 if (arvif->vif->type != NL80211_IFTYPE_STATION) 1871 return 0; 1872 1873 enable_ps = arvif->ps; 1874 1875 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && 1876 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, 1877 ar->running_fw->fw_file.fw_features)) { 1878 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", 1879 arvif->vdev_id); 1880 enable_ps = false; 1881 } 1882 1883 if (!arvif->is_started) { 1884 /* mac80211 can update vif powersave state while disconnected. 1885 * Firmware doesn't behave nicely and consumes more power than 1886 * necessary if PS is disabled on a non-started vdev. Hence 1887 * force-enable PS for non-running vdevs. 1888 */ 1889 psmode = WMI_STA_PS_MODE_ENABLED; 1890 } else if (enable_ps) { 1891 psmode = WMI_STA_PS_MODE_ENABLED; 1892 param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 1893 1894 ps_timeout = conf->dynamic_ps_timeout; 1895 if (ps_timeout == 0) { 1896 /* Firmware doesn't like 0 */ 1897 ps_timeout = ieee80211_tu_to_usec( 1898 vif->bss_conf.beacon_int) / 1000; 1899 } 1900 1901 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 1902 ps_timeout); 1903 if (ret) { 1904 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", 1905 arvif->vdev_id, ret); 1906 return ret; 1907 } 1908 } else { 1909 psmode = WMI_STA_PS_MODE_DISABLED; 1910 } 1911 1912 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", 1913 arvif->vdev_id, psmode ? "enable" : "disable"); 1914 1915 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); 1916 if (ret) { 1917 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", 1918 psmode, arvif->vdev_id, ret); 1919 return ret; 1920 } 1921 1922 return 0; 1923 } 1924 1925 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif) 1926 { 1927 struct ath10k *ar = arvif->ar; 1928 struct wmi_sta_keepalive_arg arg = {}; 1929 int ret; 1930 1931 lockdep_assert_held(&arvif->ar->conf_mutex); 1932 1933 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 1934 return 0; 1935 1936 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) 1937 return 0; 1938 1939 /* Some firmware revisions have a bug and ignore the `enabled` field. 1940 * Instead use the interval to disable the keepalive. 
1941 */ 1942 arg.vdev_id = arvif->vdev_id; 1943 arg.enabled = 1; 1944 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; 1945 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE; 1946 1947 ret = ath10k_wmi_sta_keepalive(ar, &arg); 1948 if (ret) { 1949 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", 1950 arvif->vdev_id, ret); 1951 return ret; 1952 } 1953 1954 return 0; 1955 } 1956 1957 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif) 1958 { 1959 struct ath10k *ar = arvif->ar; 1960 struct ieee80211_vif *vif = arvif->vif; 1961 int ret; 1962 1963 lockdep_assert_held(&arvif->ar->conf_mutex); 1964 1965 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))) 1966 return; 1967 1968 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1969 return; 1970 1971 if (!vif->csa_active) 1972 return; 1973 1974 if (!arvif->is_up) 1975 return; 1976 1977 if (!ieee80211_csa_is_complete(vif)) { 1978 ieee80211_csa_update_counter(vif); 1979 1980 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1981 if (ret) 1982 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 1983 ret); 1984 1985 ret = ath10k_mac_setup_prb_tmpl(arvif); 1986 if (ret) 1987 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 1988 ret); 1989 } else { 1990 ieee80211_csa_finish(vif); 1991 } 1992 } 1993 1994 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work) 1995 { 1996 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 1997 ap_csa_work); 1998 struct ath10k *ar = arvif->ar; 1999 2000 mutex_lock(&ar->conf_mutex); 2001 ath10k_mac_vif_ap_csa_count_down(arvif); 2002 mutex_unlock(&ar->conf_mutex); 2003 } 2004 2005 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, 2006 struct ieee80211_vif *vif) 2007 { 2008 struct sk_buff *skb = data; 2009 struct ieee80211_mgmt *mgmt = (void *)skb->data; 2010 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2011 2012 if (vif->type != NL80211_IFTYPE_STATION) 2013 return; 2014 2015 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) 2016 return; 2017 2018 cancel_delayed_work(&arvif->connection_loss_work); 2019 } 2020 2021 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb) 2022 { 2023 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2024 IEEE80211_IFACE_ITER_NORMAL, 2025 ath10k_mac_handle_beacon_iter, 2026 skb); 2027 } 2028 2029 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, 2030 struct ieee80211_vif *vif) 2031 { 2032 u32 *vdev_id = data; 2033 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2034 struct ath10k *ar = arvif->ar; 2035 struct ieee80211_hw *hw = ar->hw; 2036 2037 if (arvif->vdev_id != *vdev_id) 2038 return; 2039 2040 if (!arvif->is_up) 2041 return; 2042 2043 ieee80211_beacon_loss(vif); 2044 2045 /* Firmware doesn't report beacon loss events repeatedly. If AP probe 2046 * (done by mac80211) succeeds but beacons do not resume then it 2047 * doesn't make sense to continue operation. Queue connection loss work 2048 * which can be cancelled when beacon is received. 
2049 */ 2050 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, 2051 ATH10K_CONNECTION_LOSS_HZ); 2052 } 2053 2054 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id) 2055 { 2056 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2057 IEEE80211_IFACE_ITER_NORMAL, 2058 ath10k_mac_handle_beacon_miss_iter, 2059 &vdev_id); 2060 } 2061 2062 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work) 2063 { 2064 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 2065 connection_loss_work.work); 2066 struct ieee80211_vif *vif = arvif->vif; 2067 2068 if (!arvif->is_up) 2069 return; 2070 2071 ieee80211_connection_loss(vif); 2072 } 2073 2074 /**********************/ 2075 /* Station management */ 2076 /**********************/ 2077 2078 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, 2079 struct ieee80211_vif *vif) 2080 { 2081 /* Some firmware revisions have unstable STA powersave when listen 2082 * interval is set too high (e.g. 5). The symptoms are firmware doesn't 2083 * generate NullFunc frames properly even if buffered frames have been 2084 * indicated in Beacon TIM. Firmware would seldom wake up to pull 2085 * buffered frames. Often pinging the device from AP would simply fail. 2086 * 2087 * As a workaround set it to 1. 2088 */ 2089 if (vif->type == NL80211_IFTYPE_STATION) 2090 return 1; 2091 2092 return ar->hw->conf.listen_interval; 2093 } 2094 2095 static void ath10k_peer_assoc_h_basic(struct ath10k *ar, 2096 struct ieee80211_vif *vif, 2097 struct ieee80211_sta *sta, 2098 struct wmi_peer_assoc_complete_arg *arg) 2099 { 2100 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2101 u32 aid; 2102 2103 lockdep_assert_held(&ar->conf_mutex); 2104 2105 if (vif->type == NL80211_IFTYPE_STATION) 2106 aid = vif->bss_conf.aid; 2107 else 2108 aid = sta->aid; 2109 2110 ether_addr_copy(arg->addr, sta->addr); 2111 arg->vdev_id = arvif->vdev_id; 2112 arg->peer_aid = aid; 2113 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth; 2114 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); 2115 arg->peer_num_spatial_streams = 1; 2116 arg->peer_caps = vif->bss_conf.assoc_capability; 2117 } 2118 2119 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, 2120 struct ieee80211_vif *vif, 2121 struct ieee80211_sta *sta, 2122 struct wmi_peer_assoc_complete_arg *arg) 2123 { 2124 struct ieee80211_bss_conf *info = &vif->bss_conf; 2125 struct cfg80211_chan_def def; 2126 struct cfg80211_bss *bss; 2127 const u8 *rsnie = NULL; 2128 const u8 *wpaie = NULL; 2129 2130 lockdep_assert_held(&ar->conf_mutex); 2131 2132 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2133 return; 2134 2135 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, 2136 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); 2137 if (bss) { 2138 const struct cfg80211_bss_ies *ies; 2139 2140 rcu_read_lock(); 2141 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); 2142 2143 ies = rcu_dereference(bss->ies); 2144 2145 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 2146 WLAN_OUI_TYPE_MICROSOFT_WPA, 2147 ies->data, 2148 ies->len); 2149 rcu_read_unlock(); 2150 cfg80211_put_bss(ar->hw->wiphy, bss); 2151 } 2152 2153 /* FIXME: base on RSN IE/WPA IE is a correct idea? 
*/ 2154 if (rsnie || wpaie) { 2155 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); 2156 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; 2157 } 2158 2159 if (wpaie) { 2160 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); 2161 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; 2162 } 2163 2164 if (sta->mfp && 2165 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, 2166 ar->running_fw->fw_file.fw_features)) { 2167 arg->peer_flags |= ar->wmi.peer_flags->pmf; 2168 } 2169 } 2170 2171 static void ath10k_peer_assoc_h_rates(struct ath10k *ar, 2172 struct ieee80211_vif *vif, 2173 struct ieee80211_sta *sta, 2174 struct wmi_peer_assoc_complete_arg *arg) 2175 { 2176 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2177 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 2178 struct cfg80211_chan_def def; 2179 const struct ieee80211_supported_band *sband; 2180 const struct ieee80211_rate *rates; 2181 enum nl80211_band band; 2182 u32 ratemask; 2183 u8 rate; 2184 int i; 2185 2186 lockdep_assert_held(&ar->conf_mutex); 2187 2188 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2189 return; 2190 2191 band = def.chan->band; 2192 sband = ar->hw->wiphy->bands[band]; 2193 ratemask = sta->supp_rates[band]; 2194 ratemask &= arvif->bitrate_mask.control[band].legacy; 2195 rates = sband->bitrates; 2196 2197 rateset->num_rates = 0; 2198 2199 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { 2200 if (!(ratemask & 1)) 2201 continue; 2202 2203 rate = ath10k_mac_bitrate_to_rate(rates->bitrate); 2204 rateset->rates[rateset->num_rates] = rate; 2205 rateset->num_rates++; 2206 } 2207 } 2208 2209 static bool 2210 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 2211 { 2212 int nss; 2213 2214 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) 2215 if (ht_mcs_mask[nss]) 2216 return false; 2217 2218 return true; 2219 } 2220 2221 static bool 2222 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 2223 { 2224 int nss; 2225 2226 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) 2227 if (vht_mcs_mask[nss]) 2228 return false; 2229 2230 return true; 2231 } 2232 2233 static void ath10k_peer_assoc_h_ht(struct ath10k *ar, 2234 struct ieee80211_vif *vif, 2235 struct ieee80211_sta *sta, 2236 struct wmi_peer_assoc_complete_arg *arg) 2237 { 2238 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2239 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2240 struct cfg80211_chan_def def; 2241 enum nl80211_band band; 2242 const u8 *ht_mcs_mask; 2243 const u16 *vht_mcs_mask; 2244 int i, n; 2245 u8 max_nss; 2246 u32 stbc; 2247 2248 lockdep_assert_held(&ar->conf_mutex); 2249 2250 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2251 return; 2252 2253 if (!ht_cap->ht_supported) 2254 return; 2255 2256 band = def.chan->band; 2257 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2258 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2259 2260 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) && 2261 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2262 return; 2263 2264 arg->peer_flags |= ar->wmi.peer_flags->ht; 2265 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2266 ht_cap->ampdu_factor)) - 1; 2267 2268 arg->peer_mpdu_density = 2269 ath10k_parse_mpdudensity(ht_cap->ampdu_density); 2270 2271 arg->peer_ht_caps = ht_cap->cap; 2272 arg->peer_rate_caps |= WMI_RC_HT_FLAG; 2273 2274 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 2275 arg->peer_flags |= ar->wmi.peer_flags->ldbc; 2276 2277 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { 2278 
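		/* Peer can receive 40 MHz: advertise both the bw40 peer flag
		 * and the CW40 rate capability to firmware.
		 */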
arg->peer_flags |= ar->wmi.peer_flags->bw40; 2279 arg->peer_rate_caps |= WMI_RC_CW40_FLAG; 2280 } 2281 2282 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { 2283 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) 2284 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2285 2286 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) 2287 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2288 } 2289 2290 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { 2291 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; 2292 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2293 } 2294 2295 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { 2296 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; 2297 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; 2298 stbc = stbc << WMI_RC_RX_STBC_FLAG_S; 2299 arg->peer_rate_caps |= stbc; 2300 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2301 } 2302 2303 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 2304 arg->peer_rate_caps |= WMI_RC_TS_FLAG; 2305 else if (ht_cap->mcs.rx_mask[1]) 2306 arg->peer_rate_caps |= WMI_RC_DS_FLAG; 2307 2308 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) 2309 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && 2310 (ht_mcs_mask[i / 8] & BIT(i % 8))) { 2311 max_nss = (i / 8) + 1; 2312 arg->peer_ht_rates.rates[n++] = i; 2313 } 2314 2315 /* 2316 * This is a workaround for HT-enabled STAs which break the spec 2317 * and have no HT capabilities RX mask (no HT RX MCS map). 2318 * 2319 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), 2320 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. 2321 * 2322 * Firmware asserts if such situation occurs. 2323 */ 2324 if (n == 0) { 2325 arg->peer_ht_rates.num_rates = 8; 2326 for (i = 0; i < arg->peer_ht_rates.num_rates; i++) 2327 arg->peer_ht_rates.rates[i] = i; 2328 } else { 2329 arg->peer_ht_rates.num_rates = n; 2330 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2331 } 2332 2333 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 2334 arg->addr, 2335 arg->peer_ht_rates.num_rates, 2336 arg->peer_num_spatial_streams); 2337 } 2338 2339 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, 2340 struct ath10k_vif *arvif, 2341 struct ieee80211_sta *sta) 2342 { 2343 u32 uapsd = 0; 2344 u32 max_sp = 0; 2345 int ret = 0; 2346 2347 lockdep_assert_held(&ar->conf_mutex); 2348 2349 if (sta->wme && sta->uapsd_queues) { 2350 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 2351 sta->uapsd_queues, sta->max_sp); 2352 2353 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2354 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 2355 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 2356 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2357 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | 2358 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; 2359 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2360 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | 2361 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; 2362 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2363 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | 2364 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; 2365 2366 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 2367 max_sp = sta->max_sp; 2368 2369 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2370 sta->addr, 2371 WMI_AP_PS_PEER_PARAM_UAPSD, 2372 uapsd); 2373 if (ret) { 2374 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", 2375 arvif->vdev_id, ret); 2376 return ret; 2377 } 2378 2379 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2380 sta->addr, 2381 
WMI_AP_PS_PEER_PARAM_MAX_SP, 2382 max_sp); 2383 if (ret) { 2384 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", 2385 arvif->vdev_id, ret); 2386 return ret; 2387 } 2388 2389 /* TODO setup this based on STA listen interval and 2390 * beacon interval. Currently we don't know 2391 * sta->listen_interval - mac80211 patch required. 2392 * Currently use 10 seconds 2393 */ 2394 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, 2395 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 2396 10); 2397 if (ret) { 2398 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", 2399 arvif->vdev_id, ret); 2400 return ret; 2401 } 2402 } 2403 2404 return 0; 2405 } 2406 2407 static u16 2408 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set, 2409 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) 2410 { 2411 int idx_limit; 2412 int nss; 2413 u16 mcs_map; 2414 u16 mcs; 2415 2416 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { 2417 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & 2418 vht_mcs_limit[nss]; 2419 2420 if (mcs_map) 2421 idx_limit = fls(mcs_map) - 1; 2422 else 2423 idx_limit = -1; 2424 2425 switch (idx_limit) { 2426 case 0: /* fall through */ 2427 case 1: /* fall through */ 2428 case 2: /* fall through */ 2429 case 3: /* fall through */ 2430 case 4: /* fall through */ 2431 case 5: /* fall through */ 2432 case 6: /* fall through */ 2433 default: 2434 /* see ath10k_mac_can_set_bitrate_mask() */ 2435 WARN_ON(1); 2436 /* fall through */ 2437 case -1: 2438 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; 2439 break; 2440 case 7: 2441 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; 2442 break; 2443 case 8: 2444 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; 2445 break; 2446 case 9: 2447 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; 2448 break; 2449 } 2450 2451 tx_mcs_set &= ~(0x3 << (nss * 2)); 2452 tx_mcs_set |= mcs << (nss * 2); 2453 } 2454 2455 return tx_mcs_set; 2456 } 2457 2458 static void ath10k_peer_assoc_h_vht(struct ath10k *ar, 2459 struct ieee80211_vif *vif, 2460 struct ieee80211_sta *sta, 2461 struct wmi_peer_assoc_complete_arg *arg) 2462 { 2463 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2464 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2465 struct cfg80211_chan_def def; 2466 enum nl80211_band band; 2467 const u16 *vht_mcs_mask; 2468 u8 ampdu_factor; 2469 u8 max_nss, vht_mcs; 2470 int i; 2471 2472 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2473 return; 2474 2475 if (!vht_cap->vht_supported) 2476 return; 2477 2478 band = def.chan->band; 2479 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2480 2481 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2482 return; 2483 2484 arg->peer_flags |= ar->wmi.peer_flags->vht; 2485 2486 if (def.chan->band == NL80211_BAND_2GHZ) 2487 arg->peer_flags |= ar->wmi.peer_flags->vht_2g; 2488 2489 arg->peer_vht_caps = vht_cap->cap; 2490 2491 ampdu_factor = (vht_cap->cap & 2492 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> 2493 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 2494 2495 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to 2496 * zero in VHT IE. Using it would result in degraded throughput. 2497 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep 2498 * it if VHT max_mpdu is smaller. 
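 * (i.e. take the larger of the HT-derived and VHT-derived limits).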
2499 */ 2500 arg->peer_max_mpdu = max(arg->peer_max_mpdu, 2501 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2502 ampdu_factor)) - 1); 2503 2504 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2505 arg->peer_flags |= ar->wmi.peer_flags->bw80; 2506 2507 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) 2508 arg->peer_flags |= ar->wmi.peer_flags->bw160; 2509 2510 /* Calculate peer NSS capability from VHT capabilities if STA 2511 * supports VHT. 2512 */ 2513 for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) { 2514 vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> 2515 (2 * i) & 3; 2516 2517 if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) && 2518 vht_mcs_mask[i]) 2519 max_nss = i + 1; 2520 } 2521 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2522 arg->peer_vht_rates.rx_max_rate = 2523 __le16_to_cpu(vht_cap->vht_mcs.rx_highest); 2524 arg->peer_vht_rates.rx_mcs_set = 2525 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); 2526 arg->peer_vht_rates.tx_max_rate = 2527 __le16_to_cpu(vht_cap->vht_mcs.tx_highest); 2528 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit( 2529 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); 2530 2531 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", 2532 sta->addr, arg->peer_max_mpdu, arg->peer_flags); 2533 2534 if (arg->peer_vht_rates.rx_max_rate && 2535 (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) { 2536 switch (arg->peer_vht_rates.rx_max_rate) { 2537 case 1560: 2538 /* Must be 2x2 at 160Mhz is all it can do. */ 2539 arg->peer_bw_rxnss_override = 2; 2540 break; 2541 case 780: 2542 /* Can only do 1x1 at 160Mhz (Long Guard Interval) */ 2543 arg->peer_bw_rxnss_override = 1; 2544 break; 2545 } 2546 } 2547 } 2548 2549 static void ath10k_peer_assoc_h_qos(struct ath10k *ar, 2550 struct ieee80211_vif *vif, 2551 struct ieee80211_sta *sta, 2552 struct wmi_peer_assoc_complete_arg *arg) 2553 { 2554 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2555 2556 switch (arvif->vdev_type) { 2557 case WMI_VDEV_TYPE_AP: 2558 if (sta->wme) 2559 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2560 2561 if (sta->wme && sta->uapsd_queues) { 2562 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd; 2563 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; 2564 } 2565 break; 2566 case WMI_VDEV_TYPE_STA: 2567 if (sta->wme) 2568 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2569 break; 2570 case WMI_VDEV_TYPE_IBSS: 2571 if (sta->wme) 2572 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2573 break; 2574 default: 2575 break; 2576 } 2577 2578 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", 2579 sta->addr, !!(arg->peer_flags & 2580 arvif->ar->wmi.peer_flags->qos)); 2581 } 2582 2583 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) 2584 { 2585 return sta->supp_rates[NL80211_BAND_2GHZ] >> 2586 ATH10K_MAC_FIRST_OFDM_RATE_IDX; 2587 } 2588 2589 static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, 2590 struct ieee80211_sta *sta) 2591 { 2592 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { 2593 switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 2594 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 2595 return MODE_11AC_VHT160; 2596 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: 2597 return MODE_11AC_VHT80_80; 2598 default: 2599 /* not sure if this is a valid case? 
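 * Fall back to VHT160 if the supported channel width field holds an
 * unexpected value.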
*/ 2600 return MODE_11AC_VHT160; 2601 } 2602 } 2603 2604 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2605 return MODE_11AC_VHT80; 2606 2607 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2608 return MODE_11AC_VHT40; 2609 2610 if (sta->bandwidth == IEEE80211_STA_RX_BW_20) 2611 return MODE_11AC_VHT20; 2612 2613 return MODE_UNKNOWN; 2614 } 2615 2616 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, 2617 struct ieee80211_vif *vif, 2618 struct ieee80211_sta *sta, 2619 struct wmi_peer_assoc_complete_arg *arg) 2620 { 2621 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2622 struct cfg80211_chan_def def; 2623 enum nl80211_band band; 2624 const u8 *ht_mcs_mask; 2625 const u16 *vht_mcs_mask; 2626 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2627 2628 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2629 return; 2630 2631 band = def.chan->band; 2632 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2633 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2634 2635 switch (band) { 2636 case NL80211_BAND_2GHZ: 2637 if (sta->vht_cap.vht_supported && 2638 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2639 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2640 phymode = MODE_11AC_VHT40; 2641 else 2642 phymode = MODE_11AC_VHT20; 2643 } else if (sta->ht_cap.ht_supported && 2644 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2645 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2646 phymode = MODE_11NG_HT40; 2647 else 2648 phymode = MODE_11NG_HT20; 2649 } else if (ath10k_mac_sta_has_ofdm_only(sta)) { 2650 phymode = MODE_11G; 2651 } else { 2652 phymode = MODE_11B; 2653 } 2654 2655 break; 2656 case NL80211_BAND_5GHZ: 2657 /* 2658 * Check VHT first. 2659 */ 2660 if (sta->vht_cap.vht_supported && 2661 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2662 phymode = ath10k_mac_get_phymode_vht(ar, sta); 2663 } else if (sta->ht_cap.ht_supported && 2664 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2665 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) 2666 phymode = MODE_11NA_HT40; 2667 else 2668 phymode = MODE_11NA_HT20; 2669 } else { 2670 phymode = MODE_11A; 2671 } 2672 2673 break; 2674 default: 2675 break; 2676 } 2677 2678 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", 2679 sta->addr, ath10k_wmi_phymode_str(phymode)); 2680 2681 arg->peer_phymode = phymode; 2682 WARN_ON(phymode == MODE_UNKNOWN); 2683 } 2684 2685 static int ath10k_peer_assoc_prepare(struct ath10k *ar, 2686 struct ieee80211_vif *vif, 2687 struct ieee80211_sta *sta, 2688 struct wmi_peer_assoc_complete_arg *arg) 2689 { 2690 lockdep_assert_held(&ar->conf_mutex); 2691 2692 memset(arg, 0, sizeof(*arg)); 2693 2694 ath10k_peer_assoc_h_basic(ar, vif, sta, arg); 2695 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); 2696 ath10k_peer_assoc_h_rates(ar, vif, sta, arg); 2697 ath10k_peer_assoc_h_ht(ar, vif, sta, arg); 2698 ath10k_peer_assoc_h_vht(ar, vif, sta, arg); 2699 ath10k_peer_assoc_h_qos(ar, vif, sta, arg); 2700 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); 2701 2702 return 0; 2703 } 2704 2705 static const u32 ath10k_smps_map[] = { 2706 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, 2707 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, 2708 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 2709 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 2710 }; 2711 2712 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, 2713 const u8 *addr, 2714 const struct ieee80211_sta_ht_cap *ht_cap) 2715 { 2716 int smps; 2717 2718 if (!ht_cap->ht_supported) 2719 return 0; 2720 2721 smps = ht_cap->cap & 
IEEE80211_HT_CAP_SM_PS; 2722 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2723 2724 if (smps >= ARRAY_SIZE(ath10k_smps_map)) 2725 return -EINVAL; 2726 2727 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, 2728 WMI_PEER_SMPS_STATE, 2729 ath10k_smps_map[smps]); 2730 } 2731 2732 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, 2733 struct ieee80211_vif *vif, 2734 struct ieee80211_sta_vht_cap vht_cap) 2735 { 2736 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2737 int ret; 2738 u32 param; 2739 u32 value; 2740 2741 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) 2742 return 0; 2743 2744 if (!(ar->vht_cap_info & 2745 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2746 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 2747 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2748 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) 2749 return 0; 2750 2751 param = ar->wmi.vdev_param->txbf; 2752 value = 0; 2753 2754 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) 2755 return 0; 2756 2757 /* The following logic is correct. If a remote STA advertises support 2758 * for being a beamformer then we should enable us being a beamformee. 2759 */ 2760 2761 if (ar->vht_cap_info & 2762 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2763 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 2764 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 2765 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2766 2767 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 2768 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; 2769 } 2770 2771 if (ar->vht_cap_info & 2772 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2773 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 2774 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 2775 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2776 2777 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 2778 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; 2779 } 2780 2781 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) 2782 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2783 2784 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) 2785 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2786 2787 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); 2788 if (ret) { 2789 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", 2790 value, ret); 2791 return ret; 2792 } 2793 2794 return 0; 2795 } 2796 2797 /* can be called only in mac80211 callbacks due to `key_count` usage */ 2798 static void ath10k_bss_assoc(struct ieee80211_hw *hw, 2799 struct ieee80211_vif *vif, 2800 struct ieee80211_bss_conf *bss_conf) 2801 { 2802 struct ath10k *ar = hw->priv; 2803 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2804 struct ieee80211_sta_ht_cap ht_cap; 2805 struct ieee80211_sta_vht_cap vht_cap; 2806 struct wmi_peer_assoc_complete_arg peer_arg; 2807 struct ieee80211_sta *ap_sta; 2808 int ret; 2809 2810 lockdep_assert_held(&ar->conf_mutex); 2811 2812 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2813 arvif->vdev_id, arvif->bssid, arvif->aid); 2814 2815 rcu_read_lock(); 2816 2817 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2818 if (!ap_sta) { 2819 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", 2820 bss_conf->bssid, arvif->vdev_id); 2821 rcu_read_unlock(); 2822 return; 2823 } 2824 2825 /* ap_sta must be accessed only within rcu section which must be left 2826 * before calling ath10k_setup_peer_smps() which might sleep. 
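 * Take copies of the HT/VHT capabilities here, while still under RCU, and
 * use the copies afterwards.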
2827 */ 2828 ht_cap = ap_sta->ht_cap; 2829 vht_cap = ap_sta->vht_cap; 2830 2831 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); 2832 if (ret) { 2833 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", 2834 bss_conf->bssid, arvif->vdev_id, ret); 2835 rcu_read_unlock(); 2836 return; 2837 } 2838 2839 rcu_read_unlock(); 2840 2841 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2842 if (ret) { 2843 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", 2844 bss_conf->bssid, arvif->vdev_id, ret); 2845 return; 2846 } 2847 2848 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); 2849 if (ret) { 2850 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", 2851 arvif->vdev_id, ret); 2852 return; 2853 } 2854 2855 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2856 if (ret) { 2857 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", 2858 arvif->vdev_id, bss_conf->bssid, ret); 2859 return; 2860 } 2861 2862 ath10k_dbg(ar, ATH10K_DBG_MAC, 2863 "mac vdev %d up (associated) bssid %pM aid %d\n", 2864 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 2865 2866 WARN_ON(arvif->is_up); 2867 2868 arvif->aid = bss_conf->aid; 2869 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2870 2871 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2872 if (ret) { 2873 ath10k_warn(ar, "failed to set vdev %d up: %d\n", 2874 arvif->vdev_id, ret); 2875 return; 2876 } 2877 2878 arvif->is_up = true; 2879 2880 /* Workaround: Some firmware revisions (tested with qca6174 2881 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be 2882 * poked with peer param command. 2883 */ 2884 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, 2885 WMI_PEER_DUMMY_VAR, 1); 2886 if (ret) { 2887 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", 2888 arvif->bssid, arvif->vdev_id, ret); 2889 return; 2890 } 2891 } 2892 2893 static void ath10k_bss_disassoc(struct ieee80211_hw *hw, 2894 struct ieee80211_vif *vif) 2895 { 2896 struct ath10k *ar = hw->priv; 2897 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2898 struct ieee80211_sta_vht_cap vht_cap = {}; 2899 int ret; 2900 2901 lockdep_assert_held(&ar->conf_mutex); 2902 2903 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2904 arvif->vdev_id, arvif->bssid); 2905 2906 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 2907 if (ret) 2908 ath10k_warn(ar, "failed to down vdev %i: %d\n", 2909 arvif->vdev_id, ret); 2910 2911 arvif->def_wep_key_idx = -1; 2912 2913 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2914 if (ret) { 2915 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", 2916 arvif->vdev_id, ret); 2917 return; 2918 } 2919 2920 arvif->is_up = false; 2921 2922 cancel_delayed_work_sync(&arvif->connection_loss_work); 2923 } 2924 2925 static int ath10k_station_assoc(struct ath10k *ar, 2926 struct ieee80211_vif *vif, 2927 struct ieee80211_sta *sta, 2928 bool reassoc) 2929 { 2930 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2931 struct wmi_peer_assoc_complete_arg peer_arg; 2932 int ret = 0; 2933 2934 lockdep_assert_held(&ar->conf_mutex); 2935 2936 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); 2937 if (ret) { 2938 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", 2939 sta->addr, arvif->vdev_id, ret); 2940 return ret; 2941 } 2942 2943 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2944 if (ret) { 2945 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", 2946 
sta->addr, arvif->vdev_id, ret); 2947 return ret; 2948 } 2949 2950 /* Re-assoc is run only to update supported rates for given station. It 2951 * doesn't make much sense to reconfigure the peer completely. 2952 */ 2953 if (!reassoc) { 2954 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, 2955 &sta->ht_cap); 2956 if (ret) { 2957 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", 2958 arvif->vdev_id, ret); 2959 return ret; 2960 } 2961 2962 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 2963 if (ret) { 2964 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", 2965 sta->addr, arvif->vdev_id, ret); 2966 return ret; 2967 } 2968 2969 if (!sta->wme) { 2970 arvif->num_legacy_stations++; 2971 ret = ath10k_recalc_rtscts_prot(arvif); 2972 if (ret) { 2973 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2974 arvif->vdev_id, ret); 2975 return ret; 2976 } 2977 } 2978 2979 /* Plumb cached keys only for static WEP */ 2980 if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) { 2981 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 2982 if (ret) { 2983 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", 2984 arvif->vdev_id, ret); 2985 return ret; 2986 } 2987 } 2988 } 2989 2990 return ret; 2991 } 2992 2993 static int ath10k_station_disassoc(struct ath10k *ar, 2994 struct ieee80211_vif *vif, 2995 struct ieee80211_sta *sta) 2996 { 2997 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2998 int ret = 0; 2999 3000 lockdep_assert_held(&ar->conf_mutex); 3001 3002 if (!sta->wme) { 3003 arvif->num_legacy_stations--; 3004 ret = ath10k_recalc_rtscts_prot(arvif); 3005 if (ret) { 3006 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 3007 arvif->vdev_id, ret); 3008 return ret; 3009 } 3010 } 3011 3012 ret = ath10k_clear_peer_keys(arvif, sta->addr); 3013 if (ret) { 3014 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", 3015 arvif->vdev_id, ret); 3016 return ret; 3017 } 3018 3019 return ret; 3020 } 3021 3022 /**************/ 3023 /* Regulatory */ 3024 /**************/ 3025 3026 static int ath10k_update_channel_list(struct ath10k *ar) 3027 { 3028 struct ieee80211_hw *hw = ar->hw; 3029 struct ieee80211_supported_band **bands; 3030 enum nl80211_band band; 3031 struct ieee80211_channel *channel; 3032 struct wmi_scan_chan_list_arg arg = {0}; 3033 struct wmi_channel_arg *ch; 3034 bool passive; 3035 int len; 3036 int ret; 3037 int i; 3038 3039 lockdep_assert_held(&ar->conf_mutex); 3040 3041 bands = hw->wiphy->bands; 3042 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3043 if (!bands[band]) 3044 continue; 3045 3046 for (i = 0; i < bands[band]->n_channels; i++) { 3047 if (bands[band]->channels[i].flags & 3048 IEEE80211_CHAN_DISABLED) 3049 continue; 3050 3051 arg.n_channels++; 3052 } 3053 } 3054 3055 len = sizeof(struct wmi_channel_arg) * arg.n_channels; 3056 arg.channels = kzalloc(len, GFP_KERNEL); 3057 if (!arg.channels) 3058 return -ENOMEM; 3059 3060 ch = arg.channels; 3061 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3062 if (!bands[band]) 3063 continue; 3064 3065 for (i = 0; i < bands[band]->n_channels; i++) { 3066 channel = &bands[band]->channels[i]; 3067 3068 if (channel->flags & IEEE80211_CHAN_DISABLED) 3069 continue; 3070 3071 ch->allow_ht = true; 3072 3073 /* FIXME: when should we really allow VHT? 
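 * For now VHT is allowed on every channel that is not disabled.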
*/ 3074 ch->allow_vht = true; 3075 3076 ch->allow_ibss = 3077 !(channel->flags & IEEE80211_CHAN_NO_IR); 3078 3079 ch->ht40plus = 3080 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); 3081 3082 ch->chan_radar = 3083 !!(channel->flags & IEEE80211_CHAN_RADAR); 3084 3085 passive = channel->flags & IEEE80211_CHAN_NO_IR; 3086 ch->passive = passive; 3087 3088 ch->freq = channel->center_freq; 3089 ch->band_center_freq1 = channel->center_freq; 3090 ch->min_power = 0; 3091 ch->max_power = channel->max_power * 2; 3092 ch->max_reg_power = channel->max_reg_power * 2; 3093 ch->max_antenna_gain = channel->max_antenna_gain * 2; 3094 ch->reg_class_id = 0; /* FIXME */ 3095 3096 /* FIXME: why use only legacy modes, why not any 3097 * HT/VHT modes? Would that even make any 3098 * difference? 3099 */ 3100 if (channel->band == NL80211_BAND_2GHZ) 3101 ch->mode = MODE_11G; 3102 else 3103 ch->mode = MODE_11A; 3104 3105 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) 3106 continue; 3107 3108 ath10k_dbg(ar, ATH10K_DBG_WMI, 3109 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", 3110 ch - arg.channels, arg.n_channels, 3111 ch->freq, ch->max_power, ch->max_reg_power, 3112 ch->max_antenna_gain, ch->mode); 3113 3114 ch++; 3115 } 3116 } 3117 3118 ret = ath10k_wmi_scan_chan_list(ar, &arg); 3119 kfree(arg.channels); 3120 3121 return ret; 3122 } 3123 3124 static enum wmi_dfs_region 3125 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) 3126 { 3127 switch (dfs_region) { 3128 case NL80211_DFS_UNSET: 3129 return WMI_UNINIT_DFS_DOMAIN; 3130 case NL80211_DFS_FCC: 3131 return WMI_FCC_DFS_DOMAIN; 3132 case NL80211_DFS_ETSI: 3133 return WMI_ETSI_DFS_DOMAIN; 3134 case NL80211_DFS_JP: 3135 return WMI_MKK4_DFS_DOMAIN; 3136 } 3137 return WMI_UNINIT_DFS_DOMAIN; 3138 } 3139 3140 static void ath10k_regd_update(struct ath10k *ar) 3141 { 3142 struct reg_dmn_pair_mapping *regpair; 3143 int ret; 3144 enum wmi_dfs_region wmi_dfs_reg; 3145 enum nl80211_dfs_regions nl_dfs_reg; 3146 3147 lockdep_assert_held(&ar->conf_mutex); 3148 3149 ret = ath10k_update_channel_list(ar); 3150 if (ret) 3151 ath10k_warn(ar, "failed to update channel list: %d\n", ret); 3152 3153 regpair = ar->ath_common.regulatory.regpair; 3154 3155 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3156 nl_dfs_reg = ar->dfs_detector->region; 3157 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); 3158 } else { 3159 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; 3160 } 3161 3162 /* Target allows setting up per-band regdomain but ath_common provides 3163 * a combined one only 3164 */ 3165 ret = ath10k_wmi_pdev_set_regdomain(ar, 3166 regpair->reg_domain, 3167 regpair->reg_domain, /* 2ghz */ 3168 regpair->reg_domain, /* 5ghz */ 3169 regpair->reg_2ghz_ctl, 3170 regpair->reg_5ghz_ctl, 3171 wmi_dfs_reg); 3172 if (ret) 3173 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); 3174 } 3175 3176 static void ath10k_mac_update_channel_list(struct ath10k *ar, 3177 struct ieee80211_supported_band *band) 3178 { 3179 int i; 3180 3181 if (ar->low_5ghz_chan && ar->high_5ghz_chan) { 3182 for (i = 0; i < band->n_channels; i++) { 3183 if (band->channels[i].center_freq < ar->low_5ghz_chan || 3184 band->channels[i].center_freq > ar->high_5ghz_chan) 3185 band->channels[i].flags |= 3186 IEEE80211_CHAN_DISABLED; 3187 } 3188 } 3189 } 3190 3191 static void ath10k_reg_notifier(struct wiphy *wiphy, 3192 struct regulatory_request *request) 3193 { 3194 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 3195 struct ath10k *ar = hw->priv; 3196 bool result; 3197 
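	/* Let ath_common digest the regulatory request first; the firmware
	 * regdomain is reprogrammed via ath10k_regd_update() below when the
	 * device is running.
	 */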
3198 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); 3199 3200 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3201 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", 3202 request->dfs_region); 3203 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 3204 request->dfs_region); 3205 if (!result) 3206 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", 3207 request->dfs_region); 3208 } 3209 3210 mutex_lock(&ar->conf_mutex); 3211 if (ar->state == ATH10K_STATE_ON) 3212 ath10k_regd_update(ar); 3213 mutex_unlock(&ar->conf_mutex); 3214 3215 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 3216 ath10k_mac_update_channel_list(ar, 3217 ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); 3218 } 3219 3220 static void ath10k_stop_radar_confirmation(struct ath10k *ar) 3221 { 3222 spin_lock_bh(&ar->data_lock); 3223 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED; 3224 spin_unlock_bh(&ar->data_lock); 3225 3226 cancel_work_sync(&ar->radar_confirmation_work); 3227 } 3228 3229 /***************/ 3230 /* TX handlers */ 3231 /***************/ 3232 3233 enum ath10k_mac_tx_path { 3234 ATH10K_MAC_TX_HTT, 3235 ATH10K_MAC_TX_HTT_MGMT, 3236 ATH10K_MAC_TX_WMI_MGMT, 3237 ATH10K_MAC_TX_UNKNOWN, 3238 }; 3239 3240 void ath10k_mac_tx_lock(struct ath10k *ar, int reason) 3241 { 3242 lockdep_assert_held(&ar->htt.tx_lock); 3243 3244 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3245 ar->tx_paused |= BIT(reason); 3246 ieee80211_stop_queues(ar->hw); 3247 } 3248 3249 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, 3250 struct ieee80211_vif *vif) 3251 { 3252 struct ath10k *ar = data; 3253 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3254 3255 if (arvif->tx_paused) 3256 return; 3257 3258 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3259 } 3260 3261 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) 3262 { 3263 lockdep_assert_held(&ar->htt.tx_lock); 3264 3265 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3266 ar->tx_paused &= ~BIT(reason); 3267 3268 if (ar->tx_paused) 3269 return; 3270 3271 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3272 IEEE80211_IFACE_ITER_RESUME_ALL, 3273 ath10k_mac_tx_unlock_iter, 3274 ar); 3275 3276 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue); 3277 } 3278 3279 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason) 3280 { 3281 struct ath10k *ar = arvif->ar; 3282 3283 lockdep_assert_held(&ar->htt.tx_lock); 3284 3285 WARN_ON(reason >= BITS_PER_LONG); 3286 arvif->tx_paused |= BIT(reason); 3287 ieee80211_stop_queue(ar->hw, arvif->vdev_id); 3288 } 3289 3290 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason) 3291 { 3292 struct ath10k *ar = arvif->ar; 3293 3294 lockdep_assert_held(&ar->htt.tx_lock); 3295 3296 WARN_ON(reason >= BITS_PER_LONG); 3297 arvif->tx_paused &= ~BIT(reason); 3298 3299 if (ar->tx_paused) 3300 return; 3301 3302 if (arvif->tx_paused) 3303 return; 3304 3305 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3306 } 3307 3308 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, 3309 enum wmi_tlv_tx_pause_id pause_id, 3310 enum wmi_tlv_tx_pause_action action) 3311 { 3312 struct ath10k *ar = arvif->ar; 3313 3314 lockdep_assert_held(&ar->htt.tx_lock); 3315 3316 switch (action) { 3317 case WMI_TLV_TX_PAUSE_ACTION_STOP: 3318 ath10k_mac_vif_tx_lock(arvif, pause_id); 3319 break; 3320 case WMI_TLV_TX_PAUSE_ACTION_WAKE: 3321 ath10k_mac_vif_tx_unlock(arvif, pause_id); 3322 break; 3323 default: 3324 ath10k_dbg(ar, ATH10K_DBG_BOOT, 3325 "received 
unknown tx pause action %d on vdev %i, ignoring\n", 3326 action, arvif->vdev_id); 3327 break; 3328 } 3329 } 3330 3331 struct ath10k_mac_tx_pause { 3332 u32 vdev_id; 3333 enum wmi_tlv_tx_pause_id pause_id; 3334 enum wmi_tlv_tx_pause_action action; 3335 }; 3336 3337 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, 3338 struct ieee80211_vif *vif) 3339 { 3340 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3341 struct ath10k_mac_tx_pause *arg = data; 3342 3343 if (arvif->vdev_id != arg->vdev_id) 3344 return; 3345 3346 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action); 3347 } 3348 3349 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, 3350 enum wmi_tlv_tx_pause_id pause_id, 3351 enum wmi_tlv_tx_pause_action action) 3352 { 3353 struct ath10k_mac_tx_pause arg = { 3354 .vdev_id = vdev_id, 3355 .pause_id = pause_id, 3356 .action = action, 3357 }; 3358 3359 spin_lock_bh(&ar->htt.tx_lock); 3360 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3361 IEEE80211_IFACE_ITER_RESUME_ALL, 3362 ath10k_mac_handle_tx_pause_iter, 3363 &arg); 3364 spin_unlock_bh(&ar->htt.tx_lock); 3365 } 3366 3367 static enum ath10k_hw_txrx_mode 3368 ath10k_mac_tx_h_get_txmode(struct ath10k *ar, 3369 struct ieee80211_vif *vif, 3370 struct ieee80211_sta *sta, 3371 struct sk_buff *skb) 3372 { 3373 const struct ieee80211_hdr *hdr = (void *)skb->data; 3374 __le16 fc = hdr->frame_control; 3375 3376 if (!vif || vif->type == NL80211_IFTYPE_MONITOR) 3377 return ATH10K_HW_TXRX_RAW; 3378 3379 if (ieee80211_is_mgmt(fc)) 3380 return ATH10K_HW_TXRX_MGMT; 3381 3382 /* Workaround: 3383 * 3384 * NullFunc frames are mostly used to ping if a client or AP are still 3385 * reachable and responsive. This implies tx status reports must be 3386 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can 3387 * come to a conclusion that the other end disappeared and tear down 3388 * BSS connection or it can never disconnect from BSS/client (which is 3389 * the case). 3390 * 3391 * Firmware with HTT older than 3.0 delivers incorrect tx status for 3392 * NullFunc frames to driver. However there's a HTT Mgmt Tx command 3393 * which seems to deliver correct tx reports for NullFunc frames. The 3394 * downside of using it is it ignores client powersave state so it can 3395 * end up disconnecting sleeping clients in AP mode. It should fix STA 3396 * mode though because AP don't sleep. 3397 */ 3398 if (ar->htt.target_version_major < 3 && 3399 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && 3400 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3401 ar->running_fw->fw_file.fw_features)) 3402 return ATH10K_HW_TXRX_MGMT; 3403 3404 /* Workaround: 3405 * 3406 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for 3407 * NativeWifi txmode - it selects AP key instead of peer key. It seems 3408 * to work with Ethernet txmode so use it. 3409 * 3410 * FIXME: Check if raw mode works with TDLS. 
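 * Ethernet txmode is therefore used for TDLS data frames below.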
3411 */ 3412 if (ieee80211_is_data_present(fc) && sta && sta->tdls) 3413 return ATH10K_HW_TXRX_ETHERNET; 3414 3415 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 3416 return ATH10K_HW_TXRX_RAW; 3417 3418 return ATH10K_HW_TXRX_NATIVE_WIFI; 3419 } 3420 3421 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, 3422 struct sk_buff *skb) 3423 { 3424 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3425 const struct ieee80211_hdr *hdr = (void *)skb->data; 3426 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | 3427 IEEE80211_TX_CTL_INJECTED; 3428 3429 if (!ieee80211_has_protected(hdr->frame_control)) 3430 return false; 3431 3432 if ((info->flags & mask) == mask) 3433 return false; 3434 3435 if (vif) 3436 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; 3437 3438 return true; 3439 } 3440 3441 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS 3442 * Control in the header. 3443 */ 3444 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) 3445 { 3446 struct ieee80211_hdr *hdr = (void *)skb->data; 3447 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3448 u8 *qos_ctl; 3449 3450 if (!ieee80211_is_data_qos(hdr->frame_control)) 3451 return; 3452 3453 qos_ctl = ieee80211_get_qos_ctl(hdr); 3454 memmove(skb->data + IEEE80211_QOS_CTL_LEN, 3455 skb->data, (void *)qos_ctl - (void *)skb->data); 3456 skb_pull(skb, IEEE80211_QOS_CTL_LEN); 3457 3458 /* Some firmware revisions don't handle sending QoS NullFunc well. 3459 * These frames are mainly used for CQM purposes so it doesn't really 3460 * matter whether QoS NullFunc or NullFunc are sent. 3461 */ 3462 hdr = (void *)skb->data; 3463 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) 3464 cb->flags &= ~ATH10K_SKB_F_QOS; 3465 3466 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 3467 } 3468 3469 static void ath10k_tx_h_8023(struct sk_buff *skb) 3470 { 3471 struct ieee80211_hdr *hdr; 3472 struct rfc1042_hdr *rfc1042; 3473 struct ethhdr *eth; 3474 size_t hdrlen; 3475 u8 da[ETH_ALEN]; 3476 u8 sa[ETH_ALEN]; 3477 __be16 type; 3478 3479 hdr = (void *)skb->data; 3480 hdrlen = ieee80211_hdrlen(hdr->frame_control); 3481 rfc1042 = (void *)skb->data + hdrlen; 3482 3483 ether_addr_copy(da, ieee80211_get_DA(hdr)); 3484 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 3485 type = rfc1042->snap_type; 3486 3487 skb_pull(skb, hdrlen + sizeof(*rfc1042)); 3488 skb_push(skb, sizeof(*eth)); 3489 3490 eth = (void *)skb->data; 3491 ether_addr_copy(eth->h_dest, da); 3492 ether_addr_copy(eth->h_source, sa); 3493 eth->h_proto = type; 3494 } 3495 3496 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, 3497 struct ieee80211_vif *vif, 3498 struct sk_buff *skb) 3499 { 3500 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3501 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3502 3503 /* This is case only for P2P_GO */ 3504 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 3505 return; 3506 3507 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { 3508 spin_lock_bh(&ar->data_lock); 3509 if (arvif->u.ap.noa_data) 3510 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, 3511 GFP_ATOMIC)) 3512 skb_put_data(skb, arvif->u.ap.noa_data, 3513 arvif->u.ap.noa_len); 3514 spin_unlock_bh(&ar->data_lock); 3515 } 3516 } 3517 3518 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, 3519 struct ieee80211_vif *vif, 3520 struct ieee80211_txq *txq, 3521 struct sk_buff *skb) 3522 { 3523 struct ieee80211_hdr *hdr = (void *)skb->data; 3524 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3525 
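	/* Reset the driver's per-skb flags and record the crypto, management
	 * and QoS hints that the tx path uses later.
	 */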
3526 cb->flags = 0; 3527 if (!ath10k_tx_h_use_hwcrypto(vif, skb)) 3528 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3529 3530 if (ieee80211_is_mgmt(hdr->frame_control)) 3531 cb->flags |= ATH10K_SKB_F_MGMT; 3532 3533 if (ieee80211_is_data_qos(hdr->frame_control)) 3534 cb->flags |= ATH10K_SKB_F_QOS; 3535 3536 cb->vif = vif; 3537 cb->txq = txq; 3538 } 3539 3540 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) 3541 { 3542 /* FIXME: Not really sure since when the behaviour changed. At some 3543 * point new firmware stopped requiring creation of peer entries for 3544 * offchannel tx (and actually creating them causes issues with wmi-htc 3545 * tx credit replenishment and reliability). Assuming it's at least 3.4 3546 * because that's when the `freq` was introduced to TX_FRM HTT command. 3547 */ 3548 return (ar->htt.target_version_major >= 3 && 3549 ar->htt.target_version_minor >= 4 && 3550 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); 3551 } 3552 3553 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) 3554 { 3555 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 3556 int ret = 0; 3557 3558 spin_lock_bh(&ar->data_lock); 3559 3560 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { 3561 ath10k_warn(ar, "wmi mgmt tx queue is full\n"); 3562 ret = -ENOSPC; 3563 goto unlock; 3564 } 3565 3566 __skb_queue_tail(q, skb); 3567 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 3568 3569 unlock: 3570 spin_unlock_bh(&ar->data_lock); 3571 3572 return ret; 3573 } 3574 3575 static enum ath10k_mac_tx_path 3576 ath10k_mac_tx_h_get_txpath(struct ath10k *ar, 3577 struct sk_buff *skb, 3578 enum ath10k_hw_txrx_mode txmode) 3579 { 3580 switch (txmode) { 3581 case ATH10K_HW_TXRX_RAW: 3582 case ATH10K_HW_TXRX_NATIVE_WIFI: 3583 case ATH10K_HW_TXRX_ETHERNET: 3584 return ATH10K_MAC_TX_HTT; 3585 case ATH10K_HW_TXRX_MGMT: 3586 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3587 ar->running_fw->fw_file.fw_features) || 3588 test_bit(WMI_SERVICE_MGMT_TX_WMI, 3589 ar->wmi.svc_map)) 3590 return ATH10K_MAC_TX_WMI_MGMT; 3591 else if (ar->htt.target_version_major >= 3) 3592 return ATH10K_MAC_TX_HTT; 3593 else 3594 return ATH10K_MAC_TX_HTT_MGMT; 3595 } 3596 3597 return ATH10K_MAC_TX_UNKNOWN; 3598 } 3599 3600 static int ath10k_mac_tx_submit(struct ath10k *ar, 3601 enum ath10k_hw_txrx_mode txmode, 3602 enum ath10k_mac_tx_path txpath, 3603 struct sk_buff *skb) 3604 { 3605 struct ath10k_htt *htt = &ar->htt; 3606 int ret = -EINVAL; 3607 3608 switch (txpath) { 3609 case ATH10K_MAC_TX_HTT: 3610 ret = ath10k_htt_tx(htt, txmode, skb); 3611 break; 3612 case ATH10K_MAC_TX_HTT_MGMT: 3613 ret = ath10k_htt_mgmt_tx(htt, skb); 3614 break; 3615 case ATH10K_MAC_TX_WMI_MGMT: 3616 ret = ath10k_mac_tx_wmi_mgmt(ar, skb); 3617 break; 3618 case ATH10K_MAC_TX_UNKNOWN: 3619 WARN_ON_ONCE(1); 3620 ret = -EINVAL; 3621 break; 3622 } 3623 3624 if (ret) { 3625 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", 3626 ret); 3627 ieee80211_free_txskb(ar->hw, skb); 3628 } 3629 3630 return ret; 3631 } 3632 3633 /* This function consumes the sk_buff regardless of return value as far as 3634 * caller is concerned so no freeing is necessary afterwards. 
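 * On failure the skb is freed either here or by ath10k_mac_tx_submit().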
3635 */ 3636 static int ath10k_mac_tx(struct ath10k *ar, 3637 struct ieee80211_vif *vif, 3638 enum ath10k_hw_txrx_mode txmode, 3639 enum ath10k_mac_tx_path txpath, 3640 struct sk_buff *skb) 3641 { 3642 struct ieee80211_hw *hw = ar->hw; 3643 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3644 int ret; 3645 3646 /* We should disable CCK RATE due to P2P */ 3647 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 3648 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); 3649 3650 switch (txmode) { 3651 case ATH10K_HW_TXRX_MGMT: 3652 case ATH10K_HW_TXRX_NATIVE_WIFI: 3653 ath10k_tx_h_nwifi(hw, skb); 3654 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); 3655 ath10k_tx_h_seq_no(vif, skb); 3656 break; 3657 case ATH10K_HW_TXRX_ETHERNET: 3658 ath10k_tx_h_8023(skb); 3659 break; 3660 case ATH10K_HW_TXRX_RAW: 3661 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 3662 WARN_ON_ONCE(1); 3663 ieee80211_free_txskb(hw, skb); 3664 return -ENOTSUPP; 3665 } 3666 } 3667 3668 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 3669 if (!ath10k_mac_tx_frm_has_freq(ar)) { 3670 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n", 3671 skb); 3672 3673 skb_queue_tail(&ar->offchan_tx_queue, skb); 3674 ieee80211_queue_work(hw, &ar->offchan_tx_work); 3675 return 0; 3676 } 3677 } 3678 3679 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb); 3680 if (ret) { 3681 ath10k_warn(ar, "failed to submit frame: %d\n", ret); 3682 return ret; 3683 } 3684 3685 return 0; 3686 } 3687 3688 void ath10k_offchan_tx_purge(struct ath10k *ar) 3689 { 3690 struct sk_buff *skb; 3691 3692 for (;;) { 3693 skb = skb_dequeue(&ar->offchan_tx_queue); 3694 if (!skb) 3695 break; 3696 3697 ieee80211_free_txskb(ar->hw, skb); 3698 } 3699 } 3700 3701 void ath10k_offchan_tx_work(struct work_struct *work) 3702 { 3703 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); 3704 struct ath10k_peer *peer; 3705 struct ath10k_vif *arvif; 3706 enum ath10k_hw_txrx_mode txmode; 3707 enum ath10k_mac_tx_path txpath; 3708 struct ieee80211_hdr *hdr; 3709 struct ieee80211_vif *vif; 3710 struct ieee80211_sta *sta; 3711 struct sk_buff *skb; 3712 const u8 *peer_addr; 3713 int vdev_id; 3714 int ret; 3715 unsigned long time_left; 3716 bool tmp_peer_created = false; 3717 3718 /* FW requirement: We must create a peer before FW will send out 3719 * an offchannel frame. Otherwise the frame will be stuck and 3720 * never transmitted. We delete the peer upon tx completion. 3721 * It is unlikely that a peer for offchannel tx will already be 3722 * present. However it may be in some rare cases so account for that. 3723 * Otherwise we might remove a legitimate peer and break stuff. 3724 */ 3725 3726 for (;;) { 3727 skb = skb_dequeue(&ar->offchan_tx_queue); 3728 if (!skb) 3729 break; 3730 3731 mutex_lock(&ar->conf_mutex); 3732 3733 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n", 3734 skb); 3735 3736 hdr = (struct ieee80211_hdr *)skb->data; 3737 peer_addr = ieee80211_get_DA(hdr); 3738 3739 spin_lock_bh(&ar->data_lock); 3740 vdev_id = ar->scan.vdev_id; 3741 peer = ath10k_peer_find(ar, vdev_id, peer_addr); 3742 spin_unlock_bh(&ar->data_lock); 3743 3744 if (peer) 3745 /* FIXME: should this use ath10k_warn()? 
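 * A pre-existing peer is unusual but legal here - it is simply reused and
 * is not deleted once the frame completes.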
*/ 3746 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", 3747 peer_addr, vdev_id); 3748 3749 if (!peer) { 3750 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, 3751 peer_addr, 3752 WMI_PEER_TYPE_DEFAULT); 3753 if (ret) 3754 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", 3755 peer_addr, vdev_id, ret); 3756 tmp_peer_created = (ret == 0); 3757 } 3758 3759 spin_lock_bh(&ar->data_lock); 3760 reinit_completion(&ar->offchan_tx_completed); 3761 ar->offchan_tx_skb = skb; 3762 spin_unlock_bh(&ar->data_lock); 3763 3764 /* It's safe to access vif and sta - conf_mutex guarantees that 3765 * sta_state() and remove_interface() are locked exclusively 3766 * out wrt to this offchannel worker. 3767 */ 3768 arvif = ath10k_get_arvif(ar, vdev_id); 3769 if (arvif) { 3770 vif = arvif->vif; 3771 sta = ieee80211_find_sta(vif, peer_addr); 3772 } else { 3773 vif = NULL; 3774 sta = NULL; 3775 } 3776 3777 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3778 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3779 3780 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3781 if (ret) { 3782 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", 3783 ret); 3784 /* not serious */ 3785 } 3786 3787 time_left = 3788 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); 3789 if (time_left == 0) 3790 ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n", 3791 skb); 3792 3793 if (!peer && tmp_peer_created) { 3794 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); 3795 if (ret) 3796 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", 3797 peer_addr, vdev_id, ret); 3798 } 3799 3800 mutex_unlock(&ar->conf_mutex); 3801 } 3802 } 3803 3804 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) 3805 { 3806 struct sk_buff *skb; 3807 3808 for (;;) { 3809 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3810 if (!skb) 3811 break; 3812 3813 ieee80211_free_txskb(ar->hw, skb); 3814 } 3815 } 3816 3817 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) 3818 { 3819 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); 3820 struct sk_buff *skb; 3821 dma_addr_t paddr; 3822 int ret; 3823 3824 for (;;) { 3825 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3826 if (!skb) 3827 break; 3828 3829 if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, 3830 ar->running_fw->fw_file.fw_features)) { 3831 paddr = dma_map_single(ar->dev, skb->data, 3832 skb->len, DMA_TO_DEVICE); 3833 if (!paddr) 3834 continue; 3835 ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr); 3836 if (ret) { 3837 ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", 3838 ret); 3839 dma_unmap_single(ar->dev, paddr, skb->len, 3840 DMA_FROM_DEVICE); 3841 ieee80211_free_txskb(ar->hw, skb); 3842 } 3843 } else { 3844 ret = ath10k_wmi_mgmt_tx(ar, skb); 3845 if (ret) { 3846 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", 3847 ret); 3848 ieee80211_free_txskb(ar->hw, skb); 3849 } 3850 } 3851 } 3852 } 3853 3854 static void ath10k_mac_txq_init(struct ieee80211_txq *txq) 3855 { 3856 struct ath10k_txq *artxq; 3857 3858 if (!txq) 3859 return; 3860 3861 artxq = (void *)txq->drv_priv; 3862 INIT_LIST_HEAD(&artxq->list); 3863 } 3864 3865 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) 3866 { 3867 struct ath10k_txq *artxq; 3868 struct ath10k_skb_cb *cb; 3869 struct sk_buff *msdu; 3870 int msdu_id; 3871 3872 if (!txq) 3873 return; 3874 3875 artxq = (void *)txq->drv_priv; 3876 spin_lock_bh(&ar->txqs_lock); 3877 if (!list_empty(&artxq->list)) 3878 
list_del_init(&artxq->list); 3879 spin_unlock_bh(&ar->txqs_lock); 3880 3881 spin_lock_bh(&ar->htt.tx_lock); 3882 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { 3883 cb = ATH10K_SKB_CB(msdu); 3884 if (cb->txq == txq) 3885 cb->txq = NULL; 3886 } 3887 spin_unlock_bh(&ar->htt.tx_lock); 3888 } 3889 3890 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, 3891 u16 peer_id, 3892 u8 tid) 3893 { 3894 struct ath10k_peer *peer; 3895 3896 lockdep_assert_held(&ar->data_lock); 3897 3898 peer = ar->peer_map[peer_id]; 3899 if (!peer) 3900 return NULL; 3901 3902 if (peer->removed) 3903 return NULL; 3904 3905 if (peer->sta) 3906 return peer->sta->txq[tid]; 3907 else if (peer->vif) 3908 return peer->vif->txq; 3909 else 3910 return NULL; 3911 } 3912 3913 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, 3914 struct ieee80211_txq *txq) 3915 { 3916 struct ath10k *ar = hw->priv; 3917 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3918 3919 /* No need to get locks */ 3920 3921 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) 3922 return true; 3923 3924 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) 3925 return true; 3926 3927 if (artxq->num_fw_queued < artxq->num_push_allowed) 3928 return true; 3929 3930 return false; 3931 } 3932 3933 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, 3934 struct ieee80211_txq *txq) 3935 { 3936 struct ath10k *ar = hw->priv; 3937 struct ath10k_htt *htt = &ar->htt; 3938 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3939 struct ieee80211_vif *vif = txq->vif; 3940 struct ieee80211_sta *sta = txq->sta; 3941 enum ath10k_hw_txrx_mode txmode; 3942 enum ath10k_mac_tx_path txpath; 3943 struct sk_buff *skb; 3944 struct ieee80211_hdr *hdr; 3945 size_t skb_len; 3946 bool is_mgmt, is_presp; 3947 int ret; 3948 3949 spin_lock_bh(&ar->htt.tx_lock); 3950 ret = ath10k_htt_tx_inc_pending(htt); 3951 spin_unlock_bh(&ar->htt.tx_lock); 3952 3953 if (ret) 3954 return ret; 3955 3956 skb = ieee80211_tx_dequeue(hw, txq); 3957 if (!skb) { 3958 spin_lock_bh(&ar->htt.tx_lock); 3959 ath10k_htt_tx_dec_pending(htt); 3960 spin_unlock_bh(&ar->htt.tx_lock); 3961 3962 return -ENOENT; 3963 } 3964 3965 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 3966 3967 skb_len = skb->len; 3968 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3969 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3970 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 3971 3972 if (is_mgmt) { 3973 hdr = (struct ieee80211_hdr *)skb->data; 3974 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 3975 3976 spin_lock_bh(&ar->htt.tx_lock); 3977 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 3978 3979 if (ret) { 3980 ath10k_htt_tx_dec_pending(htt); 3981 spin_unlock_bh(&ar->htt.tx_lock); 3982 return ret; 3983 } 3984 spin_unlock_bh(&ar->htt.tx_lock); 3985 } 3986 3987 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3988 if (unlikely(ret)) { 3989 ath10k_warn(ar, "failed to push frame: %d\n", ret); 3990 3991 spin_lock_bh(&ar->htt.tx_lock); 3992 ath10k_htt_tx_dec_pending(htt); 3993 if (is_mgmt) 3994 ath10k_htt_tx_mgmt_dec_pending(htt); 3995 spin_unlock_bh(&ar->htt.tx_lock); 3996 3997 return ret; 3998 } 3999 4000 spin_lock_bh(&ar->htt.tx_lock); 4001 artxq->num_fw_queued++; 4002 spin_unlock_bh(&ar->htt.tx_lock); 4003 4004 return skb_len; 4005 } 4006 4007 void ath10k_mac_tx_push_pending(struct ath10k *ar) 4008 { 4009 struct ieee80211_hw *hw = ar->hw; 4010 struct ieee80211_txq *txq; 4011 struct ath10k_txq *artxq; 4012 struct ath10k_txq *last; 4013 int ret; 4014 int max; 4015 
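	/* Don't push anything while at least half of the available HTT tx
	 * descriptors are already in flight.
	 */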
4016 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) 4017 return; 4018 4019 spin_lock_bh(&ar->txqs_lock); 4020 rcu_read_lock(); 4021 4022 last = list_last_entry(&ar->txqs, struct ath10k_txq, list); 4023 while (!list_empty(&ar->txqs)) { 4024 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 4025 txq = container_of((void *)artxq, struct ieee80211_txq, 4026 drv_priv); 4027 4028 /* Prevent aggressive sta/tid taking over tx queue */ 4029 max = 16; 4030 ret = 0; 4031 while (ath10k_mac_tx_can_push(hw, txq) && max--) { 4032 ret = ath10k_mac_tx_push_txq(hw, txq); 4033 if (ret < 0) 4034 break; 4035 } 4036 4037 list_del_init(&artxq->list); 4038 if (ret != -ENOENT) 4039 list_add_tail(&artxq->list, &ar->txqs); 4040 4041 ath10k_htt_tx_txq_update(hw, txq); 4042 4043 if (artxq == last || (ret < 0 && ret != -ENOENT)) 4044 break; 4045 } 4046 4047 rcu_read_unlock(); 4048 spin_unlock_bh(&ar->txqs_lock); 4049 } 4050 4051 /************/ 4052 /* Scanning */ 4053 /************/ 4054 4055 void __ath10k_scan_finish(struct ath10k *ar) 4056 { 4057 lockdep_assert_held(&ar->data_lock); 4058 4059 switch (ar->scan.state) { 4060 case ATH10K_SCAN_IDLE: 4061 break; 4062 case ATH10K_SCAN_RUNNING: 4063 case ATH10K_SCAN_ABORTING: 4064 if (!ar->scan.is_roc) { 4065 struct cfg80211_scan_info info = { 4066 .aborted = (ar->scan.state == 4067 ATH10K_SCAN_ABORTING), 4068 }; 4069 4070 ieee80211_scan_completed(ar->hw, &info); 4071 } else if (ar->scan.roc_notify) { 4072 ieee80211_remain_on_channel_expired(ar->hw); 4073 } 4074 /* fall through */ 4075 case ATH10K_SCAN_STARTING: 4076 ar->scan.state = ATH10K_SCAN_IDLE; 4077 ar->scan_channel = NULL; 4078 ar->scan.roc_freq = 0; 4079 ath10k_offchan_tx_purge(ar); 4080 cancel_delayed_work(&ar->scan.timeout); 4081 complete(&ar->scan.completed); 4082 break; 4083 } 4084 } 4085 4086 void ath10k_scan_finish(struct ath10k *ar) 4087 { 4088 spin_lock_bh(&ar->data_lock); 4089 __ath10k_scan_finish(ar); 4090 spin_unlock_bh(&ar->data_lock); 4091 } 4092 4093 static int ath10k_scan_stop(struct ath10k *ar) 4094 { 4095 struct wmi_stop_scan_arg arg = { 4096 .req_id = 1, /* FIXME */ 4097 .req_type = WMI_SCAN_STOP_ONE, 4098 .u.scan_id = ATH10K_SCAN_ID, 4099 }; 4100 int ret; 4101 4102 lockdep_assert_held(&ar->conf_mutex); 4103 4104 ret = ath10k_wmi_stop_scan(ar, &arg); 4105 if (ret) { 4106 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); 4107 goto out; 4108 } 4109 4110 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); 4111 if (ret == 0) { 4112 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); 4113 ret = -ETIMEDOUT; 4114 } else if (ret > 0) { 4115 ret = 0; 4116 } 4117 4118 out: 4119 /* Scan state should be updated upon scan completion but in case 4120 * firmware fails to deliver the event (for whatever reason) it is 4121 * desired to clean up scan state anyway. Firmware may have just 4122 * dropped the scan completion event delivery due to transport pipe 4123 * being overflown with data and/or it can recover on its own before 4124 * next scan request is submitted. 
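 * Hence force the scan state machine back to IDLE here if it has not
 * reached it on its own.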
4125 */ 4126 spin_lock_bh(&ar->data_lock); 4127 if (ar->scan.state != ATH10K_SCAN_IDLE) 4128 __ath10k_scan_finish(ar); 4129 spin_unlock_bh(&ar->data_lock); 4130 4131 return ret; 4132 } 4133 4134 static void ath10k_scan_abort(struct ath10k *ar) 4135 { 4136 int ret; 4137 4138 lockdep_assert_held(&ar->conf_mutex); 4139 4140 spin_lock_bh(&ar->data_lock); 4141 4142 switch (ar->scan.state) { 4143 case ATH10K_SCAN_IDLE: 4144 /* This can happen if timeout worker kicked in and called 4145 * abortion while scan completion was being processed. 4146 */ 4147 break; 4148 case ATH10K_SCAN_STARTING: 4149 case ATH10K_SCAN_ABORTING: 4150 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", 4151 ath10k_scan_state_str(ar->scan.state), 4152 ar->scan.state); 4153 break; 4154 case ATH10K_SCAN_RUNNING: 4155 ar->scan.state = ATH10K_SCAN_ABORTING; 4156 spin_unlock_bh(&ar->data_lock); 4157 4158 ret = ath10k_scan_stop(ar); 4159 if (ret) 4160 ath10k_warn(ar, "failed to abort scan: %d\n", ret); 4161 4162 spin_lock_bh(&ar->data_lock); 4163 break; 4164 } 4165 4166 spin_unlock_bh(&ar->data_lock); 4167 } 4168 4169 void ath10k_scan_timeout_work(struct work_struct *work) 4170 { 4171 struct ath10k *ar = container_of(work, struct ath10k, 4172 scan.timeout.work); 4173 4174 mutex_lock(&ar->conf_mutex); 4175 ath10k_scan_abort(ar); 4176 mutex_unlock(&ar->conf_mutex); 4177 } 4178 4179 static int ath10k_start_scan(struct ath10k *ar, 4180 const struct wmi_start_scan_arg *arg) 4181 { 4182 int ret; 4183 4184 lockdep_assert_held(&ar->conf_mutex); 4185 4186 ret = ath10k_wmi_start_scan(ar, arg); 4187 if (ret) 4188 return ret; 4189 4190 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); 4191 if (ret == 0) { 4192 ret = ath10k_scan_stop(ar); 4193 if (ret) 4194 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 4195 4196 return -ETIMEDOUT; 4197 } 4198 4199 /* If we failed to start the scan, return error code at 4200 * this point. This is probably due to some issue in the 4201 * firmware, but no need to wedge the driver due to that... 
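	 * The scan.started completion above seems to be fired for failed
	 * starts as well, with the event handler dropping the state back to
	 * ATH10K_SCAN_IDLE; the check below catches that case.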
4202 */ 4203 spin_lock_bh(&ar->data_lock); 4204 if (ar->scan.state == ATH10K_SCAN_IDLE) { 4205 spin_unlock_bh(&ar->data_lock); 4206 return -EINVAL; 4207 } 4208 spin_unlock_bh(&ar->data_lock); 4209 4210 return 0; 4211 } 4212 4213 /**********************/ 4214 /* mac80211 callbacks */ 4215 /**********************/ 4216 4217 static void ath10k_mac_op_tx(struct ieee80211_hw *hw, 4218 struct ieee80211_tx_control *control, 4219 struct sk_buff *skb) 4220 { 4221 struct ath10k *ar = hw->priv; 4222 struct ath10k_htt *htt = &ar->htt; 4223 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 4224 struct ieee80211_vif *vif = info->control.vif; 4225 struct ieee80211_sta *sta = control->sta; 4226 struct ieee80211_txq *txq = NULL; 4227 struct ieee80211_hdr *hdr = (void *)skb->data; 4228 enum ath10k_hw_txrx_mode txmode; 4229 enum ath10k_mac_tx_path txpath; 4230 bool is_htt; 4231 bool is_mgmt; 4232 bool is_presp; 4233 int ret; 4234 4235 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 4236 4237 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4238 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4239 is_htt = (txpath == ATH10K_MAC_TX_HTT || 4240 txpath == ATH10K_MAC_TX_HTT_MGMT); 4241 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4242 4243 if (is_htt) { 4244 spin_lock_bh(&ar->htt.tx_lock); 4245 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4246 4247 ret = ath10k_htt_tx_inc_pending(htt); 4248 if (ret) { 4249 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", 4250 ret); 4251 spin_unlock_bh(&ar->htt.tx_lock); 4252 ieee80211_free_txskb(ar->hw, skb); 4253 return; 4254 } 4255 4256 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4257 if (ret) { 4258 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", 4259 ret); 4260 ath10k_htt_tx_dec_pending(htt); 4261 spin_unlock_bh(&ar->htt.tx_lock); 4262 ieee80211_free_txskb(ar->hw, skb); 4263 return; 4264 } 4265 spin_unlock_bh(&ar->htt.tx_lock); 4266 } 4267 4268 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 4269 if (ret) { 4270 ath10k_warn(ar, "failed to transmit frame: %d\n", ret); 4271 if (is_htt) { 4272 spin_lock_bh(&ar->htt.tx_lock); 4273 ath10k_htt_tx_dec_pending(htt); 4274 if (is_mgmt) 4275 ath10k_htt_tx_mgmt_dec_pending(htt); 4276 spin_unlock_bh(&ar->htt.tx_lock); 4277 } 4278 return; 4279 } 4280 } 4281 4282 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, 4283 struct ieee80211_txq *txq) 4284 { 4285 struct ath10k *ar = hw->priv; 4286 struct ath10k_txq *artxq = (void *)txq->drv_priv; 4287 struct ieee80211_txq *f_txq; 4288 struct ath10k_txq *f_artxq; 4289 int ret = 0; 4290 int max = 16; 4291 4292 spin_lock_bh(&ar->txqs_lock); 4293 if (list_empty(&artxq->list)) 4294 list_add_tail(&artxq->list, &ar->txqs); 4295 4296 f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 4297 f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv); 4298 list_del_init(&f_artxq->list); 4299 4300 while (ath10k_mac_tx_can_push(hw, f_txq) && max--) { 4301 ret = ath10k_mac_tx_push_txq(hw, f_txq); 4302 if (ret < 0) 4303 break; 4304 } 4305 if (ret != -ENOENT) 4306 list_add_tail(&f_artxq->list, &ar->txqs); 4307 spin_unlock_bh(&ar->txqs_lock); 4308 4309 ath10k_htt_tx_txq_update(hw, f_txq); 4310 ath10k_htt_tx_txq_update(hw, txq); 4311 } 4312 4313 /* Must not be called with conf_mutex held as workers can use that also. 
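 * (At least the offchannel tx worker takes conf_mutex itself, so the
 * cancel_work_sync() calls below could deadlock if the caller held it.)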
*/ 4314 void ath10k_drain_tx(struct ath10k *ar) 4315 { 4316 /* make sure rcu-protected mac80211 tx path itself is drained */ 4317 synchronize_net(); 4318 4319 ath10k_offchan_tx_purge(ar); 4320 ath10k_mgmt_over_wmi_tx_purge(ar); 4321 4322 cancel_work_sync(&ar->offchan_tx_work); 4323 cancel_work_sync(&ar->wmi_mgmt_tx_work); 4324 } 4325 4326 void ath10k_halt(struct ath10k *ar) 4327 { 4328 struct ath10k_vif *arvif; 4329 4330 lockdep_assert_held(&ar->conf_mutex); 4331 4332 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 4333 ar->filter_flags = 0; 4334 ar->monitor = false; 4335 ar->monitor_arvif = NULL; 4336 4337 if (ar->monitor_started) 4338 ath10k_monitor_stop(ar); 4339 4340 ar->monitor_started = false; 4341 ar->tx_paused = 0; 4342 4343 ath10k_scan_finish(ar); 4344 ath10k_peer_cleanup_all(ar); 4345 ath10k_stop_radar_confirmation(ar); 4346 ath10k_core_stop(ar); 4347 ath10k_hif_power_down(ar); 4348 4349 spin_lock_bh(&ar->data_lock); 4350 list_for_each_entry(arvif, &ar->arvifs, list) 4351 ath10k_mac_vif_beacon_cleanup(arvif); 4352 spin_unlock_bh(&ar->data_lock); 4353 } 4354 4355 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 4356 { 4357 struct ath10k *ar = hw->priv; 4358 4359 mutex_lock(&ar->conf_mutex); 4360 4361 *tx_ant = ar->cfg_tx_chainmask; 4362 *rx_ant = ar->cfg_rx_chainmask; 4363 4364 mutex_unlock(&ar->conf_mutex); 4365 4366 return 0; 4367 } 4368 4369 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) 4370 { 4371 /* It is not clear that allowing gaps in chainmask 4372 * is helpful. Probably it will not do what user 4373 * is hoping for, so warn in that case. 4374 */ 4375 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) 4376 return; 4377 4378 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n", 4379 dbg, cm); 4380 } 4381 4382 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar) 4383 { 4384 int nsts = ar->vht_cap_info; 4385 4386 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4387 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4388 4389 /* If firmware does not deliver to host number of space-time 4390 * streams supported, assume it support up to 4 BF STS and return 4391 * the value for VHT CAP: nsts-1) 4392 */ 4393 if (nsts == 0) 4394 return 3; 4395 4396 return nsts; 4397 } 4398 4399 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar) 4400 { 4401 int sound_dim = ar->vht_cap_info; 4402 4403 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4404 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4405 4406 /* If the sounding dimension is not advertised by the firmware, 4407 * let's use a default value of 1 4408 */ 4409 if (sound_dim == 0) 4410 return 1; 4411 4412 return sound_dim; 4413 } 4414 4415 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) 4416 { 4417 struct ieee80211_sta_vht_cap vht_cap = {0}; 4418 struct ath10k_hw_params *hw = &ar->hw_params; 4419 u16 mcs_map; 4420 u32 val; 4421 int i; 4422 4423 vht_cap.vht_supported = 1; 4424 vht_cap.cap = ar->vht_cap_info; 4425 4426 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4427 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 4428 val = ath10k_mac_get_vht_cap_bf_sts(ar); 4429 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4430 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4431 4432 vht_cap.cap |= val; 4433 } 4434 4435 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4436 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 4437 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4438 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4439 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4440 4441 vht_cap.cap |= val; 4442 } 4443 4444 /* Currently the firmware seems to be buggy, don't enable 80+80 4445 * mode until that's resolved. 4446 */ 4447 if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) && 4448 (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0) 4449 vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; 4450 4451 mcs_map = 0; 4452 for (i = 0; i < 8; i++) { 4453 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) 4454 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4455 else 4456 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4457 } 4458 4459 if (ar->cfg_tx_chainmask <= 1) 4460 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; 4461 4462 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); 4463 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); 4464 4465 /* If we are supporting 160Mhz or 80+80, then the NIC may be able to do 4466 * a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz. Give 4467 * user-space a clue if that is the case. 
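	 * When hw->vht160_mcs_rx_highest/tx_highest are zero the fields are
	 * left unset, i.e. no extra rate limit is advertised beyond what the
	 * MCS map already implies (the *_highest values are understood to be
	 * in Mbps).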
4468 */ 4469 if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) && 4470 (hw->vht160_mcs_rx_highest != 0 || 4471 hw->vht160_mcs_tx_highest != 0)) { 4472 vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest); 4473 vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest); 4474 } 4475 4476 return vht_cap; 4477 } 4478 4479 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) 4480 { 4481 int i; 4482 struct ieee80211_sta_ht_cap ht_cap = {0}; 4483 4484 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) 4485 return ht_cap; 4486 4487 ht_cap.ht_supported = 1; 4488 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4489 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 4490 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 4491 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 4492 ht_cap.cap |= 4493 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; 4494 4495 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) 4496 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 4497 4498 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) 4499 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 4500 4501 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { 4502 u32 smps; 4503 4504 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 4505 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 4506 4507 ht_cap.cap |= smps; 4508 } 4509 4510 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) 4511 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 4512 4513 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { 4514 u32 stbc; 4515 4516 stbc = ar->ht_cap_info; 4517 stbc &= WMI_HT_CAP_RX_STBC; 4518 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 4519 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 4520 stbc &= IEEE80211_HT_CAP_RX_STBC; 4521 4522 ht_cap.cap |= stbc; 4523 } 4524 4525 if (ar->ht_cap_info & WMI_HT_CAP_LDPC) 4526 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 4527 4528 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) 4529 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 4530 4531 /* max AMSDU is implicitly taken from vht_cap_info */ 4532 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 4533 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 4534 4535 for (i = 0; i < ar->num_rf_chains; i++) { 4536 if (ar->cfg_rx_chainmask & BIT(i)) 4537 ht_cap.mcs.rx_mask[i] = 0xFF; 4538 } 4539 4540 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 4541 4542 return ht_cap; 4543 } 4544 4545 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) 4546 { 4547 struct ieee80211_supported_band *band; 4548 struct ieee80211_sta_vht_cap vht_cap; 4549 struct ieee80211_sta_ht_cap ht_cap; 4550 4551 ht_cap = ath10k_get_ht_cap(ar); 4552 vht_cap = ath10k_create_vht_cap(ar); 4553 4554 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 4555 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 4556 band->ht_cap = ht_cap; 4557 } 4558 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 4559 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 4560 band->ht_cap = ht_cap; 4561 band->vht_cap = vht_cap; 4562 } 4563 } 4564 4565 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) 4566 { 4567 int ret; 4568 4569 lockdep_assert_held(&ar->conf_mutex); 4570 4571 ath10k_check_chain_mask(ar, tx_ant, "tx"); 4572 ath10k_check_chain_mask(ar, rx_ant, "rx"); 4573 4574 ar->cfg_tx_chainmask = tx_ant; 4575 ar->cfg_rx_chainmask = rx_ant; 4576 4577 if ((ar->state != ATH10K_STATE_ON) && 4578 (ar->state != ATH10K_STATE_RESTARTED)) 4579 return 0; 4580 4581 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, 4582 tx_ant); 4583 if (ret) { 4584 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 
0x%x\n", 4585 ret, tx_ant); 4586 return ret; 4587 } 4588 4589 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, 4590 rx_ant); 4591 if (ret) { 4592 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", 4593 ret, rx_ant); 4594 return ret; 4595 } 4596 4597 /* Reload HT/VHT capability */ 4598 ath10k_mac_setup_ht_vht_cap(ar); 4599 4600 return 0; 4601 } 4602 4603 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 4604 { 4605 struct ath10k *ar = hw->priv; 4606 int ret; 4607 4608 mutex_lock(&ar->conf_mutex); 4609 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); 4610 mutex_unlock(&ar->conf_mutex); 4611 return ret; 4612 } 4613 4614 static int ath10k_start(struct ieee80211_hw *hw) 4615 { 4616 struct ath10k *ar = hw->priv; 4617 u32 param; 4618 int ret = 0; 4619 4620 /* 4621 * This makes sense only when restarting hw. It is harmless to call 4622 * unconditionally. This is necessary to make sure no HTT/WMI tx 4623 * commands will be submitted while restarting. 4624 */ 4625 ath10k_drain_tx(ar); 4626 4627 mutex_lock(&ar->conf_mutex); 4628 4629 switch (ar->state) { 4630 case ATH10K_STATE_OFF: 4631 ar->state = ATH10K_STATE_ON; 4632 break; 4633 case ATH10K_STATE_RESTARTING: 4634 ar->state = ATH10K_STATE_RESTARTED; 4635 break; 4636 case ATH10K_STATE_ON: 4637 case ATH10K_STATE_RESTARTED: 4638 case ATH10K_STATE_WEDGED: 4639 WARN_ON(1); 4640 ret = -EINVAL; 4641 goto err; 4642 case ATH10K_STATE_UTF: 4643 ret = -EBUSY; 4644 goto err; 4645 } 4646 4647 ret = ath10k_hif_power_up(ar); 4648 if (ret) { 4649 ath10k_err(ar, "Could not init hif: %d\n", ret); 4650 goto err_off; 4651 } 4652 4653 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, 4654 &ar->normal_mode_fw); 4655 if (ret) { 4656 ath10k_err(ar, "Could not init core: %d\n", ret); 4657 goto err_power_down; 4658 } 4659 4660 param = ar->wmi.pdev_param->pmf_qos; 4661 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4662 if (ret) { 4663 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); 4664 goto err_core_stop; 4665 } 4666 4667 param = ar->wmi.pdev_param->dynamic_bw; 4668 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4669 if (ret) { 4670 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); 4671 goto err_core_stop; 4672 } 4673 4674 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { 4675 ret = ath10k_wmi_adaptive_qcs(ar, true); 4676 if (ret) { 4677 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n", 4678 ret); 4679 goto err_core_stop; 4680 } 4681 } 4682 4683 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { 4684 param = ar->wmi.pdev_param->burst_enable; 4685 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4686 if (ret) { 4687 ath10k_warn(ar, "failed to disable burst: %d\n", ret); 4688 goto err_core_stop; 4689 } 4690 } 4691 4692 param = ar->wmi.pdev_param->idle_ps_config; 4693 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4694 if (ret && ret != -EOPNOTSUPP) { 4695 ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret); 4696 goto err_core_stop; 4697 } 4698 4699 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); 4700 4701 /* 4702 * By default FW set ARP frames ac to voice (6). In that case ARP 4703 * exchange is not working properly for UAPSD enabled AP. ARP requests 4704 * which arrives with access category 0 are processed by network stack 4705 * and send back with access category 0, but FW changes access category 4706 * to 6. Set ARP frames access category to best effort (0) solves 4707 * this problem. 
4708 */ 4709 4710 param = ar->wmi.pdev_param->arp_ac_override; 4711 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4712 if (ret) { 4713 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", 4714 ret); 4715 goto err_core_stop; 4716 } 4717 4718 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, 4719 ar->running_fw->fw_file.fw_features)) { 4720 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, 4721 WMI_CCA_DETECT_LEVEL_AUTO, 4722 WMI_CCA_DETECT_MARGIN_AUTO); 4723 if (ret) { 4724 ath10k_warn(ar, "failed to enable adaptive cca: %d\n", 4725 ret); 4726 goto err_core_stop; 4727 } 4728 } 4729 4730 param = ar->wmi.pdev_param->ani_enable; 4731 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4732 if (ret) { 4733 ath10k_warn(ar, "failed to enable ani by default: %d\n", 4734 ret); 4735 goto err_core_stop; 4736 } 4737 4738 ar->ani_enabled = true; 4739 4740 if (ath10k_peer_stats_enabled(ar)) { 4741 param = ar->wmi.pdev_param->peer_stats_update_period; 4742 ret = ath10k_wmi_pdev_set_param(ar, param, 4743 PEER_DEFAULT_STATS_UPDATE_PERIOD); 4744 if (ret) { 4745 ath10k_warn(ar, 4746 "failed to set peer stats period : %d\n", 4747 ret); 4748 goto err_core_stop; 4749 } 4750 } 4751 4752 param = ar->wmi.pdev_param->enable_btcoex; 4753 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && 4754 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, 4755 ar->running_fw->fw_file.fw_features)) { 4756 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4757 if (ret) { 4758 ath10k_warn(ar, 4759 "failed to set btcoex param: %d\n", ret); 4760 goto err_core_stop; 4761 } 4762 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 4763 } 4764 4765 ar->num_started_vdevs = 0; 4766 ath10k_regd_update(ar); 4767 4768 ath10k_spectral_start(ar); 4769 ath10k_thermal_set_throttling(ar); 4770 4771 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE; 4772 4773 mutex_unlock(&ar->conf_mutex); 4774 return 0; 4775 4776 err_core_stop: 4777 ath10k_core_stop(ar); 4778 4779 err_power_down: 4780 ath10k_hif_power_down(ar); 4781 4782 err_off: 4783 ar->state = ATH10K_STATE_OFF; 4784 4785 err: 4786 mutex_unlock(&ar->conf_mutex); 4787 return ret; 4788 } 4789 4790 static void ath10k_stop(struct ieee80211_hw *hw) 4791 { 4792 struct ath10k *ar = hw->priv; 4793 4794 ath10k_drain_tx(ar); 4795 4796 mutex_lock(&ar->conf_mutex); 4797 if (ar->state != ATH10K_STATE_OFF) { 4798 ath10k_halt(ar); 4799 ar->state = ATH10K_STATE_OFF; 4800 } 4801 mutex_unlock(&ar->conf_mutex); 4802 4803 cancel_work_sync(&ar->set_coverage_class_work); 4804 cancel_delayed_work_sync(&ar->scan.timeout); 4805 cancel_work_sync(&ar->restart_work); 4806 } 4807 4808 static int ath10k_config_ps(struct ath10k *ar) 4809 { 4810 struct ath10k_vif *arvif; 4811 int ret = 0; 4812 4813 lockdep_assert_held(&ar->conf_mutex); 4814 4815 list_for_each_entry(arvif, &ar->arvifs, list) { 4816 ret = ath10k_mac_vif_setup_ps(arvif); 4817 if (ret) { 4818 ath10k_warn(ar, "failed to setup powersave: %d\n", ret); 4819 break; 4820 } 4821 } 4822 4823 return ret; 4824 } 4825 4826 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) 4827 { 4828 int ret; 4829 u32 param; 4830 4831 lockdep_assert_held(&ar->conf_mutex); 4832 4833 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower); 4834 4835 param = ar->wmi.pdev_param->txpower_limit2g; 4836 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4837 if (ret) { 4838 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", 4839 txpower, ret); 4840 return ret; 4841 } 4842 4843 param = ar->wmi.pdev_param->txpower_limit5g; 4844 ret = ath10k_wmi_pdev_set_param(ar, 
param, txpower * 2); 4845 if (ret) { 4846 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", 4847 txpower, ret); 4848 return ret; 4849 } 4850 4851 return 0; 4852 } 4853 4854 static int ath10k_mac_txpower_recalc(struct ath10k *ar) 4855 { 4856 struct ath10k_vif *arvif; 4857 int ret, txpower = -1; 4858 4859 lockdep_assert_held(&ar->conf_mutex); 4860 4861 list_for_each_entry(arvif, &ar->arvifs, list) { 4862 if (arvif->txpower <= 0) 4863 continue; 4864 4865 if (txpower == -1) 4866 txpower = arvif->txpower; 4867 else 4868 txpower = min(txpower, arvif->txpower); 4869 } 4870 4871 if (txpower == -1) 4872 return 0; 4873 4874 ret = ath10k_mac_txpower_setup(ar, txpower); 4875 if (ret) { 4876 ath10k_warn(ar, "failed to setup tx power %d: %d\n", 4877 txpower, ret); 4878 return ret; 4879 } 4880 4881 return 0; 4882 } 4883 4884 static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 4885 { 4886 struct ath10k *ar = hw->priv; 4887 struct ieee80211_conf *conf = &hw->conf; 4888 int ret = 0; 4889 4890 mutex_lock(&ar->conf_mutex); 4891 4892 if (changed & IEEE80211_CONF_CHANGE_PS) 4893 ath10k_config_ps(ar); 4894 4895 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 4896 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; 4897 ret = ath10k_monitor_recalc(ar); 4898 if (ret) 4899 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 4900 } 4901 4902 mutex_unlock(&ar->conf_mutex); 4903 return ret; 4904 } 4905 4906 static u32 get_nss_from_chainmask(u16 chain_mask) 4907 { 4908 if ((chain_mask & 0xf) == 0xf) 4909 return 4; 4910 else if ((chain_mask & 0x7) == 0x7) 4911 return 3; 4912 else if ((chain_mask & 0x3) == 0x3) 4913 return 2; 4914 return 1; 4915 } 4916 4917 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) 4918 { 4919 u32 value = 0; 4920 struct ath10k *ar = arvif->ar; 4921 int nsts; 4922 int sound_dim; 4923 4924 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) 4925 return 0; 4926 4927 nsts = ath10k_mac_get_vht_cap_bf_sts(ar); 4928 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4929 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) 4930 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 4931 4932 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4933 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4934 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) 4935 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); 4936 4937 if (!value) 4938 return 0; 4939 4940 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 4941 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 4942 4943 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 4944 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | 4945 WMI_VDEV_PARAM_TXBF_SU_TX_BFER); 4946 4947 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 4948 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 4949 4950 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 4951 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | 4952 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); 4953 4954 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 4955 ar->wmi.vdev_param->txbf, value); 4956 } 4957 4958 /* 4959 * TODO: 4960 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, 4961 * because we will send mgmt frames without CCK. This requirement 4962 * for P2P_FIND/GO_NEG should be handled by checking CCK flag 4963 * in the TX packet. 
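 * (mac80211 marks such frames with IEEE80211_TX_CTL_NO_CCK_RATE, which is
 * presumably the per-packet CCK flag meant here.)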
4964 */ 4965 static int ath10k_add_interface(struct ieee80211_hw *hw, 4966 struct ieee80211_vif *vif) 4967 { 4968 struct ath10k *ar = hw->priv; 4969 struct ath10k_vif *arvif = (void *)vif->drv_priv; 4970 struct ath10k_peer *peer; 4971 enum wmi_sta_powersave_param param; 4972 int ret = 0; 4973 u32 value; 4974 int bit; 4975 int i; 4976 u32 vdev_param; 4977 4978 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 4979 4980 mutex_lock(&ar->conf_mutex); 4981 4982 memset(arvif, 0, sizeof(*arvif)); 4983 ath10k_mac_txq_init(vif->txq); 4984 4985 arvif->ar = ar; 4986 arvif->vif = vif; 4987 4988 INIT_LIST_HEAD(&arvif->list); 4989 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work); 4990 INIT_DELAYED_WORK(&arvif->connection_loss_work, 4991 ath10k_mac_vif_sta_connection_loss_work); 4992 4993 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 4994 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 4995 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 4996 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 4997 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 4998 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 4999 } 5000 5001 if (ar->num_peers >= ar->max_num_peers) { 5002 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); 5003 ret = -ENOBUFS; 5004 goto err; 5005 } 5006 5007 if (ar->free_vdev_map == 0) { 5008 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); 5009 ret = -EBUSY; 5010 goto err; 5011 } 5012 bit = __ffs64(ar->free_vdev_map); 5013 5014 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", 5015 bit, ar->free_vdev_map); 5016 5017 arvif->vdev_id = bit; 5018 arvif->vdev_subtype = 5019 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); 5020 5021 switch (vif->type) { 5022 case NL80211_IFTYPE_P2P_DEVICE: 5023 arvif->vdev_type = WMI_VDEV_TYPE_STA; 5024 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5025 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); 5026 break; 5027 case NL80211_IFTYPE_UNSPECIFIED: 5028 case NL80211_IFTYPE_STATION: 5029 arvif->vdev_type = WMI_VDEV_TYPE_STA; 5030 if (vif->p2p) 5031 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5032 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); 5033 break; 5034 case NL80211_IFTYPE_ADHOC: 5035 arvif->vdev_type = WMI_VDEV_TYPE_IBSS; 5036 break; 5037 case NL80211_IFTYPE_MESH_POINT: 5038 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { 5039 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5040 (ar, WMI_VDEV_SUBTYPE_MESH_11S); 5041 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 5042 ret = -EINVAL; 5043 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); 5044 goto err; 5045 } 5046 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5047 break; 5048 case NL80211_IFTYPE_AP: 5049 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5050 5051 if (vif->p2p) 5052 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5053 (ar, WMI_VDEV_SUBTYPE_P2P_GO); 5054 break; 5055 case NL80211_IFTYPE_MONITOR: 5056 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 5057 break; 5058 default: 5059 WARN_ON(1); 5060 break; 5061 } 5062 5063 /* Using vdev_id as queue number will make it very easy to do per-vif 5064 * tx queue locking. This shouldn't wrap due to interface combinations 5065 * but do a modulo for correctness sake and prevent using offchannel tx 5066 * queues for regular vif tx. 
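	 * (IEEE80211_MAX_QUEUES - 1 appears to be reserved as the offchannel
	 * tx hw queue elsewhere in this driver, which is why the modulo
	 * below never maps a vif onto it.)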
5067 */ 5068 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5069 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 5070 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5071 5072 /* Some firmware revisions don't wait for beacon tx completion before 5073 * sending another SWBA event. This could lead to hardware using old 5074 * (freed) beacon data in some cases, e.g. tx credit starvation 5075 * combined with missed TBTT. This is very very rare. 5076 * 5077 * On non-IOMMU-enabled hosts this could be a possible security issue 5078 * because hw could beacon some random data on the air. On 5079 * IOMMU-enabled hosts DMAR faults would occur in most cases and target 5080 * device would crash. 5081 * 5082 * Since there are no beacon tx completions (implicit nor explicit) 5083 * propagated to host the only workaround for this is to allocate a 5084 * DMA-coherent buffer for a lifetime of a vif and use it for all 5085 * beacon tx commands. Worst case for this approach is some beacons may 5086 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. 5087 */ 5088 if (vif->type == NL80211_IFTYPE_ADHOC || 5089 vif->type == NL80211_IFTYPE_MESH_POINT || 5090 vif->type == NL80211_IFTYPE_AP) { 5091 arvif->beacon_buf = dma_zalloc_coherent(ar->dev, 5092 IEEE80211_MAX_FRAME_LEN, 5093 &arvif->beacon_paddr, 5094 GFP_ATOMIC); 5095 if (!arvif->beacon_buf) { 5096 ret = -ENOMEM; 5097 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 5098 ret); 5099 goto err; 5100 } 5101 } 5102 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)) 5103 arvif->nohwcrypt = true; 5104 5105 if (arvif->nohwcrypt && 5106 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 5107 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); 5108 goto err; 5109 } 5110 5111 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", 5112 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, 5113 arvif->beacon_buf ? "single-buf" : "per-skb"); 5114 5115 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 5116 arvif->vdev_subtype, vif->addr); 5117 if (ret) { 5118 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", 5119 arvif->vdev_id, ret); 5120 goto err; 5121 } 5122 5123 ar->free_vdev_map &= ~(1LL << arvif->vdev_id); 5124 spin_lock_bh(&ar->data_lock); 5125 list_add(&arvif->list, &ar->arvifs); 5126 spin_unlock_bh(&ar->data_lock); 5127 5128 /* It makes no sense to have firmware do keepalives. mac80211 already 5129 * takes care of this with idle connection polling. 5130 */ 5131 ret = ath10k_mac_vif_disable_keepalive(arvif); 5132 if (ret) { 5133 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", 5134 arvif->vdev_id, ret); 5135 goto err_vdev_delete; 5136 } 5137 5138 arvif->def_wep_key_idx = -1; 5139 5140 vdev_param = ar->wmi.vdev_param->tx_encap_type; 5141 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5142 ATH10K_HW_TXRX_NATIVE_WIFI); 5143 /* 10.X firmware does not support this VDEV parameter. Do not warn */ 5144 if (ret && ret != -EOPNOTSUPP) { 5145 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", 5146 arvif->vdev_id, ret); 5147 goto err_vdev_delete; 5148 } 5149 5150 /* Configuring number of spatial stream for monitor interface is causing 5151 * target assert in qca9888 and qca6174. 
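	 * The vif->type check below therefore skips the nss vdev param for
	 * monitor interfaces; e.g. a 2x2 chainmask of 0x3 would otherwise be
	 * mapped to nss = 2 by get_nss_from_chainmask().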
5152 */ 5153 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { 5154 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); 5155 5156 vdev_param = ar->wmi.vdev_param->nss; 5157 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5158 nss); 5159 if (ret) { 5160 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", 5161 arvif->vdev_id, ar->cfg_tx_chainmask, nss, 5162 ret); 5163 goto err_vdev_delete; 5164 } 5165 } 5166 5167 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5168 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5169 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, 5170 vif->addr, WMI_PEER_TYPE_DEFAULT); 5171 if (ret) { 5172 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", 5173 arvif->vdev_id, ret); 5174 goto err_vdev_delete; 5175 } 5176 5177 spin_lock_bh(&ar->data_lock); 5178 5179 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); 5180 if (!peer) { 5181 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 5182 vif->addr, arvif->vdev_id); 5183 spin_unlock_bh(&ar->data_lock); 5184 ret = -ENOENT; 5185 goto err_peer_delete; 5186 } 5187 5188 arvif->peer_id = find_first_bit(peer->peer_ids, 5189 ATH10K_MAX_NUM_PEER_IDS); 5190 5191 spin_unlock_bh(&ar->data_lock); 5192 } else { 5193 arvif->peer_id = HTT_INVALID_PEERID; 5194 } 5195 5196 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 5197 ret = ath10k_mac_set_kickout(arvif); 5198 if (ret) { 5199 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", 5200 arvif->vdev_id, ret); 5201 goto err_peer_delete; 5202 } 5203 } 5204 5205 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 5206 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 5207 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 5208 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5209 param, value); 5210 if (ret) { 5211 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", 5212 arvif->vdev_id, ret); 5213 goto err_peer_delete; 5214 } 5215 5216 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 5217 if (ret) { 5218 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 5219 arvif->vdev_id, ret); 5220 goto err_peer_delete; 5221 } 5222 5223 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 5224 if (ret) { 5225 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 5226 arvif->vdev_id, ret); 5227 goto err_peer_delete; 5228 } 5229 } 5230 5231 ret = ath10k_mac_set_txbf_conf(arvif); 5232 if (ret) { 5233 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", 5234 arvif->vdev_id, ret); 5235 goto err_peer_delete; 5236 } 5237 5238 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 5239 if (ret) { 5240 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 5241 arvif->vdev_id, ret); 5242 goto err_peer_delete; 5243 } 5244 5245 arvif->txpower = vif->bss_conf.txpower; 5246 ret = ath10k_mac_txpower_recalc(ar); 5247 if (ret) { 5248 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5249 goto err_peer_delete; 5250 } 5251 5252 if (vif->type == NL80211_IFTYPE_MONITOR) { 5253 ar->monitor_arvif = arvif; 5254 ret = ath10k_monitor_recalc(ar); 5255 if (ret) { 5256 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5257 goto err_peer_delete; 5258 } 5259 } 5260 5261 spin_lock_bh(&ar->htt.tx_lock); 5262 if (!ar->tx_paused) 5263 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 5264 spin_unlock_bh(&ar->htt.tx_lock); 5265 5266 mutex_unlock(&ar->conf_mutex); 5267 return 0; 5268 5269 err_peer_delete: 5270 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5271 arvif->vdev_type == 
WMI_VDEV_TYPE_IBSS) 5272 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); 5273 5274 err_vdev_delete: 5275 ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5276 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5277 spin_lock_bh(&ar->data_lock); 5278 list_del(&arvif->list); 5279 spin_unlock_bh(&ar->data_lock); 5280 5281 err: 5282 if (arvif->beacon_buf) { 5283 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 5284 arvif->beacon_buf, arvif->beacon_paddr); 5285 arvif->beacon_buf = NULL; 5286 } 5287 5288 mutex_unlock(&ar->conf_mutex); 5289 5290 return ret; 5291 } 5292 5293 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif) 5294 { 5295 int i; 5296 5297 for (i = 0; i < BITS_PER_LONG; i++) 5298 ath10k_mac_vif_tx_unlock(arvif, i); 5299 } 5300 5301 static void ath10k_remove_interface(struct ieee80211_hw *hw, 5302 struct ieee80211_vif *vif) 5303 { 5304 struct ath10k *ar = hw->priv; 5305 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5306 struct ath10k_peer *peer; 5307 int ret; 5308 int i; 5309 5310 cancel_work_sync(&arvif->ap_csa_work); 5311 cancel_delayed_work_sync(&arvif->connection_loss_work); 5312 5313 mutex_lock(&ar->conf_mutex); 5314 5315 spin_lock_bh(&ar->data_lock); 5316 ath10k_mac_vif_beacon_cleanup(arvif); 5317 spin_unlock_bh(&ar->data_lock); 5318 5319 ret = ath10k_spectral_vif_stop(arvif); 5320 if (ret) 5321 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", 5322 arvif->vdev_id, ret); 5323 5324 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5325 spin_lock_bh(&ar->data_lock); 5326 list_del(&arvif->list); 5327 spin_unlock_bh(&ar->data_lock); 5328 5329 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5330 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5331 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, 5332 vif->addr); 5333 if (ret) 5334 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", 5335 arvif->vdev_id, ret); 5336 5337 kfree(arvif->u.ap.noa_data); 5338 } 5339 5340 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", 5341 arvif->vdev_id); 5342 5343 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5344 if (ret) 5345 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", 5346 arvif->vdev_id, ret); 5347 5348 /* Some firmware revisions don't notify host about self-peer removal 5349 * until after associated vdev is deleted. 
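	 * That is why the explicit wait for the peer-delete event happens
	 * only below, after ath10k_wmi_vdev_delete() has been issued.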
5350 */ 5351 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5352 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5353 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, 5354 vif->addr); 5355 if (ret) 5356 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", 5357 arvif->vdev_id, ret); 5358 5359 spin_lock_bh(&ar->data_lock); 5360 ar->num_peers--; 5361 spin_unlock_bh(&ar->data_lock); 5362 } 5363 5364 spin_lock_bh(&ar->data_lock); 5365 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 5366 peer = ar->peer_map[i]; 5367 if (!peer) 5368 continue; 5369 5370 if (peer->vif == vif) { 5371 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", 5372 vif->addr, arvif->vdev_id); 5373 peer->vif = NULL; 5374 } 5375 } 5376 spin_unlock_bh(&ar->data_lock); 5377 5378 ath10k_peer_cleanup(ar, arvif->vdev_id); 5379 ath10k_mac_txq_unref(ar, vif->txq); 5380 5381 if (vif->type == NL80211_IFTYPE_MONITOR) { 5382 ar->monitor_arvif = NULL; 5383 ret = ath10k_monitor_recalc(ar); 5384 if (ret) 5385 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5386 } 5387 5388 ret = ath10k_mac_txpower_recalc(ar); 5389 if (ret) 5390 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5391 5392 spin_lock_bh(&ar->htt.tx_lock); 5393 ath10k_mac_vif_tx_unlock_all(arvif); 5394 spin_unlock_bh(&ar->htt.tx_lock); 5395 5396 ath10k_mac_txq_unref(ar, vif->txq); 5397 5398 mutex_unlock(&ar->conf_mutex); 5399 } 5400 5401 /* 5402 * FIXME: Has to be verified. 5403 */ 5404 #define SUPPORTED_FILTERS \ 5405 (FIF_ALLMULTI | \ 5406 FIF_CONTROL | \ 5407 FIF_PSPOLL | \ 5408 FIF_OTHER_BSS | \ 5409 FIF_BCN_PRBRESP_PROMISC | \ 5410 FIF_PROBE_REQ | \ 5411 FIF_FCSFAIL) 5412 5413 static void ath10k_configure_filter(struct ieee80211_hw *hw, 5414 unsigned int changed_flags, 5415 unsigned int *total_flags, 5416 u64 multicast) 5417 { 5418 struct ath10k *ar = hw->priv; 5419 int ret; 5420 5421 mutex_lock(&ar->conf_mutex); 5422 5423 changed_flags &= SUPPORTED_FILTERS; 5424 *total_flags &= SUPPORTED_FILTERS; 5425 ar->filter_flags = *total_flags; 5426 5427 ret = ath10k_monitor_recalc(ar); 5428 if (ret) 5429 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5430 5431 mutex_unlock(&ar->conf_mutex); 5432 } 5433 5434 static void ath10k_bss_info_changed(struct ieee80211_hw *hw, 5435 struct ieee80211_vif *vif, 5436 struct ieee80211_bss_conf *info, 5437 u32 changed) 5438 { 5439 struct ath10k *ar = hw->priv; 5440 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5441 int ret = 0; 5442 u32 vdev_param, pdev_param, slottime, preamble; 5443 5444 mutex_lock(&ar->conf_mutex); 5445 5446 if (changed & BSS_CHANGED_IBSS) 5447 ath10k_control_ibss(arvif, info, vif->addr); 5448 5449 if (changed & BSS_CHANGED_BEACON_INT) { 5450 arvif->beacon_interval = info->beacon_int; 5451 vdev_param = ar->wmi.vdev_param->beacon_interval; 5452 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5453 arvif->beacon_interval); 5454 ath10k_dbg(ar, ATH10K_DBG_MAC, 5455 "mac vdev %d beacon_interval %d\n", 5456 arvif->vdev_id, arvif->beacon_interval); 5457 5458 if (ret) 5459 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", 5460 arvif->vdev_id, ret); 5461 } 5462 5463 if (changed & BSS_CHANGED_BEACON) { 5464 ath10k_dbg(ar, ATH10K_DBG_MAC, 5465 "vdev %d set beacon tx mode to staggered\n", 5466 arvif->vdev_id); 5467 5468 pdev_param = ar->wmi.pdev_param->beacon_tx_mode; 5469 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 5470 WMI_BEACON_STAGGERED_MODE); 5471 if (ret) 5472 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", 
5473 arvif->vdev_id, ret); 5474 5475 ret = ath10k_mac_setup_bcn_tmpl(arvif); 5476 if (ret) 5477 ath10k_warn(ar, "failed to update beacon template: %d\n", 5478 ret); 5479 5480 if (ieee80211_vif_is_mesh(vif)) { 5481 /* mesh doesn't use SSID but firmware needs it */ 5482 strncpy(arvif->u.ap.ssid, "mesh", 5483 sizeof(arvif->u.ap.ssid)); 5484 arvif->u.ap.ssid_len = 4; 5485 } 5486 } 5487 5488 if (changed & BSS_CHANGED_AP_PROBE_RESP) { 5489 ret = ath10k_mac_setup_prb_tmpl(arvif); 5490 if (ret) 5491 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", 5492 arvif->vdev_id, ret); 5493 } 5494 5495 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { 5496 arvif->dtim_period = info->dtim_period; 5497 5498 ath10k_dbg(ar, ATH10K_DBG_MAC, 5499 "mac vdev %d dtim_period %d\n", 5500 arvif->vdev_id, arvif->dtim_period); 5501 5502 vdev_param = ar->wmi.vdev_param->dtim_period; 5503 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5504 arvif->dtim_period); 5505 if (ret) 5506 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", 5507 arvif->vdev_id, ret); 5508 } 5509 5510 if (changed & BSS_CHANGED_SSID && 5511 vif->type == NL80211_IFTYPE_AP) { 5512 arvif->u.ap.ssid_len = info->ssid_len; 5513 if (info->ssid_len) 5514 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); 5515 arvif->u.ap.hidden_ssid = info->hidden_ssid; 5516 } 5517 5518 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) 5519 ether_addr_copy(arvif->bssid, info->bssid); 5520 5521 if (changed & BSS_CHANGED_BEACON_ENABLED) 5522 ath10k_control_beaconing(arvif, info); 5523 5524 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 5525 arvif->use_cts_prot = info->use_cts_prot; 5526 5527 ret = ath10k_recalc_rtscts_prot(arvif); 5528 if (ret) 5529 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 5530 arvif->vdev_id, ret); 5531 5532 if (ath10k_mac_can_set_cts_prot(arvif)) { 5533 ret = ath10k_mac_set_cts_prot(arvif); 5534 if (ret) 5535 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 5536 arvif->vdev_id, ret); 5537 } 5538 } 5539 5540 if (changed & BSS_CHANGED_ERP_SLOT) { 5541 if (info->use_short_slot) 5542 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ 5543 5544 else 5545 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ 5546 5547 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", 5548 arvif->vdev_id, slottime); 5549 5550 vdev_param = ar->wmi.vdev_param->slot_time; 5551 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5552 slottime); 5553 if (ret) 5554 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", 5555 arvif->vdev_id, ret); 5556 } 5557 5558 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 5559 if (info->use_short_preamble) 5560 preamble = WMI_VDEV_PREAMBLE_SHORT; 5561 else 5562 preamble = WMI_VDEV_PREAMBLE_LONG; 5563 5564 ath10k_dbg(ar, ATH10K_DBG_MAC, 5565 "mac vdev %d preamble %dn", 5566 arvif->vdev_id, preamble); 5567 5568 vdev_param = ar->wmi.vdev_param->preamble; 5569 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5570 preamble); 5571 if (ret) 5572 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", 5573 arvif->vdev_id, ret); 5574 } 5575 5576 if (changed & BSS_CHANGED_ASSOC) { 5577 if (info->assoc) { 5578 /* Workaround: Make sure monitor vdev is not running 5579 * when associating to prevent some firmware revisions 5580 * (e.g. 10.1 and 10.2) from crashing. 
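			 * ath10k_monitor_recalc() below re-evaluates the
			 * monitor state once association is done, so the
			 * monitor vdev is restarted automatically when it is
			 * still needed.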
5581 */ 5582 if (ar->monitor_started) 5583 ath10k_monitor_stop(ar); 5584 ath10k_bss_assoc(hw, vif, info); 5585 ath10k_monitor_recalc(ar); 5586 } else { 5587 ath10k_bss_disassoc(hw, vif); 5588 } 5589 } 5590 5591 if (changed & BSS_CHANGED_TXPOWER) { 5592 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", 5593 arvif->vdev_id, info->txpower); 5594 5595 arvif->txpower = info->txpower; 5596 ret = ath10k_mac_txpower_recalc(ar); 5597 if (ret) 5598 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5599 } 5600 5601 if (changed & BSS_CHANGED_PS) { 5602 arvif->ps = vif->bss_conf.ps; 5603 5604 ret = ath10k_config_ps(ar); 5605 if (ret) 5606 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", 5607 arvif->vdev_id, ret); 5608 } 5609 5610 mutex_unlock(&ar->conf_mutex); 5611 } 5612 5613 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value) 5614 { 5615 struct ath10k *ar = hw->priv; 5616 5617 /* This function should never be called if setting the coverage class 5618 * is not supported on this hardware. 5619 */ 5620 if (!ar->hw_params.hw_ops->set_coverage_class) { 5621 WARN_ON_ONCE(1); 5622 return; 5623 } 5624 ar->hw_params.hw_ops->set_coverage_class(ar, value); 5625 } 5626 5627 struct ath10k_mac_tdls_iter_data { 5628 u32 num_tdls_stations; 5629 struct ieee80211_vif *curr_vif; 5630 }; 5631 5632 static void ath10k_mac_tdls_vif_stations_count_iter(void *data, 5633 struct ieee80211_sta *sta) 5634 { 5635 struct ath10k_mac_tdls_iter_data *iter_data = data; 5636 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5637 struct ieee80211_vif *sta_vif = arsta->arvif->vif; 5638 5639 if (sta->tdls && sta_vif == iter_data->curr_vif) 5640 iter_data->num_tdls_stations++; 5641 } 5642 5643 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, 5644 struct ieee80211_vif *vif) 5645 { 5646 struct ath10k_mac_tdls_iter_data data = {}; 5647 5648 data.curr_vif = vif; 5649 5650 ieee80211_iterate_stations_atomic(hw, 5651 ath10k_mac_tdls_vif_stations_count_iter, 5652 &data); 5653 return data.num_tdls_stations; 5654 } 5655 5656 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac, 5657 struct ieee80211_vif *vif) 5658 { 5659 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5660 int *num_tdls_vifs = data; 5661 5662 if (vif->type != NL80211_IFTYPE_STATION) 5663 return; 5664 5665 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0) 5666 (*num_tdls_vifs)++; 5667 } 5668 5669 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw) 5670 { 5671 int num_tdls_vifs = 0; 5672 5673 ieee80211_iterate_active_interfaces_atomic(hw, 5674 IEEE80211_IFACE_ITER_NORMAL, 5675 ath10k_mac_tdls_vifs_count_iter, 5676 &num_tdls_vifs); 5677 return num_tdls_vifs; 5678 } 5679 5680 static int ath10k_hw_scan(struct ieee80211_hw *hw, 5681 struct ieee80211_vif *vif, 5682 struct ieee80211_scan_request *hw_req) 5683 { 5684 struct ath10k *ar = hw->priv; 5685 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5686 struct cfg80211_scan_request *req = &hw_req->req; 5687 struct wmi_start_scan_arg arg; 5688 int ret = 0; 5689 int i; 5690 u32 scan_timeout; 5691 5692 mutex_lock(&ar->conf_mutex); 5693 5694 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 5695 ret = -EBUSY; 5696 goto exit; 5697 } 5698 5699 spin_lock_bh(&ar->data_lock); 5700 switch (ar->scan.state) { 5701 case ATH10K_SCAN_IDLE: 5702 reinit_completion(&ar->scan.started); 5703 reinit_completion(&ar->scan.completed); 5704 ar->scan.state = ATH10K_SCAN_STARTING; 5705 ar->scan.is_roc = false; 5706 ar->scan.vdev_id = 
arvif->vdev_id; 5707 ret = 0; 5708 break; 5709 case ATH10K_SCAN_STARTING: 5710 case ATH10K_SCAN_RUNNING: 5711 case ATH10K_SCAN_ABORTING: 5712 ret = -EBUSY; 5713 break; 5714 } 5715 spin_unlock_bh(&ar->data_lock); 5716 5717 if (ret) 5718 goto exit; 5719 5720 memset(&arg, 0, sizeof(arg)); 5721 ath10k_wmi_start_scan_init(ar, &arg); 5722 arg.vdev_id = arvif->vdev_id; 5723 arg.scan_id = ATH10K_SCAN_ID; 5724 5725 if (req->ie_len) { 5726 arg.ie_len = req->ie_len; 5727 memcpy(arg.ie, req->ie, arg.ie_len); 5728 } 5729 5730 if (req->n_ssids) { 5731 arg.n_ssids = req->n_ssids; 5732 for (i = 0; i < arg.n_ssids; i++) { 5733 arg.ssids[i].len = req->ssids[i].ssid_len; 5734 arg.ssids[i].ssid = req->ssids[i].ssid; 5735 } 5736 } else { 5737 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 5738 } 5739 5740 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { 5741 arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; 5742 ether_addr_copy(arg.mac_addr.addr, req->mac_addr); 5743 ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask); 5744 } 5745 5746 if (req->n_channels) { 5747 arg.n_channels = req->n_channels; 5748 for (i = 0; i < arg.n_channels; i++) 5749 arg.channels[i] = req->channels[i]->center_freq; 5750 } 5751 5752 /* if duration is set, default dwell times will be overwritten */ 5753 if (req->duration) { 5754 arg.dwell_time_active = req->duration; 5755 arg.dwell_time_passive = req->duration; 5756 arg.burst_duration_ms = req->duration; 5757 5758 scan_timeout = min_t(u32, arg.max_rest_time * 5759 (arg.n_channels - 1) + (req->duration + 5760 ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * 5761 arg.n_channels, arg.max_scan_time + 200); 5762 5763 } else { 5764 /* Add a 200ms margin to account for event/command processing */ 5765 scan_timeout = arg.max_scan_time + 200; 5766 } 5767 5768 ret = ath10k_start_scan(ar, &arg); 5769 if (ret) { 5770 ath10k_warn(ar, "failed to start hw scan: %d\n", ret); 5771 spin_lock_bh(&ar->data_lock); 5772 ar->scan.state = ATH10K_SCAN_IDLE; 5773 spin_unlock_bh(&ar->data_lock); 5774 } 5775 5776 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 5777 msecs_to_jiffies(scan_timeout)); 5778 5779 exit: 5780 mutex_unlock(&ar->conf_mutex); 5781 return ret; 5782 } 5783 5784 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, 5785 struct ieee80211_vif *vif) 5786 { 5787 struct ath10k *ar = hw->priv; 5788 5789 mutex_lock(&ar->conf_mutex); 5790 ath10k_scan_abort(ar); 5791 mutex_unlock(&ar->conf_mutex); 5792 5793 cancel_delayed_work_sync(&ar->scan.timeout); 5794 } 5795 5796 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, 5797 struct ath10k_vif *arvif, 5798 enum set_key_cmd cmd, 5799 struct ieee80211_key_conf *key) 5800 { 5801 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; 5802 int ret; 5803 5804 /* 10.1 firmware branch requires default key index to be set to group 5805 * key index after installing it. Otherwise FW/HW Txes corrupted 5806 * frames with multi-vif APs. This is not required for main firmware 5807 * branch (e.g. 636). 5808 * 5809 * This is also needed for 636 fw for IBSS-RSN to work more reliably. 5810 * 5811 * FIXME: It remains unknown if this is required for multi-vif STA 5812 * interfaces on 10.1. 
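 * The checks below therefore limit the override to AP/IBSS vdevs, non-WEP
 * ciphers, group (non-pairwise) keys and SET_KEY commands.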
5813 */ 5814 5815 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 5816 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 5817 return; 5818 5819 if (key->cipher == WLAN_CIPHER_SUITE_WEP40) 5820 return; 5821 5822 if (key->cipher == WLAN_CIPHER_SUITE_WEP104) 5823 return; 5824 5825 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5826 return; 5827 5828 if (cmd != SET_KEY) 5829 return; 5830 5831 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5832 key->keyidx); 5833 if (ret) 5834 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", 5835 arvif->vdev_id, ret); 5836 } 5837 5838 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 5839 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 5840 struct ieee80211_key_conf *key) 5841 { 5842 struct ath10k *ar = hw->priv; 5843 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5844 struct ath10k_peer *peer; 5845 const u8 *peer_addr; 5846 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || 5847 key->cipher == WLAN_CIPHER_SUITE_WEP104; 5848 int ret = 0; 5849 int ret2; 5850 u32 flags = 0; 5851 u32 flags2; 5852 5853 /* this one needs to be done in software */ 5854 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 5855 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 5856 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || 5857 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) 5858 return 1; 5859 5860 if (arvif->nohwcrypt) 5861 return 1; 5862 5863 if (key->keyidx > WMI_MAX_KEY_INDEX) 5864 return -ENOSPC; 5865 5866 mutex_lock(&ar->conf_mutex); 5867 5868 if (sta) 5869 peer_addr = sta->addr; 5870 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 5871 peer_addr = vif->bss_conf.bssid; 5872 else 5873 peer_addr = vif->addr; 5874 5875 key->hw_key_idx = key->keyidx; 5876 5877 if (is_wep) { 5878 if (cmd == SET_KEY) 5879 arvif->wep_keys[key->keyidx] = key; 5880 else 5881 arvif->wep_keys[key->keyidx] = NULL; 5882 } 5883 5884 /* the peer should not disappear in mid-way (unless FW goes awry) since 5885 * we already hold conf_mutex. we just make sure its there now. 5886 */ 5887 spin_lock_bh(&ar->data_lock); 5888 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5889 spin_unlock_bh(&ar->data_lock); 5890 5891 if (!peer) { 5892 if (cmd == SET_KEY) { 5893 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", 5894 peer_addr); 5895 ret = -EOPNOTSUPP; 5896 goto exit; 5897 } else { 5898 /* if the peer doesn't exist there is no key to disable anymore */ 5899 goto exit; 5900 } 5901 } 5902 5903 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5904 flags |= WMI_KEY_PAIRWISE; 5905 else 5906 flags |= WMI_KEY_GROUP; 5907 5908 if (is_wep) { 5909 if (cmd == DISABLE_KEY) 5910 ath10k_clear_vdev_key(arvif, key); 5911 5912 /* When WEP keys are uploaded it's possible that there are 5913 * stations associated already (e.g. when merging) without any 5914 * keys. Static WEP needs an explicit per-peer key upload. 5915 */ 5916 if (vif->type == NL80211_IFTYPE_ADHOC && 5917 cmd == SET_KEY) 5918 ath10k_mac_vif_update_wep_key(arvif, key); 5919 5920 /* 802.1x never sets the def_wep_key_idx so each set_key() 5921 * call changes default tx key. 5922 * 5923 * Static WEP sets def_wep_key_idx via .set_default_unicast_key 5924 * after first set_key(). 
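		 * Hence WMI_KEY_TX_USAGE is only added below while
		 * def_wep_key_idx is still -1, i.e. for dynamic (802.1x) WEP
		 * keys.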
5925 */ 5926 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1) 5927 flags |= WMI_KEY_TX_USAGE; 5928 } 5929 5930 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); 5931 if (ret) { 5932 WARN_ON(ret > 0); 5933 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", 5934 arvif->vdev_id, peer_addr, ret); 5935 goto exit; 5936 } 5937 5938 /* mac80211 sets static WEP keys as groupwise while firmware requires 5939 * them to be installed twice as both pairwise and groupwise. 5940 */ 5941 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) { 5942 flags2 = flags; 5943 flags2 &= ~WMI_KEY_GROUP; 5944 flags2 |= WMI_KEY_PAIRWISE; 5945 5946 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); 5947 if (ret) { 5948 WARN_ON(ret > 0); 5949 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", 5950 arvif->vdev_id, peer_addr, ret); 5951 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, 5952 peer_addr, flags); 5953 if (ret2) { 5954 WARN_ON(ret2 > 0); 5955 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", 5956 arvif->vdev_id, peer_addr, ret2); 5957 } 5958 goto exit; 5959 } 5960 } 5961 5962 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); 5963 5964 spin_lock_bh(&ar->data_lock); 5965 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5966 if (peer && cmd == SET_KEY) 5967 peer->keys[key->keyidx] = key; 5968 else if (peer && cmd == DISABLE_KEY) 5969 peer->keys[key->keyidx] = NULL; 5970 else if (peer == NULL) 5971 /* impossible unless FW goes crazy */ 5972 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); 5973 spin_unlock_bh(&ar->data_lock); 5974 5975 if (sta && sta->tdls) 5976 ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 5977 WMI_PEER_AUTHORIZE, 1); 5978 5979 exit: 5980 mutex_unlock(&ar->conf_mutex); 5981 return ret; 5982 } 5983 5984 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, 5985 struct ieee80211_vif *vif, 5986 int keyidx) 5987 { 5988 struct ath10k *ar = hw->priv; 5989 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5990 int ret; 5991 5992 mutex_lock(&arvif->ar->conf_mutex); 5993 5994 if (arvif->ar->state != ATH10K_STATE_ON) 5995 goto unlock; 5996 5997 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 5998 arvif->vdev_id, keyidx); 5999 6000 ret = ath10k_wmi_vdev_set_param(arvif->ar, 6001 arvif->vdev_id, 6002 arvif->ar->wmi.vdev_param->def_keyid, 6003 keyidx); 6004 6005 if (ret) { 6006 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", 6007 arvif->vdev_id, 6008 ret); 6009 goto unlock; 6010 } 6011 6012 arvif->def_wep_key_idx = keyidx; 6013 6014 unlock: 6015 mutex_unlock(&arvif->ar->conf_mutex); 6016 } 6017 6018 static void ath10k_sta_rc_update_wk(struct work_struct *wk) 6019 { 6020 struct ath10k *ar; 6021 struct ath10k_vif *arvif; 6022 struct ath10k_sta *arsta; 6023 struct ieee80211_sta *sta; 6024 struct cfg80211_chan_def def; 6025 enum nl80211_band band; 6026 const u8 *ht_mcs_mask; 6027 const u16 *vht_mcs_mask; 6028 u32 changed, bw, nss, smps; 6029 int err; 6030 6031 arsta = container_of(wk, struct ath10k_sta, update_wk); 6032 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 6033 arvif = arsta->arvif; 6034 ar = arvif->ar; 6035 6036 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 6037 return; 6038 6039 band = def.chan->band; 6040 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 6041 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 6042 6043 spin_lock_bh(&ar->data_lock); 6044 6045 changed = arsta->changed; 6046 
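	/* Snapshot the pending flags and clear them while still under
	 * data_lock; the WMI updates below then run with only conf_mutex
	 * held.
	 */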
arsta->changed = 0; 6047 6048 bw = arsta->bw; 6049 nss = arsta->nss; 6050 smps = arsta->smps; 6051 6052 spin_unlock_bh(&ar->data_lock); 6053 6054 mutex_lock(&ar->conf_mutex); 6055 6056 nss = max_t(u32, 1, nss); 6057 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask), 6058 ath10k_mac_max_vht_nss(vht_mcs_mask))); 6059 6060 if (changed & IEEE80211_RC_BW_CHANGED) { 6061 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", 6062 sta->addr, bw); 6063 6064 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6065 WMI_PEER_CHAN_WIDTH, bw); 6066 if (err) 6067 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", 6068 sta->addr, bw, err); 6069 } 6070 6071 if (changed & IEEE80211_RC_NSS_CHANGED) { 6072 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", 6073 sta->addr, nss); 6074 6075 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6076 WMI_PEER_NSS, nss); 6077 if (err) 6078 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", 6079 sta->addr, nss, err); 6080 } 6081 6082 if (changed & IEEE80211_RC_SMPS_CHANGED) { 6083 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", 6084 sta->addr, smps); 6085 6086 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6087 WMI_PEER_SMPS_STATE, smps); 6088 if (err) 6089 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", 6090 sta->addr, smps, err); 6091 } 6092 6093 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { 6094 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", 6095 sta->addr); 6096 6097 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 6098 if (err) 6099 ath10k_warn(ar, "failed to reassociate station: %pM\n", 6100 sta->addr); 6101 } 6102 6103 mutex_unlock(&ar->conf_mutex); 6104 } 6105 6106 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif, 6107 struct ieee80211_sta *sta) 6108 { 6109 struct ath10k *ar = arvif->ar; 6110 6111 lockdep_assert_held(&ar->conf_mutex); 6112 6113 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 6114 return 0; 6115 6116 if (ar->num_stations >= ar->max_num_stations) 6117 return -ENOBUFS; 6118 6119 ar->num_stations++; 6120 6121 return 0; 6122 } 6123 6124 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif, 6125 struct ieee80211_sta *sta) 6126 { 6127 struct ath10k *ar = arvif->ar; 6128 6129 lockdep_assert_held(&ar->conf_mutex); 6130 6131 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 6132 return; 6133 6134 ar->num_stations--; 6135 } 6136 6137 static int ath10k_sta_state(struct ieee80211_hw *hw, 6138 struct ieee80211_vif *vif, 6139 struct ieee80211_sta *sta, 6140 enum ieee80211_sta_state old_state, 6141 enum ieee80211_sta_state new_state) 6142 { 6143 struct ath10k *ar = hw->priv; 6144 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6145 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6146 struct ath10k_peer *peer; 6147 int ret = 0; 6148 int i; 6149 6150 if (old_state == IEEE80211_STA_NOTEXIST && 6151 new_state == IEEE80211_STA_NONE) { 6152 memset(arsta, 0, sizeof(*arsta)); 6153 arsta->arvif = arvif; 6154 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); 6155 6156 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6157 ath10k_mac_txq_init(sta->txq[i]); 6158 } 6159 6160 /* cancel must be done outside the mutex to avoid deadlock */ 6161 if ((old_state == IEEE80211_STA_NONE && 6162 new_state == IEEE80211_STA_NOTEXIST)) 6163 cancel_work_sync(&arsta->update_wk); 6164 6165 mutex_lock(&ar->conf_mutex); 6166 6167 if (old_state == IEEE80211_STA_NOTEXIST && 6168 new_state == 
IEEE80211_STA_NONE) { 6169 /* 6170 * New station addition. 6171 */ 6172 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT; 6173 u32 num_tdls_stations; 6174 u32 num_tdls_vifs; 6175 6176 ath10k_dbg(ar, ATH10K_DBG_MAC, 6177 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", 6178 arvif->vdev_id, sta->addr, 6179 ar->num_stations + 1, ar->max_num_stations, 6180 ar->num_peers + 1, ar->max_num_peers); 6181 6182 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); 6183 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw); 6184 6185 if (sta->tdls) { 6186 if (num_tdls_stations >= ar->max_num_tdls_vdevs) { 6187 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n", 6188 arvif->vdev_id, 6189 ar->max_num_tdls_vdevs); 6190 ret = -ELNRNG; 6191 goto exit; 6192 } 6193 peer_type = WMI_PEER_TYPE_TDLS; 6194 } 6195 6196 ret = ath10k_mac_inc_num_stations(arvif, sta); 6197 if (ret) { 6198 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", 6199 ar->max_num_stations); 6200 goto exit; 6201 } 6202 6203 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, 6204 sta->addr, peer_type); 6205 if (ret) { 6206 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", 6207 sta->addr, arvif->vdev_id, ret); 6208 ath10k_mac_dec_num_stations(arvif, sta); 6209 goto exit; 6210 } 6211 6212 spin_lock_bh(&ar->data_lock); 6213 6214 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 6215 if (!peer) { 6216 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 6217 vif->addr, arvif->vdev_id); 6218 spin_unlock_bh(&ar->data_lock); 6219 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6220 ath10k_mac_dec_num_stations(arvif, sta); 6221 ret = -ENOENT; 6222 goto exit; 6223 } 6224 6225 arsta->peer_id = find_first_bit(peer->peer_ids, 6226 ATH10K_MAX_NUM_PEER_IDS); 6227 6228 spin_unlock_bh(&ar->data_lock); 6229 6230 if (!sta->tdls) 6231 goto exit; 6232 6233 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6234 WMI_TDLS_ENABLE_ACTIVE); 6235 if (ret) { 6236 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6237 arvif->vdev_id, ret); 6238 ath10k_peer_delete(ar, arvif->vdev_id, 6239 sta->addr); 6240 ath10k_mac_dec_num_stations(arvif, sta); 6241 goto exit; 6242 } 6243 6244 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6245 WMI_TDLS_PEER_STATE_PEERING); 6246 if (ret) { 6247 ath10k_warn(ar, 6248 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n", 6249 sta->addr, arvif->vdev_id, ret); 6250 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6251 ath10k_mac_dec_num_stations(arvif, sta); 6252 6253 if (num_tdls_stations != 0) 6254 goto exit; 6255 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6256 WMI_TDLS_DISABLE); 6257 } 6258 } else if ((old_state == IEEE80211_STA_NONE && 6259 new_state == IEEE80211_STA_NOTEXIST)) { 6260 /* 6261 * Existing station deletion. 
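 *
 * Teardown order below: optional TDLS peer teardown, firmware peer
 * delete, station accounting, a sweep of ar->peer_map for stale
 * entries, per-sta TX queue unref and, for the last TDLS peer on the
 * vif, disabling TDLS in firmware.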
6262 */ 6263 ath10k_dbg(ar, ATH10K_DBG_MAC, 6264 "mac vdev %d peer delete %pM sta %pK (sta gone)\n", 6265 arvif->vdev_id, sta->addr, sta); 6266 6267 if (sta->tdls) { 6268 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, 6269 sta, 6270 WMI_TDLS_PEER_STATE_TEARDOWN); 6271 if (ret) 6272 ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", 6273 sta->addr, 6274 WMI_TDLS_PEER_STATE_TEARDOWN, ret); 6275 } 6276 6277 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6278 if (ret) 6279 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", 6280 sta->addr, arvif->vdev_id, ret); 6281 6282 ath10k_mac_dec_num_stations(arvif, sta); 6283 6284 spin_lock_bh(&ar->data_lock); 6285 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 6286 peer = ar->peer_map[i]; 6287 if (!peer) 6288 continue; 6289 6290 if (peer->sta == sta) { 6291 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n", 6292 sta->addr, peer, i, arvif->vdev_id); 6293 peer->sta = NULL; 6294 6295 /* Clean up the peer object as well since we 6296 * must have failed to do this above. 6297 */ 6298 list_del(&peer->list); 6299 ar->peer_map[i] = NULL; 6300 kfree(peer); 6301 ar->num_peers--; 6302 } 6303 } 6304 spin_unlock_bh(&ar->data_lock); 6305 6306 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6307 ath10k_mac_txq_unref(ar, sta->txq[i]); 6308 6309 if (!sta->tdls) 6310 goto exit; 6311 6312 if (ath10k_mac_tdls_vif_stations_count(hw, vif)) 6313 goto exit; 6314 6315 /* This was the last tdls peer in current vif */ 6316 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6317 WMI_TDLS_DISABLE); 6318 if (ret) { 6319 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6320 arvif->vdev_id, ret); 6321 } 6322 } else if (old_state == IEEE80211_STA_AUTH && 6323 new_state == IEEE80211_STA_ASSOC && 6324 (vif->type == NL80211_IFTYPE_AP || 6325 vif->type == NL80211_IFTYPE_MESH_POINT || 6326 vif->type == NL80211_IFTYPE_ADHOC)) { 6327 /* 6328 * New association. 6329 */ 6330 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", 6331 sta->addr); 6332 6333 ret = ath10k_station_assoc(ar, vif, sta, false); 6334 if (ret) 6335 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", 6336 sta->addr, arvif->vdev_id, ret); 6337 } else if (old_state == IEEE80211_STA_ASSOC && 6338 new_state == IEEE80211_STA_AUTHORIZED && 6339 sta->tdls) { 6340 /* 6341 * Tdls station authorized. 6342 */ 6343 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n", 6344 sta->addr); 6345 6346 ret = ath10k_station_assoc(ar, vif, sta, false); 6347 if (ret) { 6348 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n", 6349 sta->addr, arvif->vdev_id, ret); 6350 goto exit; 6351 } 6352 6353 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6354 WMI_TDLS_PEER_STATE_CONNECTED); 6355 if (ret) 6356 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n", 6357 sta->addr, arvif->vdev_id, ret); 6358 } else if (old_state == IEEE80211_STA_ASSOC && 6359 new_state == IEEE80211_STA_AUTH && 6360 (vif->type == NL80211_IFTYPE_AP || 6361 vif->type == NL80211_IFTYPE_MESH_POINT || 6362 vif->type == NL80211_IFTYPE_ADHOC)) { 6363 /* 6364 * Disassociation. 
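 *
 * The firmware peer created at STA_NONE is kept here; only the
 * association state is torn down via ath10k_station_disassoc().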
6365 */ 6366 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", 6367 sta->addr); 6368 6369 ret = ath10k_station_disassoc(ar, vif, sta); 6370 if (ret) 6371 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", 6372 sta->addr, arvif->vdev_id, ret); 6373 } 6374 exit: 6375 mutex_unlock(&ar->conf_mutex); 6376 return ret; 6377 } 6378 6379 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, 6380 u16 ac, bool enable) 6381 { 6382 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6383 struct wmi_sta_uapsd_auto_trig_arg arg = {}; 6384 u32 prio = 0, acc = 0; 6385 u32 value = 0; 6386 int ret = 0; 6387 6388 lockdep_assert_held(&ar->conf_mutex); 6389 6390 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 6391 return 0; 6392 6393 switch (ac) { 6394 case IEEE80211_AC_VO: 6395 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 6396 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 6397 prio = 7; 6398 acc = 3; 6399 break; 6400 case IEEE80211_AC_VI: 6401 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 6402 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 6403 prio = 5; 6404 acc = 2; 6405 break; 6406 case IEEE80211_AC_BE: 6407 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 6408 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 6409 prio = 2; 6410 acc = 1; 6411 break; 6412 case IEEE80211_AC_BK: 6413 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 6414 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 6415 prio = 0; 6416 acc = 0; 6417 break; 6418 } 6419 6420 if (enable) 6421 arvif->u.sta.uapsd |= value; 6422 else 6423 arvif->u.sta.uapsd &= ~value; 6424 6425 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6426 WMI_STA_PS_PARAM_UAPSD, 6427 arvif->u.sta.uapsd); 6428 if (ret) { 6429 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); 6430 goto exit; 6431 } 6432 6433 if (arvif->u.sta.uapsd) 6434 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 6435 else 6436 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 6437 6438 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6439 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 6440 value); 6441 if (ret) 6442 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); 6443 6444 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 6445 if (ret) { 6446 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 6447 arvif->vdev_id, ret); 6448 return ret; 6449 } 6450 6451 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 6452 if (ret) { 6453 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 6454 arvif->vdev_id, ret); 6455 return ret; 6456 } 6457 6458 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || 6459 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { 6460 /* Only userspace can make an educated decision when to send 6461 * trigger frame. The following effectively disables u-UAPSD 6462 * autotrigger in firmware (which is enabled by default 6463 * provided the autotrigger service is available). 
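 *
 * Concretely, the service interval of 0 combined with the maximum
 * suspend/delay intervals set below leaves trigger frame generation
 * entirely to the host.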
6464 */ 6465 6466 arg.wmm_ac = acc; 6467 arg.user_priority = prio; 6468 arg.service_interval = 0; 6469 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6470 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6471 6472 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, 6473 arvif->bssid, &arg, 1); 6474 if (ret) { 6475 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", 6476 ret); 6477 return ret; 6478 } 6479 } 6480 6481 exit: 6482 return ret; 6483 } 6484 6485 static int ath10k_conf_tx(struct ieee80211_hw *hw, 6486 struct ieee80211_vif *vif, u16 ac, 6487 const struct ieee80211_tx_queue_params *params) 6488 { 6489 struct ath10k *ar = hw->priv; 6490 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6491 struct wmi_wmm_params_arg *p = NULL; 6492 int ret; 6493 6494 mutex_lock(&ar->conf_mutex); 6495 6496 switch (ac) { 6497 case IEEE80211_AC_VO: 6498 p = &arvif->wmm_params.ac_vo; 6499 break; 6500 case IEEE80211_AC_VI: 6501 p = &arvif->wmm_params.ac_vi; 6502 break; 6503 case IEEE80211_AC_BE: 6504 p = &arvif->wmm_params.ac_be; 6505 break; 6506 case IEEE80211_AC_BK: 6507 p = &arvif->wmm_params.ac_bk; 6508 break; 6509 } 6510 6511 if (WARN_ON(!p)) { 6512 ret = -EINVAL; 6513 goto exit; 6514 } 6515 6516 p->cwmin = params->cw_min; 6517 p->cwmax = params->cw_max; 6518 p->aifs = params->aifs; 6519 6520 /* 6521 * The channel time duration programmed in the HW is in absolute 6522 * microseconds, while mac80211 gives the txop in units of 6523 * 32 microseconds. 6524 */ 6525 p->txop = params->txop * 32; 6526 6527 if (ar->wmi.ops->gen_vdev_wmm_conf) { 6528 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, 6529 &arvif->wmm_params); 6530 if (ret) { 6531 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", 6532 arvif->vdev_id, ret); 6533 goto exit; 6534 } 6535 } else { 6536 /* This won't work well with multi-interface cases but it's 6537 * better than nothing. 
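 *
 * The pdev WMM parameters are shared by every vdev, so with several
 * interfaces up the most recent conf_tx() call simply wins.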
6538 */ 6539 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); 6540 if (ret) { 6541 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); 6542 goto exit; 6543 } 6544 } 6545 6546 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 6547 if (ret) 6548 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); 6549 6550 exit: 6551 mutex_unlock(&ar->conf_mutex); 6552 return ret; 6553 } 6554 6555 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ) 6556 6557 static int ath10k_remain_on_channel(struct ieee80211_hw *hw, 6558 struct ieee80211_vif *vif, 6559 struct ieee80211_channel *chan, 6560 int duration, 6561 enum ieee80211_roc_type type) 6562 { 6563 struct ath10k *ar = hw->priv; 6564 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6565 struct wmi_start_scan_arg arg; 6566 int ret = 0; 6567 u32 scan_time_msec; 6568 6569 mutex_lock(&ar->conf_mutex); 6570 6571 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 6572 ret = -EBUSY; 6573 goto exit; 6574 } 6575 6576 spin_lock_bh(&ar->data_lock); 6577 switch (ar->scan.state) { 6578 case ATH10K_SCAN_IDLE: 6579 reinit_completion(&ar->scan.started); 6580 reinit_completion(&ar->scan.completed); 6581 reinit_completion(&ar->scan.on_channel); 6582 ar->scan.state = ATH10K_SCAN_STARTING; 6583 ar->scan.is_roc = true; 6584 ar->scan.vdev_id = arvif->vdev_id; 6585 ar->scan.roc_freq = chan->center_freq; 6586 ar->scan.roc_notify = true; 6587 ret = 0; 6588 break; 6589 case ATH10K_SCAN_STARTING: 6590 case ATH10K_SCAN_RUNNING: 6591 case ATH10K_SCAN_ABORTING: 6592 ret = -EBUSY; 6593 break; 6594 } 6595 spin_unlock_bh(&ar->data_lock); 6596 6597 if (ret) 6598 goto exit; 6599 6600 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; 6601 6602 memset(&arg, 0, sizeof(arg)); 6603 ath10k_wmi_start_scan_init(ar, &arg); 6604 arg.vdev_id = arvif->vdev_id; 6605 arg.scan_id = ATH10K_SCAN_ID; 6606 arg.n_channels = 1; 6607 arg.channels[0] = chan->center_freq; 6608 arg.dwell_time_active = scan_time_msec; 6609 arg.dwell_time_passive = scan_time_msec; 6610 arg.max_scan_time = scan_time_msec; 6611 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 6612 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; 6613 arg.burst_duration_ms = duration; 6614 6615 ret = ath10k_start_scan(ar, &arg); 6616 if (ret) { 6617 ath10k_warn(ar, "failed to start roc scan: %d\n", ret); 6618 spin_lock_bh(&ar->data_lock); 6619 ar->scan.state = ATH10K_SCAN_IDLE; 6620 spin_unlock_bh(&ar->data_lock); 6621 goto exit; 6622 } 6623 6624 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); 6625 if (ret == 0) { 6626 ath10k_warn(ar, "failed to switch to channel for roc scan\n"); 6627 6628 ret = ath10k_scan_stop(ar); 6629 if (ret) 6630 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 6631 6632 ret = -ETIMEDOUT; 6633 goto exit; 6634 } 6635 6636 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 6637 msecs_to_jiffies(duration)); 6638 6639 ret = 0; 6640 exit: 6641 mutex_unlock(&ar->conf_mutex); 6642 return ret; 6643 } 6644 6645 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) 6646 { 6647 struct ath10k *ar = hw->priv; 6648 6649 mutex_lock(&ar->conf_mutex); 6650 6651 spin_lock_bh(&ar->data_lock); 6652 ar->scan.roc_notify = false; 6653 spin_unlock_bh(&ar->data_lock); 6654 6655 ath10k_scan_abort(ar); 6656 6657 mutex_unlock(&ar->conf_mutex); 6658 6659 cancel_delayed_work_sync(&ar->scan.timeout); 6660 6661 return 0; 6662 } 6663 6664 /* 6665 * Both RTS and Fragmentation threshold are interface-specific 6666 * in ath10k, but device-specific in mac80211. 
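 *
 * The RTS callback below therefore applies the single mac80211 value
 * to every vdev in turn, while fragmentation is rejected outright
 * (see ath10k_mac_op_set_frag_threshold()).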
6667 */ 6668 6669 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 6670 { 6671 struct ath10k *ar = hw->priv; 6672 struct ath10k_vif *arvif; 6673 int ret = 0; 6674 6675 mutex_lock(&ar->conf_mutex); 6676 list_for_each_entry(arvif, &ar->arvifs, list) { 6677 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", 6678 arvif->vdev_id, value); 6679 6680 ret = ath10k_mac_set_rts(arvif, value); 6681 if (ret) { 6682 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 6683 arvif->vdev_id, ret); 6684 break; 6685 } 6686 } 6687 mutex_unlock(&ar->conf_mutex); 6688 6689 return ret; 6690 } 6691 6692 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) 6693 { 6694 /* Even though there's a WMI enum for fragmentation threshold no known 6695 * firmware actually implements it. Moreover it is not possible to rely 6696 * frame fragmentation to mac80211 because firmware clears the "more 6697 * fragments" bit in frame control making it impossible for remote 6698 * devices to reassemble frames. 6699 * 6700 * Hence implement a dummy callback just to say fragmentation isn't 6701 * supported. This effectively prevents mac80211 from doing frame 6702 * fragmentation in software. 6703 */ 6704 return -EOPNOTSUPP; 6705 } 6706 6707 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 6708 u32 queues, bool drop) 6709 { 6710 struct ath10k *ar = hw->priv; 6711 bool skip; 6712 long time_left; 6713 6714 /* mac80211 doesn't care if we really xmit queued frames or not 6715 * we'll collect those frames either way if we stop/delete vdevs 6716 */ 6717 if (drop) 6718 return; 6719 6720 mutex_lock(&ar->conf_mutex); 6721 6722 if (ar->state == ATH10K_STATE_WEDGED) 6723 goto skip; 6724 6725 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({ 6726 bool empty; 6727 6728 spin_lock_bh(&ar->htt.tx_lock); 6729 empty = (ar->htt.num_pending_tx == 0); 6730 spin_unlock_bh(&ar->htt.tx_lock); 6731 6732 skip = (ar->state == ATH10K_STATE_WEDGED) || 6733 test_bit(ATH10K_FLAG_CRASH_FLUSH, 6734 &ar->dev_flags); 6735 6736 (empty || skip); 6737 }), ATH10K_FLUSH_TIMEOUT_HZ); 6738 6739 if (time_left == 0 || skip) 6740 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n", 6741 skip, ar->state, time_left); 6742 6743 skip: 6744 mutex_unlock(&ar->conf_mutex); 6745 } 6746 6747 /* TODO: Implement this function properly 6748 * For now it is needed to reply to Probe Requests in IBSS mode. 6749 * Propably we need this information from FW. 6750 */ 6751 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw) 6752 { 6753 return 1; 6754 } 6755 6756 static void ath10k_reconfig_complete(struct ieee80211_hw *hw, 6757 enum ieee80211_reconfig_type reconfig_type) 6758 { 6759 struct ath10k *ar = hw->priv; 6760 6761 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) 6762 return; 6763 6764 mutex_lock(&ar->conf_mutex); 6765 6766 /* If device failed to restart it will be in a different state, e.g. 
6767 * ATH10K_STATE_WEDGED 6768 */ 6769 if (ar->state == ATH10K_STATE_RESTARTED) { 6770 ath10k_info(ar, "device successfully recovered\n"); 6771 ar->state = ATH10K_STATE_ON; 6772 ieee80211_wake_queues(ar->hw); 6773 } 6774 6775 mutex_unlock(&ar->conf_mutex); 6776 } 6777 6778 static void 6779 ath10k_mac_update_bss_chan_survey(struct ath10k *ar, 6780 struct ieee80211_channel *channel) 6781 { 6782 int ret; 6783 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; 6784 6785 lockdep_assert_held(&ar->conf_mutex); 6786 6787 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) || 6788 (ar->rx_channel != channel)) 6789 return; 6790 6791 if (ar->scan.state != ATH10K_SCAN_IDLE) { 6792 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n"); 6793 return; 6794 } 6795 6796 reinit_completion(&ar->bss_survey_done); 6797 6798 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type); 6799 if (ret) { 6800 ath10k_warn(ar, "failed to send pdev bss chan info request\n"); 6801 return; 6802 } 6803 6804 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); 6805 if (!ret) { 6806 ath10k_warn(ar, "bss channel survey timed out\n"); 6807 return; 6808 } 6809 } 6810 6811 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, 6812 struct survey_info *survey) 6813 { 6814 struct ath10k *ar = hw->priv; 6815 struct ieee80211_supported_band *sband; 6816 struct survey_info *ar_survey = &ar->survey[idx]; 6817 int ret = 0; 6818 6819 mutex_lock(&ar->conf_mutex); 6820 6821 sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; 6822 if (sband && idx >= sband->n_channels) { 6823 idx -= sband->n_channels; 6824 sband = NULL; 6825 } 6826 6827 if (!sband) 6828 sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; 6829 6830 if (!sband || idx >= sband->n_channels) { 6831 ret = -ENOENT; 6832 goto exit; 6833 } 6834 6835 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); 6836 6837 spin_lock_bh(&ar->data_lock); 6838 memcpy(survey, ar_survey, sizeof(*survey)); 6839 spin_unlock_bh(&ar->data_lock); 6840 6841 survey->channel = &sband->channels[idx]; 6842 6843 if (ar->rx_channel == survey->channel) 6844 survey->filled |= SURVEY_INFO_IN_USE; 6845 6846 exit: 6847 mutex_unlock(&ar->conf_mutex); 6848 return ret; 6849 } 6850 6851 static bool 6852 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, 6853 enum nl80211_band band, 6854 const struct cfg80211_bitrate_mask *mask) 6855 { 6856 int num_rates = 0; 6857 int i; 6858 6859 num_rates += hweight32(mask->control[band].legacy); 6860 6861 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 6862 num_rates += hweight8(mask->control[band].ht_mcs[i]); 6863 6864 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) 6865 num_rates += hweight16(mask->control[band].vht_mcs[i]); 6866 6867 return num_rates == 1; 6868 } 6869 6870 static bool 6871 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, 6872 enum nl80211_band band, 6873 const struct cfg80211_bitrate_mask *mask, 6874 int *nss) 6875 { 6876 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6877 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 6878 u8 ht_nss_mask = 0; 6879 u8 vht_nss_mask = 0; 6880 int i; 6881 6882 if (mask->control[band].legacy) 6883 return false; 6884 6885 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6886 if (mask->control[band].ht_mcs[i] == 0) 6887 continue; 6888 else if (mask->control[band].ht_mcs[i] == 6889 sband->ht_cap.mcs.rx_mask[i]) 6890 ht_nss_mask |= BIT(i); 6891 else 6892 return false; 6893 
} 6894 6895 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6896 if (mask->control[band].vht_mcs[i] == 0) 6897 continue; 6898 else if (mask->control[band].vht_mcs[i] == 6899 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 6900 vht_nss_mask |= BIT(i); 6901 else 6902 return false; 6903 } 6904 6905 if (ht_nss_mask != vht_nss_mask) 6906 return false; 6907 6908 if (ht_nss_mask == 0) 6909 return false; 6910 6911 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 6912 return false; 6913 6914 *nss = fls(ht_nss_mask); 6915 6916 return true; 6917 } 6918 6919 static int 6920 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, 6921 enum nl80211_band band, 6922 const struct cfg80211_bitrate_mask *mask, 6923 u8 *rate, u8 *nss) 6924 { 6925 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6926 int rate_idx; 6927 int i; 6928 u16 bitrate; 6929 u8 preamble; 6930 u8 hw_rate; 6931 6932 if (hweight32(mask->control[band].legacy) == 1) { 6933 rate_idx = ffs(mask->control[band].legacy) - 1; 6934 6935 hw_rate = sband->bitrates[rate_idx].hw_value; 6936 bitrate = sband->bitrates[rate_idx].bitrate; 6937 6938 if (ath10k_mac_bitrate_is_cck(bitrate)) 6939 preamble = WMI_RATE_PREAMBLE_CCK; 6940 else 6941 preamble = WMI_RATE_PREAMBLE_OFDM; 6942 6943 *nss = 1; 6944 *rate = preamble << 6 | 6945 (*nss - 1) << 4 | 6946 hw_rate << 0; 6947 6948 return 0; 6949 } 6950 6951 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6952 if (hweight8(mask->control[band].ht_mcs[i]) == 1) { 6953 *nss = i + 1; 6954 *rate = WMI_RATE_PREAMBLE_HT << 6 | 6955 (*nss - 1) << 4 | 6956 (ffs(mask->control[band].ht_mcs[i]) - 1); 6957 6958 return 0; 6959 } 6960 } 6961 6962 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6963 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 6964 *nss = i + 1; 6965 *rate = WMI_RATE_PREAMBLE_VHT << 6 | 6966 (*nss - 1) << 4 | 6967 (ffs(mask->control[band].vht_mcs[i]) - 1); 6968 6969 return 0; 6970 } 6971 } 6972 6973 return -EINVAL; 6974 } 6975 6976 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, 6977 u8 rate, u8 nss, u8 sgi, u8 ldpc) 6978 { 6979 struct ath10k *ar = arvif->ar; 6980 u32 vdev_param; 6981 int ret; 6982 6983 lockdep_assert_held(&ar->conf_mutex); 6984 6985 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n", 6986 arvif->vdev_id, rate, nss, sgi); 6987 6988 vdev_param = ar->wmi.vdev_param->fixed_rate; 6989 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate); 6990 if (ret) { 6991 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", 6992 rate, ret); 6993 return ret; 6994 } 6995 6996 vdev_param = ar->wmi.vdev_param->nss; 6997 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss); 6998 if (ret) { 6999 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret); 7000 return ret; 7001 } 7002 7003 vdev_param = ar->wmi.vdev_param->sgi; 7004 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi); 7005 if (ret) { 7006 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret); 7007 return ret; 7008 } 7009 7010 vdev_param = ar->wmi.vdev_param->ldpc; 7011 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc); 7012 if (ret) { 7013 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret); 7014 return ret; 7015 } 7016 7017 return 0; 7018 } 7019 7020 static bool 7021 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, 7022 enum nl80211_band band, 7023 const struct cfg80211_bitrate_mask *mask) 7024 { 7025 int i; 
7026 u16 vht_mcs; 7027 7028 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible 7029 * to express all VHT MCS rate masks. Effectively only the following 7030 * ranges can be used: none, 0-7, 0-8 and 0-9. 7031 */ 7032 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 7033 vht_mcs = mask->control[band].vht_mcs[i]; 7034 7035 switch (vht_mcs) { 7036 case 0: 7037 case BIT(8) - 1: 7038 case BIT(9) - 1: 7039 case BIT(10) - 1: 7040 break; 7041 default: 7042 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n"); 7043 return false; 7044 } 7045 } 7046 7047 return true; 7048 } 7049 7050 static void ath10k_mac_set_bitrate_mask_iter(void *data, 7051 struct ieee80211_sta *sta) 7052 { 7053 struct ath10k_vif *arvif = data; 7054 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7055 struct ath10k *ar = arvif->ar; 7056 7057 if (arsta->arvif != arvif) 7058 return; 7059 7060 spin_lock_bh(&ar->data_lock); 7061 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 7062 spin_unlock_bh(&ar->data_lock); 7063 7064 ieee80211_queue_work(ar->hw, &arsta->update_wk); 7065 } 7066 7067 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 7068 struct ieee80211_vif *vif, 7069 const struct cfg80211_bitrate_mask *mask) 7070 { 7071 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7072 struct cfg80211_chan_def def; 7073 struct ath10k *ar = arvif->ar; 7074 enum nl80211_band band; 7075 const u8 *ht_mcs_mask; 7076 const u16 *vht_mcs_mask; 7077 u8 rate; 7078 u8 nss; 7079 u8 sgi; 7080 u8 ldpc; 7081 int single_nss; 7082 int ret; 7083 7084 if (ath10k_mac_vif_chan(vif, &def)) 7085 return -EPERM; 7086 7087 band = def.chan->band; 7088 ht_mcs_mask = mask->control[band].ht_mcs; 7089 vht_mcs_mask = mask->control[band].vht_mcs; 7090 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 7091 7092 sgi = mask->control[band].gi; 7093 if (sgi == NL80211_TXRATE_FORCE_LGI) 7094 return -EINVAL; 7095 7096 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) { 7097 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, 7098 &rate, &nss); 7099 if (ret) { 7100 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n", 7101 arvif->vdev_id, ret); 7102 return ret; 7103 } 7104 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask, 7105 &single_nss)) { 7106 rate = WMI_FIXED_RATE_NONE; 7107 nss = single_nss; 7108 } else { 7109 rate = WMI_FIXED_RATE_NONE; 7110 nss = min(ar->num_rf_chains, 7111 max(ath10k_mac_max_ht_nss(ht_mcs_mask), 7112 ath10k_mac_max_vht_nss(vht_mcs_mask))); 7113 7114 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask)) 7115 return -EINVAL; 7116 7117 mutex_lock(&ar->conf_mutex); 7118 7119 arvif->bitrate_mask = *mask; 7120 ieee80211_iterate_stations_atomic(ar->hw, 7121 ath10k_mac_set_bitrate_mask_iter, 7122 arvif); 7123 7124 mutex_unlock(&ar->conf_mutex); 7125 } 7126 7127 mutex_lock(&ar->conf_mutex); 7128 7129 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); 7130 if (ret) { 7131 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", 7132 arvif->vdev_id, ret); 7133 goto exit; 7134 } 7135 7136 exit: 7137 mutex_unlock(&ar->conf_mutex); 7138 7139 return ret; 7140 } 7141 7142 static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 7143 struct ieee80211_vif *vif, 7144 struct ieee80211_sta *sta, 7145 u32 changed) 7146 { 7147 struct ath10k *ar = hw->priv; 7148 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7149 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7150 struct ath10k_peer *peer; 7151 u32 bw, smps; 7152 7153 
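/* Only record the new peer parameters under data_lock here; the WMI
 * updates themselves are deferred to ath10k_sta_rc_update_wk() since
 * this callback is not allowed to sleep.
 */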
spin_lock_bh(&ar->data_lock); 7154 7155 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 7156 if (!peer) { 7157 spin_unlock_bh(&ar->data_lock); 7158 ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n", 7159 sta->addr, arvif->vdev_id); 7160 return; 7161 } 7162 7163 ath10k_dbg(ar, ATH10K_DBG_MAC, 7164 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", 7165 sta->addr, changed, sta->bandwidth, sta->rx_nss, 7166 sta->smps_mode); 7167 7168 if (changed & IEEE80211_RC_BW_CHANGED) { 7169 bw = WMI_PEER_CHWIDTH_20MHZ; 7170 7171 switch (sta->bandwidth) { 7172 case IEEE80211_STA_RX_BW_20: 7173 bw = WMI_PEER_CHWIDTH_20MHZ; 7174 break; 7175 case IEEE80211_STA_RX_BW_40: 7176 bw = WMI_PEER_CHWIDTH_40MHZ; 7177 break; 7178 case IEEE80211_STA_RX_BW_80: 7179 bw = WMI_PEER_CHWIDTH_80MHZ; 7180 break; 7181 case IEEE80211_STA_RX_BW_160: 7182 bw = WMI_PEER_CHWIDTH_160MHZ; 7183 break; 7184 default: 7185 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", 7186 sta->bandwidth, sta->addr); 7187 bw = WMI_PEER_CHWIDTH_20MHZ; 7188 break; 7189 } 7190 7191 arsta->bw = bw; 7192 } 7193 7194 if (changed & IEEE80211_RC_NSS_CHANGED) 7195 arsta->nss = sta->rx_nss; 7196 7197 if (changed & IEEE80211_RC_SMPS_CHANGED) { 7198 smps = WMI_PEER_SMPS_PS_NONE; 7199 7200 switch (sta->smps_mode) { 7201 case IEEE80211_SMPS_AUTOMATIC: 7202 case IEEE80211_SMPS_OFF: 7203 smps = WMI_PEER_SMPS_PS_NONE; 7204 break; 7205 case IEEE80211_SMPS_STATIC: 7206 smps = WMI_PEER_SMPS_STATIC; 7207 break; 7208 case IEEE80211_SMPS_DYNAMIC: 7209 smps = WMI_PEER_SMPS_DYNAMIC; 7210 break; 7211 case IEEE80211_SMPS_NUM_MODES: 7212 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", 7213 sta->smps_mode, sta->addr); 7214 smps = WMI_PEER_SMPS_PS_NONE; 7215 break; 7216 } 7217 7218 arsta->smps = smps; 7219 } 7220 7221 arsta->changed |= changed; 7222 7223 spin_unlock_bh(&ar->data_lock); 7224 7225 ieee80211_queue_work(hw, &arsta->update_wk); 7226 } 7227 7228 static void ath10k_offset_tsf(struct ieee80211_hw *hw, 7229 struct ieee80211_vif *vif, s64 tsf_offset) 7230 { 7231 struct ath10k *ar = hw->priv; 7232 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7233 u32 offset, vdev_param; 7234 int ret; 7235 7236 if (tsf_offset < 0) { 7237 vdev_param = ar->wmi.vdev_param->dec_tsf; 7238 offset = -tsf_offset; 7239 } else { 7240 vdev_param = ar->wmi.vdev_param->inc_tsf; 7241 offset = tsf_offset; 7242 } 7243 7244 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 7245 vdev_param, offset); 7246 7247 if (ret && ret != -EOPNOTSUPP) 7248 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n", 7249 offset, vdev_param, ret); 7250 } 7251 7252 static int ath10k_ampdu_action(struct ieee80211_hw *hw, 7253 struct ieee80211_vif *vif, 7254 struct ieee80211_ampdu_params *params) 7255 { 7256 struct ath10k *ar = hw->priv; 7257 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7258 struct ieee80211_sta *sta = params->sta; 7259 enum ieee80211_ampdu_mlme_action action = params->action; 7260 u16 tid = params->tid; 7261 7262 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", 7263 arvif->vdev_id, sta->addr, tid, action); 7264 7265 switch (action) { 7266 case IEEE80211_AMPDU_RX_START: 7267 case IEEE80211_AMPDU_RX_STOP: 7268 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session 7269 * creation/removal. Do we need to verify this? 
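 *
 * Returning 0 simply accepts mac80211's request; no firmware command
 * is issued from here for the RX BA session.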
7270 */ 7271 return 0; 7272 case IEEE80211_AMPDU_TX_START: 7273 case IEEE80211_AMPDU_TX_STOP_CONT: 7274 case IEEE80211_AMPDU_TX_STOP_FLUSH: 7275 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 7276 case IEEE80211_AMPDU_TX_OPERATIONAL: 7277 /* Firmware offloads Tx aggregation entirely so deny mac80211 7278 * Tx aggregation requests. 7279 */ 7280 return -EOPNOTSUPP; 7281 } 7282 7283 return -EINVAL; 7284 } 7285 7286 static void 7287 ath10k_mac_update_rx_channel(struct ath10k *ar, 7288 struct ieee80211_chanctx_conf *ctx, 7289 struct ieee80211_vif_chanctx_switch *vifs, 7290 int n_vifs) 7291 { 7292 struct cfg80211_chan_def *def = NULL; 7293 7294 /* Both locks are required because ar->rx_channel is modified. This 7295 * allows readers to hold either lock. 7296 */ 7297 lockdep_assert_held(&ar->conf_mutex); 7298 lockdep_assert_held(&ar->data_lock); 7299 7300 WARN_ON(ctx && vifs); 7301 WARN_ON(vifs && !n_vifs); 7302 7303 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are 7304 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each 7305 * ppdu on Rx may reduce performance on low-end systems. It should be 7306 * possible to make tables/hashmaps to speed the lookup up (be vary of 7307 * cpu data cache lines though regarding sizes) but to keep the initial 7308 * implementation simple and less intrusive fallback to the slow lookup 7309 * only for multi-channel cases. Single-channel cases will remain to 7310 * use the old channel derival and thus performance should not be 7311 * affected much. 7312 */ 7313 rcu_read_lock(); 7314 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) { 7315 ieee80211_iter_chan_contexts_atomic(ar->hw, 7316 ath10k_mac_get_any_chandef_iter, 7317 &def); 7318 7319 if (vifs) 7320 def = &vifs[0].new_ctx->def; 7321 7322 ar->rx_channel = def->chan; 7323 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) || 7324 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) { 7325 /* During driver restart due to firmware assert, since mac80211 7326 * already has valid channel context for given radio, channel 7327 * context iteration return num_chanctx > 0. So fix rx_channel 7328 * when restart is in progress. 7329 */ 7330 ar->rx_channel = ctx->def.chan; 7331 } else { 7332 ar->rx_channel = NULL; 7333 } 7334 rcu_read_unlock(); 7335 } 7336 7337 static void 7338 ath10k_mac_update_vif_chan(struct ath10k *ar, 7339 struct ieee80211_vif_chanctx_switch *vifs, 7340 int n_vifs) 7341 { 7342 struct ath10k_vif *arvif; 7343 int ret; 7344 int i; 7345 7346 lockdep_assert_held(&ar->conf_mutex); 7347 7348 /* First stop monitor interface. Some FW versions crash if there's a 7349 * lone monitor interface. 7350 */ 7351 if (ar->monitor_started) 7352 ath10k_monitor_stop(ar); 7353 7354 for (i = 0; i < n_vifs; i++) { 7355 arvif = (void *)vifs[i].vif->drv_priv; 7356 7357 ath10k_dbg(ar, ATH10K_DBG_MAC, 7358 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n", 7359 arvif->vdev_id, 7360 vifs[i].old_ctx->def.chan->center_freq, 7361 vifs[i].new_ctx->def.chan->center_freq, 7362 vifs[i].old_ctx->def.width, 7363 vifs[i].new_ctx->def.width); 7364 7365 if (WARN_ON(!arvif->is_started)) 7366 continue; 7367 7368 if (WARN_ON(!arvif->is_up)) 7369 continue; 7370 7371 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7372 if (ret) { 7373 ath10k_warn(ar, "failed to down vdev %d: %d\n", 7374 arvif->vdev_id, ret); 7375 continue; 7376 } 7377 } 7378 7379 /* All relevant vdevs are downed and associated channel resources 7380 * should be available for the channel switch now. 
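 *
 * Each vdev is then brought back in three steps: refresh the beacon
 * and probe response templates, restart it on the new chandef and
 * finally bring it up again with ath10k_wmi_vdev_up().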
7381 */ 7382 7383 spin_lock_bh(&ar->data_lock); 7384 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs); 7385 spin_unlock_bh(&ar->data_lock); 7386 7387 for (i = 0; i < n_vifs; i++) { 7388 arvif = (void *)vifs[i].vif->drv_priv; 7389 7390 if (WARN_ON(!arvif->is_started)) 7391 continue; 7392 7393 if (WARN_ON(!arvif->is_up)) 7394 continue; 7395 7396 ret = ath10k_mac_setup_bcn_tmpl(arvif); 7397 if (ret) 7398 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 7399 ret); 7400 7401 ret = ath10k_mac_setup_prb_tmpl(arvif); 7402 if (ret) 7403 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 7404 ret); 7405 7406 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); 7407 if (ret) { 7408 ath10k_warn(ar, "failed to restart vdev %d: %d\n", 7409 arvif->vdev_id, ret); 7410 continue; 7411 } 7412 7413 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 7414 arvif->bssid); 7415 if (ret) { 7416 ath10k_warn(ar, "failed to bring vdev up %d: %d\n", 7417 arvif->vdev_id, ret); 7418 continue; 7419 } 7420 } 7421 7422 ath10k_monitor_recalc(ar); 7423 } 7424 7425 static int 7426 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, 7427 struct ieee80211_chanctx_conf *ctx) 7428 { 7429 struct ath10k *ar = hw->priv; 7430 7431 ath10k_dbg(ar, ATH10K_DBG_MAC, 7432 "mac chanctx add freq %hu width %d ptr %pK\n", 7433 ctx->def.chan->center_freq, ctx->def.width, ctx); 7434 7435 mutex_lock(&ar->conf_mutex); 7436 7437 spin_lock_bh(&ar->data_lock); 7438 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0); 7439 spin_unlock_bh(&ar->data_lock); 7440 7441 ath10k_recalc_radar_detection(ar); 7442 ath10k_monitor_recalc(ar); 7443 7444 mutex_unlock(&ar->conf_mutex); 7445 7446 return 0; 7447 } 7448 7449 static void 7450 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 7451 struct ieee80211_chanctx_conf *ctx) 7452 { 7453 struct ath10k *ar = hw->priv; 7454 7455 ath10k_dbg(ar, ATH10K_DBG_MAC, 7456 "mac chanctx remove freq %hu width %d ptr %pK\n", 7457 ctx->def.chan->center_freq, ctx->def.width, ctx); 7458 7459 mutex_lock(&ar->conf_mutex); 7460 7461 spin_lock_bh(&ar->data_lock); 7462 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0); 7463 spin_unlock_bh(&ar->data_lock); 7464 7465 ath10k_recalc_radar_detection(ar); 7466 ath10k_monitor_recalc(ar); 7467 7468 mutex_unlock(&ar->conf_mutex); 7469 } 7470 7471 struct ath10k_mac_change_chanctx_arg { 7472 struct ieee80211_chanctx_conf *ctx; 7473 struct ieee80211_vif_chanctx_switch *vifs; 7474 int n_vifs; 7475 int next_vif; 7476 }; 7477 7478 static void 7479 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, 7480 struct ieee80211_vif *vif) 7481 { 7482 struct ath10k_mac_change_chanctx_arg *arg = data; 7483 7484 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx) 7485 return; 7486 7487 arg->n_vifs++; 7488 } 7489 7490 static void 7491 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac, 7492 struct ieee80211_vif *vif) 7493 { 7494 struct ath10k_mac_change_chanctx_arg *arg = data; 7495 struct ieee80211_chanctx_conf *ctx; 7496 7497 ctx = rcu_access_pointer(vif->chanctx_conf); 7498 if (ctx != arg->ctx) 7499 return; 7500 7501 if (WARN_ON(arg->next_vif == arg->n_vifs)) 7502 return; 7503 7504 arg->vifs[arg->next_vif].vif = vif; 7505 arg->vifs[arg->next_vif].old_ctx = ctx; 7506 arg->vifs[arg->next_vif].new_ctx = ctx; 7507 arg->next_vif++; 7508 } 7509 7510 static void 7511 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, 7512 struct ieee80211_chanctx_conf *ctx, 7513 u32 changed) 7514 { 7515 struct ath10k *ar = hw->priv; 7516 struct ath10k_mac_change_chanctx_arg arg = 
{ .ctx = ctx }; 7517 7518 mutex_lock(&ar->conf_mutex); 7519 7520 ath10k_dbg(ar, ATH10K_DBG_MAC, 7521 "mac chanctx change freq %hu width %d ptr %pK changed %x\n", 7522 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 7523 7524 /* This shouldn't really happen because channel switching should use 7525 * switch_vif_chanctx(). 7526 */ 7527 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 7528 goto unlock; 7529 7530 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) { 7531 ieee80211_iterate_active_interfaces_atomic( 7532 hw, 7533 IEEE80211_IFACE_ITER_NORMAL, 7534 ath10k_mac_change_chanctx_cnt_iter, 7535 &arg); 7536 if (arg.n_vifs == 0) 7537 goto radar; 7538 7539 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), 7540 GFP_KERNEL); 7541 if (!arg.vifs) 7542 goto radar; 7543 7544 ieee80211_iterate_active_interfaces_atomic( 7545 hw, 7546 IEEE80211_IFACE_ITER_NORMAL, 7547 ath10k_mac_change_chanctx_fill_iter, 7548 &arg); 7549 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); 7550 kfree(arg.vifs); 7551 } 7552 7553 radar: 7554 ath10k_recalc_radar_detection(ar); 7555 7556 /* FIXME: How to configure Rx chains properly? */ 7557 7558 /* No other actions are actually necessary. Firmware maintains channel 7559 * definitions per vdev internally and there's no host-side channel 7560 * context abstraction to configure, e.g. channel width. 7561 */ 7562 7563 unlock: 7564 mutex_unlock(&ar->conf_mutex); 7565 } 7566 7567 static int 7568 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 7569 struct ieee80211_vif *vif, 7570 struct ieee80211_chanctx_conf *ctx) 7571 { 7572 struct ath10k *ar = hw->priv; 7573 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7574 int ret; 7575 7576 mutex_lock(&ar->conf_mutex); 7577 7578 ath10k_dbg(ar, ATH10K_DBG_MAC, 7579 "mac chanctx assign ptr %pK vdev_id %i\n", 7580 ctx, arvif->vdev_id); 7581 7582 if (WARN_ON(arvif->is_started)) { 7583 mutex_unlock(&ar->conf_mutex); 7584 return -EBUSY; 7585 } 7586 7587 ret = ath10k_vdev_start(arvif, &ctx->def); 7588 if (ret) { 7589 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n", 7590 arvif->vdev_id, vif->addr, 7591 ctx->def.chan->center_freq, ret); 7592 goto err; 7593 } 7594 7595 arvif->is_started = true; 7596 7597 ret = ath10k_mac_vif_setup_ps(arvif); 7598 if (ret) { 7599 ath10k_warn(ar, "failed to update vdev %i ps: %d\n", 7600 arvif->vdev_id, ret); 7601 goto err_stop; 7602 } 7603 7604 if (vif->type == NL80211_IFTYPE_MONITOR) { 7605 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); 7606 if (ret) { 7607 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n", 7608 arvif->vdev_id, ret); 7609 goto err_stop; 7610 } 7611 7612 arvif->is_up = true; 7613 } 7614 7615 if (ath10k_mac_can_set_cts_prot(arvif)) { 7616 ret = ath10k_mac_set_cts_prot(arvif); 7617 if (ret) 7618 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 7619 arvif->vdev_id, ret); 7620 } 7621 7622 if (ath10k_peer_stats_enabled(ar)) { 7623 ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS; 7624 ret = ath10k_wmi_pdev_pktlog_enable(ar, 7625 ar->pktlog_filter); 7626 if (ret) { 7627 ath10k_warn(ar, "failed to enable pktlog %d\n", ret); 7628 goto err_stop; 7629 } 7630 } 7631 7632 mutex_unlock(&ar->conf_mutex); 7633 return 0; 7634 7635 err_stop: 7636 ath10k_vdev_stop(arvif); 7637 arvif->is_started = false; 7638 ath10k_mac_vif_setup_ps(arvif); 7639 7640 err: 7641 mutex_unlock(&ar->conf_mutex); 7642 return ret; 7643 } 7644 7645 static void 7646 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 7647 struct ieee80211_vif *vif, 7648 
struct ieee80211_chanctx_conf *ctx) 7649 { 7650 struct ath10k *ar = hw->priv; 7651 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7652 int ret; 7653 7654 mutex_lock(&ar->conf_mutex); 7655 7656 ath10k_dbg(ar, ATH10K_DBG_MAC, 7657 "mac chanctx unassign ptr %pK vdev_id %i\n", 7658 ctx, arvif->vdev_id); 7659 7660 WARN_ON(!arvif->is_started); 7661 7662 if (vif->type == NL80211_IFTYPE_MONITOR) { 7663 WARN_ON(!arvif->is_up); 7664 7665 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7666 if (ret) 7667 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n", 7668 arvif->vdev_id, ret); 7669 7670 arvif->is_up = false; 7671 } 7672 7673 ret = ath10k_vdev_stop(arvif); 7674 if (ret) 7675 ath10k_warn(ar, "failed to stop vdev %i: %d\n", 7676 arvif->vdev_id, ret); 7677 7678 arvif->is_started = false; 7679 7680 mutex_unlock(&ar->conf_mutex); 7681 } 7682 7683 static int 7684 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 7685 struct ieee80211_vif_chanctx_switch *vifs, 7686 int n_vifs, 7687 enum ieee80211_chanctx_switch_mode mode) 7688 { 7689 struct ath10k *ar = hw->priv; 7690 7691 mutex_lock(&ar->conf_mutex); 7692 7693 ath10k_dbg(ar, ATH10K_DBG_MAC, 7694 "mac chanctx switch n_vifs %d mode %d\n", 7695 n_vifs, mode); 7696 ath10k_mac_update_vif_chan(ar, vifs, n_vifs); 7697 7698 mutex_unlock(&ar->conf_mutex); 7699 return 0; 7700 } 7701 7702 static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, 7703 struct ieee80211_vif *vif, 7704 struct ieee80211_sta *sta) 7705 { 7706 struct ath10k *ar; 7707 struct ath10k_peer *peer; 7708 7709 ar = hw->priv; 7710 7711 list_for_each_entry(peer, &ar->peers, list) 7712 if (peer->sta == sta) 7713 peer->removed = true; 7714 } 7715 7716 static void ath10k_sta_statistics(struct ieee80211_hw *hw, 7717 struct ieee80211_vif *vif, 7718 struct ieee80211_sta *sta, 7719 struct station_info *sinfo) 7720 { 7721 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7722 struct ath10k *ar = arsta->arvif->ar; 7723 7724 if (!ath10k_peer_stats_enabled(ar)) 7725 return; 7726 7727 sinfo->rx_duration = arsta->rx_duration; 7728 sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION; 7729 7730 if (!arsta->txrate.legacy && !arsta->txrate.nss) 7731 return; 7732 7733 if (arsta->txrate.legacy) { 7734 sinfo->txrate.legacy = arsta->txrate.legacy; 7735 } else { 7736 sinfo->txrate.mcs = arsta->txrate.mcs; 7737 sinfo->txrate.nss = arsta->txrate.nss; 7738 sinfo->txrate.bw = arsta->txrate.bw; 7739 } 7740 sinfo->txrate.flags = arsta->txrate.flags; 7741 sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE; 7742 } 7743 7744 static const struct ieee80211_ops ath10k_ops = { 7745 .tx = ath10k_mac_op_tx, 7746 .wake_tx_queue = ath10k_mac_op_wake_tx_queue, 7747 .start = ath10k_start, 7748 .stop = ath10k_stop, 7749 .config = ath10k_config, 7750 .add_interface = ath10k_add_interface, 7751 .remove_interface = ath10k_remove_interface, 7752 .configure_filter = ath10k_configure_filter, 7753 .bss_info_changed = ath10k_bss_info_changed, 7754 .set_coverage_class = ath10k_mac_op_set_coverage_class, 7755 .hw_scan = ath10k_hw_scan, 7756 .cancel_hw_scan = ath10k_cancel_hw_scan, 7757 .set_key = ath10k_set_key, 7758 .set_default_unicast_key = ath10k_set_default_unicast_key, 7759 .sta_state = ath10k_sta_state, 7760 .conf_tx = ath10k_conf_tx, 7761 .remain_on_channel = ath10k_remain_on_channel, 7762 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, 7763 .set_rts_threshold = ath10k_set_rts_threshold, 7764 .set_frag_threshold = ath10k_mac_op_set_frag_threshold, 7765 .flush = ath10k_flush, 7766 
.tx_last_beacon = ath10k_tx_last_beacon, 7767 .set_antenna = ath10k_set_antenna, 7768 .get_antenna = ath10k_get_antenna, 7769 .reconfig_complete = ath10k_reconfig_complete, 7770 .get_survey = ath10k_get_survey, 7771 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, 7772 .sta_rc_update = ath10k_sta_rc_update, 7773 .offset_tsf = ath10k_offset_tsf, 7774 .ampdu_action = ath10k_ampdu_action, 7775 .get_et_sset_count = ath10k_debug_get_et_sset_count, 7776 .get_et_stats = ath10k_debug_get_et_stats, 7777 .get_et_strings = ath10k_debug_get_et_strings, 7778 .add_chanctx = ath10k_mac_op_add_chanctx, 7779 .remove_chanctx = ath10k_mac_op_remove_chanctx, 7780 .change_chanctx = ath10k_mac_op_change_chanctx, 7781 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, 7782 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, 7783 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, 7784 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, 7785 .sta_statistics = ath10k_sta_statistics, 7786 7787 CFG80211_TESTMODE_CMD(ath10k_tm_cmd) 7788 7789 #ifdef CONFIG_PM 7790 .suspend = ath10k_wow_op_suspend, 7791 .resume = ath10k_wow_op_resume, 7792 .set_wakeup = ath10k_wow_op_set_wakeup, 7793 #endif 7794 #ifdef CONFIG_MAC80211_DEBUGFS 7795 .sta_add_debugfs = ath10k_sta_add_debugfs, 7796 #endif 7797 }; 7798 7799 #define CHAN2G(_channel, _freq, _flags) { \ 7800 .band = NL80211_BAND_2GHZ, \ 7801 .hw_value = (_channel), \ 7802 .center_freq = (_freq), \ 7803 .flags = (_flags), \ 7804 .max_antenna_gain = 0, \ 7805 .max_power = 30, \ 7806 } 7807 7808 #define CHAN5G(_channel, _freq, _flags) { \ 7809 .band = NL80211_BAND_5GHZ, \ 7810 .hw_value = (_channel), \ 7811 .center_freq = (_freq), \ 7812 .flags = (_flags), \ 7813 .max_antenna_gain = 0, \ 7814 .max_power = 30, \ 7815 } 7816 7817 static const struct ieee80211_channel ath10k_2ghz_channels[] = { 7818 CHAN2G(1, 2412, 0), 7819 CHAN2G(2, 2417, 0), 7820 CHAN2G(3, 2422, 0), 7821 CHAN2G(4, 2427, 0), 7822 CHAN2G(5, 2432, 0), 7823 CHAN2G(6, 2437, 0), 7824 CHAN2G(7, 2442, 0), 7825 CHAN2G(8, 2447, 0), 7826 CHAN2G(9, 2452, 0), 7827 CHAN2G(10, 2457, 0), 7828 CHAN2G(11, 2462, 0), 7829 CHAN2G(12, 2467, 0), 7830 CHAN2G(13, 2472, 0), 7831 CHAN2G(14, 2484, 0), 7832 }; 7833 7834 static const struct ieee80211_channel ath10k_5ghz_channels[] = { 7835 CHAN5G(36, 5180, 0), 7836 CHAN5G(40, 5200, 0), 7837 CHAN5G(44, 5220, 0), 7838 CHAN5G(48, 5240, 0), 7839 CHAN5G(52, 5260, 0), 7840 CHAN5G(56, 5280, 0), 7841 CHAN5G(60, 5300, 0), 7842 CHAN5G(64, 5320, 0), 7843 CHAN5G(100, 5500, 0), 7844 CHAN5G(104, 5520, 0), 7845 CHAN5G(108, 5540, 0), 7846 CHAN5G(112, 5560, 0), 7847 CHAN5G(116, 5580, 0), 7848 CHAN5G(120, 5600, 0), 7849 CHAN5G(124, 5620, 0), 7850 CHAN5G(128, 5640, 0), 7851 CHAN5G(132, 5660, 0), 7852 CHAN5G(136, 5680, 0), 7853 CHAN5G(140, 5700, 0), 7854 CHAN5G(144, 5720, 0), 7855 CHAN5G(149, 5745, 0), 7856 CHAN5G(153, 5765, 0), 7857 CHAN5G(157, 5785, 0), 7858 CHAN5G(161, 5805, 0), 7859 CHAN5G(165, 5825, 0), 7860 CHAN5G(169, 5845, 0), 7861 }; 7862 7863 struct ath10k *ath10k_mac_create(size_t priv_size) 7864 { 7865 struct ieee80211_hw *hw; 7866 struct ieee80211_ops *ops; 7867 struct ath10k *ar; 7868 7869 ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); 7870 if (!ops) 7871 return NULL; 7872 7873 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops); 7874 if (!hw) { 7875 kfree(ops); 7876 return NULL; 7877 } 7878 7879 ar = hw->priv; 7880 ar->hw = hw; 7881 ar->ops = ops; 7882 7883 return ar; 7884 } 7885 7886 void ath10k_mac_destroy(struct ath10k *ar) 7887 { 7888 struct 
ieee80211_ops *ops = ar->ops; 7889 7890 ieee80211_free_hw(ar->hw); 7891 kfree(ops); 7892 } 7893 7894 static const struct ieee80211_iface_limit ath10k_if_limits[] = { 7895 { 7896 .max = 8, 7897 .types = BIT(NL80211_IFTYPE_STATION) 7898 | BIT(NL80211_IFTYPE_P2P_CLIENT) 7899 }, 7900 { 7901 .max = 3, 7902 .types = BIT(NL80211_IFTYPE_P2P_GO) 7903 }, 7904 { 7905 .max = 1, 7906 .types = BIT(NL80211_IFTYPE_P2P_DEVICE) 7907 }, 7908 { 7909 .max = 7, 7910 .types = BIT(NL80211_IFTYPE_AP) 7911 #ifdef CONFIG_MAC80211_MESH 7912 | BIT(NL80211_IFTYPE_MESH_POINT) 7913 #endif 7914 }, 7915 }; 7916 7917 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = { 7918 { 7919 .max = 8, 7920 .types = BIT(NL80211_IFTYPE_AP) 7921 #ifdef CONFIG_MAC80211_MESH 7922 | BIT(NL80211_IFTYPE_MESH_POINT) 7923 #endif 7924 }, 7925 { 7926 .max = 1, 7927 .types = BIT(NL80211_IFTYPE_STATION) 7928 }, 7929 }; 7930 7931 static const struct ieee80211_iface_combination ath10k_if_comb[] = { 7932 { 7933 .limits = ath10k_if_limits, 7934 .n_limits = ARRAY_SIZE(ath10k_if_limits), 7935 .max_interfaces = 8, 7936 .num_different_channels = 1, 7937 .beacon_int_infra_match = true, 7938 }, 7939 }; 7940 7941 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = { 7942 { 7943 .limits = ath10k_10x_if_limits, 7944 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits), 7945 .max_interfaces = 8, 7946 .num_different_channels = 1, 7947 .beacon_int_infra_match = true, 7948 .beacon_int_min_gcd = 1, 7949 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 7950 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 7951 BIT(NL80211_CHAN_WIDTH_20) | 7952 BIT(NL80211_CHAN_WIDTH_40) | 7953 BIT(NL80211_CHAN_WIDTH_80), 7954 #endif 7955 }, 7956 }; 7957 7958 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = { 7959 { 7960 .max = 2, 7961 .types = BIT(NL80211_IFTYPE_STATION), 7962 }, 7963 { 7964 .max = 2, 7965 .types = BIT(NL80211_IFTYPE_AP) | 7966 #ifdef CONFIG_MAC80211_MESH 7967 BIT(NL80211_IFTYPE_MESH_POINT) | 7968 #endif 7969 BIT(NL80211_IFTYPE_P2P_CLIENT) | 7970 BIT(NL80211_IFTYPE_P2P_GO), 7971 }, 7972 { 7973 .max = 1, 7974 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 7975 }, 7976 }; 7977 7978 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = { 7979 { 7980 .max = 2, 7981 .types = BIT(NL80211_IFTYPE_STATION), 7982 }, 7983 { 7984 .max = 2, 7985 .types = BIT(NL80211_IFTYPE_P2P_CLIENT), 7986 }, 7987 { 7988 .max = 1, 7989 .types = BIT(NL80211_IFTYPE_AP) | 7990 #ifdef CONFIG_MAC80211_MESH 7991 BIT(NL80211_IFTYPE_MESH_POINT) | 7992 #endif 7993 BIT(NL80211_IFTYPE_P2P_GO), 7994 }, 7995 { 7996 .max = 1, 7997 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 7998 }, 7999 }; 8000 8001 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = { 8002 { 8003 .max = 1, 8004 .types = BIT(NL80211_IFTYPE_STATION), 8005 }, 8006 { 8007 .max = 1, 8008 .types = BIT(NL80211_IFTYPE_ADHOC), 8009 }, 8010 }; 8011 8012 /* FIXME: This is not thouroughly tested. These combinations may over- or 8013 * underestimate hw/fw capabilities. 
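 *
 * As an example, the first combination below admits, say, two
 * stations, one AP and one P2P device on a single channel (four
 * interfaces in total) while staying within each listed limit.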
8014 */ 8015 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { 8016 { 8017 .limits = ath10k_tlv_if_limit, 8018 .num_different_channels = 1, 8019 .max_interfaces = 4, 8020 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 8021 }, 8022 { 8023 .limits = ath10k_tlv_if_limit_ibss, 8024 .num_different_channels = 1, 8025 .max_interfaces = 2, 8026 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 8027 }, 8028 }; 8029 8030 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { 8031 { 8032 .limits = ath10k_tlv_if_limit, 8033 .num_different_channels = 1, 8034 .max_interfaces = 4, 8035 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 8036 }, 8037 { 8038 .limits = ath10k_tlv_qcs_if_limit, 8039 .num_different_channels = 2, 8040 .max_interfaces = 4, 8041 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit), 8042 }, 8043 { 8044 .limits = ath10k_tlv_if_limit_ibss, 8045 .num_different_channels = 1, 8046 .max_interfaces = 2, 8047 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 8048 }, 8049 }; 8050 8051 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = { 8052 { 8053 .max = 1, 8054 .types = BIT(NL80211_IFTYPE_STATION), 8055 }, 8056 { 8057 .max = 16, 8058 .types = BIT(NL80211_IFTYPE_AP) 8059 #ifdef CONFIG_MAC80211_MESH 8060 | BIT(NL80211_IFTYPE_MESH_POINT) 8061 #endif 8062 }, 8063 }; 8064 8065 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { 8066 { 8067 .limits = ath10k_10_4_if_limits, 8068 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), 8069 .max_interfaces = 16, 8070 .num_different_channels = 1, 8071 .beacon_int_infra_match = true, 8072 .beacon_int_min_gcd = 1, 8073 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 8074 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 8075 BIT(NL80211_CHAN_WIDTH_20) | 8076 BIT(NL80211_CHAN_WIDTH_40) | 8077 BIT(NL80211_CHAN_WIDTH_80), 8078 #endif 8079 }, 8080 }; 8081 8082 static void ath10k_get_arvif_iter(void *data, u8 *mac, 8083 struct ieee80211_vif *vif) 8084 { 8085 struct ath10k_vif_iter *arvif_iter = data; 8086 struct ath10k_vif *arvif = (void *)vif->drv_priv; 8087 8088 if (arvif->vdev_id == arvif_iter->vdev_id) 8089 arvif_iter->arvif = arvif; 8090 } 8091 8092 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) 8093 { 8094 struct ath10k_vif_iter arvif_iter; 8095 u32 flags; 8096 8097 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); 8098 arvif_iter.vdev_id = vdev_id; 8099 8100 flags = IEEE80211_IFACE_ITER_RESUME_ALL; 8101 ieee80211_iterate_active_interfaces_atomic(ar->hw, 8102 flags, 8103 ath10k_get_arvif_iter, 8104 &arvif_iter); 8105 if (!arvif_iter.arvif) { 8106 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); 8107 return NULL; 8108 } 8109 8110 return arvif_iter.arvif; 8111 } 8112 8113 #define WRD_METHOD "WRDD" 8114 #define WRDD_WIFI (0x07) 8115 8116 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd) 8117 { 8118 union acpi_object *mcc_pkg; 8119 union acpi_object *domain_type; 8120 union acpi_object *mcc_value; 8121 u32 i; 8122 8123 if (wrdd->type != ACPI_TYPE_PACKAGE || 8124 wrdd->package.count < 2 || 8125 wrdd->package.elements[0].type != ACPI_TYPE_INTEGER || 8126 wrdd->package.elements[0].integer.value != 0) { 8127 ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n"); 8128 return 0; 8129 } 8130 8131 for (i = 1; i < wrdd->package.count; ++i) { 8132 mcc_pkg = &wrdd->package.elements[i]; 8133 8134 if (mcc_pkg->type != ACPI_TYPE_PACKAGE) 8135 continue; 8136 if (mcc_pkg->package.count < 2) 8137 continue; 8138 if (mcc_pkg->package.elements[0].type != 

#define WRD_METHOD "WRDD"
#define WRDD_WIFI (0x07)

static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
{
	union acpi_object *mcc_pkg;
	union acpi_object *domain_type;
	union acpi_object *mcc_value;
	u32 i;

	if (wrdd->type != ACPI_TYPE_PACKAGE ||
	    wrdd->package.count < 2 ||
	    wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
	    wrdd->package.elements[0].integer.value != 0) {
		ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
		return 0;
	}

	for (i = 1; i < wrdd->package.count; ++i) {
		mcc_pkg = &wrdd->package.elements[i];

		if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
			continue;
		if (mcc_pkg->package.count < 2)
			continue;
		if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
		    mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
			continue;

		domain_type = &mcc_pkg->package.elements[0];
		if (domain_type->integer.value != WRDD_WIFI)
			continue;

		mcc_value = &mcc_pkg->package.elements[1];
		return mcc_value->integer.value;
	}
	return 0;
}

static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
{
	struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev);
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	u32 alpha2_code;
	char alpha2[3];

	root_handle = ACPI_HANDLE(&pdev->dev);
	if (!root_handle)
		return -EOPNOTSUPP;

	status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to get wrd method %d\n", status);
		return -EIO;
	}

	status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to call wrdd %d\n", status);
		return -EIO;
	}

	alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
	kfree(wrdd.pointer);
	if (!alpha2_code)
		return -EIO;

	alpha2[0] = (alpha2_code >> 8) & 0xff;
	alpha2[1] = (alpha2_code >> 0) & 0xff;
	alpha2[2] = '\0';

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);

	*rd = ath_regd_find_country_by_name(alpha2);
	if (*rd == 0xffff)
		return -EIO;

	*rd |= COUNTRY_ERD_FLAG;
	return 0;
}

static int ath10k_mac_init_rd(struct ath10k *ar)
{
	int ret;
	u16 rd;

	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
	if (ret) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "fallback to eeprom programmed regulatory settings\n");
		rd = ar->hw_eeprom_rd;
	}

	ar->ath_common.regulatory.current_rd = rd;
	return 0;
}

int ath10k_mac_register(struct ath10k *ar)
{
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,

		/* Do not add hardware supported ciphers before this line.
		 * Allow software encryption for all chips. Don't forget to
		 * update n_cipher_suites below.
		 */
		WLAN_CIPHER_SUITE_AES_CMAC,
		WLAN_CIPHER_SUITE_BIP_CMAC_256,
		WLAN_CIPHER_SUITE_BIP_GMAC_128,
		WLAN_CIPHER_SUITE_BIP_GMAC_256,

		/* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
		 * and CCMP-256 in hardware.
		 */
		WLAN_CIPHER_SUITE_GCMP,
		WLAN_CIPHER_SUITE_GCMP_256,
		WLAN_CIPHER_SUITE_CCMP_256,
	};
	struct ieee80211_supported_band *band;
	void *channels;
	int ret;

	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);

	SET_IEEE80211_DEV(ar->hw, ar->dev);

	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
		     ATH10K_NUM_CHANS);

	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
		channels = kmemdup(ath10k_2ghz_channels,
				   sizeof(ath10k_2ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
		band->channels = channels;

		if (ar->hw_params.cck_rate_map_rev2) {
			band->n_bitrates = ath10k_g_rates_rev2_size;
			band->bitrates = ath10k_g_rates_rev2;
		} else {
			band->n_bitrates = ath10k_g_rates_size;
			band->bitrates = ath10k_g_rates;
		}

		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
	}

	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
		channels = kmemdup(ath10k_5ghz_channels,
				   sizeof(ath10k_5ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
		band->channels = channels;
		band->n_bitrates = ath10k_a_rates_size;
		band->bitrates = ath10k_a_rates;
		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
	}

	ath10k_mac_setup_ht_vht_cap(ar);

	ar->hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;

	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
		ar->hw->wiphy->interface_modes |=
			BIT(NL80211_IFTYPE_P2P_DEVICE) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO);

	ieee80211_hw_set(ar->hw, SIGNAL_DBM);

	if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
		      ar->running_fw->fw_file.fw_features)) {
		ieee80211_hw_set(ar->hw, SUPPORTS_PS);
		ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
	}

	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(ar->hw, AP_LINK_PS);
	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);

	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;

	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
		ieee80211_hw_set(ar->hw,
				 TX_AMPDU_SETUP_IN_HW);
	}

	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;

	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
	ar->hw->txq_data_size = sizeof(struct ath10k_txq);

	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;

	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;

		/* Firmware delivers WPS/P2P Probe Request frames to the driver
		 * so that userspace (e.g. wpa_supplicant/hostapd) can generate
		 * correct Probe Responses. Advertising the offload flags below
		 * is therefore something of a hack.
		 */
		ar->hw->wiphy->probe_resp_offload |=
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	}

	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
	    test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
		if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
			ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
	}

	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
		ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);

	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	ar->hw->wiphy->max_remain_on_channel_duration = 5000;

	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
				   NL80211_FEATURE_AP_SCAN;

	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;

	ret = ath10k_wow_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to init wow: %d\n", ret);
		goto err_free;
	}

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
	wiphy_ext_feature_set(ar->hw->wiphy,
			      NL80211_EXT_FEATURE_SET_SCAN_DWELL);

	/* On LL hardware the queues are managed entirely by the firmware,
	 * so we only advertise to mac80211 that we can handle the maximum
	 * number of queues.
	 */
	ar->hw->queues = IEEE80211_MAX_QUEUES;

	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
	 * something that vdev_ids can't reach so that we don't stop the queue
	 * accidentally.
	 */
	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
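
	/* Illustrative note (not driver logic): mac80211 currently defines
	 * IEEE80211_MAX_QUEUES as 16, so with the two assignments above a
	 * vdev with vdev_id N transmits on hw queue N while off-channel
	 * frames go to queue 15, an index no valid vdev_id should reach.
	 */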

	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_if_comb);
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_tlv_qcs_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
		} else {
			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_if_comb);
		}
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
	case ATH10K_FW_WMI_OP_VERSION_10_2:
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10x_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10_4_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		WARN_ON(1);
		ret = -EINVAL;
		goto err_free;
	}

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ar->hw->netdev_features = NETIF_F_HW_CSUM;

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
		/* Init ath dfs pattern detector */
		ar->ath_common.debug_mask = ATH_DBG_DFS;
		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
							     NL80211_DFS_UNSET);

		if (!ar->dfs_detector)
			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
	}

	ret = ath10k_mac_init_rd(ar);
	if (ret) {
		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	/* Disable set_coverage_class for chipsets that do not support it. */
	if (!ar->hw_params.hw_ops->set_coverage_class)
		ar->ops->set_coverage_class = NULL;

	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
			    ath10k_reg_notifier);
	if (ret) {
		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
		goto err_dfs_detector_exit;
	}

	if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
		ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
		if (ret) {
			ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
			goto err_dfs_detector_exit;
		}

		ar->hw->wiphy->features |=
			NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
	}

	ar->hw->wiphy->cipher_suites = cipher_suites;

	/* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
	 * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported
	 * from chip specific hw_param table.
	 */
	if (!ar->hw_params.n_cipher_suites ||
	    ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
		ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
			   ar->hw_params.n_cipher_suites);
		ar->hw_params.n_cipher_suites = 8;
	}
	ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(ar->hw);
	if (ret) {
		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
		ret = regulatory_hint(ar->hw->wiphy,
				      ar->ath_common.regulatory.alpha2);
		if (ret)
			goto err_unregister;
	}

	return 0;

err_unregister:
	ieee80211_unregister_hw(ar->hw);

err_dfs_detector_exit:
	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

err_free:
	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
	return ret;
}

void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}
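
/* Usage sketch (illustrative only, not part of the driver): the register and
 * unregister entry points above are expected to be paired by the ath10k core
 * once the firmware and its WMI service map are known, roughly as follows.
 * The function names in this sketch are hypothetical:
 *
 *	static int example_bringup(struct ath10k *ar)
 *	{
 *		int ret;
 *
 *		ret = ath10k_mac_register(ar);
 *		if (ret)
 *			ath10k_err(ar, "could not register to mac80211: %d\n",
 *				   ret);
 *		return ret;
 *	}
 *
 *	static void example_teardown(struct ath10k *ar)
 *	{
 *		ath10k_mac_unregister(ar);
 *	}
 *
 * Note that ath10k_mac_register() frees the duplicated band channel arrays
 * itself on failure, so callers must not free them again in their own error
 * paths.
 */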