/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mac.h"

#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/acpi.h>

#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"

/*********/
/* Rates */
/*********/

static struct ieee80211_rate ath10k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};

static struct ieee80211_rate ath10k_rates_rev2[] = {
	{ .bitrate = 10,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};

#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4

#define ath10k_a_rates (ath10k_rates + \
ATH10K_MAC_FIRST_OFDM_RATE_IDX) 95 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \ 96 ATH10K_MAC_FIRST_OFDM_RATE_IDX) 97 #define ath10k_g_rates (ath10k_rates + 0) 98 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) 99 100 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) 101 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) 102 103 static bool ath10k_mac_bitrate_is_cck(int bitrate) 104 { 105 switch (bitrate) { 106 case 10: 107 case 20: 108 case 55: 109 case 110: 110 return true; 111 } 112 113 return false; 114 } 115 116 static u8 ath10k_mac_bitrate_to_rate(int bitrate) 117 { 118 return DIV_ROUND_UP(bitrate, 5) | 119 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0); 120 } 121 122 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, 123 u8 hw_rate, bool cck) 124 { 125 const struct ieee80211_rate *rate; 126 int i; 127 128 for (i = 0; i < sband->n_bitrates; i++) { 129 rate = &sband->bitrates[i]; 130 131 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck) 132 continue; 133 134 if (rate->hw_value == hw_rate) 135 return i; 136 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && 137 rate->hw_value_short == hw_rate) 138 return i; 139 } 140 141 return 0; 142 } 143 144 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, 145 u32 bitrate) 146 { 147 int i; 148 149 for (i = 0; i < sband->n_bitrates; i++) 150 if (sband->bitrates[i].bitrate == bitrate) 151 return i; 152 153 return 0; 154 } 155 156 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) 157 { 158 switch ((mcs_map >> (2 * nss)) & 0x3) { 159 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; 160 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; 161 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; 162 } 163 return 0; 164 } 165 166 static u32 167 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 168 { 169 int nss; 170 171 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) 172 if (ht_mcs_mask[nss]) 173 return nss + 1; 174 175 return 1; 176 } 177 178 static u32 179 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 180 { 181 int nss; 182 183 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) 184 if (vht_mcs_mask[nss]) 185 return nss + 1; 186 187 return 1; 188 } 189 190 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val) 191 { 192 enum wmi_host_platform_type platform_type; 193 int ret; 194 195 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map)) 196 platform_type = WMI_HOST_PLATFORM_LOW_PERF; 197 else 198 platform_type = WMI_HOST_PLATFORM_HIGH_PERF; 199 200 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val); 201 202 if (ret && ret != -EOPNOTSUPP) { 203 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret); 204 return ret; 205 } 206 207 return 0; 208 } 209 210 /**********/ 211 /* Crypto */ 212 /**********/ 213 214 static int ath10k_send_key(struct ath10k_vif *arvif, 215 struct ieee80211_key_conf *key, 216 enum set_key_cmd cmd, 217 const u8 *macaddr, u32 flags) 218 { 219 struct ath10k *ar = arvif->ar; 220 struct wmi_vdev_install_key_arg arg = { 221 .vdev_id = arvif->vdev_id, 222 .key_idx = key->keyidx, 223 .key_len = key->keylen, 224 .key_data = key->key, 225 .key_flags = flags, 226 .macaddr = macaddr, 227 }; 228 229 lockdep_assert_held(&arvif->ar->conf_mutex); 230 231 switch (key->cipher) { 232 case WLAN_CIPHER_SUITE_CCMP: 233 arg.key_cipher = WMI_CIPHER_AES_CCM; 234 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; 235 break; 236 case 
WLAN_CIPHER_SUITE_TKIP: 237 arg.key_cipher = WMI_CIPHER_TKIP; 238 arg.key_txmic_len = 8; 239 arg.key_rxmic_len = 8; 240 break; 241 case WLAN_CIPHER_SUITE_WEP40: 242 case WLAN_CIPHER_SUITE_WEP104: 243 arg.key_cipher = WMI_CIPHER_WEP; 244 break; 245 case WLAN_CIPHER_SUITE_CCMP_256: 246 arg.key_cipher = WMI_CIPHER_AES_CCM; 247 break; 248 case WLAN_CIPHER_SUITE_GCMP: 249 case WLAN_CIPHER_SUITE_GCMP_256: 250 arg.key_cipher = WMI_CIPHER_AES_GCM; 251 break; 252 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 253 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 254 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 255 case WLAN_CIPHER_SUITE_AES_CMAC: 256 WARN_ON(1); 257 return -EINVAL; 258 default: 259 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); 260 return -EOPNOTSUPP; 261 } 262 263 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 264 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 265 266 if (cmd == DISABLE_KEY) { 267 arg.key_cipher = WMI_CIPHER_NONE; 268 arg.key_data = NULL; 269 } 270 271 return ath10k_wmi_vdev_install_key(arvif->ar, &arg); 272 } 273 274 static int ath10k_install_key(struct ath10k_vif *arvif, 275 struct ieee80211_key_conf *key, 276 enum set_key_cmd cmd, 277 const u8 *macaddr, u32 flags) 278 { 279 struct ath10k *ar = arvif->ar; 280 int ret; 281 unsigned long time_left; 282 283 lockdep_assert_held(&ar->conf_mutex); 284 285 reinit_completion(&ar->install_key_done); 286 287 if (arvif->nohwcrypt) 288 return 1; 289 290 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags); 291 if (ret) 292 return ret; 293 294 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ); 295 if (time_left == 0) 296 return -ETIMEDOUT; 297 298 return 0; 299 } 300 301 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, 302 const u8 *addr) 303 { 304 struct ath10k *ar = arvif->ar; 305 struct ath10k_peer *peer; 306 int ret; 307 int i; 308 u32 flags; 309 310 lockdep_assert_held(&ar->conf_mutex); 311 312 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP && 313 arvif->vif->type != NL80211_IFTYPE_ADHOC && 314 arvif->vif->type != NL80211_IFTYPE_MESH_POINT)) 315 return -EINVAL; 316 317 spin_lock_bh(&ar->data_lock); 318 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); 319 spin_unlock_bh(&ar->data_lock); 320 321 if (!peer) 322 return -ENOENT; 323 324 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { 325 if (arvif->wep_keys[i] == NULL) 326 continue; 327 328 switch (arvif->vif->type) { 329 case NL80211_IFTYPE_AP: 330 flags = WMI_KEY_PAIRWISE; 331 332 if (arvif->def_wep_key_idx == i) 333 flags |= WMI_KEY_TX_USAGE; 334 335 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 336 SET_KEY, addr, flags); 337 if (ret < 0) 338 return ret; 339 break; 340 case NL80211_IFTYPE_ADHOC: 341 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 342 SET_KEY, addr, 343 WMI_KEY_PAIRWISE); 344 if (ret < 0) 345 return ret; 346 347 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 348 SET_KEY, addr, WMI_KEY_GROUP); 349 if (ret < 0) 350 return ret; 351 break; 352 default: 353 WARN_ON(1); 354 return -EINVAL; 355 } 356 357 spin_lock_bh(&ar->data_lock); 358 peer->keys[i] = arvif->wep_keys[i]; 359 spin_unlock_bh(&ar->data_lock); 360 } 361 362 /* In some cases (notably with static WEP IBSS with multiple keys) 363 * multicast Tx becomes broken. Both pairwise and groupwise keys are 364 * installed already. Using WMI_KEY_TX_USAGE in different combinations 365 * didn't seem help. Using def_keyid vdev parameter seems to be 366 * effective so use that. 367 * 368 * FIXME: Revisit. Perhaps this can be done in a less hacky way. 
 */
	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
		return 0;

	if (arvif->def_wep_key_idx == -1)
		return 0;

	ret = ath10k_wmi_vdev_set_param(arvif->ar,
					arvif->vdev_id,
					arvif->ar->wmi.vdev_param->def_keyid,
					arvif->def_wep_key_idx);
	if (ret) {
		ath10k_warn(ar, "failed to re-set def wpa key idx on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
				  const u8 *addr)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int first_errno = 0;
	int ret;
	int i;
	u32 flags = 0;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] == NULL)
			continue;

		/* key flags are not required to delete the key */
		ret = ath10k_install_key(arvif, peer->keys[i],
					 DISABLE_KEY, addr, flags);
		if (ret < 0 && first_errno == 0)
			first_errno = ret;

		if (ret < 0)
			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
				    i, ret);

		spin_lock_bh(&ar->data_lock);
		peer->keys[i] = NULL;
		spin_unlock_bh(&ar->data_lock);
	}

	return first_errno;
}

bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
				    u8 keyidx)
{
	struct ath10k_peer *peer;
	int i;

	lockdep_assert_held(&ar->data_lock);

	/* We don't know which vdev this peer belongs to,
	 * since WMI doesn't give us that information.
	 *
	 * FIXME: multi-bss needs to be handled.
	 */
	peer = ath10k_peer_find(ar, 0, addr);
	if (!peer)
		return false;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
			return true;
	}

	return false;
}

static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
				 struct ieee80211_key_conf *key)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	u8 addr[ETH_ALEN];
	int first_errno = 0;
	int ret;
	int i;
	u32 flags = 0;

	lockdep_assert_held(&ar->conf_mutex);

	for (;;) {
		/* since ath10k_install_key() sleeps we can't hold data_lock
		 * all the time, so we try to remove the keys incrementally
		 */
		spin_lock_bh(&ar->data_lock);
		i = 0;
		list_for_each_entry(peer, &ar->peers, list) {
			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
				if (peer->keys[i] == key) {
					ether_addr_copy(addr, peer->addr);
					peer->keys[i] = NULL;
					break;
				}
			}

			if (i < ARRAY_SIZE(peer->keys))
				break;
		}
		spin_unlock_bh(&ar->data_lock);

		if (i == ARRAY_SIZE(peer->keys))
			break;
		/* key flags are not required to delete the key */
		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
		if (ret < 0 && first_errno == 0)
			first_errno = ret;

		if (ret)
			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
				    addr, ret);
	}

	return first_errno;
}

static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
					 struct ieee80211_key_conf *key)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(peer, &ar->peers, list) {
		if (ether_addr_equal(peer->addr, arvif->vif->addr))
			continue;

		if
(ether_addr_equal(peer->addr, arvif->bssid)) 517 continue; 518 519 if (peer->keys[key->keyidx] == key) 520 continue; 521 522 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n", 523 arvif->vdev_id, key->keyidx); 524 525 ret = ath10k_install_peer_wep_keys(arvif, peer->addr); 526 if (ret) { 527 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n", 528 arvif->vdev_id, peer->addr, ret); 529 return ret; 530 } 531 } 532 533 return 0; 534 } 535 536 /*********************/ 537 /* General utilities */ 538 /*********************/ 539 540 static inline enum wmi_phy_mode 541 chan_to_phymode(const struct cfg80211_chan_def *chandef) 542 { 543 enum wmi_phy_mode phymode = MODE_UNKNOWN; 544 545 switch (chandef->chan->band) { 546 case NL80211_BAND_2GHZ: 547 switch (chandef->width) { 548 case NL80211_CHAN_WIDTH_20_NOHT: 549 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) 550 phymode = MODE_11B; 551 else 552 phymode = MODE_11G; 553 break; 554 case NL80211_CHAN_WIDTH_20: 555 phymode = MODE_11NG_HT20; 556 break; 557 case NL80211_CHAN_WIDTH_40: 558 phymode = MODE_11NG_HT40; 559 break; 560 case NL80211_CHAN_WIDTH_5: 561 case NL80211_CHAN_WIDTH_10: 562 case NL80211_CHAN_WIDTH_80: 563 case NL80211_CHAN_WIDTH_80P80: 564 case NL80211_CHAN_WIDTH_160: 565 phymode = MODE_UNKNOWN; 566 break; 567 } 568 break; 569 case NL80211_BAND_5GHZ: 570 switch (chandef->width) { 571 case NL80211_CHAN_WIDTH_20_NOHT: 572 phymode = MODE_11A; 573 break; 574 case NL80211_CHAN_WIDTH_20: 575 phymode = MODE_11NA_HT20; 576 break; 577 case NL80211_CHAN_WIDTH_40: 578 phymode = MODE_11NA_HT40; 579 break; 580 case NL80211_CHAN_WIDTH_80: 581 phymode = MODE_11AC_VHT80; 582 break; 583 case NL80211_CHAN_WIDTH_160: 584 phymode = MODE_11AC_VHT160; 585 break; 586 case NL80211_CHAN_WIDTH_80P80: 587 phymode = MODE_11AC_VHT80_80; 588 break; 589 case NL80211_CHAN_WIDTH_5: 590 case NL80211_CHAN_WIDTH_10: 591 phymode = MODE_UNKNOWN; 592 break; 593 } 594 break; 595 default: 596 break; 597 } 598 599 WARN_ON(phymode == MODE_UNKNOWN); 600 return phymode; 601 } 602 603 static u8 ath10k_parse_mpdudensity(u8 mpdudensity) 604 { 605 /* 606 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 607 * 0 for no restriction 608 * 1 for 1/4 us 609 * 2 for 1/2 us 610 * 3 for 1 us 611 * 4 for 2 us 612 * 5 for 4 us 613 * 6 for 8 us 614 * 7 for 16 us 615 */ 616 switch (mpdudensity) { 617 case 0: 618 return 0; 619 case 1: 620 case 2: 621 case 3: 622 /* Our lower layer calculations limit our precision to 623 * 1 microsecond 624 */ 625 return 1; 626 case 4: 627 return 2; 628 case 5: 629 return 4; 630 case 6: 631 return 8; 632 case 7: 633 return 16; 634 default: 635 return 0; 636 } 637 } 638 639 int ath10k_mac_vif_chan(struct ieee80211_vif *vif, 640 struct cfg80211_chan_def *def) 641 { 642 struct ieee80211_chanctx_conf *conf; 643 644 rcu_read_lock(); 645 conf = rcu_dereference(vif->chanctx_conf); 646 if (!conf) { 647 rcu_read_unlock(); 648 return -ENOENT; 649 } 650 651 *def = conf->def; 652 rcu_read_unlock(); 653 654 return 0; 655 } 656 657 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw, 658 struct ieee80211_chanctx_conf *conf, 659 void *data) 660 { 661 int *num = data; 662 663 (*num)++; 664 } 665 666 static int ath10k_mac_num_chanctxs(struct ath10k *ar) 667 { 668 int num = 0; 669 670 ieee80211_iter_chan_contexts_atomic(ar->hw, 671 ath10k_mac_num_chanctxs_iter, 672 &num); 673 674 return num; 675 } 676 677 static void 678 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, 679 struct 
ieee80211_chanctx_conf *conf, 680 void *data) 681 { 682 struct cfg80211_chan_def **def = data; 683 684 *def = &conf->def; 685 } 686 687 static int ath10k_peer_create(struct ath10k *ar, 688 struct ieee80211_vif *vif, 689 struct ieee80211_sta *sta, 690 u32 vdev_id, 691 const u8 *addr, 692 enum wmi_peer_type peer_type) 693 { 694 struct ath10k_vif *arvif; 695 struct ath10k_peer *peer; 696 int num_peers = 0; 697 int ret; 698 699 lockdep_assert_held(&ar->conf_mutex); 700 701 num_peers = ar->num_peers; 702 703 /* Each vdev consumes a peer entry as well */ 704 list_for_each_entry(arvif, &ar->arvifs, list) 705 num_peers++; 706 707 if (num_peers >= ar->max_num_peers) 708 return -ENOBUFS; 709 710 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); 711 if (ret) { 712 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", 713 addr, vdev_id, ret); 714 return ret; 715 } 716 717 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); 718 if (ret) { 719 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", 720 addr, vdev_id, ret); 721 return ret; 722 } 723 724 spin_lock_bh(&ar->data_lock); 725 726 peer = ath10k_peer_find(ar, vdev_id, addr); 727 if (!peer) { 728 spin_unlock_bh(&ar->data_lock); 729 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", 730 addr, vdev_id); 731 ath10k_wmi_peer_delete(ar, vdev_id, addr); 732 return -ENOENT; 733 } 734 735 peer->vif = vif; 736 peer->sta = sta; 737 738 spin_unlock_bh(&ar->data_lock); 739 740 ar->num_peers++; 741 742 return 0; 743 } 744 745 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) 746 { 747 struct ath10k *ar = arvif->ar; 748 u32 param; 749 int ret; 750 751 param = ar->wmi.pdev_param->sta_kickout_th; 752 ret = ath10k_wmi_pdev_set_param(ar, param, 753 ATH10K_KICKOUT_THRESHOLD); 754 if (ret) { 755 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", 756 arvif->vdev_id, ret); 757 return ret; 758 } 759 760 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs; 761 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 762 ATH10K_KEEPALIVE_MIN_IDLE); 763 if (ret) { 764 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", 765 arvif->vdev_id, ret); 766 return ret; 767 } 768 769 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs; 770 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 771 ATH10K_KEEPALIVE_MAX_IDLE); 772 if (ret) { 773 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", 774 arvif->vdev_id, ret); 775 return ret; 776 } 777 778 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs; 779 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 780 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); 781 if (ret) { 782 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", 783 arvif->vdev_id, ret); 784 return ret; 785 } 786 787 return 0; 788 } 789 790 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) 791 { 792 struct ath10k *ar = arvif->ar; 793 u32 vdev_param; 794 795 vdev_param = ar->wmi.vdev_param->rts_threshold; 796 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); 797 } 798 799 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) 800 { 801 int ret; 802 803 lockdep_assert_held(&ar->conf_mutex); 804 805 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); 806 if (ret) 807 return ret; 808 809 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); 810 if (ret) 811 return ret; 812 813 
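	/* Firmware has acknowledged the peer removal (see
	 * ath10k_wait_for_peer_deleted() above), so drop it from the local
	 * count that ath10k_peer_create() checks against ar->max_num_peers.
	 */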
ar->num_peers--; 814 815 return 0; 816 } 817 818 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) 819 { 820 struct ath10k_peer *peer, *tmp; 821 int peer_id; 822 int i; 823 824 lockdep_assert_held(&ar->conf_mutex); 825 826 spin_lock_bh(&ar->data_lock); 827 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 828 if (peer->vdev_id != vdev_id) 829 continue; 830 831 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", 832 peer->addr, vdev_id); 833 834 for_each_set_bit(peer_id, peer->peer_ids, 835 ATH10K_MAX_NUM_PEER_IDS) { 836 ar->peer_map[peer_id] = NULL; 837 } 838 839 /* Double check that peer is properly un-referenced from 840 * the peer_map 841 */ 842 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 843 if (ar->peer_map[i] == peer) { 844 ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", 845 peer->addr, peer, i); 846 ar->peer_map[i] = NULL; 847 } 848 } 849 850 list_del(&peer->list); 851 kfree(peer); 852 ar->num_peers--; 853 } 854 spin_unlock_bh(&ar->data_lock); 855 } 856 857 static void ath10k_peer_cleanup_all(struct ath10k *ar) 858 { 859 struct ath10k_peer *peer, *tmp; 860 int i; 861 862 lockdep_assert_held(&ar->conf_mutex); 863 864 spin_lock_bh(&ar->data_lock); 865 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 866 list_del(&peer->list); 867 kfree(peer); 868 } 869 870 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) 871 ar->peer_map[i] = NULL; 872 873 spin_unlock_bh(&ar->data_lock); 874 875 ar->num_peers = 0; 876 ar->num_stations = 0; 877 } 878 879 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id, 880 struct ieee80211_sta *sta, 881 enum wmi_tdls_peer_state state) 882 { 883 int ret; 884 struct wmi_tdls_peer_update_cmd_arg arg = {}; 885 struct wmi_tdls_peer_capab_arg cap = {}; 886 struct wmi_channel_arg chan_arg = {}; 887 888 lockdep_assert_held(&ar->conf_mutex); 889 890 arg.vdev_id = vdev_id; 891 arg.peer_state = state; 892 ether_addr_copy(arg.addr, sta->addr); 893 894 cap.peer_max_sp = sta->max_sp; 895 cap.peer_uapsd_queues = sta->uapsd_queues; 896 897 if (state == WMI_TDLS_PEER_STATE_CONNECTED && 898 !sta->tdls_initiator) 899 cap.is_peer_responder = 1; 900 901 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg); 902 if (ret) { 903 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n", 904 arg.addr, vdev_id, ret); 905 return ret; 906 } 907 908 return 0; 909 } 910 911 /************************/ 912 /* Interface management */ 913 /************************/ 914 915 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) 916 { 917 struct ath10k *ar = arvif->ar; 918 919 lockdep_assert_held(&ar->data_lock); 920 921 if (!arvif->beacon) 922 return; 923 924 if (!arvif->beacon_buf) 925 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, 926 arvif->beacon->len, DMA_TO_DEVICE); 927 928 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED && 929 arvif->beacon_state != ATH10K_BEACON_SENT)) 930 return; 931 932 dev_kfree_skb_any(arvif->beacon); 933 934 arvif->beacon = NULL; 935 arvif->beacon_state = ATH10K_BEACON_SCHEDULED; 936 } 937 938 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) 939 { 940 struct ath10k *ar = arvif->ar; 941 942 lockdep_assert_held(&ar->data_lock); 943 944 ath10k_mac_vif_beacon_free(arvif); 945 946 if (arvif->beacon_buf) { 947 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 948 arvif->beacon_buf, arvif->beacon_paddr); 949 arvif->beacon_buf = NULL; 950 } 951 } 952 953 static inline int ath10k_vdev_setup_sync(struct ath10k *ar) 954 { 955 
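	/* WMI vdev start/restart/stop commands complete asynchronously:
	 * firmware acknowledges them with a response event which signals
	 * ar->vdev_setup_done. Callers reinit_completion() before issuing
	 * the command and then use this helper to wait for the response,
	 * e.g. (as in ath10k_monitor_vdev_start() below):
	 *
	 *	reinit_completion(&ar->vdev_setup_done);
	 *	ret = ath10k_wmi_vdev_start(ar, &arg);
	 *	...
	 *	ret = ath10k_vdev_setup_sync(ar);
	 */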
	unsigned long time_left;

	lockdep_assert_held(&ar->conf_mutex);

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		return -ESHUTDOWN;

	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}

static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
	struct cfg80211_chan_def *chandef = NULL;
	struct ieee80211_channel *channel = NULL;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_mac_get_any_chandef_iter,
					    &chandef);
	if (WARN_ON_ONCE(!chandef))
		return -ENOENT;

	channel = chandef->chan;

	arg.vdev_id = vdev_id;
	arg.channel.freq = channel->center_freq;
	arg.channel.band_center_freq1 = chandef->center_freq1;
	arg.channel.band_center_freq2 = chandef->center_freq2;

	/* TODO setup this dynamically, what in case we
	 * don't have any vifs?
	 */
	arg.channel.mode = chan_to_phymode(chandef);
	arg.channel.chan_radar =
			!!(channel->flags & IEEE80211_CHAN_RADAR);

	arg.channel.min_power = 0;
	arg.channel.max_power = channel->max_power * 2;
	arg.channel.max_reg_power = channel->max_reg_power * 2;
	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_start(ar, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
	if (ret) {
		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
			    vdev_id, ret);
		goto vdev_stop;
	}

	ar->monitor_vdev_id = vdev_id;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
		   ar->monitor_vdev_id);
	return 0;

vdev_stop:
	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
			    ar->monitor_vdev_id, ret);

	return ret;
}

static int ath10k_monitor_vdev_stop(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
			    ar->monitor_vdev_id, ret);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ret = ath10k_vdev_setup_sync(ar);
	if (ret)
		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
		   ar->monitor_vdev_id);
	return ret;
}

static int ath10k_monitor_vdev_create(struct ath10k *ar)
{
	int bit, ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->free_vdev_map == 0) {
		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
		return -ENOMEM;
	}
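	/* ar->free_vdev_map is a bitmap of unused vdev ids; pick the lowest
	 * free id for the monitor vdev and mark it as taken (below) once
	 * firmware confirms the creation.
	 */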
1080 1081 bit = __ffs64(ar->free_vdev_map); 1082 1083 ar->monitor_vdev_id = bit; 1084 1085 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, 1086 WMI_VDEV_TYPE_MONITOR, 1087 0, ar->mac_addr); 1088 if (ret) { 1089 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n", 1090 ar->monitor_vdev_id, ret); 1091 return ret; 1092 } 1093 1094 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); 1095 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", 1096 ar->monitor_vdev_id); 1097 1098 return 0; 1099 } 1100 1101 static int ath10k_monitor_vdev_delete(struct ath10k *ar) 1102 { 1103 int ret = 0; 1104 1105 lockdep_assert_held(&ar->conf_mutex); 1106 1107 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); 1108 if (ret) { 1109 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n", 1110 ar->monitor_vdev_id, ret); 1111 return ret; 1112 } 1113 1114 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; 1115 1116 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", 1117 ar->monitor_vdev_id); 1118 return ret; 1119 } 1120 1121 static int ath10k_monitor_start(struct ath10k *ar) 1122 { 1123 int ret; 1124 1125 lockdep_assert_held(&ar->conf_mutex); 1126 1127 ret = ath10k_monitor_vdev_create(ar); 1128 if (ret) { 1129 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); 1130 return ret; 1131 } 1132 1133 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); 1134 if (ret) { 1135 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); 1136 ath10k_monitor_vdev_delete(ar); 1137 return ret; 1138 } 1139 1140 ar->monitor_started = true; 1141 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); 1142 1143 return 0; 1144 } 1145 1146 static int ath10k_monitor_stop(struct ath10k *ar) 1147 { 1148 int ret; 1149 1150 lockdep_assert_held(&ar->conf_mutex); 1151 1152 ret = ath10k_monitor_vdev_stop(ar); 1153 if (ret) { 1154 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); 1155 return ret; 1156 } 1157 1158 ret = ath10k_monitor_vdev_delete(ar); 1159 if (ret) { 1160 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); 1161 return ret; 1162 } 1163 1164 ar->monitor_started = false; 1165 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); 1166 1167 return 0; 1168 } 1169 1170 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar) 1171 { 1172 int num_ctx; 1173 1174 /* At least one chanctx is required to derive a channel to start 1175 * monitor vdev on. 1176 */ 1177 num_ctx = ath10k_mac_num_chanctxs(ar); 1178 if (num_ctx == 0) 1179 return false; 1180 1181 /* If there's already an existing special monitor interface then don't 1182 * bother creating another monitor vdev. 1183 */ 1184 if (ar->monitor_arvif) 1185 return false; 1186 1187 return ar->monitor || 1188 (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST, 1189 ar->running_fw->fw_file.fw_features) && 1190 (ar->filter_flags & FIF_OTHER_BSS)) || 1191 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1192 } 1193 1194 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar) 1195 { 1196 int num_ctx; 1197 1198 num_ctx = ath10k_mac_num_chanctxs(ar); 1199 1200 /* FIXME: Current interface combinations and cfg80211/mac80211 code 1201 * shouldn't allow this but make sure to prevent handling the following 1202 * case anyway since multi-channel DFS hasn't been tested at all. 
 */
	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
		return false;

	return true;
}

static int ath10k_monitor_recalc(struct ath10k *ar)
{
	bool needed;
	bool allowed;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	needed = ath10k_mac_monitor_vdev_is_needed(ar);
	allowed = ath10k_mac_monitor_vdev_is_allowed(ar);

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac monitor recalc started? %d needed? %d allowed? %d\n",
		   ar->monitor_started, needed, allowed);

	if (WARN_ON(needed && !allowed)) {
		if (ar->monitor_started) {
			ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");

			ret = ath10k_monitor_stop(ar);
			if (ret)
				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
					    ret);
				/* not serious */
		}

		return -EPERM;
	}

	if (needed == ar->monitor_started)
		return 0;

	if (needed)
		return ath10k_monitor_start(ar);
	else
		return ath10k_monitor_stop(ar);
}

static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;

	lockdep_assert_held(&ar->conf_mutex);

	if (!arvif->is_started) {
		ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
		return false;
	}

	return true;
}

static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param;

	lockdep_assert_held(&ar->conf_mutex);

	vdev_param = ar->wmi.vdev_param->protection_mode;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
		   arvif->vdev_id, arvif->use_cts_prot);

	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
					 arvif->use_cts_prot ?
1 : 0); 1276 } 1277 1278 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) 1279 { 1280 struct ath10k *ar = arvif->ar; 1281 u32 vdev_param, rts_cts = 0; 1282 1283 lockdep_assert_held(&ar->conf_mutex); 1284 1285 vdev_param = ar->wmi.vdev_param->enable_rtscts; 1286 1287 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET); 1288 1289 if (arvif->num_legacy_stations > 0) 1290 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES, 1291 WMI_RTSCTS_PROFILE); 1292 else 1293 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES, 1294 WMI_RTSCTS_PROFILE); 1295 1296 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n", 1297 arvif->vdev_id, rts_cts); 1298 1299 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1300 rts_cts); 1301 } 1302 1303 static int ath10k_start_cac(struct ath10k *ar) 1304 { 1305 int ret; 1306 1307 lockdep_assert_held(&ar->conf_mutex); 1308 1309 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1310 1311 ret = ath10k_monitor_recalc(ar); 1312 if (ret) { 1313 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); 1314 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1315 return ret; 1316 } 1317 1318 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", 1319 ar->monitor_vdev_id); 1320 1321 return 0; 1322 } 1323 1324 static int ath10k_stop_cac(struct ath10k *ar) 1325 { 1326 lockdep_assert_held(&ar->conf_mutex); 1327 1328 /* CAC is not running - do nothing */ 1329 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) 1330 return 0; 1331 1332 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1333 ath10k_monitor_stop(ar); 1334 1335 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); 1336 1337 return 0; 1338 } 1339 1340 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw, 1341 struct ieee80211_chanctx_conf *conf, 1342 void *data) 1343 { 1344 bool *ret = data; 1345 1346 if (!*ret && conf->radar_enabled) 1347 *ret = true; 1348 } 1349 1350 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar) 1351 { 1352 bool has_radar = false; 1353 1354 ieee80211_iter_chan_contexts_atomic(ar->hw, 1355 ath10k_mac_has_radar_iter, 1356 &has_radar); 1357 1358 return has_radar; 1359 } 1360 1361 static void ath10k_recalc_radar_detection(struct ath10k *ar) 1362 { 1363 int ret; 1364 1365 lockdep_assert_held(&ar->conf_mutex); 1366 1367 ath10k_stop_cac(ar); 1368 1369 if (!ath10k_mac_has_radar_enabled(ar)) 1370 return; 1371 1372 if (ar->num_started_vdevs > 0) 1373 return; 1374 1375 ret = ath10k_start_cac(ar); 1376 if (ret) { 1377 /* 1378 * Not possible to start CAC on current channel so starting 1379 * radiation is not allowed, make this channel DFS_UNAVAILABLE 1380 * by indicating that radar was detected. 
1381 */ 1382 ath10k_warn(ar, "failed to start CAC: %d\n", ret); 1383 ieee80211_radar_detected(ar->hw); 1384 } 1385 } 1386 1387 static int ath10k_vdev_stop(struct ath10k_vif *arvif) 1388 { 1389 struct ath10k *ar = arvif->ar; 1390 int ret; 1391 1392 lockdep_assert_held(&ar->conf_mutex); 1393 1394 reinit_completion(&ar->vdev_setup_done); 1395 1396 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); 1397 if (ret) { 1398 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n", 1399 arvif->vdev_id, ret); 1400 return ret; 1401 } 1402 1403 ret = ath10k_vdev_setup_sync(ar); 1404 if (ret) { 1405 ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n", 1406 arvif->vdev_id, ret); 1407 return ret; 1408 } 1409 1410 WARN_ON(ar->num_started_vdevs == 0); 1411 1412 if (ar->num_started_vdevs != 0) { 1413 ar->num_started_vdevs--; 1414 ath10k_recalc_radar_detection(ar); 1415 } 1416 1417 return ret; 1418 } 1419 1420 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, 1421 const struct cfg80211_chan_def *chandef, 1422 bool restart) 1423 { 1424 struct ath10k *ar = arvif->ar; 1425 struct wmi_vdev_start_request_arg arg = {}; 1426 int ret = 0; 1427 1428 lockdep_assert_held(&ar->conf_mutex); 1429 1430 reinit_completion(&ar->vdev_setup_done); 1431 1432 arg.vdev_id = arvif->vdev_id; 1433 arg.dtim_period = arvif->dtim_period; 1434 arg.bcn_intval = arvif->beacon_interval; 1435 1436 arg.channel.freq = chandef->chan->center_freq; 1437 arg.channel.band_center_freq1 = chandef->center_freq1; 1438 arg.channel.band_center_freq2 = chandef->center_freq2; 1439 arg.channel.mode = chan_to_phymode(chandef); 1440 1441 arg.channel.min_power = 0; 1442 arg.channel.max_power = chandef->chan->max_power * 2; 1443 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; 1444 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; 1445 1446 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 1447 arg.ssid = arvif->u.ap.ssid; 1448 arg.ssid_len = arvif->u.ap.ssid_len; 1449 arg.hidden_ssid = arvif->u.ap.hidden_ssid; 1450 1451 /* For now allow DFS for AP mode */ 1452 arg.channel.chan_radar = 1453 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); 1454 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 1455 arg.ssid = arvif->vif->bss_conf.ssid; 1456 arg.ssid_len = arvif->vif->bss_conf.ssid_len; 1457 } 1458 1459 ath10k_dbg(ar, ATH10K_DBG_MAC, 1460 "mac vdev %d start center_freq %d phymode %s\n", 1461 arg.vdev_id, arg.channel.freq, 1462 ath10k_wmi_phymode_str(arg.channel.mode)); 1463 1464 if (restart) 1465 ret = ath10k_wmi_vdev_restart(ar, &arg); 1466 else 1467 ret = ath10k_wmi_vdev_start(ar, &arg); 1468 1469 if (ret) { 1470 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n", 1471 arg.vdev_id, ret); 1472 return ret; 1473 } 1474 1475 ret = ath10k_vdev_setup_sync(ar); 1476 if (ret) { 1477 ath10k_warn(ar, 1478 "failed to synchronize setup for vdev %i restart %d: %d\n", 1479 arg.vdev_id, restart, ret); 1480 return ret; 1481 } 1482 1483 ar->num_started_vdevs++; 1484 ath10k_recalc_radar_detection(ar); 1485 1486 return ret; 1487 } 1488 1489 static int ath10k_vdev_start(struct ath10k_vif *arvif, 1490 const struct cfg80211_chan_def *def) 1491 { 1492 return ath10k_vdev_start_restart(arvif, def, false); 1493 } 1494 1495 static int ath10k_vdev_restart(struct ath10k_vif *arvif, 1496 const struct cfg80211_chan_def *def) 1497 { 1498 return ath10k_vdev_start_restart(arvif, def, true); 1499 } 1500 1501 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, 1502 struct sk_buff *bcn) 1503 { 1504 struct ath10k *ar = arvif->ar; 1505 struct 
ieee80211_mgmt *mgmt; 1506 const u8 *p2p_ie; 1507 int ret; 1508 1509 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p) 1510 return 0; 1511 1512 mgmt = (void *)bcn->data; 1513 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1514 mgmt->u.beacon.variable, 1515 bcn->len - (mgmt->u.beacon.variable - 1516 bcn->data)); 1517 if (!p2p_ie) 1518 return -ENOENT; 1519 1520 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); 1521 if (ret) { 1522 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", 1523 arvif->vdev_id, ret); 1524 return ret; 1525 } 1526 1527 return 0; 1528 } 1529 1530 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, 1531 u8 oui_type, size_t ie_offset) 1532 { 1533 size_t len; 1534 const u8 *next; 1535 const u8 *end; 1536 u8 *ie; 1537 1538 if (WARN_ON(skb->len < ie_offset)) 1539 return -EINVAL; 1540 1541 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, 1542 skb->data + ie_offset, 1543 skb->len - ie_offset); 1544 if (!ie) 1545 return -ENOENT; 1546 1547 len = ie[1] + 2; 1548 end = skb->data + skb->len; 1549 next = ie + len; 1550 1551 if (WARN_ON(next > end)) 1552 return -EINVAL; 1553 1554 memmove(ie, next, end - next); 1555 skb_trim(skb, skb->len - len); 1556 1557 return 0; 1558 } 1559 1560 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif) 1561 { 1562 struct ath10k *ar = arvif->ar; 1563 struct ieee80211_hw *hw = ar->hw; 1564 struct ieee80211_vif *vif = arvif->vif; 1565 struct ieee80211_mutable_offsets offs = {}; 1566 struct sk_buff *bcn; 1567 int ret; 1568 1569 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1570 return 0; 1571 1572 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 1573 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 1574 return 0; 1575 1576 bcn = ieee80211_beacon_get_template(hw, vif, &offs); 1577 if (!bcn) { 1578 ath10k_warn(ar, "failed to get beacon template from mac80211\n"); 1579 return -EPERM; 1580 } 1581 1582 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn); 1583 if (ret) { 1584 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); 1585 kfree_skb(bcn); 1586 return ret; 1587 } 1588 1589 /* P2P IE is inserted by firmware automatically (as configured above) 1590 * so remove it from the base beacon template to avoid duplicate P2P 1591 * IEs in beacon frames. 
1592 */ 1593 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1594 offsetof(struct ieee80211_mgmt, 1595 u.beacon.variable)); 1596 1597 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, 1598 0, NULL, 0); 1599 kfree_skb(bcn); 1600 1601 if (ret) { 1602 ath10k_warn(ar, "failed to submit beacon template command: %d\n", 1603 ret); 1604 return ret; 1605 } 1606 1607 return 0; 1608 } 1609 1610 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) 1611 { 1612 struct ath10k *ar = arvif->ar; 1613 struct ieee80211_hw *hw = ar->hw; 1614 struct ieee80211_vif *vif = arvif->vif; 1615 struct sk_buff *prb; 1616 int ret; 1617 1618 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1619 return 0; 1620 1621 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1622 return 0; 1623 1624 prb = ieee80211_proberesp_get(hw, vif); 1625 if (!prb) { 1626 ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); 1627 return -EPERM; 1628 } 1629 1630 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); 1631 kfree_skb(prb); 1632 1633 if (ret) { 1634 ath10k_warn(ar, "failed to submit probe resp template command: %d\n", 1635 ret); 1636 return ret; 1637 } 1638 1639 return 0; 1640 } 1641 1642 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif) 1643 { 1644 struct ath10k *ar = arvif->ar; 1645 struct cfg80211_chan_def def; 1646 int ret; 1647 1648 /* When originally vdev is started during assign_vif_chanctx() some 1649 * information is missing, notably SSID. Firmware revisions with beacon 1650 * offloading require the SSID to be provided during vdev (re)start to 1651 * handle hidden SSID properly. 1652 * 1653 * Vdev restart must be done after vdev has been both started and 1654 * upped. Otherwise some firmware revisions (at least 10.2) fail to 1655 * deliver vdev restart response event causing timeouts during vdev 1656 * syncing in ath10k. 1657 * 1658 * Note: The vdev down/up and template reinstallation could be skipped 1659 * since only wmi-tlv firmware are known to have beacon offload and 1660 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart 1661 * response delivery. It's probably more robust to keep it as is. 1662 */ 1663 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1664 return 0; 1665 1666 if (WARN_ON(!arvif->is_started)) 1667 return -EINVAL; 1668 1669 if (WARN_ON(!arvif->is_up)) 1670 return -EINVAL; 1671 1672 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 1673 return -EINVAL; 1674 1675 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1676 if (ret) { 1677 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n", 1678 arvif->vdev_id, ret); 1679 return ret; 1680 } 1681 1682 /* Vdev down reset beacon & presp templates. Reinstall them. Otherwise 1683 * firmware will crash upon vdev up. 
1684 */ 1685 1686 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1687 if (ret) { 1688 ath10k_warn(ar, "failed to update beacon template: %d\n", ret); 1689 return ret; 1690 } 1691 1692 ret = ath10k_mac_setup_prb_tmpl(arvif); 1693 if (ret) { 1694 ath10k_warn(ar, "failed to update presp template: %d\n", ret); 1695 return ret; 1696 } 1697 1698 ret = ath10k_vdev_restart(arvif, &def); 1699 if (ret) { 1700 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n", 1701 arvif->vdev_id, ret); 1702 return ret; 1703 } 1704 1705 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1706 arvif->bssid); 1707 if (ret) { 1708 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n", 1709 arvif->vdev_id, ret); 1710 return ret; 1711 } 1712 1713 return 0; 1714 } 1715 1716 static void ath10k_control_beaconing(struct ath10k_vif *arvif, 1717 struct ieee80211_bss_conf *info) 1718 { 1719 struct ath10k *ar = arvif->ar; 1720 int ret = 0; 1721 1722 lockdep_assert_held(&arvif->ar->conf_mutex); 1723 1724 if (!info->enable_beacon) { 1725 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1726 if (ret) 1727 ath10k_warn(ar, "failed to down vdev_id %i: %d\n", 1728 arvif->vdev_id, ret); 1729 1730 arvif->is_up = false; 1731 1732 spin_lock_bh(&arvif->ar->data_lock); 1733 ath10k_mac_vif_beacon_free(arvif); 1734 spin_unlock_bh(&arvif->ar->data_lock); 1735 1736 return; 1737 } 1738 1739 arvif->tx_seq_no = 0x1000; 1740 1741 arvif->aid = 0; 1742 ether_addr_copy(arvif->bssid, info->bssid); 1743 1744 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1745 arvif->bssid); 1746 if (ret) { 1747 ath10k_warn(ar, "failed to bring up vdev %d: %i\n", 1748 arvif->vdev_id, ret); 1749 return; 1750 } 1751 1752 arvif->is_up = true; 1753 1754 ret = ath10k_mac_vif_fix_hidden_ssid(arvif); 1755 if (ret) { 1756 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n", 1757 arvif->vdev_id, ret); 1758 return; 1759 } 1760 1761 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); 1762 } 1763 1764 static void ath10k_control_ibss(struct ath10k_vif *arvif, 1765 struct ieee80211_bss_conf *info, 1766 const u8 self_peer[ETH_ALEN]) 1767 { 1768 struct ath10k *ar = arvif->ar; 1769 u32 vdev_param; 1770 int ret = 0; 1771 1772 lockdep_assert_held(&arvif->ar->conf_mutex); 1773 1774 if (!info->ibss_joined) { 1775 if (is_zero_ether_addr(arvif->bssid)) 1776 return; 1777 1778 eth_zero_addr(arvif->bssid); 1779 1780 return; 1781 } 1782 1783 vdev_param = arvif->ar->wmi.vdev_param->atim_window; 1784 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, 1785 ATH10K_DEFAULT_ATIM); 1786 if (ret) 1787 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", 1788 arvif->vdev_id, ret); 1789 } 1790 1791 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif) 1792 { 1793 struct ath10k *ar = arvif->ar; 1794 u32 param; 1795 u32 value; 1796 int ret; 1797 1798 lockdep_assert_held(&arvif->ar->conf_mutex); 1799 1800 if (arvif->u.sta.uapsd) 1801 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER; 1802 else 1803 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; 1804 1805 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 1806 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); 1807 if (ret) { 1808 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n", 1809 value, arvif->vdev_id, ret); 1810 return ret; 1811 } 1812 1813 return 0; 1814 } 1815 1816 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) 1817 { 1818 struct ath10k *ar = arvif->ar; 1819 u32 param; 1820 u32 value; 1821 
int ret; 1822 1823 lockdep_assert_held(&arvif->ar->conf_mutex); 1824 1825 if (arvif->u.sta.uapsd) 1826 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD; 1827 else 1828 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; 1829 1830 param = WMI_STA_PS_PARAM_PSPOLL_COUNT; 1831 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 1832 param, value); 1833 if (ret) { 1834 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n", 1835 value, arvif->vdev_id, ret); 1836 return ret; 1837 } 1838 1839 return 0; 1840 } 1841 1842 static int ath10k_mac_num_vifs_started(struct ath10k *ar) 1843 { 1844 struct ath10k_vif *arvif; 1845 int num = 0; 1846 1847 lockdep_assert_held(&ar->conf_mutex); 1848 1849 list_for_each_entry(arvif, &ar->arvifs, list) 1850 if (arvif->is_started) 1851 num++; 1852 1853 return num; 1854 } 1855 1856 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) 1857 { 1858 struct ath10k *ar = arvif->ar; 1859 struct ieee80211_vif *vif = arvif->vif; 1860 struct ieee80211_conf *conf = &ar->hw->conf; 1861 enum wmi_sta_powersave_param param; 1862 enum wmi_sta_ps_mode psmode; 1863 int ret; 1864 int ps_timeout; 1865 bool enable_ps; 1866 1867 lockdep_assert_held(&arvif->ar->conf_mutex); 1868 1869 if (arvif->vif->type != NL80211_IFTYPE_STATION) 1870 return 0; 1871 1872 enable_ps = arvif->ps; 1873 1874 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && 1875 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, 1876 ar->running_fw->fw_file.fw_features)) { 1877 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", 1878 arvif->vdev_id); 1879 enable_ps = false; 1880 } 1881 1882 if (!arvif->is_started) { 1883 /* mac80211 can update vif powersave state while disconnected. 1884 * Firmware doesn't behave nicely and consumes more power than 1885 * necessary if PS is disabled on a non-started vdev. Hence 1886 * force-enable PS for non-running vdevs. 1887 */ 1888 psmode = WMI_STA_PS_MODE_ENABLED; 1889 } else if (enable_ps) { 1890 psmode = WMI_STA_PS_MODE_ENABLED; 1891 param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 1892 1893 ps_timeout = conf->dynamic_ps_timeout; 1894 if (ps_timeout == 0) { 1895 /* Firmware doesn't like 0 */ 1896 ps_timeout = ieee80211_tu_to_usec( 1897 vif->bss_conf.beacon_int) / 1000; 1898 } 1899 1900 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 1901 ps_timeout); 1902 if (ret) { 1903 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", 1904 arvif->vdev_id, ret); 1905 return ret; 1906 } 1907 } else { 1908 psmode = WMI_STA_PS_MODE_DISABLED; 1909 } 1910 1911 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", 1912 arvif->vdev_id, psmode ? "enable" : "disable"); 1913 1914 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); 1915 if (ret) { 1916 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", 1917 psmode, arvif->vdev_id, ret); 1918 return ret; 1919 } 1920 1921 return 0; 1922 } 1923 1924 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif) 1925 { 1926 struct ath10k *ar = arvif->ar; 1927 struct wmi_sta_keepalive_arg arg = {}; 1928 int ret; 1929 1930 lockdep_assert_held(&arvif->ar->conf_mutex); 1931 1932 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 1933 return 0; 1934 1935 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) 1936 return 0; 1937 1938 /* Some firmware revisions have a bug and ignore the `enabled` field. 1939 * Instead use the interval to disable the keepalive. 
1940 */ 1941 arg.vdev_id = arvif->vdev_id; 1942 arg.enabled = 1; 1943 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; 1944 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE; 1945 1946 ret = ath10k_wmi_sta_keepalive(ar, &arg); 1947 if (ret) { 1948 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", 1949 arvif->vdev_id, ret); 1950 return ret; 1951 } 1952 1953 return 0; 1954 } 1955 1956 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif) 1957 { 1958 struct ath10k *ar = arvif->ar; 1959 struct ieee80211_vif *vif = arvif->vif; 1960 int ret; 1961 1962 lockdep_assert_held(&arvif->ar->conf_mutex); 1963 1964 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))) 1965 return; 1966 1967 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1968 return; 1969 1970 if (!vif->csa_active) 1971 return; 1972 1973 if (!arvif->is_up) 1974 return; 1975 1976 if (!ieee80211_csa_is_complete(vif)) { 1977 ieee80211_csa_update_counter(vif); 1978 1979 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1980 if (ret) 1981 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 1982 ret); 1983 1984 ret = ath10k_mac_setup_prb_tmpl(arvif); 1985 if (ret) 1986 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 1987 ret); 1988 } else { 1989 ieee80211_csa_finish(vif); 1990 } 1991 } 1992 1993 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work) 1994 { 1995 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 1996 ap_csa_work); 1997 struct ath10k *ar = arvif->ar; 1998 1999 mutex_lock(&ar->conf_mutex); 2000 ath10k_mac_vif_ap_csa_count_down(arvif); 2001 mutex_unlock(&ar->conf_mutex); 2002 } 2003 2004 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, 2005 struct ieee80211_vif *vif) 2006 { 2007 struct sk_buff *skb = data; 2008 struct ieee80211_mgmt *mgmt = (void *)skb->data; 2009 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2010 2011 if (vif->type != NL80211_IFTYPE_STATION) 2012 return; 2013 2014 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) 2015 return; 2016 2017 cancel_delayed_work(&arvif->connection_loss_work); 2018 } 2019 2020 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb) 2021 { 2022 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2023 IEEE80211_IFACE_ITER_NORMAL, 2024 ath10k_mac_handle_beacon_iter, 2025 skb); 2026 } 2027 2028 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, 2029 struct ieee80211_vif *vif) 2030 { 2031 u32 *vdev_id = data; 2032 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2033 struct ath10k *ar = arvif->ar; 2034 struct ieee80211_hw *hw = ar->hw; 2035 2036 if (arvif->vdev_id != *vdev_id) 2037 return; 2038 2039 if (!arvif->is_up) 2040 return; 2041 2042 ieee80211_beacon_loss(vif); 2043 2044 /* Firmware doesn't report beacon loss events repeatedly. If AP probe 2045 * (done by mac80211) succeeds but beacons do not resume then it 2046 * doesn't make sense to continue operation. Queue connection loss work 2047 * which can be cancelled when beacon is received. 
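	 * ATH10K_CONNECTION_LOSS_HZ below controls how long we wait for
	 * beacons to resume before the queued work declares the connection
	 * lost via ieee80211_connection_loss().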
2048 */ 2049 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, 2050 ATH10K_CONNECTION_LOSS_HZ); 2051 } 2052 2053 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id) 2054 { 2055 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2056 IEEE80211_IFACE_ITER_NORMAL, 2057 ath10k_mac_handle_beacon_miss_iter, 2058 &vdev_id); 2059 } 2060 2061 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work) 2062 { 2063 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 2064 connection_loss_work.work); 2065 struct ieee80211_vif *vif = arvif->vif; 2066 2067 if (!arvif->is_up) 2068 return; 2069 2070 ieee80211_connection_loss(vif); 2071 } 2072 2073 /**********************/ 2074 /* Station management */ 2075 /**********************/ 2076 2077 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, 2078 struct ieee80211_vif *vif) 2079 { 2080 /* Some firmware revisions have unstable STA powersave when listen 2081 * interval is set too high (e.g. 5). The symptoms are firmware doesn't 2082 * generate NullFunc frames properly even if buffered frames have been 2083 * indicated in Beacon TIM. Firmware would seldom wake up to pull 2084 * buffered frames. Often pinging the device from AP would simply fail. 2085 * 2086 * As a workaround set it to 1. 2087 */ 2088 if (vif->type == NL80211_IFTYPE_STATION) 2089 return 1; 2090 2091 return ar->hw->conf.listen_interval; 2092 } 2093 2094 static void ath10k_peer_assoc_h_basic(struct ath10k *ar, 2095 struct ieee80211_vif *vif, 2096 struct ieee80211_sta *sta, 2097 struct wmi_peer_assoc_complete_arg *arg) 2098 { 2099 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2100 u32 aid; 2101 2102 lockdep_assert_held(&ar->conf_mutex); 2103 2104 if (vif->type == NL80211_IFTYPE_STATION) 2105 aid = vif->bss_conf.aid; 2106 else 2107 aid = sta->aid; 2108 2109 ether_addr_copy(arg->addr, sta->addr); 2110 arg->vdev_id = arvif->vdev_id; 2111 arg->peer_aid = aid; 2112 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth; 2113 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); 2114 arg->peer_num_spatial_streams = 1; 2115 arg->peer_caps = vif->bss_conf.assoc_capability; 2116 } 2117 2118 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, 2119 struct ieee80211_vif *vif, 2120 struct ieee80211_sta *sta, 2121 struct wmi_peer_assoc_complete_arg *arg) 2122 { 2123 struct ieee80211_bss_conf *info = &vif->bss_conf; 2124 struct cfg80211_chan_def def; 2125 struct cfg80211_bss *bss; 2126 const u8 *rsnie = NULL; 2127 const u8 *wpaie = NULL; 2128 2129 lockdep_assert_held(&ar->conf_mutex); 2130 2131 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2132 return; 2133 2134 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, 2135 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); 2136 if (bss) { 2137 const struct cfg80211_bss_ies *ies; 2138 2139 rcu_read_lock(); 2140 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); 2141 2142 ies = rcu_dereference(bss->ies); 2143 2144 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 2145 WLAN_OUI_TYPE_MICROSOFT_WPA, 2146 ies->data, 2147 ies->len); 2148 rcu_read_unlock(); 2149 cfg80211_put_bss(ar->hw->wiphy, bss); 2150 } 2151 2152 /* FIXME: base on RSN IE/WPA IE is a correct idea? 
*/ 2153 if (rsnie || wpaie) { 2154 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); 2155 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; 2156 } 2157 2158 if (wpaie) { 2159 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); 2160 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; 2161 } 2162 2163 if (sta->mfp && 2164 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, 2165 ar->running_fw->fw_file.fw_features)) { 2166 arg->peer_flags |= ar->wmi.peer_flags->pmf; 2167 } 2168 } 2169 2170 static void ath10k_peer_assoc_h_rates(struct ath10k *ar, 2171 struct ieee80211_vif *vif, 2172 struct ieee80211_sta *sta, 2173 struct wmi_peer_assoc_complete_arg *arg) 2174 { 2175 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2176 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 2177 struct cfg80211_chan_def def; 2178 const struct ieee80211_supported_band *sband; 2179 const struct ieee80211_rate *rates; 2180 enum nl80211_band band; 2181 u32 ratemask; 2182 u8 rate; 2183 int i; 2184 2185 lockdep_assert_held(&ar->conf_mutex); 2186 2187 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2188 return; 2189 2190 band = def.chan->band; 2191 sband = ar->hw->wiphy->bands[band]; 2192 ratemask = sta->supp_rates[band]; 2193 ratemask &= arvif->bitrate_mask.control[band].legacy; 2194 rates = sband->bitrates; 2195 2196 rateset->num_rates = 0; 2197 2198 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { 2199 if (!(ratemask & 1)) 2200 continue; 2201 2202 rate = ath10k_mac_bitrate_to_rate(rates->bitrate); 2203 rateset->rates[rateset->num_rates] = rate; 2204 rateset->num_rates++; 2205 } 2206 } 2207 2208 static bool 2209 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 2210 { 2211 int nss; 2212 2213 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) 2214 if (ht_mcs_mask[nss]) 2215 return false; 2216 2217 return true; 2218 } 2219 2220 static bool 2221 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 2222 { 2223 int nss; 2224 2225 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) 2226 if (vht_mcs_mask[nss]) 2227 return false; 2228 2229 return true; 2230 } 2231 2232 static void ath10k_peer_assoc_h_ht(struct ath10k *ar, 2233 struct ieee80211_vif *vif, 2234 struct ieee80211_sta *sta, 2235 struct wmi_peer_assoc_complete_arg *arg) 2236 { 2237 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2238 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2239 struct cfg80211_chan_def def; 2240 enum nl80211_band band; 2241 const u8 *ht_mcs_mask; 2242 const u16 *vht_mcs_mask; 2243 int i, n; 2244 u8 max_nss; 2245 u32 stbc; 2246 2247 lockdep_assert_held(&ar->conf_mutex); 2248 2249 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2250 return; 2251 2252 if (!ht_cap->ht_supported) 2253 return; 2254 2255 band = def.chan->band; 2256 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2257 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2258 2259 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) && 2260 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2261 return; 2262 2263 arg->peer_flags |= ar->wmi.peer_flags->ht; 2264 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2265 ht_cap->ampdu_factor)) - 1; 2266 2267 arg->peer_mpdu_density = 2268 ath10k_parse_mpdudensity(ht_cap->ampdu_density); 2269 2270 arg->peer_ht_caps = ht_cap->cap; 2271 arg->peer_rate_caps |= WMI_RC_HT_FLAG; 2272 2273 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 2274 arg->peer_flags |= ar->wmi.peer_flags->ldbc; 2275 2276 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { 2277 
arg->peer_flags |= ar->wmi.peer_flags->bw40; 2278 arg->peer_rate_caps |= WMI_RC_CW40_FLAG; 2279 } 2280 2281 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { 2282 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) 2283 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2284 2285 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) 2286 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2287 } 2288 2289 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { 2290 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; 2291 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2292 } 2293 2294 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { 2295 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; 2296 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; 2297 stbc = stbc << WMI_RC_RX_STBC_FLAG_S; 2298 arg->peer_rate_caps |= stbc; 2299 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2300 } 2301 2302 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 2303 arg->peer_rate_caps |= WMI_RC_TS_FLAG; 2304 else if (ht_cap->mcs.rx_mask[1]) 2305 arg->peer_rate_caps |= WMI_RC_DS_FLAG; 2306 2307 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) 2308 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && 2309 (ht_mcs_mask[i / 8] & BIT(i % 8))) { 2310 max_nss = (i / 8) + 1; 2311 arg->peer_ht_rates.rates[n++] = i; 2312 } 2313 2314 /* 2315 * This is a workaround for HT-enabled STAs which break the spec 2316 * and have no HT capabilities RX mask (no HT RX MCS map). 2317 * 2318 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), 2319 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. 2320 * 2321 * Firmware asserts if such situation occurs. 2322 */ 2323 if (n == 0) { 2324 arg->peer_ht_rates.num_rates = 8; 2325 for (i = 0; i < arg->peer_ht_rates.num_rates; i++) 2326 arg->peer_ht_rates.rates[i] = i; 2327 } else { 2328 arg->peer_ht_rates.num_rates = n; 2329 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2330 } 2331 2332 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 2333 arg->addr, 2334 arg->peer_ht_rates.num_rates, 2335 arg->peer_num_spatial_streams); 2336 } 2337 2338 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, 2339 struct ath10k_vif *arvif, 2340 struct ieee80211_sta *sta) 2341 { 2342 u32 uapsd = 0; 2343 u32 max_sp = 0; 2344 int ret = 0; 2345 2346 lockdep_assert_held(&ar->conf_mutex); 2347 2348 if (sta->wme && sta->uapsd_queues) { 2349 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 2350 sta->uapsd_queues, sta->max_sp); 2351 2352 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2353 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 2354 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 2355 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2356 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | 2357 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; 2358 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2359 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | 2360 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; 2361 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2362 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | 2363 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; 2364 2365 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 2366 max_sp = sta->max_sp; 2367 2368 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2369 sta->addr, 2370 WMI_AP_PS_PEER_PARAM_UAPSD, 2371 uapsd); 2372 if (ret) { 2373 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", 2374 arvif->vdev_id, ret); 2375 return ret; 2376 } 2377 2378 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2379 sta->addr, 2380 
WMI_AP_PS_PEER_PARAM_MAX_SP, 2381 max_sp); 2382 if (ret) { 2383 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", 2384 arvif->vdev_id, ret); 2385 return ret; 2386 } 2387 2388 /* TODO setup this based on STA listen interval and 2389 * beacon interval. Currently we don't know 2390 * sta->listen_interval - mac80211 patch required. 2391 * Currently use 10 seconds 2392 */ 2393 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, 2394 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 2395 10); 2396 if (ret) { 2397 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", 2398 arvif->vdev_id, ret); 2399 return ret; 2400 } 2401 } 2402 2403 return 0; 2404 } 2405 2406 static u16 2407 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set, 2408 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) 2409 { 2410 int idx_limit; 2411 int nss; 2412 u16 mcs_map; 2413 u16 mcs; 2414 2415 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { 2416 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & 2417 vht_mcs_limit[nss]; 2418 2419 if (mcs_map) 2420 idx_limit = fls(mcs_map) - 1; 2421 else 2422 idx_limit = -1; 2423 2424 switch (idx_limit) { 2425 case 0: /* fall through */ 2426 case 1: /* fall through */ 2427 case 2: /* fall through */ 2428 case 3: /* fall through */ 2429 case 4: /* fall through */ 2430 case 5: /* fall through */ 2431 case 6: /* fall through */ 2432 default: 2433 /* see ath10k_mac_can_set_bitrate_mask() */ 2434 WARN_ON(1); 2435 /* fall through */ 2436 case -1: 2437 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; 2438 break; 2439 case 7: 2440 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; 2441 break; 2442 case 8: 2443 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; 2444 break; 2445 case 9: 2446 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; 2447 break; 2448 } 2449 2450 tx_mcs_set &= ~(0x3 << (nss * 2)); 2451 tx_mcs_set |= mcs << (nss * 2); 2452 } 2453 2454 return tx_mcs_set; 2455 } 2456 2457 static void ath10k_peer_assoc_h_vht(struct ath10k *ar, 2458 struct ieee80211_vif *vif, 2459 struct ieee80211_sta *sta, 2460 struct wmi_peer_assoc_complete_arg *arg) 2461 { 2462 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2463 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2464 struct cfg80211_chan_def def; 2465 enum nl80211_band band; 2466 const u16 *vht_mcs_mask; 2467 u8 ampdu_factor; 2468 u8 max_nss, vht_mcs; 2469 int i; 2470 2471 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2472 return; 2473 2474 if (!vht_cap->vht_supported) 2475 return; 2476 2477 band = def.chan->band; 2478 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2479 2480 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2481 return; 2482 2483 arg->peer_flags |= ar->wmi.peer_flags->vht; 2484 2485 if (def.chan->band == NL80211_BAND_2GHZ) 2486 arg->peer_flags |= ar->wmi.peer_flags->vht_2g; 2487 2488 arg->peer_vht_caps = vht_cap->cap; 2489 2490 ampdu_factor = (vht_cap->cap & 2491 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> 2492 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 2493 2494 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to 2495 * zero in VHT IE. Using it would result in degraded throughput. 2496 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep 2497 * it if VHT max_mpdu is smaller. 
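	 *
	 * For reference, the exponent feeds the same formula as the HT case:
	 * (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1, i.e.
	 * 8191 octets for a factor of 0 versus 1048575 octets for the VHT
	 * maximum factor of 7, which is why the larger of the two limits is
	 * kept below.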
	 */
	arg->peer_max_mpdu = max(arg->peer_max_mpdu,
				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
					ampdu_factor)) - 1);

	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
		arg->peer_flags |= ar->wmi.peer_flags->bw80;

	if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
		arg->peer_flags |= ar->wmi.peer_flags->bw160;

	/* Calculate peer NSS capability from VHT capabilities if STA
	 * supports VHT.
	 */
	for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
		vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
			  (2 * i) & 3;

		if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
		    vht_mcs_mask[i])
			max_nss = i + 1;
	}
	arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
	arg->peer_vht_rates.rx_max_rate =
		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
	arg->peer_vht_rates.rx_mcs_set =
		__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
	arg->peer_vht_rates.tx_max_rate =
		__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
	arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);

	if (arg->peer_vht_rates.rx_max_rate &&
	    (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
		switch (arg->peer_vht_rates.rx_max_rate) {
		case 1560:
			/* 2x2 at 160 MHz is all it can do */
			arg->peer_bw_rxnss_override = 2;
			break;
		case 780:
			/* Can only do 1x1 at 160 MHz (long guard interval) */
			arg->peer_bw_rxnss_override = 1;
			break;
		}
	}
}

static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *sta,
				    struct wmi_peer_assoc_complete_arg *arg)
{
	struct ath10k_vif *arvif = (void *)vif->drv_priv;

	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_AP:
		if (sta->wme)
			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;

		if (sta->wme && sta->uapsd_queues) {
			arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
			arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
		}
		break;
	case WMI_VDEV_TYPE_STA:
		if (vif->bss_conf.qos)
			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
		break;
	case WMI_VDEV_TYPE_IBSS:
		if (sta->wme)
			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
		break;
	default:
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
		   sta->addr, !!(arg->peer_flags &
		   arvif->ar->wmi.peer_flags->qos));
}

static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
	return sta->supp_rates[NL80211_BAND_2GHZ] >>
	       ATH10K_MAC_FIRST_OFDM_RATE_IDX;
}

static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
						    struct ieee80211_sta *sta)
{
	if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
		switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
		case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
			return MODE_11AC_VHT160;
		case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
			return MODE_11AC_VHT80_80;
		default:
			/* not sure if this is a valid case?
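			 * (the peer reports 160 MHz RX bandwidth but
			 * advertises neither 160 nor 80+80 support);
			 * fall back to VHT160 rather than MODE_UNKNOWN.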
*/ 2599 return MODE_11AC_VHT160; 2600 } 2601 } 2602 2603 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2604 return MODE_11AC_VHT80; 2605 2606 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2607 return MODE_11AC_VHT40; 2608 2609 if (sta->bandwidth == IEEE80211_STA_RX_BW_20) 2610 return MODE_11AC_VHT20; 2611 2612 return MODE_UNKNOWN; 2613 } 2614 2615 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, 2616 struct ieee80211_vif *vif, 2617 struct ieee80211_sta *sta, 2618 struct wmi_peer_assoc_complete_arg *arg) 2619 { 2620 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2621 struct cfg80211_chan_def def; 2622 enum nl80211_band band; 2623 const u8 *ht_mcs_mask; 2624 const u16 *vht_mcs_mask; 2625 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2626 2627 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2628 return; 2629 2630 band = def.chan->band; 2631 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2632 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2633 2634 switch (band) { 2635 case NL80211_BAND_2GHZ: 2636 if (sta->vht_cap.vht_supported && 2637 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2638 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2639 phymode = MODE_11AC_VHT40; 2640 else 2641 phymode = MODE_11AC_VHT20; 2642 } else if (sta->ht_cap.ht_supported && 2643 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2644 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2645 phymode = MODE_11NG_HT40; 2646 else 2647 phymode = MODE_11NG_HT20; 2648 } else if (ath10k_mac_sta_has_ofdm_only(sta)) { 2649 phymode = MODE_11G; 2650 } else { 2651 phymode = MODE_11B; 2652 } 2653 2654 break; 2655 case NL80211_BAND_5GHZ: 2656 /* 2657 * Check VHT first. 2658 */ 2659 if (sta->vht_cap.vht_supported && 2660 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2661 phymode = ath10k_mac_get_phymode_vht(ar, sta); 2662 } else if (sta->ht_cap.ht_supported && 2663 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2664 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) 2665 phymode = MODE_11NA_HT40; 2666 else 2667 phymode = MODE_11NA_HT20; 2668 } else { 2669 phymode = MODE_11A; 2670 } 2671 2672 break; 2673 default: 2674 break; 2675 } 2676 2677 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", 2678 sta->addr, ath10k_wmi_phymode_str(phymode)); 2679 2680 arg->peer_phymode = phymode; 2681 WARN_ON(phymode == MODE_UNKNOWN); 2682 } 2683 2684 static int ath10k_peer_assoc_prepare(struct ath10k *ar, 2685 struct ieee80211_vif *vif, 2686 struct ieee80211_sta *sta, 2687 struct wmi_peer_assoc_complete_arg *arg) 2688 { 2689 lockdep_assert_held(&ar->conf_mutex); 2690 2691 memset(arg, 0, sizeof(*arg)); 2692 2693 ath10k_peer_assoc_h_basic(ar, vif, sta, arg); 2694 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); 2695 ath10k_peer_assoc_h_rates(ar, vif, sta, arg); 2696 ath10k_peer_assoc_h_ht(ar, vif, sta, arg); 2697 ath10k_peer_assoc_h_vht(ar, vif, sta, arg); 2698 ath10k_peer_assoc_h_qos(ar, vif, sta, arg); 2699 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); 2700 2701 return 0; 2702 } 2703 2704 static const u32 ath10k_smps_map[] = { 2705 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, 2706 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, 2707 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 2708 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 2709 }; 2710 2711 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, 2712 const u8 *addr, 2713 const struct ieee80211_sta_ht_cap *ht_cap) 2714 { 2715 int smps; 2716 2717 if (!ht_cap->ht_supported) 2718 return 0; 2719 2720 smps = ht_cap->cap & 
IEEE80211_HT_CAP_SM_PS; 2721 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2722 2723 if (smps >= ARRAY_SIZE(ath10k_smps_map)) 2724 return -EINVAL; 2725 2726 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, 2727 WMI_PEER_SMPS_STATE, 2728 ath10k_smps_map[smps]); 2729 } 2730 2731 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, 2732 struct ieee80211_vif *vif, 2733 struct ieee80211_sta_vht_cap vht_cap) 2734 { 2735 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2736 int ret; 2737 u32 param; 2738 u32 value; 2739 2740 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) 2741 return 0; 2742 2743 if (!(ar->vht_cap_info & 2744 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2745 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 2746 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2747 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) 2748 return 0; 2749 2750 param = ar->wmi.vdev_param->txbf; 2751 value = 0; 2752 2753 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) 2754 return 0; 2755 2756 /* The following logic is correct. If a remote STA advertises support 2757 * for being a beamformer then we should enable us being a beamformee. 2758 */ 2759 2760 if (ar->vht_cap_info & 2761 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2762 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 2763 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 2764 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2765 2766 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 2767 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; 2768 } 2769 2770 if (ar->vht_cap_info & 2771 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2772 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 2773 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 2774 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2775 2776 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 2777 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; 2778 } 2779 2780 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) 2781 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2782 2783 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) 2784 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2785 2786 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); 2787 if (ret) { 2788 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", 2789 value, ret); 2790 return ret; 2791 } 2792 2793 return 0; 2794 } 2795 2796 /* can be called only in mac80211 callbacks due to `key_count` usage */ 2797 static void ath10k_bss_assoc(struct ieee80211_hw *hw, 2798 struct ieee80211_vif *vif, 2799 struct ieee80211_bss_conf *bss_conf) 2800 { 2801 struct ath10k *ar = hw->priv; 2802 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2803 struct ieee80211_sta_ht_cap ht_cap; 2804 struct ieee80211_sta_vht_cap vht_cap; 2805 struct wmi_peer_assoc_complete_arg peer_arg; 2806 struct ieee80211_sta *ap_sta; 2807 int ret; 2808 2809 lockdep_assert_held(&ar->conf_mutex); 2810 2811 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2812 arvif->vdev_id, arvif->bssid, arvif->aid); 2813 2814 rcu_read_lock(); 2815 2816 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2817 if (!ap_sta) { 2818 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", 2819 bss_conf->bssid, arvif->vdev_id); 2820 rcu_read_unlock(); 2821 return; 2822 } 2823 2824 /* ap_sta must be accessed only within rcu section which must be left 2825 * before calling ath10k_setup_peer_smps() which might sleep. 
2826 */ 2827 ht_cap = ap_sta->ht_cap; 2828 vht_cap = ap_sta->vht_cap; 2829 2830 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); 2831 if (ret) { 2832 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", 2833 bss_conf->bssid, arvif->vdev_id, ret); 2834 rcu_read_unlock(); 2835 return; 2836 } 2837 2838 rcu_read_unlock(); 2839 2840 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2841 if (ret) { 2842 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", 2843 bss_conf->bssid, arvif->vdev_id, ret); 2844 return; 2845 } 2846 2847 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); 2848 if (ret) { 2849 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", 2850 arvif->vdev_id, ret); 2851 return; 2852 } 2853 2854 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2855 if (ret) { 2856 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", 2857 arvif->vdev_id, bss_conf->bssid, ret); 2858 return; 2859 } 2860 2861 ath10k_dbg(ar, ATH10K_DBG_MAC, 2862 "mac vdev %d up (associated) bssid %pM aid %d\n", 2863 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 2864 2865 WARN_ON(arvif->is_up); 2866 2867 arvif->aid = bss_conf->aid; 2868 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2869 2870 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2871 if (ret) { 2872 ath10k_warn(ar, "failed to set vdev %d up: %d\n", 2873 arvif->vdev_id, ret); 2874 return; 2875 } 2876 2877 arvif->is_up = true; 2878 2879 /* Workaround: Some firmware revisions (tested with qca6174 2880 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be 2881 * poked with peer param command. 2882 */ 2883 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, 2884 WMI_PEER_DUMMY_VAR, 1); 2885 if (ret) { 2886 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", 2887 arvif->bssid, arvif->vdev_id, ret); 2888 return; 2889 } 2890 } 2891 2892 static void ath10k_bss_disassoc(struct ieee80211_hw *hw, 2893 struct ieee80211_vif *vif) 2894 { 2895 struct ath10k *ar = hw->priv; 2896 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2897 struct ieee80211_sta_vht_cap vht_cap = {}; 2898 int ret; 2899 2900 lockdep_assert_held(&ar->conf_mutex); 2901 2902 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2903 arvif->vdev_id, arvif->bssid); 2904 2905 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 2906 if (ret) 2907 ath10k_warn(ar, "failed to down vdev %i: %d\n", 2908 arvif->vdev_id, ret); 2909 2910 arvif->def_wep_key_idx = -1; 2911 2912 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2913 if (ret) { 2914 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", 2915 arvif->vdev_id, ret); 2916 return; 2917 } 2918 2919 arvif->is_up = false; 2920 2921 cancel_delayed_work_sync(&arvif->connection_loss_work); 2922 } 2923 2924 static int ath10k_station_assoc(struct ath10k *ar, 2925 struct ieee80211_vif *vif, 2926 struct ieee80211_sta *sta, 2927 bool reassoc) 2928 { 2929 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2930 struct wmi_peer_assoc_complete_arg peer_arg; 2931 int ret = 0; 2932 2933 lockdep_assert_held(&ar->conf_mutex); 2934 2935 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); 2936 if (ret) { 2937 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", 2938 sta->addr, arvif->vdev_id, ret); 2939 return ret; 2940 } 2941 2942 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2943 if (ret) { 2944 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", 2945 
sta->addr, arvif->vdev_id, ret); 2946 return ret; 2947 } 2948 2949 /* Re-assoc is run only to update supported rates for given station. It 2950 * doesn't make much sense to reconfigure the peer completely. 2951 */ 2952 if (!reassoc) { 2953 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, 2954 &sta->ht_cap); 2955 if (ret) { 2956 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", 2957 arvif->vdev_id, ret); 2958 return ret; 2959 } 2960 2961 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 2962 if (ret) { 2963 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", 2964 sta->addr, arvif->vdev_id, ret); 2965 return ret; 2966 } 2967 2968 if (!sta->wme) { 2969 arvif->num_legacy_stations++; 2970 ret = ath10k_recalc_rtscts_prot(arvif); 2971 if (ret) { 2972 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2973 arvif->vdev_id, ret); 2974 return ret; 2975 } 2976 } 2977 2978 /* Plumb cached keys only for static WEP */ 2979 if (arvif->def_wep_key_idx != -1) { 2980 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 2981 if (ret) { 2982 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", 2983 arvif->vdev_id, ret); 2984 return ret; 2985 } 2986 } 2987 } 2988 2989 return ret; 2990 } 2991 2992 static int ath10k_station_disassoc(struct ath10k *ar, 2993 struct ieee80211_vif *vif, 2994 struct ieee80211_sta *sta) 2995 { 2996 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2997 int ret = 0; 2998 2999 lockdep_assert_held(&ar->conf_mutex); 3000 3001 if (!sta->wme) { 3002 arvif->num_legacy_stations--; 3003 ret = ath10k_recalc_rtscts_prot(arvif); 3004 if (ret) { 3005 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 3006 arvif->vdev_id, ret); 3007 return ret; 3008 } 3009 } 3010 3011 ret = ath10k_clear_peer_keys(arvif, sta->addr); 3012 if (ret) { 3013 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", 3014 arvif->vdev_id, ret); 3015 return ret; 3016 } 3017 3018 return ret; 3019 } 3020 3021 /**************/ 3022 /* Regulatory */ 3023 /**************/ 3024 3025 static int ath10k_update_channel_list(struct ath10k *ar) 3026 { 3027 struct ieee80211_hw *hw = ar->hw; 3028 struct ieee80211_supported_band **bands; 3029 enum nl80211_band band; 3030 struct ieee80211_channel *channel; 3031 struct wmi_scan_chan_list_arg arg = {0}; 3032 struct wmi_channel_arg *ch; 3033 bool passive; 3034 int len; 3035 int ret; 3036 int i; 3037 3038 lockdep_assert_held(&ar->conf_mutex); 3039 3040 bands = hw->wiphy->bands; 3041 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3042 if (!bands[band]) 3043 continue; 3044 3045 for (i = 0; i < bands[band]->n_channels; i++) { 3046 if (bands[band]->channels[i].flags & 3047 IEEE80211_CHAN_DISABLED) 3048 continue; 3049 3050 arg.n_channels++; 3051 } 3052 } 3053 3054 len = sizeof(struct wmi_channel_arg) * arg.n_channels; 3055 arg.channels = kzalloc(len, GFP_KERNEL); 3056 if (!arg.channels) 3057 return -ENOMEM; 3058 3059 ch = arg.channels; 3060 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3061 if (!bands[band]) 3062 continue; 3063 3064 for (i = 0; i < bands[band]->n_channels; i++) { 3065 channel = &bands[band]->channels[i]; 3066 3067 if (channel->flags & IEEE80211_CHAN_DISABLED) 3068 continue; 3069 3070 ch->allow_ht = true; 3071 3072 /* FIXME: when should we really allow VHT? 
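			 * For now it is allowed on every enabled channel;
			 * regulatory and bandwidth constraints are presumably
			 * enforced elsewhere when a VHT BSS is brought up.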
*/ 3073 ch->allow_vht = true; 3074 3075 ch->allow_ibss = 3076 !(channel->flags & IEEE80211_CHAN_NO_IR); 3077 3078 ch->ht40plus = 3079 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); 3080 3081 ch->chan_radar = 3082 !!(channel->flags & IEEE80211_CHAN_RADAR); 3083 3084 passive = channel->flags & IEEE80211_CHAN_NO_IR; 3085 ch->passive = passive; 3086 3087 ch->freq = channel->center_freq; 3088 ch->band_center_freq1 = channel->center_freq; 3089 ch->min_power = 0; 3090 ch->max_power = channel->max_power * 2; 3091 ch->max_reg_power = channel->max_reg_power * 2; 3092 ch->max_antenna_gain = channel->max_antenna_gain * 2; 3093 ch->reg_class_id = 0; /* FIXME */ 3094 3095 /* FIXME: why use only legacy modes, why not any 3096 * HT/VHT modes? Would that even make any 3097 * difference? 3098 */ 3099 if (channel->band == NL80211_BAND_2GHZ) 3100 ch->mode = MODE_11G; 3101 else 3102 ch->mode = MODE_11A; 3103 3104 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) 3105 continue; 3106 3107 ath10k_dbg(ar, ATH10K_DBG_WMI, 3108 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", 3109 ch - arg.channels, arg.n_channels, 3110 ch->freq, ch->max_power, ch->max_reg_power, 3111 ch->max_antenna_gain, ch->mode); 3112 3113 ch++; 3114 } 3115 } 3116 3117 ret = ath10k_wmi_scan_chan_list(ar, &arg); 3118 kfree(arg.channels); 3119 3120 return ret; 3121 } 3122 3123 static enum wmi_dfs_region 3124 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) 3125 { 3126 switch (dfs_region) { 3127 case NL80211_DFS_UNSET: 3128 return WMI_UNINIT_DFS_DOMAIN; 3129 case NL80211_DFS_FCC: 3130 return WMI_FCC_DFS_DOMAIN; 3131 case NL80211_DFS_ETSI: 3132 return WMI_ETSI_DFS_DOMAIN; 3133 case NL80211_DFS_JP: 3134 return WMI_MKK4_DFS_DOMAIN; 3135 } 3136 return WMI_UNINIT_DFS_DOMAIN; 3137 } 3138 3139 static void ath10k_regd_update(struct ath10k *ar) 3140 { 3141 struct reg_dmn_pair_mapping *regpair; 3142 int ret; 3143 enum wmi_dfs_region wmi_dfs_reg; 3144 enum nl80211_dfs_regions nl_dfs_reg; 3145 3146 lockdep_assert_held(&ar->conf_mutex); 3147 3148 ret = ath10k_update_channel_list(ar); 3149 if (ret) 3150 ath10k_warn(ar, "failed to update channel list: %d\n", ret); 3151 3152 regpair = ar->ath_common.regulatory.regpair; 3153 3154 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3155 nl_dfs_reg = ar->dfs_detector->region; 3156 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); 3157 } else { 3158 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; 3159 } 3160 3161 /* Target allows setting up per-band regdomain but ath_common provides 3162 * a combined one only 3163 */ 3164 ret = ath10k_wmi_pdev_set_regdomain(ar, 3165 regpair->reg_domain, 3166 regpair->reg_domain, /* 2ghz */ 3167 regpair->reg_domain, /* 5ghz */ 3168 regpair->reg_2ghz_ctl, 3169 regpair->reg_5ghz_ctl, 3170 wmi_dfs_reg); 3171 if (ret) 3172 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); 3173 } 3174 3175 static void ath10k_mac_update_channel_list(struct ath10k *ar, 3176 struct ieee80211_supported_band *band) 3177 { 3178 int i; 3179 3180 if (ar->low_5ghz_chan && ar->high_5ghz_chan) { 3181 for (i = 0; i < band->n_channels; i++) { 3182 if (band->channels[i].center_freq < ar->low_5ghz_chan || 3183 band->channels[i].center_freq > ar->high_5ghz_chan) 3184 band->channels[i].flags |= 3185 IEEE80211_CHAN_DISABLED; 3186 } 3187 } 3188 } 3189 3190 static void ath10k_reg_notifier(struct wiphy *wiphy, 3191 struct regulatory_request *request) 3192 { 3193 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 3194 struct ath10k *ar = hw->priv; 3195 bool result; 3196 
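	/* Apply the hint to ath_common's shared regulatory state first; the
	 * DFS pattern detector and the firmware regdomain are then brought
	 * back in sync below (the latter only while the device is up).
	 */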
3197 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); 3198 3199 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3200 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", 3201 request->dfs_region); 3202 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 3203 request->dfs_region); 3204 if (!result) 3205 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", 3206 request->dfs_region); 3207 } 3208 3209 mutex_lock(&ar->conf_mutex); 3210 if (ar->state == ATH10K_STATE_ON) 3211 ath10k_regd_update(ar); 3212 mutex_unlock(&ar->conf_mutex); 3213 3214 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 3215 ath10k_mac_update_channel_list(ar, 3216 ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); 3217 } 3218 3219 /***************/ 3220 /* TX handlers */ 3221 /***************/ 3222 3223 enum ath10k_mac_tx_path { 3224 ATH10K_MAC_TX_HTT, 3225 ATH10K_MAC_TX_HTT_MGMT, 3226 ATH10K_MAC_TX_WMI_MGMT, 3227 ATH10K_MAC_TX_UNKNOWN, 3228 }; 3229 3230 void ath10k_mac_tx_lock(struct ath10k *ar, int reason) 3231 { 3232 lockdep_assert_held(&ar->htt.tx_lock); 3233 3234 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3235 ar->tx_paused |= BIT(reason); 3236 ieee80211_stop_queues(ar->hw); 3237 } 3238 3239 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, 3240 struct ieee80211_vif *vif) 3241 { 3242 struct ath10k *ar = data; 3243 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3244 3245 if (arvif->tx_paused) 3246 return; 3247 3248 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3249 } 3250 3251 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) 3252 { 3253 lockdep_assert_held(&ar->htt.tx_lock); 3254 3255 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3256 ar->tx_paused &= ~BIT(reason); 3257 3258 if (ar->tx_paused) 3259 return; 3260 3261 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3262 IEEE80211_IFACE_ITER_RESUME_ALL, 3263 ath10k_mac_tx_unlock_iter, 3264 ar); 3265 3266 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue); 3267 } 3268 3269 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason) 3270 { 3271 struct ath10k *ar = arvif->ar; 3272 3273 lockdep_assert_held(&ar->htt.tx_lock); 3274 3275 WARN_ON(reason >= BITS_PER_LONG); 3276 arvif->tx_paused |= BIT(reason); 3277 ieee80211_stop_queue(ar->hw, arvif->vdev_id); 3278 } 3279 3280 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason) 3281 { 3282 struct ath10k *ar = arvif->ar; 3283 3284 lockdep_assert_held(&ar->htt.tx_lock); 3285 3286 WARN_ON(reason >= BITS_PER_LONG); 3287 arvif->tx_paused &= ~BIT(reason); 3288 3289 if (ar->tx_paused) 3290 return; 3291 3292 if (arvif->tx_paused) 3293 return; 3294 3295 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3296 } 3297 3298 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, 3299 enum wmi_tlv_tx_pause_id pause_id, 3300 enum wmi_tlv_tx_pause_action action) 3301 { 3302 struct ath10k *ar = arvif->ar; 3303 3304 lockdep_assert_held(&ar->htt.tx_lock); 3305 3306 switch (action) { 3307 case WMI_TLV_TX_PAUSE_ACTION_STOP: 3308 ath10k_mac_vif_tx_lock(arvif, pause_id); 3309 break; 3310 case WMI_TLV_TX_PAUSE_ACTION_WAKE: 3311 ath10k_mac_vif_tx_unlock(arvif, pause_id); 3312 break; 3313 default: 3314 ath10k_dbg(ar, ATH10K_DBG_BOOT, 3315 "received unknown tx pause action %d on vdev %i, ignoring\n", 3316 action, arvif->vdev_id); 3317 break; 3318 } 3319 } 3320 3321 struct ath10k_mac_tx_pause { 3322 u32 vdev_id; 3323 enum wmi_tlv_tx_pause_id pause_id; 3324 enum wmi_tlv_tx_pause_action action; 3325 }; 3326 3327 static void 
ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
				struct ieee80211_vif *vif)
{
	struct ath10k_vif *arvif = (void *)vif->drv_priv;
	struct ath10k_mac_tx_pause *arg = data;

	if (arvif->vdev_id != arg->vdev_id)
		return;

	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
}

void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
				     enum wmi_tlv_tx_pause_id pause_id,
				     enum wmi_tlv_tx_pause_action action)
{
	struct ath10k_mac_tx_pause arg = {
		.vdev_id = vdev_id,
		.pause_id = pause_id,
		.action = action,
	};

	spin_lock_bh(&ar->htt.tx_lock);
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_RESUME_ALL,
						   ath10k_mac_handle_tx_pause_iter,
						   &arg);
	spin_unlock_bh(&ar->htt.tx_lock);
}

static enum ath10k_hw_txrx_mode
ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct sk_buff *skb)
{
	const struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;

	if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
		return ATH10K_HW_TXRX_RAW;

	if (ieee80211_is_mgmt(fc))
		return ATH10K_HW_TXRX_MGMT;

	/* Workaround:
	 *
	 * NullFunc frames are mostly used to ping whether a client or AP is
	 * still reachable and responsive. This implies tx status reports must
	 * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
	 * can conclude that the other end disappeared and tear down the BSS
	 * connection, or it can never disconnect from the BSS/client (which
	 * is what actually happens).
	 *
	 * Firmware with HTT older than 3.0 delivers incorrect tx status for
	 * NullFunc frames to the driver. However there's an HTT Mgmt Tx
	 * command which seems to deliver correct tx reports for NullFunc
	 * frames. The downside of using it is that it ignores client
	 * powersave state, so it can end up disconnecting sleeping clients in
	 * AP mode. It should fix STA mode though because APs don't sleep.
	 */
	if (ar->htt.target_version_major < 3 &&
	    (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
		      ar->running_fw->fw_file.fw_features))
		return ATH10K_HW_TXRX_MGMT;

	/* Workaround:
	 *
	 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
	 * NativeWifi txmode - it selects the AP key instead of the peer key.
	 * It seems to work with Ethernet txmode, so use that.
	 *
	 * FIXME: Check if raw mode works with TDLS.
3401 */ 3402 if (ieee80211_is_data_present(fc) && sta && sta->tdls) 3403 return ATH10K_HW_TXRX_ETHERNET; 3404 3405 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 3406 return ATH10K_HW_TXRX_RAW; 3407 3408 return ATH10K_HW_TXRX_NATIVE_WIFI; 3409 } 3410 3411 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, 3412 struct sk_buff *skb) 3413 { 3414 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3415 const struct ieee80211_hdr *hdr = (void *)skb->data; 3416 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | 3417 IEEE80211_TX_CTL_INJECTED; 3418 3419 if (!ieee80211_has_protected(hdr->frame_control)) 3420 return false; 3421 3422 if ((info->flags & mask) == mask) 3423 return false; 3424 3425 if (vif) 3426 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; 3427 3428 return true; 3429 } 3430 3431 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS 3432 * Control in the header. 3433 */ 3434 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) 3435 { 3436 struct ieee80211_hdr *hdr = (void *)skb->data; 3437 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3438 u8 *qos_ctl; 3439 3440 if (!ieee80211_is_data_qos(hdr->frame_control)) 3441 return; 3442 3443 qos_ctl = ieee80211_get_qos_ctl(hdr); 3444 memmove(skb->data + IEEE80211_QOS_CTL_LEN, 3445 skb->data, (void *)qos_ctl - (void *)skb->data); 3446 skb_pull(skb, IEEE80211_QOS_CTL_LEN); 3447 3448 /* Some firmware revisions don't handle sending QoS NullFunc well. 3449 * These frames are mainly used for CQM purposes so it doesn't really 3450 * matter whether QoS NullFunc or NullFunc are sent. 3451 */ 3452 hdr = (void *)skb->data; 3453 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) 3454 cb->flags &= ~ATH10K_SKB_F_QOS; 3455 3456 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 3457 } 3458 3459 static void ath10k_tx_h_8023(struct sk_buff *skb) 3460 { 3461 struct ieee80211_hdr *hdr; 3462 struct rfc1042_hdr *rfc1042; 3463 struct ethhdr *eth; 3464 size_t hdrlen; 3465 u8 da[ETH_ALEN]; 3466 u8 sa[ETH_ALEN]; 3467 __be16 type; 3468 3469 hdr = (void *)skb->data; 3470 hdrlen = ieee80211_hdrlen(hdr->frame_control); 3471 rfc1042 = (void *)skb->data + hdrlen; 3472 3473 ether_addr_copy(da, ieee80211_get_DA(hdr)); 3474 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 3475 type = rfc1042->snap_type; 3476 3477 skb_pull(skb, hdrlen + sizeof(*rfc1042)); 3478 skb_push(skb, sizeof(*eth)); 3479 3480 eth = (void *)skb->data; 3481 ether_addr_copy(eth->h_dest, da); 3482 ether_addr_copy(eth->h_source, sa); 3483 eth->h_proto = type; 3484 } 3485 3486 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, 3487 struct ieee80211_vif *vif, 3488 struct sk_buff *skb) 3489 { 3490 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3491 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3492 3493 /* This is case only for P2P_GO */ 3494 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 3495 return; 3496 3497 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { 3498 spin_lock_bh(&ar->data_lock); 3499 if (arvif->u.ap.noa_data) 3500 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, 3501 GFP_ATOMIC)) 3502 skb_put_data(skb, arvif->u.ap.noa_data, 3503 arvif->u.ap.noa_len); 3504 spin_unlock_bh(&ar->data_lock); 3505 } 3506 } 3507 3508 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, 3509 struct ieee80211_vif *vif, 3510 struct ieee80211_txq *txq, 3511 struct sk_buff *skb) 3512 { 3513 struct ieee80211_hdr *hdr = (void *)skb->data; 3514 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3515 
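	/* The driver-private control block carries per-frame state (crypto
	 * opt-out, mgmt/QoS flags, originating vif and txq) that the HTT/WMI
	 * tx paths consult later, so start from a clean slate for each frame.
	 */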
3516 cb->flags = 0; 3517 if (!ath10k_tx_h_use_hwcrypto(vif, skb)) 3518 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3519 3520 if (ieee80211_is_mgmt(hdr->frame_control)) 3521 cb->flags |= ATH10K_SKB_F_MGMT; 3522 3523 if (ieee80211_is_data_qos(hdr->frame_control)) 3524 cb->flags |= ATH10K_SKB_F_QOS; 3525 3526 cb->vif = vif; 3527 cb->txq = txq; 3528 } 3529 3530 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) 3531 { 3532 /* FIXME: Not really sure since when the behaviour changed. At some 3533 * point new firmware stopped requiring creation of peer entries for 3534 * offchannel tx (and actually creating them causes issues with wmi-htc 3535 * tx credit replenishment and reliability). Assuming it's at least 3.4 3536 * because that's when the `freq` was introduced to TX_FRM HTT command. 3537 */ 3538 return (ar->htt.target_version_major >= 3 && 3539 ar->htt.target_version_minor >= 4 && 3540 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); 3541 } 3542 3543 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) 3544 { 3545 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 3546 int ret = 0; 3547 3548 spin_lock_bh(&ar->data_lock); 3549 3550 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { 3551 ath10k_warn(ar, "wmi mgmt tx queue is full\n"); 3552 ret = -ENOSPC; 3553 goto unlock; 3554 } 3555 3556 __skb_queue_tail(q, skb); 3557 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 3558 3559 unlock: 3560 spin_unlock_bh(&ar->data_lock); 3561 3562 return ret; 3563 } 3564 3565 static enum ath10k_mac_tx_path 3566 ath10k_mac_tx_h_get_txpath(struct ath10k *ar, 3567 struct sk_buff *skb, 3568 enum ath10k_hw_txrx_mode txmode) 3569 { 3570 switch (txmode) { 3571 case ATH10K_HW_TXRX_RAW: 3572 case ATH10K_HW_TXRX_NATIVE_WIFI: 3573 case ATH10K_HW_TXRX_ETHERNET: 3574 return ATH10K_MAC_TX_HTT; 3575 case ATH10K_HW_TXRX_MGMT: 3576 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3577 ar->running_fw->fw_file.fw_features)) 3578 return ATH10K_MAC_TX_WMI_MGMT; 3579 else if (ar->htt.target_version_major >= 3) 3580 return ATH10K_MAC_TX_HTT; 3581 else 3582 return ATH10K_MAC_TX_HTT_MGMT; 3583 } 3584 3585 return ATH10K_MAC_TX_UNKNOWN; 3586 } 3587 3588 static int ath10k_mac_tx_submit(struct ath10k *ar, 3589 enum ath10k_hw_txrx_mode txmode, 3590 enum ath10k_mac_tx_path txpath, 3591 struct sk_buff *skb) 3592 { 3593 struct ath10k_htt *htt = &ar->htt; 3594 int ret = -EINVAL; 3595 3596 switch (txpath) { 3597 case ATH10K_MAC_TX_HTT: 3598 ret = ath10k_htt_tx(htt, txmode, skb); 3599 break; 3600 case ATH10K_MAC_TX_HTT_MGMT: 3601 ret = ath10k_htt_mgmt_tx(htt, skb); 3602 break; 3603 case ATH10K_MAC_TX_WMI_MGMT: 3604 ret = ath10k_mac_tx_wmi_mgmt(ar, skb); 3605 break; 3606 case ATH10K_MAC_TX_UNKNOWN: 3607 WARN_ON_ONCE(1); 3608 ret = -EINVAL; 3609 break; 3610 } 3611 3612 if (ret) { 3613 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", 3614 ret); 3615 ieee80211_free_txskb(ar->hw, skb); 3616 } 3617 3618 return ret; 3619 } 3620 3621 /* This function consumes the sk_buff regardless of return value as far as 3622 * caller is concerned so no freeing is necessary afterwards. 
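 * On failure the frame is freed with ieee80211_free_txskb(), either here
 * (unsupported raw mode) or further down in ath10k_mac_tx_submit().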
 */
static int ath10k_mac_tx(struct ath10k *ar,
			 struct ieee80211_vif *vif,
			 enum ath10k_hw_txrx_mode txmode,
			 enum ath10k_mac_tx_path txpath,
			 struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int ret;

	/* mac80211 can request that CCK rates not be used (e.g. for P2P);
	 * the flag is currently only noted here for debugging.
	 */
	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");

	switch (txmode) {
	case ATH10K_HW_TXRX_MGMT:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		ath10k_tx_h_nwifi(hw, skb);
		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
		ath10k_tx_h_seq_no(vif, skb);
		break;
	case ATH10K_HW_TXRX_ETHERNET:
		ath10k_tx_h_8023(skb);
		break;
	case ATH10K_HW_TXRX_RAW:
		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
			WARN_ON_ONCE(1);
			ieee80211_free_txskb(hw, skb);
			return -ENOTSUPP;
		}
	}

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		if (!ath10k_mac_tx_frm_has_freq(ar)) {
			ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
				   skb);

			skb_queue_tail(&ar->offchan_tx_queue, skb);
			ieee80211_queue_work(hw, &ar->offchan_tx_work);
			return 0;
		}
	}

	ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit frame: %d\n", ret);
		return ret;
	}

	return 0;
}

void ath10k_offchan_tx_purge(struct ath10k *ar)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_dequeue(&ar->offchan_tx_queue);
		if (!skb)
			break;

		ieee80211_free_txskb(ar->hw, skb);
	}
}

void ath10k_offchan_tx_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	enum ath10k_hw_txrx_mode txmode;
	enum ath10k_mac_tx_path txpath;
	struct ieee80211_hdr *hdr;
	struct ieee80211_vif *vif;
	struct ieee80211_sta *sta;
	struct sk_buff *skb;
	const u8 *peer_addr;
	int vdev_id;
	int ret;
	unsigned long time_left;
	bool tmp_peer_created = false;

	/* FW requirement: We must create a peer before FW will send out
	 * an offchannel frame. Otherwise the frame will be stuck and
	 * never transmitted. We delete the peer upon tx completion.
	 * It is unlikely that a peer for offchannel tx will already be
	 * present. However it may be there in some rare cases, so account
	 * for that. Otherwise we might remove a legitimate peer and break
	 * stuff.
	 */

	for (;;) {
		skb = skb_dequeue(&ar->offchan_tx_queue);
		if (!skb)
			break;

		mutex_lock(&ar->conf_mutex);

		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
			   skb);

		hdr = (struct ieee80211_hdr *)skb->data;
		peer_addr = ieee80211_get_DA(hdr);

		spin_lock_bh(&ar->data_lock);
		vdev_id = ar->scan.vdev_id;
		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
		spin_unlock_bh(&ar->data_lock);

		if (peer)
			/* FIXME: should this use ath10k_warn()? */
			ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
				   peer_addr, vdev_id);

		if (!peer) {
			ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
						 peer_addr,
						 WMI_PEER_TYPE_DEFAULT);
			if (ret)
				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
					    peer_addr, vdev_id, ret);
			tmp_peer_created = (ret == 0);
		}

		spin_lock_bh(&ar->data_lock);
		reinit_completion(&ar->offchan_tx_completed);
		ar->offchan_tx_skb = skb;
		spin_unlock_bh(&ar->data_lock);

		/* It's safe to access vif and sta - conf_mutex guarantees
		 * that sta_state() and remove_interface() cannot run
		 * concurrently with this offchannel worker.
		 */
		arvif = ath10k_get_arvif(ar, vdev_id);
		if (arvif) {
			vif = arvif->vif;
			sta = ieee80211_find_sta(vif, peer_addr);
		} else {
			vif = NULL;
			sta = NULL;
		}

		txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
		txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);

		ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
		if (ret) {
			ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
				    ret);
			/* not serious */
		}

		time_left =
		wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
		if (time_left == 0)
			ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
				    skb);

		if (!peer && tmp_peer_created) {
			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
			if (ret)
				ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
					    peer_addr, vdev_id, ret);
		}

		mutex_unlock(&ar->conf_mutex);
	}
}

void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
		if (!skb)
			break;

		ieee80211_free_txskb(ar->hw, skb);
	}
}

void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
	struct sk_buff *skb;
	int ret;

	for (;;) {
		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
		if (!skb)
			break;

		ret = ath10k_wmi_mgmt_tx(ar, skb);
		if (ret) {
			ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
				    ret);
			ieee80211_free_txskb(ar->hw, skb);
		}
	}
}

static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
{
	struct ath10k_txq *artxq;

	if (!txq)
		return;

	artxq = (void *)txq->drv_priv;
	INIT_LIST_HEAD(&artxq->list);
}

static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
{
	struct ath10k_txq *artxq;
	struct ath10k_skb_cb *cb;
	struct sk_buff *msdu;
	int msdu_id;

	if (!txq)
		return;

	artxq = (void *)txq->drv_priv;
	spin_lock_bh(&ar->txqs_lock);
	if (!list_empty(&artxq->list))
		list_del_init(&artxq->list);
	spin_unlock_bh(&ar->txqs_lock);

	spin_lock_bh(&ar->htt.tx_lock);
	idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
		cb = ATH10K_SKB_CB(msdu);
		if (cb->txq == txq)
			cb->txq = NULL;
	}
	spin_unlock_bh(&ar->htt.tx_lock);
}

struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
					    u16 peer_id,
					    u8 tid)
{
	struct ath10k_peer *peer;

	lockdep_assert_held(&ar->data_lock);

	peer = ar->peer_map[peer_id];
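	/* Prefer the per-station, per-TID queue when the peer maps to a
	 * station; otherwise fall back to the owning vif's queue (or NULL if
	 * the peer is gone or marked as removed).
	 */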
if (!peer) 3871 return NULL; 3872 3873 if (peer->removed) 3874 return NULL; 3875 3876 if (peer->sta) 3877 return peer->sta->txq[tid]; 3878 else if (peer->vif) 3879 return peer->vif->txq; 3880 else 3881 return NULL; 3882 } 3883 3884 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, 3885 struct ieee80211_txq *txq) 3886 { 3887 struct ath10k *ar = hw->priv; 3888 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3889 3890 /* No need to get locks */ 3891 3892 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) 3893 return true; 3894 3895 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) 3896 return true; 3897 3898 if (artxq->num_fw_queued < artxq->num_push_allowed) 3899 return true; 3900 3901 return false; 3902 } 3903 3904 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, 3905 struct ieee80211_txq *txq) 3906 { 3907 struct ath10k *ar = hw->priv; 3908 struct ath10k_htt *htt = &ar->htt; 3909 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3910 struct ieee80211_vif *vif = txq->vif; 3911 struct ieee80211_sta *sta = txq->sta; 3912 enum ath10k_hw_txrx_mode txmode; 3913 enum ath10k_mac_tx_path txpath; 3914 struct sk_buff *skb; 3915 struct ieee80211_hdr *hdr; 3916 size_t skb_len; 3917 bool is_mgmt, is_presp; 3918 int ret; 3919 3920 spin_lock_bh(&ar->htt.tx_lock); 3921 ret = ath10k_htt_tx_inc_pending(htt); 3922 spin_unlock_bh(&ar->htt.tx_lock); 3923 3924 if (ret) 3925 return ret; 3926 3927 skb = ieee80211_tx_dequeue(hw, txq); 3928 if (!skb) { 3929 spin_lock_bh(&ar->htt.tx_lock); 3930 ath10k_htt_tx_dec_pending(htt); 3931 spin_unlock_bh(&ar->htt.tx_lock); 3932 3933 return -ENOENT; 3934 } 3935 3936 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 3937 3938 skb_len = skb->len; 3939 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3940 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3941 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 3942 3943 if (is_mgmt) { 3944 hdr = (struct ieee80211_hdr *)skb->data; 3945 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 3946 3947 spin_lock_bh(&ar->htt.tx_lock); 3948 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 3949 3950 if (ret) { 3951 ath10k_htt_tx_dec_pending(htt); 3952 spin_unlock_bh(&ar->htt.tx_lock); 3953 return ret; 3954 } 3955 spin_unlock_bh(&ar->htt.tx_lock); 3956 } 3957 3958 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3959 if (unlikely(ret)) { 3960 ath10k_warn(ar, "failed to push frame: %d\n", ret); 3961 3962 spin_lock_bh(&ar->htt.tx_lock); 3963 ath10k_htt_tx_dec_pending(htt); 3964 if (is_mgmt) 3965 ath10k_htt_tx_mgmt_dec_pending(htt); 3966 spin_unlock_bh(&ar->htt.tx_lock); 3967 3968 return ret; 3969 } 3970 3971 spin_lock_bh(&ar->htt.tx_lock); 3972 artxq->num_fw_queued++; 3973 spin_unlock_bh(&ar->htt.tx_lock); 3974 3975 return skb_len; 3976 } 3977 3978 void ath10k_mac_tx_push_pending(struct ath10k *ar) 3979 { 3980 struct ieee80211_hw *hw = ar->hw; 3981 struct ieee80211_txq *txq; 3982 struct ath10k_txq *artxq; 3983 struct ath10k_txq *last; 3984 int ret; 3985 int max; 3986 3987 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) 3988 return; 3989 3990 spin_lock_bh(&ar->txqs_lock); 3991 rcu_read_lock(); 3992 3993 last = list_last_entry(&ar->txqs, struct ath10k_txq, list); 3994 while (!list_empty(&ar->txqs)) { 3995 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 3996 txq = container_of((void *)artxq, struct ieee80211_txq, 3997 drv_priv); 3998 3999 /* Prevent aggressive sta/tid taking over tx queue */ 4000 max = 16; 4001 ret = 0; 4002 while (ath10k_mac_tx_can_push(hw, txq) 
&& max--) { 4003 ret = ath10k_mac_tx_push_txq(hw, txq); 4004 if (ret < 0) 4005 break; 4006 } 4007 4008 list_del_init(&artxq->list); 4009 if (ret != -ENOENT) 4010 list_add_tail(&artxq->list, &ar->txqs); 4011 4012 ath10k_htt_tx_txq_update(hw, txq); 4013 4014 if (artxq == last || (ret < 0 && ret != -ENOENT)) 4015 break; 4016 } 4017 4018 rcu_read_unlock(); 4019 spin_unlock_bh(&ar->txqs_lock); 4020 } 4021 4022 /************/ 4023 /* Scanning */ 4024 /************/ 4025 4026 void __ath10k_scan_finish(struct ath10k *ar) 4027 { 4028 lockdep_assert_held(&ar->data_lock); 4029 4030 switch (ar->scan.state) { 4031 case ATH10K_SCAN_IDLE: 4032 break; 4033 case ATH10K_SCAN_RUNNING: 4034 case ATH10K_SCAN_ABORTING: 4035 if (!ar->scan.is_roc) { 4036 struct cfg80211_scan_info info = { 4037 .aborted = (ar->scan.state == 4038 ATH10K_SCAN_ABORTING), 4039 }; 4040 4041 ieee80211_scan_completed(ar->hw, &info); 4042 } else if (ar->scan.roc_notify) { 4043 ieee80211_remain_on_channel_expired(ar->hw); 4044 } 4045 /* fall through */ 4046 case ATH10K_SCAN_STARTING: 4047 ar->scan.state = ATH10K_SCAN_IDLE; 4048 ar->scan_channel = NULL; 4049 ar->scan.roc_freq = 0; 4050 ath10k_offchan_tx_purge(ar); 4051 cancel_delayed_work(&ar->scan.timeout); 4052 complete(&ar->scan.completed); 4053 break; 4054 } 4055 } 4056 4057 void ath10k_scan_finish(struct ath10k *ar) 4058 { 4059 spin_lock_bh(&ar->data_lock); 4060 __ath10k_scan_finish(ar); 4061 spin_unlock_bh(&ar->data_lock); 4062 } 4063 4064 static int ath10k_scan_stop(struct ath10k *ar) 4065 { 4066 struct wmi_stop_scan_arg arg = { 4067 .req_id = 1, /* FIXME */ 4068 .req_type = WMI_SCAN_STOP_ONE, 4069 .u.scan_id = ATH10K_SCAN_ID, 4070 }; 4071 int ret; 4072 4073 lockdep_assert_held(&ar->conf_mutex); 4074 4075 ret = ath10k_wmi_stop_scan(ar, &arg); 4076 if (ret) { 4077 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); 4078 goto out; 4079 } 4080 4081 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); 4082 if (ret == 0) { 4083 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); 4084 ret = -ETIMEDOUT; 4085 } else if (ret > 0) { 4086 ret = 0; 4087 } 4088 4089 out: 4090 /* Scan state should be updated upon scan completion but in case 4091 * firmware fails to deliver the event (for whatever reason) it is 4092 * desired to clean up scan state anyway. Firmware may have just 4093 * dropped the scan completion event delivery due to transport pipe 4094 * being overflown with data and/or it can recover on its own before 4095 * next scan request is submitted. 4096 */ 4097 spin_lock_bh(&ar->data_lock); 4098 if (ar->scan.state != ATH10K_SCAN_IDLE) 4099 __ath10k_scan_finish(ar); 4100 spin_unlock_bh(&ar->data_lock); 4101 4102 return ret; 4103 } 4104 4105 static void ath10k_scan_abort(struct ath10k *ar) 4106 { 4107 int ret; 4108 4109 lockdep_assert_held(&ar->conf_mutex); 4110 4111 spin_lock_bh(&ar->data_lock); 4112 4113 switch (ar->scan.state) { 4114 case ATH10K_SCAN_IDLE: 4115 /* This can happen if timeout worker kicked in and called 4116 * abortion while scan completion was being processed. 
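		 * In that case the scan has already completed and there is
		 * nothing left to abort.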
4117 */ 4118 break; 4119 case ATH10K_SCAN_STARTING: 4120 case ATH10K_SCAN_ABORTING: 4121 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", 4122 ath10k_scan_state_str(ar->scan.state), 4123 ar->scan.state); 4124 break; 4125 case ATH10K_SCAN_RUNNING: 4126 ar->scan.state = ATH10K_SCAN_ABORTING; 4127 spin_unlock_bh(&ar->data_lock); 4128 4129 ret = ath10k_scan_stop(ar); 4130 if (ret) 4131 ath10k_warn(ar, "failed to abort scan: %d\n", ret); 4132 4133 spin_lock_bh(&ar->data_lock); 4134 break; 4135 } 4136 4137 spin_unlock_bh(&ar->data_lock); 4138 } 4139 4140 void ath10k_scan_timeout_work(struct work_struct *work) 4141 { 4142 struct ath10k *ar = container_of(work, struct ath10k, 4143 scan.timeout.work); 4144 4145 mutex_lock(&ar->conf_mutex); 4146 ath10k_scan_abort(ar); 4147 mutex_unlock(&ar->conf_mutex); 4148 } 4149 4150 static int ath10k_start_scan(struct ath10k *ar, 4151 const struct wmi_start_scan_arg *arg) 4152 { 4153 int ret; 4154 4155 lockdep_assert_held(&ar->conf_mutex); 4156 4157 ret = ath10k_wmi_start_scan(ar, arg); 4158 if (ret) 4159 return ret; 4160 4161 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); 4162 if (ret == 0) { 4163 ret = ath10k_scan_stop(ar); 4164 if (ret) 4165 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 4166 4167 return -ETIMEDOUT; 4168 } 4169 4170 /* If we failed to start the scan, return error code at 4171 * this point. This is probably due to some issue in the 4172 * firmware, but no need to wedge the driver due to that... 4173 */ 4174 spin_lock_bh(&ar->data_lock); 4175 if (ar->scan.state == ATH10K_SCAN_IDLE) { 4176 spin_unlock_bh(&ar->data_lock); 4177 return -EINVAL; 4178 } 4179 spin_unlock_bh(&ar->data_lock); 4180 4181 return 0; 4182 } 4183 4184 /**********************/ 4185 /* mac80211 callbacks */ 4186 /**********************/ 4187 4188 static void ath10k_mac_op_tx(struct ieee80211_hw *hw, 4189 struct ieee80211_tx_control *control, 4190 struct sk_buff *skb) 4191 { 4192 struct ath10k *ar = hw->priv; 4193 struct ath10k_htt *htt = &ar->htt; 4194 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 4195 struct ieee80211_vif *vif = info->control.vif; 4196 struct ieee80211_sta *sta = control->sta; 4197 struct ieee80211_txq *txq = NULL; 4198 struct ieee80211_hdr *hdr = (void *)skb->data; 4199 enum ath10k_hw_txrx_mode txmode; 4200 enum ath10k_mac_tx_path txpath; 4201 bool is_htt; 4202 bool is_mgmt; 4203 bool is_presp; 4204 int ret; 4205 4206 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 4207 4208 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4209 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4210 is_htt = (txpath == ATH10K_MAC_TX_HTT || 4211 txpath == ATH10K_MAC_TX_HTT_MGMT); 4212 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4213 4214 if (is_htt) { 4215 spin_lock_bh(&ar->htt.tx_lock); 4216 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4217 4218 ret = ath10k_htt_tx_inc_pending(htt); 4219 if (ret) { 4220 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", 4221 ret); 4222 spin_unlock_bh(&ar->htt.tx_lock); 4223 ieee80211_free_txskb(ar->hw, skb); 4224 return; 4225 } 4226 4227 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4228 if (ret) { 4229 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", 4230 ret); 4231 ath10k_htt_tx_dec_pending(htt); 4232 spin_unlock_bh(&ar->htt.tx_lock); 4233 ieee80211_free_txskb(ar->hw, skb); 4234 return; 4235 } 4236 spin_unlock_bh(&ar->htt.tx_lock); 4237 } 4238 4239 ret = ath10k_mac_tx(ar, vif, txmode, 
txpath, skb); 4240 if (ret) { 4241 ath10k_warn(ar, "failed to transmit frame: %d\n", ret); 4242 if (is_htt) { 4243 spin_lock_bh(&ar->htt.tx_lock); 4244 ath10k_htt_tx_dec_pending(htt); 4245 if (is_mgmt) 4246 ath10k_htt_tx_mgmt_dec_pending(htt); 4247 spin_unlock_bh(&ar->htt.tx_lock); 4248 } 4249 return; 4250 } 4251 } 4252 4253 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, 4254 struct ieee80211_txq *txq) 4255 { 4256 struct ath10k *ar = hw->priv; 4257 struct ath10k_txq *artxq = (void *)txq->drv_priv; 4258 struct ieee80211_txq *f_txq; 4259 struct ath10k_txq *f_artxq; 4260 int ret = 0; 4261 int max = 16; 4262 4263 spin_lock_bh(&ar->txqs_lock); 4264 if (list_empty(&artxq->list)) 4265 list_add_tail(&artxq->list, &ar->txqs); 4266 4267 f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 4268 f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv); 4269 list_del_init(&f_artxq->list); 4270 4271 while (ath10k_mac_tx_can_push(hw, f_txq) && max--) { 4272 ret = ath10k_mac_tx_push_txq(hw, f_txq); 4273 if (ret) 4274 break; 4275 } 4276 if (ret != -ENOENT) 4277 list_add_tail(&f_artxq->list, &ar->txqs); 4278 spin_unlock_bh(&ar->txqs_lock); 4279 4280 ath10k_htt_tx_txq_update(hw, f_txq); 4281 ath10k_htt_tx_txq_update(hw, txq); 4282 } 4283 4284 /* Must not be called with conf_mutex held as workers can use that also. */ 4285 void ath10k_drain_tx(struct ath10k *ar) 4286 { 4287 /* make sure rcu-protected mac80211 tx path itself is drained */ 4288 synchronize_net(); 4289 4290 ath10k_offchan_tx_purge(ar); 4291 ath10k_mgmt_over_wmi_tx_purge(ar); 4292 4293 cancel_work_sync(&ar->offchan_tx_work); 4294 cancel_work_sync(&ar->wmi_mgmt_tx_work); 4295 } 4296 4297 void ath10k_halt(struct ath10k *ar) 4298 { 4299 struct ath10k_vif *arvif; 4300 4301 lockdep_assert_held(&ar->conf_mutex); 4302 4303 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 4304 ar->filter_flags = 0; 4305 ar->monitor = false; 4306 ar->monitor_arvif = NULL; 4307 4308 if (ar->monitor_started) 4309 ath10k_monitor_stop(ar); 4310 4311 ar->monitor_started = false; 4312 ar->tx_paused = 0; 4313 4314 ath10k_scan_finish(ar); 4315 ath10k_peer_cleanup_all(ar); 4316 ath10k_core_stop(ar); 4317 ath10k_hif_power_down(ar); 4318 4319 spin_lock_bh(&ar->data_lock); 4320 list_for_each_entry(arvif, &ar->arvifs, list) 4321 ath10k_mac_vif_beacon_cleanup(arvif); 4322 spin_unlock_bh(&ar->data_lock); 4323 } 4324 4325 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 4326 { 4327 struct ath10k *ar = hw->priv; 4328 4329 mutex_lock(&ar->conf_mutex); 4330 4331 *tx_ant = ar->cfg_tx_chainmask; 4332 *rx_ant = ar->cfg_rx_chainmask; 4333 4334 mutex_unlock(&ar->conf_mutex); 4335 4336 return 0; 4337 } 4338 4339 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) 4340 { 4341 /* It is not clear that allowing gaps in chainmask 4342 * is helpful. Probably it will not do what user 4343 * is hoping for, so warn in that case. 4344 */ 4345 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) 4346 return; 4347 4348 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n", 4349 dbg, cm); 4350 } 4351 4352 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar) 4353 { 4354 int nsts = ar->vht_cap_info; 4355 4356 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4357 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4358 4359 /* If the firmware does not report to the host the number of space-time 4360 * streams it supports, assume it supports up to 4 BF STS and return 4361 * the value expected in the VHT CAP field: (nsts - 1). 4362 */ 4363 if (nsts == 0) 4364 return 3; 4365 4366 return nsts; 4367 } 4368 4369 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar) 4370 { 4371 int sound_dim = ar->vht_cap_info; 4372 4373 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4374 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4375 4376 /* If the sounding dimension is not advertised by the firmware, 4377 * let's use a default value of 1 4378 */ 4379 if (sound_dim == 0) 4380 return 1; 4381 4382 return sound_dim; 4383 } 4384 4385 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) 4386 { 4387 struct ieee80211_sta_vht_cap vht_cap = {0}; 4388 struct ath10k_hw_params *hw = &ar->hw_params; 4389 u16 mcs_map; 4390 u32 val; 4391 int i; 4392 4393 vht_cap.vht_supported = 1; 4394 vht_cap.cap = ar->vht_cap_info; 4395 4396 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4397 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 4398 val = ath10k_mac_get_vht_cap_bf_sts(ar); 4399 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4400 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4401 4402 vht_cap.cap |= val; 4403 } 4404 4405 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4406 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 4407 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4408 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4409 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4410 4411 vht_cap.cap |= val; 4412 } 4413 4414 /* Currently the firmware seems to be buggy, don't enable 80+80 4415 * mode until that's resolved. 4416 */ 4417 if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) && 4418 (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0) 4419 vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; 4420 4421 mcs_map = 0; 4422 for (i = 0; i < 8; i++) { 4423 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) 4424 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4425 else 4426 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4427 } 4428 4429 if (ar->cfg_tx_chainmask <= 1) 4430 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; 4431 4432 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); 4433 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); 4434 4435 /* If we are supporting 160 MHz or 80+80, then the NIC may be able to do 4436 * a restricted NSS for 160 or 80+80 vs what it can do for 80 MHz. Give 4437 * user-space a clue if that is the case.
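 * The vht160_mcs_rx_highest/vht160_mcs_tx_highest values from hw_params are exported via the rx_highest/tx_highest MCS fields below for that purpose.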
4438 */ 4439 if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) && 4440 (hw->vht160_mcs_rx_highest != 0 || 4441 hw->vht160_mcs_tx_highest != 0)) { 4442 vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest); 4443 vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest); 4444 } 4445 4446 return vht_cap; 4447 } 4448 4449 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) 4450 { 4451 int i; 4452 struct ieee80211_sta_ht_cap ht_cap = {0}; 4453 4454 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) 4455 return ht_cap; 4456 4457 ht_cap.ht_supported = 1; 4458 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4459 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 4460 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 4461 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 4462 ht_cap.cap |= 4463 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; 4464 4465 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) 4466 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 4467 4468 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) 4469 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 4470 4471 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { 4472 u32 smps; 4473 4474 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 4475 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 4476 4477 ht_cap.cap |= smps; 4478 } 4479 4480 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) 4481 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 4482 4483 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { 4484 u32 stbc; 4485 4486 stbc = ar->ht_cap_info; 4487 stbc &= WMI_HT_CAP_RX_STBC; 4488 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 4489 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 4490 stbc &= IEEE80211_HT_CAP_RX_STBC; 4491 4492 ht_cap.cap |= stbc; 4493 } 4494 4495 if (ar->ht_cap_info & WMI_HT_CAP_LDPC) 4496 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 4497 4498 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) 4499 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 4500 4501 /* max AMSDU is implicitly taken from vht_cap_info */ 4502 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 4503 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 4504 4505 for (i = 0; i < ar->num_rf_chains; i++) { 4506 if (ar->cfg_rx_chainmask & BIT(i)) 4507 ht_cap.mcs.rx_mask[i] = 0xFF; 4508 } 4509 4510 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 4511 4512 return ht_cap; 4513 } 4514 4515 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) 4516 { 4517 struct ieee80211_supported_band *band; 4518 struct ieee80211_sta_vht_cap vht_cap; 4519 struct ieee80211_sta_ht_cap ht_cap; 4520 4521 ht_cap = ath10k_get_ht_cap(ar); 4522 vht_cap = ath10k_create_vht_cap(ar); 4523 4524 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 4525 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 4526 band->ht_cap = ht_cap; 4527 } 4528 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 4529 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 4530 band->ht_cap = ht_cap; 4531 band->vht_cap = vht_cap; 4532 } 4533 } 4534 4535 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) 4536 { 4537 int ret; 4538 4539 lockdep_assert_held(&ar->conf_mutex); 4540 4541 ath10k_check_chain_mask(ar, tx_ant, "tx"); 4542 ath10k_check_chain_mask(ar, rx_ant, "rx"); 4543 4544 ar->cfg_tx_chainmask = tx_ant; 4545 ar->cfg_rx_chainmask = rx_ant; 4546 4547 if ((ar->state != ATH10K_STATE_ON) && 4548 (ar->state != ATH10K_STATE_RESTARTED)) 4549 return 0; 4550 4551 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, 4552 tx_ant); 4553 if (ret) { 4554 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 
0x%x\n", 4555 ret, tx_ant); 4556 return ret; 4557 } 4558 4559 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, 4560 rx_ant); 4561 if (ret) { 4562 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", 4563 ret, rx_ant); 4564 return ret; 4565 } 4566 4567 /* Reload HT/VHT capability */ 4568 ath10k_mac_setup_ht_vht_cap(ar); 4569 4570 return 0; 4571 } 4572 4573 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 4574 { 4575 struct ath10k *ar = hw->priv; 4576 int ret; 4577 4578 mutex_lock(&ar->conf_mutex); 4579 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); 4580 mutex_unlock(&ar->conf_mutex); 4581 return ret; 4582 } 4583 4584 static int ath10k_start(struct ieee80211_hw *hw) 4585 { 4586 struct ath10k *ar = hw->priv; 4587 u32 param; 4588 int ret = 0; 4589 4590 /* 4591 * This makes sense only when restarting hw. It is harmless to call 4592 * unconditionally. This is necessary to make sure no HTT/WMI tx 4593 * commands will be submitted while restarting. 4594 */ 4595 ath10k_drain_tx(ar); 4596 4597 mutex_lock(&ar->conf_mutex); 4598 4599 switch (ar->state) { 4600 case ATH10K_STATE_OFF: 4601 ar->state = ATH10K_STATE_ON; 4602 break; 4603 case ATH10K_STATE_RESTARTING: 4604 ar->state = ATH10K_STATE_RESTARTED; 4605 break; 4606 case ATH10K_STATE_ON: 4607 case ATH10K_STATE_RESTARTED: 4608 case ATH10K_STATE_WEDGED: 4609 WARN_ON(1); 4610 ret = -EINVAL; 4611 goto err; 4612 case ATH10K_STATE_UTF: 4613 ret = -EBUSY; 4614 goto err; 4615 } 4616 4617 ret = ath10k_hif_power_up(ar); 4618 if (ret) { 4619 ath10k_err(ar, "Could not init hif: %d\n", ret); 4620 goto err_off; 4621 } 4622 4623 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL, 4624 &ar->normal_mode_fw); 4625 if (ret) { 4626 ath10k_err(ar, "Could not init core: %d\n", ret); 4627 goto err_power_down; 4628 } 4629 4630 param = ar->wmi.pdev_param->pmf_qos; 4631 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4632 if (ret) { 4633 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); 4634 goto err_core_stop; 4635 } 4636 4637 param = ar->wmi.pdev_param->dynamic_bw; 4638 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4639 if (ret) { 4640 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); 4641 goto err_core_stop; 4642 } 4643 4644 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { 4645 ret = ath10k_wmi_adaptive_qcs(ar, true); 4646 if (ret) { 4647 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n", 4648 ret); 4649 goto err_core_stop; 4650 } 4651 } 4652 4653 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { 4654 param = ar->wmi.pdev_param->burst_enable; 4655 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4656 if (ret) { 4657 ath10k_warn(ar, "failed to disable burst: %d\n", ret); 4658 goto err_core_stop; 4659 } 4660 } 4661 4662 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); 4663 4664 /* 4665 * By default FW set ARP frames ac to voice (6). In that case ARP 4666 * exchange is not working properly for UAPSD enabled AP. ARP requests 4667 * which arrives with access category 0 are processed by network stack 4668 * and send back with access category 0, but FW changes access category 4669 * to 6. Set ARP frames access category to best effort (0) solves 4670 * this problem. 
4671 */ 4672 4673 param = ar->wmi.pdev_param->arp_ac_override; 4674 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4675 if (ret) { 4676 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", 4677 ret); 4678 goto err_core_stop; 4679 } 4680 4681 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, 4682 ar->running_fw->fw_file.fw_features)) { 4683 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, 4684 WMI_CCA_DETECT_LEVEL_AUTO, 4685 WMI_CCA_DETECT_MARGIN_AUTO); 4686 if (ret) { 4687 ath10k_warn(ar, "failed to enable adaptive cca: %d\n", 4688 ret); 4689 goto err_core_stop; 4690 } 4691 } 4692 4693 param = ar->wmi.pdev_param->ani_enable; 4694 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4695 if (ret) { 4696 ath10k_warn(ar, "failed to enable ani by default: %d\n", 4697 ret); 4698 goto err_core_stop; 4699 } 4700 4701 ar->ani_enabled = true; 4702 4703 if (ath10k_peer_stats_enabled(ar)) { 4704 param = ar->wmi.pdev_param->peer_stats_update_period; 4705 ret = ath10k_wmi_pdev_set_param(ar, param, 4706 PEER_DEFAULT_STATS_UPDATE_PERIOD); 4707 if (ret) { 4708 ath10k_warn(ar, 4709 "failed to set peer stats period : %d\n", 4710 ret); 4711 goto err_core_stop; 4712 } 4713 } 4714 4715 param = ar->wmi.pdev_param->enable_btcoex; 4716 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && 4717 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, 4718 ar->running_fw->fw_file.fw_features)) { 4719 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4720 if (ret) { 4721 ath10k_warn(ar, 4722 "failed to set btcoex param: %d\n", ret); 4723 goto err_core_stop; 4724 } 4725 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 4726 } 4727 4728 ar->num_started_vdevs = 0; 4729 ath10k_regd_update(ar); 4730 4731 ath10k_spectral_start(ar); 4732 ath10k_thermal_set_throttling(ar); 4733 4734 mutex_unlock(&ar->conf_mutex); 4735 return 0; 4736 4737 err_core_stop: 4738 ath10k_core_stop(ar); 4739 4740 err_power_down: 4741 ath10k_hif_power_down(ar); 4742 4743 err_off: 4744 ar->state = ATH10K_STATE_OFF; 4745 4746 err: 4747 mutex_unlock(&ar->conf_mutex); 4748 return ret; 4749 } 4750 4751 static void ath10k_stop(struct ieee80211_hw *hw) 4752 { 4753 struct ath10k *ar = hw->priv; 4754 4755 ath10k_drain_tx(ar); 4756 4757 mutex_lock(&ar->conf_mutex); 4758 if (ar->state != ATH10K_STATE_OFF) { 4759 ath10k_halt(ar); 4760 ar->state = ATH10K_STATE_OFF; 4761 } 4762 mutex_unlock(&ar->conf_mutex); 4763 4764 cancel_work_sync(&ar->set_coverage_class_work); 4765 cancel_delayed_work_sync(&ar->scan.timeout); 4766 cancel_work_sync(&ar->restart_work); 4767 } 4768 4769 static int ath10k_config_ps(struct ath10k *ar) 4770 { 4771 struct ath10k_vif *arvif; 4772 int ret = 0; 4773 4774 lockdep_assert_held(&ar->conf_mutex); 4775 4776 list_for_each_entry(arvif, &ar->arvifs, list) { 4777 ret = ath10k_mac_vif_setup_ps(arvif); 4778 if (ret) { 4779 ath10k_warn(ar, "failed to setup powersave: %d\n", ret); 4780 break; 4781 } 4782 } 4783 4784 return ret; 4785 } 4786 4787 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) 4788 { 4789 int ret; 4790 u32 param; 4791 4792 lockdep_assert_held(&ar->conf_mutex); 4793 4794 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower); 4795 4796 param = ar->wmi.pdev_param->txpower_limit2g; 4797 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4798 if (ret) { 4799 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", 4800 txpower, ret); 4801 return ret; 4802 } 4803 4804 param = ar->wmi.pdev_param->txpower_limit5g; 4805 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4806 if (ret) { 4807 ath10k_warn(ar, "failed to 
set 5g txpower %d: %d\n", 4808 txpower, ret); 4809 return ret; 4810 } 4811 4812 return 0; 4813 } 4814 4815 static int ath10k_mac_txpower_recalc(struct ath10k *ar) 4816 { 4817 struct ath10k_vif *arvif; 4818 int ret, txpower = -1; 4819 4820 lockdep_assert_held(&ar->conf_mutex); 4821 4822 list_for_each_entry(arvif, &ar->arvifs, list) { 4823 if (arvif->txpower <= 0) 4824 continue; 4825 4826 if (txpower == -1) 4827 txpower = arvif->txpower; 4828 else 4829 txpower = min(txpower, arvif->txpower); 4830 } 4831 4832 if (txpower == -1) 4833 return 0; 4834 4835 ret = ath10k_mac_txpower_setup(ar, txpower); 4836 if (ret) { 4837 ath10k_warn(ar, "failed to setup tx power %d: %d\n", 4838 txpower, ret); 4839 return ret; 4840 } 4841 4842 return 0; 4843 } 4844 4845 static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 4846 { 4847 struct ath10k *ar = hw->priv; 4848 struct ieee80211_conf *conf = &hw->conf; 4849 int ret = 0; 4850 4851 mutex_lock(&ar->conf_mutex); 4852 4853 if (changed & IEEE80211_CONF_CHANGE_PS) 4854 ath10k_config_ps(ar); 4855 4856 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 4857 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; 4858 ret = ath10k_monitor_recalc(ar); 4859 if (ret) 4860 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 4861 } 4862 4863 mutex_unlock(&ar->conf_mutex); 4864 return ret; 4865 } 4866 4867 static u32 get_nss_from_chainmask(u16 chain_mask) 4868 { 4869 if ((chain_mask & 0xf) == 0xf) 4870 return 4; 4871 else if ((chain_mask & 0x7) == 0x7) 4872 return 3; 4873 else if ((chain_mask & 0x3) == 0x3) 4874 return 2; 4875 return 1; 4876 } 4877 4878 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) 4879 { 4880 u32 value = 0; 4881 struct ath10k *ar = arvif->ar; 4882 int nsts; 4883 int sound_dim; 4884 4885 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) 4886 return 0; 4887 4888 nsts = ath10k_mac_get_vht_cap_bf_sts(ar); 4889 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4890 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) 4891 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 4892 4893 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4894 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4895 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) 4896 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); 4897 4898 if (!value) 4899 return 0; 4900 4901 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 4902 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 4903 4904 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 4905 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | 4906 WMI_VDEV_PARAM_TXBF_SU_TX_BFER); 4907 4908 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 4909 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 4910 4911 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 4912 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | 4913 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); 4914 4915 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 4916 ar->wmi.vdev_param->txbf, value); 4917 } 4918 4919 /* 4920 * TODO: 4921 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, 4922 * because we will send mgmt frames without CCK. This requirement 4923 * for P2P_FIND/GO_NEG should be handled by checking CCK flag 4924 * in the TX packet. 
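 * For now a P2P_DEVICE interface is simply created as a STA vdev with the P2P_DEVICE subtype (see the switch in ath10k_add_interface() below).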
4925 */ 4926 static int ath10k_add_interface(struct ieee80211_hw *hw, 4927 struct ieee80211_vif *vif) 4928 { 4929 struct ath10k *ar = hw->priv; 4930 struct ath10k_vif *arvif = (void *)vif->drv_priv; 4931 struct ath10k_peer *peer; 4932 enum wmi_sta_powersave_param param; 4933 int ret = 0; 4934 u32 value; 4935 int bit; 4936 int i; 4937 u32 vdev_param; 4938 4939 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 4940 4941 mutex_lock(&ar->conf_mutex); 4942 4943 memset(arvif, 0, sizeof(*arvif)); 4944 ath10k_mac_txq_init(vif->txq); 4945 4946 arvif->ar = ar; 4947 arvif->vif = vif; 4948 4949 INIT_LIST_HEAD(&arvif->list); 4950 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work); 4951 INIT_DELAYED_WORK(&arvif->connection_loss_work, 4952 ath10k_mac_vif_sta_connection_loss_work); 4953 4954 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 4955 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 4956 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 4957 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 4958 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 4959 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 4960 } 4961 4962 if (ar->num_peers >= ar->max_num_peers) { 4963 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); 4964 ret = -ENOBUFS; 4965 goto err; 4966 } 4967 4968 if (ar->free_vdev_map == 0) { 4969 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); 4970 ret = -EBUSY; 4971 goto err; 4972 } 4973 bit = __ffs64(ar->free_vdev_map); 4974 4975 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", 4976 bit, ar->free_vdev_map); 4977 4978 arvif->vdev_id = bit; 4979 arvif->vdev_subtype = 4980 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); 4981 4982 switch (vif->type) { 4983 case NL80211_IFTYPE_P2P_DEVICE: 4984 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4985 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4986 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); 4987 break; 4988 case NL80211_IFTYPE_UNSPECIFIED: 4989 case NL80211_IFTYPE_STATION: 4990 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4991 if (vif->p2p) 4992 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4993 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); 4994 break; 4995 case NL80211_IFTYPE_ADHOC: 4996 arvif->vdev_type = WMI_VDEV_TYPE_IBSS; 4997 break; 4998 case NL80211_IFTYPE_MESH_POINT: 4999 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { 5000 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5001 (ar, WMI_VDEV_SUBTYPE_MESH_11S); 5002 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 5003 ret = -EINVAL; 5004 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); 5005 goto err; 5006 } 5007 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5008 break; 5009 case NL80211_IFTYPE_AP: 5010 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5011 5012 if (vif->p2p) 5013 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5014 (ar, WMI_VDEV_SUBTYPE_P2P_GO); 5015 break; 5016 case NL80211_IFTYPE_MONITOR: 5017 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 5018 break; 5019 default: 5020 WARN_ON(1); 5021 break; 5022 } 5023 5024 /* Using vdev_id as queue number will make it very easy to do per-vif 5025 * tx queue locking. This shouldn't wrap due to interface combinations 5026 * but do a modulo for correctness sake and prevent using offchannel tx 5027 * queues for regular vif tx. 
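 * The modulo over (IEEE80211_MAX_QUEUES - 1) keeps the last hw queue out of the per-vif mapping, presumably leaving it free for offchannel tx.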
5028 */ 5029 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5030 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 5031 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5032 5033 /* Some firmware revisions don't wait for beacon tx completion before 5034 * sending another SWBA event. This could lead to hardware using old 5035 * (freed) beacon data in some cases, e.g. tx credit starvation 5036 * combined with missed TBTT. This is very very rare. 5037 * 5038 * On non-IOMMU-enabled hosts this could be a possible security issue 5039 * because hw could beacon some random data on the air. On 5040 * IOMMU-enabled hosts DMAR faults would occur in most cases and the target 5041 * device would crash. 5042 * 5043 * Since there are no beacon tx completions (neither implicit nor explicit) 5044 * propagated to the host, the only workaround for this is to allocate a 5045 * DMA-coherent buffer for the lifetime of a vif and use it for all 5046 * beacon tx commands. Worst case for this approach is that some beacons may 5047 * become corrupted, e.g. have garbled IEs or an out-of-date TIM bitmap. 5048 */ 5049 if (vif->type == NL80211_IFTYPE_ADHOC || 5050 vif->type == NL80211_IFTYPE_MESH_POINT || 5051 vif->type == NL80211_IFTYPE_AP) { 5052 arvif->beacon_buf = dma_zalloc_coherent(ar->dev, 5053 IEEE80211_MAX_FRAME_LEN, 5054 &arvif->beacon_paddr, 5055 GFP_ATOMIC); 5056 if (!arvif->beacon_buf) { 5057 ret = -ENOMEM; 5058 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 5059 ret); 5060 goto err; 5061 } 5062 } 5063 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)) 5064 arvif->nohwcrypt = true; 5065 5066 if (arvif->nohwcrypt && 5067 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { ret = -EINVAL; 5068 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); 5069 goto err; 5070 } 5071 5072 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", 5073 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, 5074 arvif->beacon_buf ? "single-buf" : "per-skb"); 5075 5076 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 5077 arvif->vdev_subtype, vif->addr); 5078 if (ret) { 5079 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", 5080 arvif->vdev_id, ret); 5081 goto err; 5082 } 5083 5084 ar->free_vdev_map &= ~(1LL << arvif->vdev_id); 5085 spin_lock_bh(&ar->data_lock); 5086 list_add(&arvif->list, &ar->arvifs); 5087 spin_unlock_bh(&ar->data_lock); 5088 5089 /* It makes no sense to have firmware do keepalives. mac80211 already 5090 * takes care of this with idle connection polling. 5091 */ 5092 ret = ath10k_mac_vif_disable_keepalive(arvif); 5093 if (ret) { 5094 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", 5095 arvif->vdev_id, ret); 5096 goto err_vdev_delete; 5097 } 5098 5099 arvif->def_wep_key_idx = -1; 5100 5101 vdev_param = ar->wmi.vdev_param->tx_encap_type; 5102 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5103 ATH10K_HW_TXRX_NATIVE_WIFI); 5104 /* 10.X firmware does not support this VDEV parameter. Do not warn */ 5105 if (ret && ret != -EOPNOTSUPP) { 5106 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", 5107 arvif->vdev_id, ret); 5108 goto err_vdev_delete; 5109 } 5110 5111 /* Configuring the number of spatial streams for a monitor interface causes 5112 * a target assert in qca9888 and qca6174.
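 * Hence the nss vdev param below is only set for non-monitor vdevs, with nss derived from the configured tx chainmask.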
5113 */ 5114 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { 5115 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); 5116 5117 vdev_param = ar->wmi.vdev_param->nss; 5118 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5119 nss); 5120 if (ret) { 5121 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", 5122 arvif->vdev_id, ar->cfg_tx_chainmask, nss, 5123 ret); 5124 goto err_vdev_delete; 5125 } 5126 } 5127 5128 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5129 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5130 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, 5131 vif->addr, WMI_PEER_TYPE_DEFAULT); 5132 if (ret) { 5133 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", 5134 arvif->vdev_id, ret); 5135 goto err_vdev_delete; 5136 } 5137 5138 spin_lock_bh(&ar->data_lock); 5139 5140 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); 5141 if (!peer) { 5142 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 5143 vif->addr, arvif->vdev_id); 5144 spin_unlock_bh(&ar->data_lock); 5145 ret = -ENOENT; 5146 goto err_peer_delete; 5147 } 5148 5149 arvif->peer_id = find_first_bit(peer->peer_ids, 5150 ATH10K_MAX_NUM_PEER_IDS); 5151 5152 spin_unlock_bh(&ar->data_lock); 5153 } else { 5154 arvif->peer_id = HTT_INVALID_PEERID; 5155 } 5156 5157 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 5158 ret = ath10k_mac_set_kickout(arvif); 5159 if (ret) { 5160 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", 5161 arvif->vdev_id, ret); 5162 goto err_peer_delete; 5163 } 5164 } 5165 5166 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 5167 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 5168 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 5169 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5170 param, value); 5171 if (ret) { 5172 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", 5173 arvif->vdev_id, ret); 5174 goto err_peer_delete; 5175 } 5176 5177 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 5178 if (ret) { 5179 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 5180 arvif->vdev_id, ret); 5181 goto err_peer_delete; 5182 } 5183 5184 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 5185 if (ret) { 5186 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 5187 arvif->vdev_id, ret); 5188 goto err_peer_delete; 5189 } 5190 } 5191 5192 ret = ath10k_mac_set_txbf_conf(arvif); 5193 if (ret) { 5194 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", 5195 arvif->vdev_id, ret); 5196 goto err_peer_delete; 5197 } 5198 5199 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 5200 if (ret) { 5201 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 5202 arvif->vdev_id, ret); 5203 goto err_peer_delete; 5204 } 5205 5206 arvif->txpower = vif->bss_conf.txpower; 5207 ret = ath10k_mac_txpower_recalc(ar); 5208 if (ret) { 5209 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5210 goto err_peer_delete; 5211 } 5212 5213 if (vif->type == NL80211_IFTYPE_MONITOR) { 5214 ar->monitor_arvif = arvif; 5215 ret = ath10k_monitor_recalc(ar); 5216 if (ret) { 5217 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5218 goto err_peer_delete; 5219 } 5220 } 5221 5222 spin_lock_bh(&ar->htt.tx_lock); 5223 if (!ar->tx_paused) 5224 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 5225 spin_unlock_bh(&ar->htt.tx_lock); 5226 5227 mutex_unlock(&ar->conf_mutex); 5228 return 0; 5229 5230 err_peer_delete: 5231 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5232 arvif->vdev_type == 
WMI_VDEV_TYPE_IBSS) 5233 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); 5234 5235 err_vdev_delete: 5236 ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5237 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5238 spin_lock_bh(&ar->data_lock); 5239 list_del(&arvif->list); 5240 spin_unlock_bh(&ar->data_lock); 5241 5242 err: 5243 if (arvif->beacon_buf) { 5244 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 5245 arvif->beacon_buf, arvif->beacon_paddr); 5246 arvif->beacon_buf = NULL; 5247 } 5248 5249 mutex_unlock(&ar->conf_mutex); 5250 5251 return ret; 5252 } 5253 5254 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif) 5255 { 5256 int i; 5257 5258 for (i = 0; i < BITS_PER_LONG; i++) 5259 ath10k_mac_vif_tx_unlock(arvif, i); 5260 } 5261 5262 static void ath10k_remove_interface(struct ieee80211_hw *hw, 5263 struct ieee80211_vif *vif) 5264 { 5265 struct ath10k *ar = hw->priv; 5266 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5267 struct ath10k_peer *peer; 5268 int ret; 5269 int i; 5270 5271 cancel_work_sync(&arvif->ap_csa_work); 5272 cancel_delayed_work_sync(&arvif->connection_loss_work); 5273 5274 mutex_lock(&ar->conf_mutex); 5275 5276 spin_lock_bh(&ar->data_lock); 5277 ath10k_mac_vif_beacon_cleanup(arvif); 5278 spin_unlock_bh(&ar->data_lock); 5279 5280 ret = ath10k_spectral_vif_stop(arvif); 5281 if (ret) 5282 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", 5283 arvif->vdev_id, ret); 5284 5285 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5286 spin_lock_bh(&ar->data_lock); 5287 list_del(&arvif->list); 5288 spin_unlock_bh(&ar->data_lock); 5289 5290 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5291 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5292 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, 5293 vif->addr); 5294 if (ret) 5295 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", 5296 arvif->vdev_id, ret); 5297 5298 kfree(arvif->u.ap.noa_data); 5299 } 5300 5301 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", 5302 arvif->vdev_id); 5303 5304 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5305 if (ret) 5306 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", 5307 arvif->vdev_id, ret); 5308 5309 /* Some firmware revisions don't notify host about self-peer removal 5310 * until after associated vdev is deleted. 
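 * Wait for the peer deletion to be confirmed here before dropping the host-side peer count.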
5311 */ 5312 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5313 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5314 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, 5315 vif->addr); 5316 if (ret) 5317 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", 5318 arvif->vdev_id, ret); 5319 5320 spin_lock_bh(&ar->data_lock); 5321 ar->num_peers--; 5322 spin_unlock_bh(&ar->data_lock); 5323 } 5324 5325 spin_lock_bh(&ar->data_lock); 5326 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 5327 peer = ar->peer_map[i]; 5328 if (!peer) 5329 continue; 5330 5331 if (peer->vif == vif) { 5332 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", 5333 vif->addr, arvif->vdev_id); 5334 peer->vif = NULL; 5335 } 5336 } 5337 spin_unlock_bh(&ar->data_lock); 5338 5339 ath10k_peer_cleanup(ar, arvif->vdev_id); 5340 ath10k_mac_txq_unref(ar, vif->txq); 5341 5342 if (vif->type == NL80211_IFTYPE_MONITOR) { 5343 ar->monitor_arvif = NULL; 5344 ret = ath10k_monitor_recalc(ar); 5345 if (ret) 5346 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5347 } 5348 5349 ret = ath10k_mac_txpower_recalc(ar); 5350 if (ret) 5351 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5352 5353 spin_lock_bh(&ar->htt.tx_lock); 5354 ath10k_mac_vif_tx_unlock_all(arvif); 5355 spin_unlock_bh(&ar->htt.tx_lock); 5356 5357 ath10k_mac_txq_unref(ar, vif->txq); 5358 5359 mutex_unlock(&ar->conf_mutex); 5360 } 5361 5362 /* 5363 * FIXME: Has to be verified. 5364 */ 5365 #define SUPPORTED_FILTERS \ 5366 (FIF_ALLMULTI | \ 5367 FIF_CONTROL | \ 5368 FIF_PSPOLL | \ 5369 FIF_OTHER_BSS | \ 5370 FIF_BCN_PRBRESP_PROMISC | \ 5371 FIF_PROBE_REQ | \ 5372 FIF_FCSFAIL) 5373 5374 static void ath10k_configure_filter(struct ieee80211_hw *hw, 5375 unsigned int changed_flags, 5376 unsigned int *total_flags, 5377 u64 multicast) 5378 { 5379 struct ath10k *ar = hw->priv; 5380 int ret; 5381 5382 mutex_lock(&ar->conf_mutex); 5383 5384 changed_flags &= SUPPORTED_FILTERS; 5385 *total_flags &= SUPPORTED_FILTERS; 5386 ar->filter_flags = *total_flags; 5387 5388 ret = ath10k_monitor_recalc(ar); 5389 if (ret) 5390 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5391 5392 mutex_unlock(&ar->conf_mutex); 5393 } 5394 5395 static void ath10k_bss_info_changed(struct ieee80211_hw *hw, 5396 struct ieee80211_vif *vif, 5397 struct ieee80211_bss_conf *info, 5398 u32 changed) 5399 { 5400 struct ath10k *ar = hw->priv; 5401 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5402 int ret = 0; 5403 u32 vdev_param, pdev_param, slottime, preamble; 5404 5405 mutex_lock(&ar->conf_mutex); 5406 5407 if (changed & BSS_CHANGED_IBSS) 5408 ath10k_control_ibss(arvif, info, vif->addr); 5409 5410 if (changed & BSS_CHANGED_BEACON_INT) { 5411 arvif->beacon_interval = info->beacon_int; 5412 vdev_param = ar->wmi.vdev_param->beacon_interval; 5413 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5414 arvif->beacon_interval); 5415 ath10k_dbg(ar, ATH10K_DBG_MAC, 5416 "mac vdev %d beacon_interval %d\n", 5417 arvif->vdev_id, arvif->beacon_interval); 5418 5419 if (ret) 5420 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", 5421 arvif->vdev_id, ret); 5422 } 5423 5424 if (changed & BSS_CHANGED_BEACON) { 5425 ath10k_dbg(ar, ATH10K_DBG_MAC, 5426 "vdev %d set beacon tx mode to staggered\n", 5427 arvif->vdev_id); 5428 5429 pdev_param = ar->wmi.pdev_param->beacon_tx_mode; 5430 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 5431 WMI_BEACON_STAGGERED_MODE); 5432 if (ret) 5433 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", 
5434 arvif->vdev_id, ret); 5435 5436 ret = ath10k_mac_setup_bcn_tmpl(arvif); 5437 if (ret) 5438 ath10k_warn(ar, "failed to update beacon template: %d\n", 5439 ret); 5440 5441 if (ieee80211_vif_is_mesh(vif)) { 5442 /* mesh doesn't use SSID but firmware needs it */ 5443 strncpy(arvif->u.ap.ssid, "mesh", 5444 sizeof(arvif->u.ap.ssid)); 5445 arvif->u.ap.ssid_len = 4; 5446 } 5447 } 5448 5449 if (changed & BSS_CHANGED_AP_PROBE_RESP) { 5450 ret = ath10k_mac_setup_prb_tmpl(arvif); 5451 if (ret) 5452 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", 5453 arvif->vdev_id, ret); 5454 } 5455 5456 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { 5457 arvif->dtim_period = info->dtim_period; 5458 5459 ath10k_dbg(ar, ATH10K_DBG_MAC, 5460 "mac vdev %d dtim_period %d\n", 5461 arvif->vdev_id, arvif->dtim_period); 5462 5463 vdev_param = ar->wmi.vdev_param->dtim_period; 5464 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5465 arvif->dtim_period); 5466 if (ret) 5467 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", 5468 arvif->vdev_id, ret); 5469 } 5470 5471 if (changed & BSS_CHANGED_SSID && 5472 vif->type == NL80211_IFTYPE_AP) { 5473 arvif->u.ap.ssid_len = info->ssid_len; 5474 if (info->ssid_len) 5475 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); 5476 arvif->u.ap.hidden_ssid = info->hidden_ssid; 5477 } 5478 5479 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) 5480 ether_addr_copy(arvif->bssid, info->bssid); 5481 5482 if (changed & BSS_CHANGED_BEACON_ENABLED) 5483 ath10k_control_beaconing(arvif, info); 5484 5485 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 5486 arvif->use_cts_prot = info->use_cts_prot; 5487 5488 ret = ath10k_recalc_rtscts_prot(arvif); 5489 if (ret) 5490 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 5491 arvif->vdev_id, ret); 5492 5493 if (ath10k_mac_can_set_cts_prot(arvif)) { 5494 ret = ath10k_mac_set_cts_prot(arvif); 5495 if (ret) 5496 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 5497 arvif->vdev_id, ret); 5498 } 5499 } 5500 5501 if (changed & BSS_CHANGED_ERP_SLOT) { 5502 if (info->use_short_slot) 5503 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ 5504 5505 else 5506 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ 5507 5508 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", 5509 arvif->vdev_id, slottime); 5510 5511 vdev_param = ar->wmi.vdev_param->slot_time; 5512 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5513 slottime); 5514 if (ret) 5515 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", 5516 arvif->vdev_id, ret); 5517 } 5518 5519 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 5520 if (info->use_short_preamble) 5521 preamble = WMI_VDEV_PREAMBLE_SHORT; 5522 else 5523 preamble = WMI_VDEV_PREAMBLE_LONG; 5524 5525 ath10k_dbg(ar, ATH10K_DBG_MAC, 5526 "mac vdev %d preamble %d\n", 5527 arvif->vdev_id, preamble); 5528 5529 vdev_param = ar->wmi.vdev_param->preamble; 5530 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5531 preamble); 5532 if (ret) 5533 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", 5534 arvif->vdev_id, ret); 5535 } 5536 5537 if (changed & BSS_CHANGED_ASSOC) { 5538 if (info->assoc) { 5539 /* Workaround: Make sure monitor vdev is not running 5540 * when associating to prevent some firmware revisions 5541 * (e.g. 10.1 and 10.2) from crashing.
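 * ath10k_monitor_recalc() below restarts the monitor vdev afterwards if it is still needed.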
5542 */ 5543 if (ar->monitor_started) 5544 ath10k_monitor_stop(ar); 5545 ath10k_bss_assoc(hw, vif, info); 5546 ath10k_monitor_recalc(ar); 5547 } else { 5548 ath10k_bss_disassoc(hw, vif); 5549 } 5550 } 5551 5552 if (changed & BSS_CHANGED_TXPOWER) { 5553 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", 5554 arvif->vdev_id, info->txpower); 5555 5556 arvif->txpower = info->txpower; 5557 ret = ath10k_mac_txpower_recalc(ar); 5558 if (ret) 5559 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5560 } 5561 5562 if (changed & BSS_CHANGED_PS) { 5563 arvif->ps = vif->bss_conf.ps; 5564 5565 ret = ath10k_config_ps(ar); 5566 if (ret) 5567 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", 5568 arvif->vdev_id, ret); 5569 } 5570 5571 mutex_unlock(&ar->conf_mutex); 5572 } 5573 5574 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value) 5575 { 5576 struct ath10k *ar = hw->priv; 5577 5578 /* This function should never be called if setting the coverage class 5579 * is not supported on this hardware. 5580 */ 5581 if (!ar->hw_params.hw_ops->set_coverage_class) { 5582 WARN_ON_ONCE(1); 5583 return; 5584 } 5585 ar->hw_params.hw_ops->set_coverage_class(ar, value); 5586 } 5587 5588 struct ath10k_mac_tdls_iter_data { 5589 u32 num_tdls_stations; 5590 struct ieee80211_vif *curr_vif; 5591 }; 5592 5593 static void ath10k_mac_tdls_vif_stations_count_iter(void *data, 5594 struct ieee80211_sta *sta) 5595 { 5596 struct ath10k_mac_tdls_iter_data *iter_data = data; 5597 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5598 struct ieee80211_vif *sta_vif = arsta->arvif->vif; 5599 5600 if (sta->tdls && sta_vif == iter_data->curr_vif) 5601 iter_data->num_tdls_stations++; 5602 } 5603 5604 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, 5605 struct ieee80211_vif *vif) 5606 { 5607 struct ath10k_mac_tdls_iter_data data = {}; 5608 5609 data.curr_vif = vif; 5610 5611 ieee80211_iterate_stations_atomic(hw, 5612 ath10k_mac_tdls_vif_stations_count_iter, 5613 &data); 5614 return data.num_tdls_stations; 5615 } 5616 5617 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac, 5618 struct ieee80211_vif *vif) 5619 { 5620 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5621 int *num_tdls_vifs = data; 5622 5623 if (vif->type != NL80211_IFTYPE_STATION) 5624 return; 5625 5626 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0) 5627 (*num_tdls_vifs)++; 5628 } 5629 5630 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw) 5631 { 5632 int num_tdls_vifs = 0; 5633 5634 ieee80211_iterate_active_interfaces_atomic(hw, 5635 IEEE80211_IFACE_ITER_NORMAL, 5636 ath10k_mac_tdls_vifs_count_iter, 5637 &num_tdls_vifs); 5638 return num_tdls_vifs; 5639 } 5640 5641 static int ath10k_hw_scan(struct ieee80211_hw *hw, 5642 struct ieee80211_vif *vif, 5643 struct ieee80211_scan_request *hw_req) 5644 { 5645 struct ath10k *ar = hw->priv; 5646 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5647 struct cfg80211_scan_request *req = &hw_req->req; 5648 struct wmi_start_scan_arg arg; 5649 int ret = 0; 5650 int i; 5651 5652 mutex_lock(&ar->conf_mutex); 5653 5654 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 5655 ret = -EBUSY; 5656 goto exit; 5657 } 5658 5659 spin_lock_bh(&ar->data_lock); 5660 switch (ar->scan.state) { 5661 case ATH10K_SCAN_IDLE: 5662 reinit_completion(&ar->scan.started); 5663 reinit_completion(&ar->scan.completed); 5664 ar->scan.state = ATH10K_SCAN_STARTING; 5665 ar->scan.is_roc = false; 5666 ar->scan.vdev_id = arvif->vdev_id; 5667 ret 
= 0; 5668 break; 5669 case ATH10K_SCAN_STARTING: 5670 case ATH10K_SCAN_RUNNING: 5671 case ATH10K_SCAN_ABORTING: 5672 ret = -EBUSY; 5673 break; 5674 } 5675 spin_unlock_bh(&ar->data_lock); 5676 5677 if (ret) 5678 goto exit; 5679 5680 memset(&arg, 0, sizeof(arg)); 5681 ath10k_wmi_start_scan_init(ar, &arg); 5682 arg.vdev_id = arvif->vdev_id; 5683 arg.scan_id = ATH10K_SCAN_ID; 5684 5685 if (req->ie_len) { 5686 arg.ie_len = req->ie_len; 5687 memcpy(arg.ie, req->ie, arg.ie_len); 5688 } 5689 5690 if (req->n_ssids) { 5691 arg.n_ssids = req->n_ssids; 5692 for (i = 0; i < arg.n_ssids; i++) { 5693 arg.ssids[i].len = req->ssids[i].ssid_len; 5694 arg.ssids[i].ssid = req->ssids[i].ssid; 5695 } 5696 } else { 5697 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 5698 } 5699 5700 if (req->n_channels) { 5701 arg.n_channels = req->n_channels; 5702 for (i = 0; i < arg.n_channels; i++) 5703 arg.channels[i] = req->channels[i]->center_freq; 5704 } 5705 5706 ret = ath10k_start_scan(ar, &arg); 5707 if (ret) { 5708 ath10k_warn(ar, "failed to start hw scan: %d\n", ret); 5709 spin_lock_bh(&ar->data_lock); 5710 ar->scan.state = ATH10K_SCAN_IDLE; 5711 spin_unlock_bh(&ar->data_lock); 5712 } 5713 5714 /* Add a 200ms margin to account for event/command processing */ 5715 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 5716 msecs_to_jiffies(arg.max_scan_time + 5717 200)); 5718 5719 exit: 5720 mutex_unlock(&ar->conf_mutex); 5721 return ret; 5722 } 5723 5724 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, 5725 struct ieee80211_vif *vif) 5726 { 5727 struct ath10k *ar = hw->priv; 5728 5729 mutex_lock(&ar->conf_mutex); 5730 ath10k_scan_abort(ar); 5731 mutex_unlock(&ar->conf_mutex); 5732 5733 cancel_delayed_work_sync(&ar->scan.timeout); 5734 } 5735 5736 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, 5737 struct ath10k_vif *arvif, 5738 enum set_key_cmd cmd, 5739 struct ieee80211_key_conf *key) 5740 { 5741 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; 5742 int ret; 5743 5744 /* 10.1 firmware branch requires default key index to be set to group 5745 * key index after installing it. Otherwise FW/HW Txes corrupted 5746 * frames with multi-vif APs. This is not required for main firmware 5747 * branch (e.g. 636). 5748 * 5749 * This is also needed for 636 fw for IBSS-RSN to work more reliably. 5750 * 5751 * FIXME: It remains unknown if this is required for multi-vif STA 5752 * interfaces on 10.1. 
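 * The default key index is therefore only updated for AP/IBSS vdevs, for non-WEP group keys and only on SET_KEY (see the early returns below).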
5753 */ 5754 5755 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 5756 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 5757 return; 5758 5759 if (key->cipher == WLAN_CIPHER_SUITE_WEP40) 5760 return; 5761 5762 if (key->cipher == WLAN_CIPHER_SUITE_WEP104) 5763 return; 5764 5765 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5766 return; 5767 5768 if (cmd != SET_KEY) 5769 return; 5770 5771 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5772 key->keyidx); 5773 if (ret) 5774 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", 5775 arvif->vdev_id, ret); 5776 } 5777 5778 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 5779 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 5780 struct ieee80211_key_conf *key) 5781 { 5782 struct ath10k *ar = hw->priv; 5783 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5784 struct ath10k_peer *peer; 5785 const u8 *peer_addr; 5786 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || 5787 key->cipher == WLAN_CIPHER_SUITE_WEP104; 5788 int ret = 0; 5789 int ret2; 5790 u32 flags = 0; 5791 u32 flags2; 5792 5793 /* this one needs to be done in software */ 5794 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 5795 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 5796 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || 5797 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) 5798 return 1; 5799 5800 if (arvif->nohwcrypt) 5801 return 1; 5802 5803 if (key->keyidx > WMI_MAX_KEY_INDEX) 5804 return -ENOSPC; 5805 5806 mutex_lock(&ar->conf_mutex); 5807 5808 if (sta) 5809 peer_addr = sta->addr; 5810 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 5811 peer_addr = vif->bss_conf.bssid; 5812 else 5813 peer_addr = vif->addr; 5814 5815 key->hw_key_idx = key->keyidx; 5816 5817 if (is_wep) { 5818 if (cmd == SET_KEY) 5819 arvif->wep_keys[key->keyidx] = key; 5820 else 5821 arvif->wep_keys[key->keyidx] = NULL; 5822 } 5823 5824 /* the peer should not disappear in mid-way (unless FW goes awry) since 5825 * we already hold conf_mutex. we just make sure its there now. 5826 */ 5827 spin_lock_bh(&ar->data_lock); 5828 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5829 spin_unlock_bh(&ar->data_lock); 5830 5831 if (!peer) { 5832 if (cmd == SET_KEY) { 5833 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", 5834 peer_addr); 5835 ret = -EOPNOTSUPP; 5836 goto exit; 5837 } else { 5838 /* if the peer doesn't exist there is no key to disable anymore */ 5839 goto exit; 5840 } 5841 } 5842 5843 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5844 flags |= WMI_KEY_PAIRWISE; 5845 else 5846 flags |= WMI_KEY_GROUP; 5847 5848 if (is_wep) { 5849 if (cmd == DISABLE_KEY) 5850 ath10k_clear_vdev_key(arvif, key); 5851 5852 /* When WEP keys are uploaded it's possible that there are 5853 * stations associated already (e.g. when merging) without any 5854 * keys. Static WEP needs an explicit per-peer key upload. 5855 */ 5856 if (vif->type == NL80211_IFTYPE_ADHOC && 5857 cmd == SET_KEY) 5858 ath10k_mac_vif_update_wep_key(arvif, key); 5859 5860 /* 802.1x never sets the def_wep_key_idx so each set_key() 5861 * call changes default tx key. 5862 * 5863 * Static WEP sets def_wep_key_idx via .set_default_unicast_key 5864 * after first set_key(). 
5865 */ 5866 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1) 5867 flags |= WMI_KEY_TX_USAGE; 5868 } 5869 5870 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); 5871 if (ret) { 5872 WARN_ON(ret > 0); 5873 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", 5874 arvif->vdev_id, peer_addr, ret); 5875 goto exit; 5876 } 5877 5878 /* mac80211 sets static WEP keys as groupwise while firmware requires 5879 * them to be installed twice as both pairwise and groupwise. 5880 */ 5881 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) { 5882 flags2 = flags; 5883 flags2 &= ~WMI_KEY_GROUP; 5884 flags2 |= WMI_KEY_PAIRWISE; 5885 5886 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); 5887 if (ret) { 5888 WARN_ON(ret > 0); 5889 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", 5890 arvif->vdev_id, peer_addr, ret); 5891 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, 5892 peer_addr, flags); 5893 if (ret2) { 5894 WARN_ON(ret2 > 0); 5895 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", 5896 arvif->vdev_id, peer_addr, ret2); 5897 } 5898 goto exit; 5899 } 5900 } 5901 5902 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); 5903 5904 spin_lock_bh(&ar->data_lock); 5905 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5906 if (peer && cmd == SET_KEY) 5907 peer->keys[key->keyidx] = key; 5908 else if (peer && cmd == DISABLE_KEY) 5909 peer->keys[key->keyidx] = NULL; 5910 else if (peer == NULL) 5911 /* impossible unless FW goes crazy */ 5912 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); 5913 spin_unlock_bh(&ar->data_lock); 5914 5915 exit: 5916 mutex_unlock(&ar->conf_mutex); 5917 return ret; 5918 } 5919 5920 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, 5921 struct ieee80211_vif *vif, 5922 int keyidx) 5923 { 5924 struct ath10k *ar = hw->priv; 5925 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5926 int ret; 5927 5928 mutex_lock(&arvif->ar->conf_mutex); 5929 5930 if (arvif->ar->state != ATH10K_STATE_ON) 5931 goto unlock; 5932 5933 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 5934 arvif->vdev_id, keyidx); 5935 5936 ret = ath10k_wmi_vdev_set_param(arvif->ar, 5937 arvif->vdev_id, 5938 arvif->ar->wmi.vdev_param->def_keyid, 5939 keyidx); 5940 5941 if (ret) { 5942 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", 5943 arvif->vdev_id, 5944 ret); 5945 goto unlock; 5946 } 5947 5948 arvif->def_wep_key_idx = keyidx; 5949 5950 unlock: 5951 mutex_unlock(&arvif->ar->conf_mutex); 5952 } 5953 5954 static void ath10k_sta_rc_update_wk(struct work_struct *wk) 5955 { 5956 struct ath10k *ar; 5957 struct ath10k_vif *arvif; 5958 struct ath10k_sta *arsta; 5959 struct ieee80211_sta *sta; 5960 struct cfg80211_chan_def def; 5961 enum nl80211_band band; 5962 const u8 *ht_mcs_mask; 5963 const u16 *vht_mcs_mask; 5964 u32 changed, bw, nss, smps; 5965 int err; 5966 5967 arsta = container_of(wk, struct ath10k_sta, update_wk); 5968 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 5969 arvif = arsta->arvif; 5970 ar = arvif->ar; 5971 5972 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 5973 return; 5974 5975 band = def.chan->band; 5976 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 5977 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 5978 5979 spin_lock_bh(&ar->data_lock); 5980 5981 changed = arsta->changed; 5982 arsta->changed = 0; 5983 5984 bw = arsta->bw; 5985 nss = arsta->nss; 5986 smps = arsta->smps; 5987 5988 
spin_unlock_bh(&ar->data_lock); 5989 5990 mutex_lock(&ar->conf_mutex); 5991 5992 nss = max_t(u32, 1, nss); 5993 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask), 5994 ath10k_mac_max_vht_nss(vht_mcs_mask))); 5995 5996 if (changed & IEEE80211_RC_BW_CHANGED) { 5997 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", 5998 sta->addr, bw); 5999 6000 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6001 WMI_PEER_CHAN_WIDTH, bw); 6002 if (err) 6003 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", 6004 sta->addr, bw, err); 6005 } 6006 6007 if (changed & IEEE80211_RC_NSS_CHANGED) { 6008 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", 6009 sta->addr, nss); 6010 6011 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6012 WMI_PEER_NSS, nss); 6013 if (err) 6014 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", 6015 sta->addr, nss, err); 6016 } 6017 6018 if (changed & IEEE80211_RC_SMPS_CHANGED) { 6019 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", 6020 sta->addr, smps); 6021 6022 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6023 WMI_PEER_SMPS_STATE, smps); 6024 if (err) 6025 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", 6026 sta->addr, smps, err); 6027 } 6028 6029 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED || 6030 changed & IEEE80211_RC_NSS_CHANGED) { 6031 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n", 6032 sta->addr); 6033 6034 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 6035 if (err) 6036 ath10k_warn(ar, "failed to reassociate station: %pM\n", 6037 sta->addr); 6038 } 6039 6040 mutex_unlock(&ar->conf_mutex); 6041 } 6042 6043 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif, 6044 struct ieee80211_sta *sta) 6045 { 6046 struct ath10k *ar = arvif->ar; 6047 6048 lockdep_assert_held(&ar->conf_mutex); 6049 6050 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 6051 return 0; 6052 6053 if (ar->num_stations >= ar->max_num_stations) 6054 return -ENOBUFS; 6055 6056 ar->num_stations++; 6057 6058 return 0; 6059 } 6060 6061 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif, 6062 struct ieee80211_sta *sta) 6063 { 6064 struct ath10k *ar = arvif->ar; 6065 6066 lockdep_assert_held(&ar->conf_mutex); 6067 6068 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 6069 return; 6070 6071 ar->num_stations--; 6072 } 6073 6074 static int ath10k_sta_state(struct ieee80211_hw *hw, 6075 struct ieee80211_vif *vif, 6076 struct ieee80211_sta *sta, 6077 enum ieee80211_sta_state old_state, 6078 enum ieee80211_sta_state new_state) 6079 { 6080 struct ath10k *ar = hw->priv; 6081 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6082 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6083 struct ath10k_peer *peer; 6084 int ret = 0; 6085 int i; 6086 6087 if (old_state == IEEE80211_STA_NOTEXIST && 6088 new_state == IEEE80211_STA_NONE) { 6089 memset(arsta, 0, sizeof(*arsta)); 6090 arsta->arvif = arvif; 6091 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); 6092 6093 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6094 ath10k_mac_txq_init(sta->txq[i]); 6095 } 6096 6097 /* cancel must be done outside the mutex to avoid deadlock */ 6098 if ((old_state == IEEE80211_STA_NONE && 6099 new_state == IEEE80211_STA_NOTEXIST)) 6100 cancel_work_sync(&arsta->update_wk); 6101 6102 mutex_lock(&ar->conf_mutex); 6103 6104 if (old_state == IEEE80211_STA_NOTEXIST && 6105 new_state == IEEE80211_STA_NONE) { 6106 /* 6107 * New station addition. 
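 * A firmware peer is created for the station here (plus TDLS state where applicable); the error paths below undo whatever has been set up.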
6108 */ 6109 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT; 6110 u32 num_tdls_stations; 6111 u32 num_tdls_vifs; 6112 6113 ath10k_dbg(ar, ATH10K_DBG_MAC, 6114 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", 6115 arvif->vdev_id, sta->addr, 6116 ar->num_stations + 1, ar->max_num_stations, 6117 ar->num_peers + 1, ar->max_num_peers); 6118 6119 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); 6120 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw); 6121 6122 if (sta->tdls) { 6123 if (num_tdls_stations >= ar->max_num_tdls_vdevs) { 6124 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n", 6125 arvif->vdev_id, 6126 ar->max_num_tdls_vdevs); 6127 ret = -ELNRNG; 6128 goto exit; 6129 } 6130 peer_type = WMI_PEER_TYPE_TDLS; 6131 } 6132 6133 ret = ath10k_mac_inc_num_stations(arvif, sta); 6134 if (ret) { 6135 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", 6136 ar->max_num_stations); 6137 goto exit; 6138 } 6139 6140 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, 6141 sta->addr, peer_type); 6142 if (ret) { 6143 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", 6144 sta->addr, arvif->vdev_id, ret); 6145 ath10k_mac_dec_num_stations(arvif, sta); 6146 goto exit; 6147 } 6148 6149 spin_lock_bh(&ar->data_lock); 6150 6151 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 6152 if (!peer) { 6153 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 6154 vif->addr, arvif->vdev_id); 6155 spin_unlock_bh(&ar->data_lock); 6156 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6157 ath10k_mac_dec_num_stations(arvif, sta); 6158 ret = -ENOENT; 6159 goto exit; 6160 } 6161 6162 arsta->peer_id = find_first_bit(peer->peer_ids, 6163 ATH10K_MAX_NUM_PEER_IDS); 6164 6165 spin_unlock_bh(&ar->data_lock); 6166 6167 if (!sta->tdls) 6168 goto exit; 6169 6170 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6171 WMI_TDLS_ENABLE_ACTIVE); 6172 if (ret) { 6173 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6174 arvif->vdev_id, ret); 6175 ath10k_peer_delete(ar, arvif->vdev_id, 6176 sta->addr); 6177 ath10k_mac_dec_num_stations(arvif, sta); 6178 goto exit; 6179 } 6180 6181 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6182 WMI_TDLS_PEER_STATE_PEERING); 6183 if (ret) { 6184 ath10k_warn(ar, 6185 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n", 6186 sta->addr, arvif->vdev_id, ret); 6187 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6188 ath10k_mac_dec_num_stations(arvif, sta); 6189 6190 if (num_tdls_stations != 0) 6191 goto exit; 6192 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6193 WMI_TDLS_DISABLE); 6194 } 6195 } else if ((old_state == IEEE80211_STA_NONE && 6196 new_state == IEEE80211_STA_NOTEXIST)) { 6197 /* 6198 * Existing station deletion. 
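* The firmware peer is deleted below, any stale peer_map entries still pointing at this station are cleaned up, and if this was the last TDLS peer on the vif then TDLS is disabled for the vdev.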
6199 */ 6200 ath10k_dbg(ar, ATH10K_DBG_MAC, 6201 "mac vdev %d peer delete %pM sta %pK (sta gone)\n", 6202 arvif->vdev_id, sta->addr, sta); 6203 6204 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6205 if (ret) 6206 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", 6207 sta->addr, arvif->vdev_id, ret); 6208 6209 ath10k_mac_dec_num_stations(arvif, sta); 6210 6211 spin_lock_bh(&ar->data_lock); 6212 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 6213 peer = ar->peer_map[i]; 6214 if (!peer) 6215 continue; 6216 6217 if (peer->sta == sta) { 6218 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n", 6219 sta->addr, peer, i, arvif->vdev_id); 6220 peer->sta = NULL; 6221 6222 /* Clean up the peer object as well since we 6223 * must have failed to do this above. 6224 */ 6225 list_del(&peer->list); 6226 ar->peer_map[i] = NULL; 6227 kfree(peer); 6228 ar->num_peers--; 6229 } 6230 } 6231 spin_unlock_bh(&ar->data_lock); 6232 6233 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6234 ath10k_mac_txq_unref(ar, sta->txq[i]); 6235 6236 if (!sta->tdls) 6237 goto exit; 6238 6239 if (ath10k_mac_tdls_vif_stations_count(hw, vif)) 6240 goto exit; 6241 6242 /* This was the last tdls peer in current vif */ 6243 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6244 WMI_TDLS_DISABLE); 6245 if (ret) { 6246 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6247 arvif->vdev_id, ret); 6248 } 6249 } else if (old_state == IEEE80211_STA_AUTH && 6250 new_state == IEEE80211_STA_ASSOC && 6251 (vif->type == NL80211_IFTYPE_AP || 6252 vif->type == NL80211_IFTYPE_MESH_POINT || 6253 vif->type == NL80211_IFTYPE_ADHOC)) { 6254 /* 6255 * New association. 6256 */ 6257 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", 6258 sta->addr); 6259 6260 ret = ath10k_station_assoc(ar, vif, sta, false); 6261 if (ret) 6262 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", 6263 sta->addr, arvif->vdev_id, ret); 6264 } else if (old_state == IEEE80211_STA_ASSOC && 6265 new_state == IEEE80211_STA_AUTHORIZED && 6266 sta->tdls) { 6267 /* 6268 * Tdls station authorized. 6269 */ 6270 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n", 6271 sta->addr); 6272 6273 ret = ath10k_station_assoc(ar, vif, sta, false); 6274 if (ret) { 6275 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n", 6276 sta->addr, arvif->vdev_id, ret); 6277 goto exit; 6278 } 6279 6280 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6281 WMI_TDLS_PEER_STATE_CONNECTED); 6282 if (ret) 6283 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n", 6284 sta->addr, arvif->vdev_id, ret); 6285 } else if (old_state == IEEE80211_STA_ASSOC && 6286 new_state == IEEE80211_STA_AUTH && 6287 (vif->type == NL80211_IFTYPE_AP || 6288 vif->type == NL80211_IFTYPE_MESH_POINT || 6289 vif->type == NL80211_IFTYPE_ADHOC)) { 6290 /* 6291 * Disassociation. 
6292 */ 6293 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", 6294 sta->addr); 6295 6296 ret = ath10k_station_disassoc(ar, vif, sta); 6297 if (ret) 6298 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", 6299 sta->addr, arvif->vdev_id, ret); 6300 } 6301 exit: 6302 mutex_unlock(&ar->conf_mutex); 6303 return ret; 6304 } 6305 6306 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, 6307 u16 ac, bool enable) 6308 { 6309 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6310 struct wmi_sta_uapsd_auto_trig_arg arg = {}; 6311 u32 prio = 0, acc = 0; 6312 u32 value = 0; 6313 int ret = 0; 6314 6315 lockdep_assert_held(&ar->conf_mutex); 6316 6317 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 6318 return 0; 6319 6320 switch (ac) { 6321 case IEEE80211_AC_VO: 6322 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 6323 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 6324 prio = 7; 6325 acc = 3; 6326 break; 6327 case IEEE80211_AC_VI: 6328 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 6329 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 6330 prio = 5; 6331 acc = 2; 6332 break; 6333 case IEEE80211_AC_BE: 6334 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 6335 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 6336 prio = 2; 6337 acc = 1; 6338 break; 6339 case IEEE80211_AC_BK: 6340 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 6341 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 6342 prio = 0; 6343 acc = 0; 6344 break; 6345 } 6346 6347 if (enable) 6348 arvif->u.sta.uapsd |= value; 6349 else 6350 arvif->u.sta.uapsd &= ~value; 6351 6352 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6353 WMI_STA_PS_PARAM_UAPSD, 6354 arvif->u.sta.uapsd); 6355 if (ret) { 6356 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); 6357 goto exit; 6358 } 6359 6360 if (arvif->u.sta.uapsd) 6361 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 6362 else 6363 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 6364 6365 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6366 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 6367 value); 6368 if (ret) 6369 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); 6370 6371 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 6372 if (ret) { 6373 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 6374 arvif->vdev_id, ret); 6375 return ret; 6376 } 6377 6378 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 6379 if (ret) { 6380 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 6381 arvif->vdev_id, ret); 6382 return ret; 6383 } 6384 6385 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || 6386 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { 6387 /* Only userspace can make an educated decision when to send 6388 * trigger frame. The following effectively disables u-UAPSD 6389 * autotrigger in firmware (which is enabled by default 6390 * provided the autotrigger service is available). 
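* The auto-trigger arg filled in below (zero service interval with maximum suspend and delay intervals) is what achieves this.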
6391 */ 6392 6393 arg.wmm_ac = acc; 6394 arg.user_priority = prio; 6395 arg.service_interval = 0; 6396 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6397 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6398 6399 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, 6400 arvif->bssid, &arg, 1); 6401 if (ret) { 6402 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", 6403 ret); 6404 return ret; 6405 } 6406 } 6407 6408 exit: 6409 return ret; 6410 } 6411 6412 static int ath10k_conf_tx(struct ieee80211_hw *hw, 6413 struct ieee80211_vif *vif, u16 ac, 6414 const struct ieee80211_tx_queue_params *params) 6415 { 6416 struct ath10k *ar = hw->priv; 6417 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6418 struct wmi_wmm_params_arg *p = NULL; 6419 int ret; 6420 6421 mutex_lock(&ar->conf_mutex); 6422 6423 switch (ac) { 6424 case IEEE80211_AC_VO: 6425 p = &arvif->wmm_params.ac_vo; 6426 break; 6427 case IEEE80211_AC_VI: 6428 p = &arvif->wmm_params.ac_vi; 6429 break; 6430 case IEEE80211_AC_BE: 6431 p = &arvif->wmm_params.ac_be; 6432 break; 6433 case IEEE80211_AC_BK: 6434 p = &arvif->wmm_params.ac_bk; 6435 break; 6436 } 6437 6438 if (WARN_ON(!p)) { 6439 ret = -EINVAL; 6440 goto exit; 6441 } 6442 6443 p->cwmin = params->cw_min; 6444 p->cwmax = params->cw_max; 6445 p->aifs = params->aifs; 6446 6447 /* 6448 * The channel time duration programmed in the HW is in absolute 6449 * microseconds, while mac80211 gives the txop in units of 6450 * 32 microseconds. 6451 */ 6452 p->txop = params->txop * 32; 6453 6454 if (ar->wmi.ops->gen_vdev_wmm_conf) { 6455 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, 6456 &arvif->wmm_params); 6457 if (ret) { 6458 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", 6459 arvif->vdev_id, ret); 6460 goto exit; 6461 } 6462 } else { 6463 /* This won't work well with multi-interface cases but it's 6464 * better than nothing. 
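* (the pdev-wide WMM parameters set below apply to the whole radio rather than to a single vdev)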
6465 */ 6466 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); 6467 if (ret) { 6468 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); 6469 goto exit; 6470 } 6471 } 6472 6473 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 6474 if (ret) 6475 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); 6476 6477 exit: 6478 mutex_unlock(&ar->conf_mutex); 6479 return ret; 6480 } 6481 6482 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ) 6483 6484 static int ath10k_remain_on_channel(struct ieee80211_hw *hw, 6485 struct ieee80211_vif *vif, 6486 struct ieee80211_channel *chan, 6487 int duration, 6488 enum ieee80211_roc_type type) 6489 { 6490 struct ath10k *ar = hw->priv; 6491 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6492 struct wmi_start_scan_arg arg; 6493 int ret = 0; 6494 u32 scan_time_msec; 6495 6496 mutex_lock(&ar->conf_mutex); 6497 6498 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 6499 ret = -EBUSY; 6500 goto exit; 6501 } 6502 6503 spin_lock_bh(&ar->data_lock); 6504 switch (ar->scan.state) { 6505 case ATH10K_SCAN_IDLE: 6506 reinit_completion(&ar->scan.started); 6507 reinit_completion(&ar->scan.completed); 6508 reinit_completion(&ar->scan.on_channel); 6509 ar->scan.state = ATH10K_SCAN_STARTING; 6510 ar->scan.is_roc = true; 6511 ar->scan.vdev_id = arvif->vdev_id; 6512 ar->scan.roc_freq = chan->center_freq; 6513 ar->scan.roc_notify = true; 6514 ret = 0; 6515 break; 6516 case ATH10K_SCAN_STARTING: 6517 case ATH10K_SCAN_RUNNING: 6518 case ATH10K_SCAN_ABORTING: 6519 ret = -EBUSY; 6520 break; 6521 } 6522 spin_unlock_bh(&ar->data_lock); 6523 6524 if (ret) 6525 goto exit; 6526 6527 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; 6528 6529 memset(&arg, 0, sizeof(arg)); 6530 ath10k_wmi_start_scan_init(ar, &arg); 6531 arg.vdev_id = arvif->vdev_id; 6532 arg.scan_id = ATH10K_SCAN_ID; 6533 arg.n_channels = 1; 6534 arg.channels[0] = chan->center_freq; 6535 arg.dwell_time_active = scan_time_msec; 6536 arg.dwell_time_passive = scan_time_msec; 6537 arg.max_scan_time = scan_time_msec; 6538 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 6539 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; 6540 arg.burst_duration_ms = duration; 6541 6542 ret = ath10k_start_scan(ar, &arg); 6543 if (ret) { 6544 ath10k_warn(ar, "failed to start roc scan: %d\n", ret); 6545 spin_lock_bh(&ar->data_lock); 6546 ar->scan.state = ATH10K_SCAN_IDLE; 6547 spin_unlock_bh(&ar->data_lock); 6548 goto exit; 6549 } 6550 6551 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); 6552 if (ret == 0) { 6553 ath10k_warn(ar, "failed to switch to channel for roc scan\n"); 6554 6555 ret = ath10k_scan_stop(ar); 6556 if (ret) 6557 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 6558 6559 ret = -ETIMEDOUT; 6560 goto exit; 6561 } 6562 6563 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 6564 msecs_to_jiffies(duration)); 6565 6566 ret = 0; 6567 exit: 6568 mutex_unlock(&ar->conf_mutex); 6569 return ret; 6570 } 6571 6572 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) 6573 { 6574 struct ath10k *ar = hw->priv; 6575 6576 mutex_lock(&ar->conf_mutex); 6577 6578 spin_lock_bh(&ar->data_lock); 6579 ar->scan.roc_notify = false; 6580 spin_unlock_bh(&ar->data_lock); 6581 6582 ath10k_scan_abort(ar); 6583 6584 mutex_unlock(&ar->conf_mutex); 6585 6586 cancel_delayed_work_sync(&ar->scan.timeout); 6587 6588 return 0; 6589 } 6590 6591 /* 6592 * Both RTS and Fragmentation threshold are interface-specific 6593 * in ath10k, but device-specific in mac80211. 
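* The callbacks below therefore apply the RTS threshold to every vdev and simply report fragmentation as unsupported.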
6594 */ 6595 6596 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 6597 { 6598 struct ath10k *ar = hw->priv; 6599 struct ath10k_vif *arvif; 6600 int ret = 0; 6601 6602 mutex_lock(&ar->conf_mutex); 6603 list_for_each_entry(arvif, &ar->arvifs, list) { 6604 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", 6605 arvif->vdev_id, value); 6606 6607 ret = ath10k_mac_set_rts(arvif, value); 6608 if (ret) { 6609 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 6610 arvif->vdev_id, ret); 6611 break; 6612 } 6613 } 6614 mutex_unlock(&ar->conf_mutex); 6615 6616 return ret; 6617 } 6618 6619 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) 6620 { 6621 /* Even though there's a WMI enum for fragmentation threshold no known 6622 * firmware actually implements it. Moreover it is not possible to leave 6623 * frame fragmentation to mac80211 because firmware clears the "more 6624 * fragments" bit in frame control making it impossible for remote 6625 * devices to reassemble frames. 6626 * 6627 * Hence implement a dummy callback just to say fragmentation isn't 6628 * supported. This effectively prevents mac80211 from doing frame 6629 * fragmentation in software. 6630 */ 6631 return -EOPNOTSUPP; 6632 } 6633 6634 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 6635 u32 queues, bool drop) 6636 { 6637 struct ath10k *ar = hw->priv; 6638 bool skip; 6639 long time_left; 6640 6641 /* mac80211 doesn't care if we really xmit queued frames or not; 6642 * we'll collect those frames either way if we stop/delete vdevs 6643 */ 6644 if (drop) 6645 return; 6646 6647 mutex_lock(&ar->conf_mutex); 6648 6649 if (ar->state == ATH10K_STATE_WEDGED) 6650 goto skip; 6651 6652 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({ 6653 bool empty; 6654 6655 spin_lock_bh(&ar->htt.tx_lock); 6656 empty = (ar->htt.num_pending_tx == 0); 6657 spin_unlock_bh(&ar->htt.tx_lock); 6658 6659 skip = (ar->state == ATH10K_STATE_WEDGED) || 6660 test_bit(ATH10K_FLAG_CRASH_FLUSH, 6661 &ar->dev_flags); 6662 6663 (empty || skip); 6664 }), ATH10K_FLUSH_TIMEOUT_HZ); 6665 6666 if (time_left == 0 || skip) 6667 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n", 6668 skip, ar->state, time_left); 6669 6670 skip: 6671 mutex_unlock(&ar->conf_mutex); 6672 } 6673 6674 /* TODO: Implement this function properly 6675 * For now it is needed to reply to Probe Requests in IBSS mode. 6676 * Probably we need this information from FW. 6677 */ 6678 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw) 6679 { 6680 return 1; 6681 } 6682 6683 static void ath10k_reconfig_complete(struct ieee80211_hw *hw, 6684 enum ieee80211_reconfig_type reconfig_type) 6685 { 6686 struct ath10k *ar = hw->priv; 6687 6688 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) 6689 return; 6690 6691 mutex_lock(&ar->conf_mutex); 6692 6693 /* If device failed to restart it will be in a different state, e.g.
6694 * ATH10K_STATE_WEDGED 6695 */ 6696 if (ar->state == ATH10K_STATE_RESTARTED) { 6697 ath10k_info(ar, "device successfully recovered\n"); 6698 ar->state = ATH10K_STATE_ON; 6699 ieee80211_wake_queues(ar->hw); 6700 } 6701 6702 mutex_unlock(&ar->conf_mutex); 6703 } 6704 6705 static void 6706 ath10k_mac_update_bss_chan_survey(struct ath10k *ar, 6707 struct ieee80211_channel *channel) 6708 { 6709 int ret; 6710 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; 6711 6712 lockdep_assert_held(&ar->conf_mutex); 6713 6714 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) || 6715 (ar->rx_channel != channel)) 6716 return; 6717 6718 if (ar->scan.state != ATH10K_SCAN_IDLE) { 6719 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n"); 6720 return; 6721 } 6722 6723 reinit_completion(&ar->bss_survey_done); 6724 6725 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type); 6726 if (ret) { 6727 ath10k_warn(ar, "failed to send pdev bss chan info request\n"); 6728 return; 6729 } 6730 6731 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); 6732 if (!ret) { 6733 ath10k_warn(ar, "bss channel survey timed out\n"); 6734 return; 6735 } 6736 } 6737 6738 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, 6739 struct survey_info *survey) 6740 { 6741 struct ath10k *ar = hw->priv; 6742 struct ieee80211_supported_band *sband; 6743 struct survey_info *ar_survey = &ar->survey[idx]; 6744 int ret = 0; 6745 6746 mutex_lock(&ar->conf_mutex); 6747 6748 sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; 6749 if (sband && idx >= sband->n_channels) { 6750 idx -= sband->n_channels; 6751 sband = NULL; 6752 } 6753 6754 if (!sband) 6755 sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; 6756 6757 if (!sband || idx >= sband->n_channels) { 6758 ret = -ENOENT; 6759 goto exit; 6760 } 6761 6762 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); 6763 6764 spin_lock_bh(&ar->data_lock); 6765 memcpy(survey, ar_survey, sizeof(*survey)); 6766 spin_unlock_bh(&ar->data_lock); 6767 6768 survey->channel = &sband->channels[idx]; 6769 6770 if (ar->rx_channel == survey->channel) 6771 survey->filled |= SURVEY_INFO_IN_USE; 6772 6773 exit: 6774 mutex_unlock(&ar->conf_mutex); 6775 return ret; 6776 } 6777 6778 static bool 6779 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, 6780 enum nl80211_band band, 6781 const struct cfg80211_bitrate_mask *mask) 6782 { 6783 int num_rates = 0; 6784 int i; 6785 6786 num_rates += hweight32(mask->control[band].legacy); 6787 6788 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 6789 num_rates += hweight8(mask->control[band].ht_mcs[i]); 6790 6791 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) 6792 num_rates += hweight16(mask->control[band].vht_mcs[i]); 6793 6794 return num_rates == 1; 6795 } 6796 6797 static bool 6798 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, 6799 enum nl80211_band band, 6800 const struct cfg80211_bitrate_mask *mask, 6801 int *nss) 6802 { 6803 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6804 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 6805 u8 ht_nss_mask = 0; 6806 u8 vht_nss_mask = 0; 6807 int i; 6808 6809 if (mask->control[band].legacy) 6810 return false; 6811 6812 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6813 if (mask->control[band].ht_mcs[i] == 0) 6814 continue; 6815 else if (mask->control[band].ht_mcs[i] == 6816 sband->ht_cap.mcs.rx_mask[i]) 6817 ht_nss_mask |= BIT(i); 6818 else 6819 return false; 6820 
} 6821 6822 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6823 if (mask->control[band].vht_mcs[i] == 0) 6824 continue; 6825 else if (mask->control[band].vht_mcs[i] == 6826 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 6827 vht_nss_mask |= BIT(i); 6828 else 6829 return false; 6830 } 6831 6832 if (ht_nss_mask != vht_nss_mask) 6833 return false; 6834 6835 if (ht_nss_mask == 0) 6836 return false; 6837 6838 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 6839 return false; 6840 6841 *nss = fls(ht_nss_mask); 6842 6843 return true; 6844 } 6845 6846 static int 6847 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, 6848 enum nl80211_band band, 6849 const struct cfg80211_bitrate_mask *mask, 6850 u8 *rate, u8 *nss) 6851 { 6852 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6853 int rate_idx; 6854 int i; 6855 u16 bitrate; 6856 u8 preamble; 6857 u8 hw_rate; 6858 6859 if (hweight32(mask->control[band].legacy) == 1) { 6860 rate_idx = ffs(mask->control[band].legacy) - 1; 6861 6862 hw_rate = sband->bitrates[rate_idx].hw_value; 6863 bitrate = sband->bitrates[rate_idx].bitrate; 6864 6865 if (ath10k_mac_bitrate_is_cck(bitrate)) 6866 preamble = WMI_RATE_PREAMBLE_CCK; 6867 else 6868 preamble = WMI_RATE_PREAMBLE_OFDM; 6869 6870 *nss = 1; 6871 *rate = preamble << 6 | 6872 (*nss - 1) << 4 | 6873 hw_rate << 0; 6874 6875 return 0; 6876 } 6877 6878 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6879 if (hweight8(mask->control[band].ht_mcs[i]) == 1) { 6880 *nss = i + 1; 6881 *rate = WMI_RATE_PREAMBLE_HT << 6 | 6882 (*nss - 1) << 4 | 6883 (ffs(mask->control[band].ht_mcs[i]) - 1); 6884 6885 return 0; 6886 } 6887 } 6888 6889 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6890 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 6891 *nss = i + 1; 6892 *rate = WMI_RATE_PREAMBLE_VHT << 6 | 6893 (*nss - 1) << 4 | 6894 (ffs(mask->control[band].vht_mcs[i]) - 1); 6895 6896 return 0; 6897 } 6898 } 6899 6900 return -EINVAL; 6901 } 6902 6903 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, 6904 u8 rate, u8 nss, u8 sgi, u8 ldpc) 6905 { 6906 struct ath10k *ar = arvif->ar; 6907 u32 vdev_param; 6908 int ret; 6909 6910 lockdep_assert_held(&ar->conf_mutex); 6911 6912 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n", 6913 arvif->vdev_id, rate, nss, sgi); 6914 6915 vdev_param = ar->wmi.vdev_param->fixed_rate; 6916 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate); 6917 if (ret) { 6918 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", 6919 rate, ret); 6920 return ret; 6921 } 6922 6923 vdev_param = ar->wmi.vdev_param->nss; 6924 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss); 6925 if (ret) { 6926 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret); 6927 return ret; 6928 } 6929 6930 vdev_param = ar->wmi.vdev_param->sgi; 6931 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi); 6932 if (ret) { 6933 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret); 6934 return ret; 6935 } 6936 6937 vdev_param = ar->wmi.vdev_param->ldpc; 6938 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc); 6939 if (ret) { 6940 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret); 6941 return ret; 6942 } 6943 6944 return 0; 6945 } 6946 6947 static bool 6948 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, 6949 enum nl80211_band band, 6950 const struct cfg80211_bitrate_mask *mask) 6951 { 6952 int i; 
6953 u16 vht_mcs; 6954 6955 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible 6956 * to express all VHT MCS rate masks. Effectively only the following 6957 * ranges can be used: none, 0-7, 0-8 and 0-9. 6958 */ 6959 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 6960 vht_mcs = mask->control[band].vht_mcs[i]; 6961 6962 switch (vht_mcs) { 6963 case 0: 6964 case BIT(8) - 1: 6965 case BIT(9) - 1: 6966 case BIT(10) - 1: 6967 break; 6968 default: 6969 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n"); 6970 return false; 6971 } 6972 } 6973 6974 return true; 6975 } 6976 6977 static void ath10k_mac_set_bitrate_mask_iter(void *data, 6978 struct ieee80211_sta *sta) 6979 { 6980 struct ath10k_vif *arvif = data; 6981 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6982 struct ath10k *ar = arvif->ar; 6983 6984 if (arsta->arvif != arvif) 6985 return; 6986 6987 spin_lock_bh(&ar->data_lock); 6988 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 6989 spin_unlock_bh(&ar->data_lock); 6990 6991 ieee80211_queue_work(ar->hw, &arsta->update_wk); 6992 } 6993 6994 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 6995 struct ieee80211_vif *vif, 6996 const struct cfg80211_bitrate_mask *mask) 6997 { 6998 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6999 struct cfg80211_chan_def def; 7000 struct ath10k *ar = arvif->ar; 7001 enum nl80211_band band; 7002 const u8 *ht_mcs_mask; 7003 const u16 *vht_mcs_mask; 7004 u8 rate; 7005 u8 nss; 7006 u8 sgi; 7007 u8 ldpc; 7008 int single_nss; 7009 int ret; 7010 7011 if (ath10k_mac_vif_chan(vif, &def)) 7012 return -EPERM; 7013 7014 band = def.chan->band; 7015 ht_mcs_mask = mask->control[band].ht_mcs; 7016 vht_mcs_mask = mask->control[band].vht_mcs; 7017 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 7018 7019 sgi = mask->control[band].gi; 7020 if (sgi == NL80211_TXRATE_FORCE_LGI) 7021 return -EINVAL; 7022 7023 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) { 7024 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, 7025 &rate, &nss); 7026 if (ret) { 7027 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n", 7028 arvif->vdev_id, ret); 7029 return ret; 7030 } 7031 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask, 7032 &single_nss)) { 7033 rate = WMI_FIXED_RATE_NONE; 7034 nss = single_nss; 7035 } else { 7036 rate = WMI_FIXED_RATE_NONE; 7037 nss = min(ar->num_rf_chains, 7038 max(ath10k_mac_max_ht_nss(ht_mcs_mask), 7039 ath10k_mac_max_vht_nss(vht_mcs_mask))); 7040 7041 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask)) 7042 return -EINVAL; 7043 7044 mutex_lock(&ar->conf_mutex); 7045 7046 arvif->bitrate_mask = *mask; 7047 ieee80211_iterate_stations_atomic(ar->hw, 7048 ath10k_mac_set_bitrate_mask_iter, 7049 arvif); 7050 7051 mutex_unlock(&ar->conf_mutex); 7052 } 7053 7054 mutex_lock(&ar->conf_mutex); 7055 7056 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); 7057 if (ret) { 7058 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", 7059 arvif->vdev_id, ret); 7060 goto exit; 7061 } 7062 7063 exit: 7064 mutex_unlock(&ar->conf_mutex); 7065 7066 return ret; 7067 } 7068 7069 static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 7070 struct ieee80211_vif *vif, 7071 struct ieee80211_sta *sta, 7072 u32 changed) 7073 { 7074 struct ath10k *ar = hw->priv; 7075 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7076 u32 bw, smps; 7077 7078 spin_lock_bh(&ar->data_lock); 7079 7080 ath10k_dbg(ar, ATH10K_DBG_MAC, 7081 "mac sta rc 
update for %pM changed %08x bw %d nss %d smps %d\n", 7082 sta->addr, changed, sta->bandwidth, sta->rx_nss, 7083 sta->smps_mode); 7084 7085 if (changed & IEEE80211_RC_BW_CHANGED) { 7086 bw = WMI_PEER_CHWIDTH_20MHZ; 7087 7088 switch (sta->bandwidth) { 7089 case IEEE80211_STA_RX_BW_20: 7090 bw = WMI_PEER_CHWIDTH_20MHZ; 7091 break; 7092 case IEEE80211_STA_RX_BW_40: 7093 bw = WMI_PEER_CHWIDTH_40MHZ; 7094 break; 7095 case IEEE80211_STA_RX_BW_80: 7096 bw = WMI_PEER_CHWIDTH_80MHZ; 7097 break; 7098 case IEEE80211_STA_RX_BW_160: 7099 bw = WMI_PEER_CHWIDTH_160MHZ; 7100 break; 7101 default: 7102 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", 7103 sta->bandwidth, sta->addr); 7104 bw = WMI_PEER_CHWIDTH_20MHZ; 7105 break; 7106 } 7107 7108 arsta->bw = bw; 7109 } 7110 7111 if (changed & IEEE80211_RC_NSS_CHANGED) 7112 arsta->nss = sta->rx_nss; 7113 7114 if (changed & IEEE80211_RC_SMPS_CHANGED) { 7115 smps = WMI_PEER_SMPS_PS_NONE; 7116 7117 switch (sta->smps_mode) { 7118 case IEEE80211_SMPS_AUTOMATIC: 7119 case IEEE80211_SMPS_OFF: 7120 smps = WMI_PEER_SMPS_PS_NONE; 7121 break; 7122 case IEEE80211_SMPS_STATIC: 7123 smps = WMI_PEER_SMPS_STATIC; 7124 break; 7125 case IEEE80211_SMPS_DYNAMIC: 7126 smps = WMI_PEER_SMPS_DYNAMIC; 7127 break; 7128 case IEEE80211_SMPS_NUM_MODES: 7129 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", 7130 sta->smps_mode, sta->addr); 7131 smps = WMI_PEER_SMPS_PS_NONE; 7132 break; 7133 } 7134 7135 arsta->smps = smps; 7136 } 7137 7138 arsta->changed |= changed; 7139 7140 spin_unlock_bh(&ar->data_lock); 7141 7142 ieee80211_queue_work(hw, &arsta->update_wk); 7143 } 7144 7145 static void ath10k_offset_tsf(struct ieee80211_hw *hw, 7146 struct ieee80211_vif *vif, s64 tsf_offset) 7147 { 7148 struct ath10k *ar = hw->priv; 7149 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7150 u32 offset, vdev_param; 7151 int ret; 7152 7153 if (tsf_offset < 0) { 7154 vdev_param = ar->wmi.vdev_param->dec_tsf; 7155 offset = -tsf_offset; 7156 } else { 7157 vdev_param = ar->wmi.vdev_param->inc_tsf; 7158 offset = tsf_offset; 7159 } 7160 7161 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 7162 vdev_param, offset); 7163 7164 if (ret && ret != -EOPNOTSUPP) 7165 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n", 7166 offset, vdev_param, ret); 7167 } 7168 7169 static int ath10k_ampdu_action(struct ieee80211_hw *hw, 7170 struct ieee80211_vif *vif, 7171 struct ieee80211_ampdu_params *params) 7172 { 7173 struct ath10k *ar = hw->priv; 7174 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7175 struct ieee80211_sta *sta = params->sta; 7176 enum ieee80211_ampdu_mlme_action action = params->action; 7177 u16 tid = params->tid; 7178 7179 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", 7180 arvif->vdev_id, sta->addr, tid, action); 7181 7182 switch (action) { 7183 case IEEE80211_AMPDU_RX_START: 7184 case IEEE80211_AMPDU_RX_STOP: 7185 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session 7186 * creation/removal. Do we need to verify this? 7187 */ 7188 return 0; 7189 case IEEE80211_AMPDU_TX_START: 7190 case IEEE80211_AMPDU_TX_STOP_CONT: 7191 case IEEE80211_AMPDU_TX_STOP_FLUSH: 7192 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 7193 case IEEE80211_AMPDU_TX_OPERATIONAL: 7194 /* Firmware offloads Tx aggregation entirely so deny mac80211 7195 * Tx aggregation requests. 
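* Returning -EOPNOTSUPP for the Tx cases keeps mac80211 from trying to negotiate block ack sessions on its own.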
7196 */ 7197 return -EOPNOTSUPP; 7198 } 7199 7200 return -EINVAL; 7201 } 7202 7203 static void 7204 ath10k_mac_update_rx_channel(struct ath10k *ar, 7205 struct ieee80211_chanctx_conf *ctx, 7206 struct ieee80211_vif_chanctx_switch *vifs, 7207 int n_vifs) 7208 { 7209 struct cfg80211_chan_def *def = NULL; 7210 7211 /* Both locks are required because ar->rx_channel is modified. This 7212 * allows readers to hold either lock. 7213 */ 7214 lockdep_assert_held(&ar->conf_mutex); 7215 lockdep_assert_held(&ar->data_lock); 7216 7217 WARN_ON(ctx && vifs); 7218 WARN_ON(vifs && !n_vifs); 7219 7220 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are 7221 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each 7222 * ppdu on Rx may reduce performance on low-end systems. It should be 7223 * possible to make tables/hashmaps to speed the lookup up (be wary of 7224 * CPU data cache line sizes though) but to keep the initial 7225 * implementation simple and less intrusive, fall back to the slow lookup 7226 * only for multi-channel cases. Single-channel cases will keep using 7227 * the old channel derivation and thus performance should not be 7228 * affected much. 7229 */ 7230 rcu_read_lock(); 7231 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) { 7232 ieee80211_iter_chan_contexts_atomic(ar->hw, 7233 ath10k_mac_get_any_chandef_iter, 7234 &def); 7235 7236 if (vifs) 7237 def = &vifs[0].new_ctx->def; 7238 7239 ar->rx_channel = def->chan; 7240 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) || 7241 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) { 7242 /* During driver restart due to firmware assert, since mac80211 7243 * already has a valid channel context for the given radio, channel 7244 * context iteration returns num_chanctx > 0. So fix rx_channel 7245 * when restart is in progress. 7246 */ 7247 ar->rx_channel = ctx->def.chan; 7248 } else { 7249 ar->rx_channel = NULL; 7250 } 7251 rcu_read_unlock(); 7252 } 7253 7254 static void 7255 ath10k_mac_update_vif_chan(struct ath10k *ar, 7256 struct ieee80211_vif_chanctx_switch *vifs, 7257 int n_vifs) 7258 { 7259 struct ath10k_vif *arvif; 7260 int ret; 7261 int i; 7262 7263 lockdep_assert_held(&ar->conf_mutex); 7264 7265 /* First stop monitor interface. Some FW versions crash if there's a 7266 * lone monitor interface. 7267 */ 7268 if (ar->monitor_started) 7269 ath10k_monitor_stop(ar); 7270 7271 for (i = 0; i < n_vifs; i++) { 7272 arvif = (void *)vifs[i].vif->drv_priv; 7273 7274 ath10k_dbg(ar, ATH10K_DBG_MAC, 7275 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n", 7276 arvif->vdev_id, 7277 vifs[i].old_ctx->def.chan->center_freq, 7278 vifs[i].new_ctx->def.chan->center_freq, 7279 vifs[i].old_ctx->def.width, 7280 vifs[i].new_ctx->def.width); 7281 7282 if (WARN_ON(!arvif->is_started)) 7283 continue; 7284 7285 if (WARN_ON(!arvif->is_up)) 7286 continue; 7287 7288 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7289 if (ret) { 7290 ath10k_warn(ar, "failed to down vdev %d: %d\n", 7291 arvif->vdev_id, ret); 7292 continue; 7293 } 7294 } 7295 7296 /* All relevant vdevs are downed and associated channel resources 7297 * should be available for the channel switch now.
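* The loop below re-installs the beacon and probe response templates, restarts each vdev on its new channel definition and brings it back up.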
7298 */ 7299 7300 spin_lock_bh(&ar->data_lock); 7301 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs); 7302 spin_unlock_bh(&ar->data_lock); 7303 7304 for (i = 0; i < n_vifs; i++) { 7305 arvif = (void *)vifs[i].vif->drv_priv; 7306 7307 if (WARN_ON(!arvif->is_started)) 7308 continue; 7309 7310 if (WARN_ON(!arvif->is_up)) 7311 continue; 7312 7313 ret = ath10k_mac_setup_bcn_tmpl(arvif); 7314 if (ret) 7315 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 7316 ret); 7317 7318 ret = ath10k_mac_setup_prb_tmpl(arvif); 7319 if (ret) 7320 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 7321 ret); 7322 7323 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); 7324 if (ret) { 7325 ath10k_warn(ar, "failed to restart vdev %d: %d\n", 7326 arvif->vdev_id, ret); 7327 continue; 7328 } 7329 7330 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 7331 arvif->bssid); 7332 if (ret) { 7333 ath10k_warn(ar, "failed to bring vdev up %d: %d\n", 7334 arvif->vdev_id, ret); 7335 continue; 7336 } 7337 } 7338 7339 ath10k_monitor_recalc(ar); 7340 } 7341 7342 static int 7343 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, 7344 struct ieee80211_chanctx_conf *ctx) 7345 { 7346 struct ath10k *ar = hw->priv; 7347 7348 ath10k_dbg(ar, ATH10K_DBG_MAC, 7349 "mac chanctx add freq %hu width %d ptr %pK\n", 7350 ctx->def.chan->center_freq, ctx->def.width, ctx); 7351 7352 mutex_lock(&ar->conf_mutex); 7353 7354 spin_lock_bh(&ar->data_lock); 7355 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0); 7356 spin_unlock_bh(&ar->data_lock); 7357 7358 ath10k_recalc_radar_detection(ar); 7359 ath10k_monitor_recalc(ar); 7360 7361 mutex_unlock(&ar->conf_mutex); 7362 7363 return 0; 7364 } 7365 7366 static void 7367 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 7368 struct ieee80211_chanctx_conf *ctx) 7369 { 7370 struct ath10k *ar = hw->priv; 7371 7372 ath10k_dbg(ar, ATH10K_DBG_MAC, 7373 "mac chanctx remove freq %hu width %d ptr %pK\n", 7374 ctx->def.chan->center_freq, ctx->def.width, ctx); 7375 7376 mutex_lock(&ar->conf_mutex); 7377 7378 spin_lock_bh(&ar->data_lock); 7379 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0); 7380 spin_unlock_bh(&ar->data_lock); 7381 7382 ath10k_recalc_radar_detection(ar); 7383 ath10k_monitor_recalc(ar); 7384 7385 mutex_unlock(&ar->conf_mutex); 7386 } 7387 7388 struct ath10k_mac_change_chanctx_arg { 7389 struct ieee80211_chanctx_conf *ctx; 7390 struct ieee80211_vif_chanctx_switch *vifs; 7391 int n_vifs; 7392 int next_vif; 7393 }; 7394 7395 static void 7396 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, 7397 struct ieee80211_vif *vif) 7398 { 7399 struct ath10k_mac_change_chanctx_arg *arg = data; 7400 7401 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx) 7402 return; 7403 7404 arg->n_vifs++; 7405 } 7406 7407 static void 7408 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac, 7409 struct ieee80211_vif *vif) 7410 { 7411 struct ath10k_mac_change_chanctx_arg *arg = data; 7412 struct ieee80211_chanctx_conf *ctx; 7413 7414 ctx = rcu_access_pointer(vif->chanctx_conf); 7415 if (ctx != arg->ctx) 7416 return; 7417 7418 if (WARN_ON(arg->next_vif == arg->n_vifs)) 7419 return; 7420 7421 arg->vifs[arg->next_vif].vif = vif; 7422 arg->vifs[arg->next_vif].old_ctx = ctx; 7423 arg->vifs[arg->next_vif].new_ctx = ctx; 7424 arg->next_vif++; 7425 } 7426 7427 static void 7428 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, 7429 struct ieee80211_chanctx_conf *ctx, 7430 u32 changed) 7431 { 7432 struct ath10k *ar = hw->priv; 7433 struct ath10k_mac_change_chanctx_arg arg = 
{ .ctx = ctx }; 7434 7435 mutex_lock(&ar->conf_mutex); 7436 7437 ath10k_dbg(ar, ATH10K_DBG_MAC, 7438 "mac chanctx change freq %hu width %d ptr %pK changed %x\n", 7439 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 7440 7441 /* This shouldn't really happen because channel switching should use 7442 * switch_vif_chanctx(). 7443 */ 7444 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 7445 goto unlock; 7446 7447 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) { 7448 ieee80211_iterate_active_interfaces_atomic( 7449 hw, 7450 IEEE80211_IFACE_ITER_NORMAL, 7451 ath10k_mac_change_chanctx_cnt_iter, 7452 &arg); 7453 if (arg.n_vifs == 0) 7454 goto radar; 7455 7456 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), 7457 GFP_KERNEL); 7458 if (!arg.vifs) 7459 goto radar; 7460 7461 ieee80211_iterate_active_interfaces_atomic( 7462 hw, 7463 IEEE80211_IFACE_ITER_NORMAL, 7464 ath10k_mac_change_chanctx_fill_iter, 7465 &arg); 7466 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); 7467 kfree(arg.vifs); 7468 } 7469 7470 radar: 7471 ath10k_recalc_radar_detection(ar); 7472 7473 /* FIXME: How to configure Rx chains properly? */ 7474 7475 /* No other actions are actually necessary. Firmware maintains channel 7476 * definitions per vdev internally and there's no host-side channel 7477 * context abstraction to configure, e.g. channel width. 7478 */ 7479 7480 unlock: 7481 mutex_unlock(&ar->conf_mutex); 7482 } 7483 7484 static int 7485 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 7486 struct ieee80211_vif *vif, 7487 struct ieee80211_chanctx_conf *ctx) 7488 { 7489 struct ath10k *ar = hw->priv; 7490 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7491 int ret; 7492 7493 mutex_lock(&ar->conf_mutex); 7494 7495 ath10k_dbg(ar, ATH10K_DBG_MAC, 7496 "mac chanctx assign ptr %pK vdev_id %i\n", 7497 ctx, arvif->vdev_id); 7498 7499 if (WARN_ON(arvif->is_started)) { 7500 mutex_unlock(&ar->conf_mutex); 7501 return -EBUSY; 7502 } 7503 7504 ret = ath10k_vdev_start(arvif, &ctx->def); 7505 if (ret) { 7506 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n", 7507 arvif->vdev_id, vif->addr, 7508 ctx->def.chan->center_freq, ret); 7509 goto err; 7510 } 7511 7512 arvif->is_started = true; 7513 7514 ret = ath10k_mac_vif_setup_ps(arvif); 7515 if (ret) { 7516 ath10k_warn(ar, "failed to update vdev %i ps: %d\n", 7517 arvif->vdev_id, ret); 7518 goto err_stop; 7519 } 7520 7521 if (vif->type == NL80211_IFTYPE_MONITOR) { 7522 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); 7523 if (ret) { 7524 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n", 7525 arvif->vdev_id, ret); 7526 goto err_stop; 7527 } 7528 7529 arvif->is_up = true; 7530 } 7531 7532 if (ath10k_mac_can_set_cts_prot(arvif)) { 7533 ret = ath10k_mac_set_cts_prot(arvif); 7534 if (ret) 7535 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 7536 arvif->vdev_id, ret); 7537 } 7538 7539 mutex_unlock(&ar->conf_mutex); 7540 return 0; 7541 7542 err_stop: 7543 ath10k_vdev_stop(arvif); 7544 arvif->is_started = false; 7545 ath10k_mac_vif_setup_ps(arvif); 7546 7547 err: 7548 mutex_unlock(&ar->conf_mutex); 7549 return ret; 7550 } 7551 7552 static void 7553 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 7554 struct ieee80211_vif *vif, 7555 struct ieee80211_chanctx_conf *ctx) 7556 { 7557 struct ath10k *ar = hw->priv; 7558 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7559 int ret; 7560 7561 mutex_lock(&ar->conf_mutex); 7562 7563 ath10k_dbg(ar, ATH10K_DBG_MAC, 7564 "mac chanctx unassign ptr %pK vdev_id %i\n", 
7565 ctx, arvif->vdev_id); 7566 7567 WARN_ON(!arvif->is_started); 7568 7569 if (vif->type == NL80211_IFTYPE_MONITOR) { 7570 WARN_ON(!arvif->is_up); 7571 7572 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7573 if (ret) 7574 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n", 7575 arvif->vdev_id, ret); 7576 7577 arvif->is_up = false; 7578 } 7579 7580 ret = ath10k_vdev_stop(arvif); 7581 if (ret) 7582 ath10k_warn(ar, "failed to stop vdev %i: %d\n", 7583 arvif->vdev_id, ret); 7584 7585 arvif->is_started = false; 7586 7587 mutex_unlock(&ar->conf_mutex); 7588 } 7589 7590 static int 7591 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 7592 struct ieee80211_vif_chanctx_switch *vifs, 7593 int n_vifs, 7594 enum ieee80211_chanctx_switch_mode mode) 7595 { 7596 struct ath10k *ar = hw->priv; 7597 7598 mutex_lock(&ar->conf_mutex); 7599 7600 ath10k_dbg(ar, ATH10K_DBG_MAC, 7601 "mac chanctx switch n_vifs %d mode %d\n", 7602 n_vifs, mode); 7603 ath10k_mac_update_vif_chan(ar, vifs, n_vifs); 7604 7605 mutex_unlock(&ar->conf_mutex); 7606 return 0; 7607 } 7608 7609 static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, 7610 struct ieee80211_vif *vif, 7611 struct ieee80211_sta *sta) 7612 { 7613 struct ath10k *ar; 7614 struct ath10k_peer *peer; 7615 7616 ar = hw->priv; 7617 7618 list_for_each_entry(peer, &ar->peers, list) 7619 if (peer->sta == sta) 7620 peer->removed = true; 7621 } 7622 7623 static const struct ieee80211_ops ath10k_ops = { 7624 .tx = ath10k_mac_op_tx, 7625 .wake_tx_queue = ath10k_mac_op_wake_tx_queue, 7626 .start = ath10k_start, 7627 .stop = ath10k_stop, 7628 .config = ath10k_config, 7629 .add_interface = ath10k_add_interface, 7630 .remove_interface = ath10k_remove_interface, 7631 .configure_filter = ath10k_configure_filter, 7632 .bss_info_changed = ath10k_bss_info_changed, 7633 .set_coverage_class = ath10k_mac_op_set_coverage_class, 7634 .hw_scan = ath10k_hw_scan, 7635 .cancel_hw_scan = ath10k_cancel_hw_scan, 7636 .set_key = ath10k_set_key, 7637 .set_default_unicast_key = ath10k_set_default_unicast_key, 7638 .sta_state = ath10k_sta_state, 7639 .conf_tx = ath10k_conf_tx, 7640 .remain_on_channel = ath10k_remain_on_channel, 7641 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, 7642 .set_rts_threshold = ath10k_set_rts_threshold, 7643 .set_frag_threshold = ath10k_mac_op_set_frag_threshold, 7644 .flush = ath10k_flush, 7645 .tx_last_beacon = ath10k_tx_last_beacon, 7646 .set_antenna = ath10k_set_antenna, 7647 .get_antenna = ath10k_get_antenna, 7648 .reconfig_complete = ath10k_reconfig_complete, 7649 .get_survey = ath10k_get_survey, 7650 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, 7651 .sta_rc_update = ath10k_sta_rc_update, 7652 .offset_tsf = ath10k_offset_tsf, 7653 .ampdu_action = ath10k_ampdu_action, 7654 .get_et_sset_count = ath10k_debug_get_et_sset_count, 7655 .get_et_stats = ath10k_debug_get_et_stats, 7656 .get_et_strings = ath10k_debug_get_et_strings, 7657 .add_chanctx = ath10k_mac_op_add_chanctx, 7658 .remove_chanctx = ath10k_mac_op_remove_chanctx, 7659 .change_chanctx = ath10k_mac_op_change_chanctx, 7660 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, 7661 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, 7662 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, 7663 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, 7664 7665 CFG80211_TESTMODE_CMD(ath10k_tm_cmd) 7666 7667 #ifdef CONFIG_PM 7668 .suspend = ath10k_wow_op_suspend, 7669 .resume = ath10k_wow_op_resume, 7670 .set_wakeup = ath10k_wow_op_set_wakeup, 7671 #endif 
7672 #ifdef CONFIG_MAC80211_DEBUGFS 7673 .sta_add_debugfs = ath10k_sta_add_debugfs, 7674 .sta_statistics = ath10k_sta_statistics, 7675 #endif 7676 }; 7677 7678 #define CHAN2G(_channel, _freq, _flags) { \ 7679 .band = NL80211_BAND_2GHZ, \ 7680 .hw_value = (_channel), \ 7681 .center_freq = (_freq), \ 7682 .flags = (_flags), \ 7683 .max_antenna_gain = 0, \ 7684 .max_power = 30, \ 7685 } 7686 7687 #define CHAN5G(_channel, _freq, _flags) { \ 7688 .band = NL80211_BAND_5GHZ, \ 7689 .hw_value = (_channel), \ 7690 .center_freq = (_freq), \ 7691 .flags = (_flags), \ 7692 .max_antenna_gain = 0, \ 7693 .max_power = 30, \ 7694 } 7695 7696 static const struct ieee80211_channel ath10k_2ghz_channels[] = { 7697 CHAN2G(1, 2412, 0), 7698 CHAN2G(2, 2417, 0), 7699 CHAN2G(3, 2422, 0), 7700 CHAN2G(4, 2427, 0), 7701 CHAN2G(5, 2432, 0), 7702 CHAN2G(6, 2437, 0), 7703 CHAN2G(7, 2442, 0), 7704 CHAN2G(8, 2447, 0), 7705 CHAN2G(9, 2452, 0), 7706 CHAN2G(10, 2457, 0), 7707 CHAN2G(11, 2462, 0), 7708 CHAN2G(12, 2467, 0), 7709 CHAN2G(13, 2472, 0), 7710 CHAN2G(14, 2484, 0), 7711 }; 7712 7713 static const struct ieee80211_channel ath10k_5ghz_channels[] = { 7714 CHAN5G(36, 5180, 0), 7715 CHAN5G(40, 5200, 0), 7716 CHAN5G(44, 5220, 0), 7717 CHAN5G(48, 5240, 0), 7718 CHAN5G(52, 5260, 0), 7719 CHAN5G(56, 5280, 0), 7720 CHAN5G(60, 5300, 0), 7721 CHAN5G(64, 5320, 0), 7722 CHAN5G(100, 5500, 0), 7723 CHAN5G(104, 5520, 0), 7724 CHAN5G(108, 5540, 0), 7725 CHAN5G(112, 5560, 0), 7726 CHAN5G(116, 5580, 0), 7727 CHAN5G(120, 5600, 0), 7728 CHAN5G(124, 5620, 0), 7729 CHAN5G(128, 5640, 0), 7730 CHAN5G(132, 5660, 0), 7731 CHAN5G(136, 5680, 0), 7732 CHAN5G(140, 5700, 0), 7733 CHAN5G(144, 5720, 0), 7734 CHAN5G(149, 5745, 0), 7735 CHAN5G(153, 5765, 0), 7736 CHAN5G(157, 5785, 0), 7737 CHAN5G(161, 5805, 0), 7738 CHAN5G(165, 5825, 0), 7739 CHAN5G(169, 5845, 0), 7740 }; 7741 7742 struct ath10k *ath10k_mac_create(size_t priv_size) 7743 { 7744 struct ieee80211_hw *hw; 7745 struct ieee80211_ops *ops; 7746 struct ath10k *ar; 7747 7748 ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); 7749 if (!ops) 7750 return NULL; 7751 7752 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops); 7753 if (!hw) { 7754 kfree(ops); 7755 return NULL; 7756 } 7757 7758 ar = hw->priv; 7759 ar->hw = hw; 7760 ar->ops = ops; 7761 7762 return ar; 7763 } 7764 7765 void ath10k_mac_destroy(struct ath10k *ar) 7766 { 7767 struct ieee80211_ops *ops = ar->ops; 7768 7769 ieee80211_free_hw(ar->hw); 7770 kfree(ops); 7771 } 7772 7773 static const struct ieee80211_iface_limit ath10k_if_limits[] = { 7774 { 7775 .max = 8, 7776 .types = BIT(NL80211_IFTYPE_STATION) 7777 | BIT(NL80211_IFTYPE_P2P_CLIENT) 7778 }, 7779 { 7780 .max = 3, 7781 .types = BIT(NL80211_IFTYPE_P2P_GO) 7782 }, 7783 { 7784 .max = 1, 7785 .types = BIT(NL80211_IFTYPE_P2P_DEVICE) 7786 }, 7787 { 7788 .max = 7, 7789 .types = BIT(NL80211_IFTYPE_AP) 7790 #ifdef CONFIG_MAC80211_MESH 7791 | BIT(NL80211_IFTYPE_MESH_POINT) 7792 #endif 7793 }, 7794 }; 7795 7796 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = { 7797 { 7798 .max = 8, 7799 .types = BIT(NL80211_IFTYPE_AP) 7800 #ifdef CONFIG_MAC80211_MESH 7801 | BIT(NL80211_IFTYPE_MESH_POINT) 7802 #endif 7803 }, 7804 { 7805 .max = 1, 7806 .types = BIT(NL80211_IFTYPE_STATION) 7807 }, 7808 }; 7809 7810 static const struct ieee80211_iface_combination ath10k_if_comb[] = { 7811 { 7812 .limits = ath10k_if_limits, 7813 .n_limits = ARRAY_SIZE(ath10k_if_limits), 7814 .max_interfaces = 8, 7815 .num_different_channels = 1, 7816 .beacon_int_infra_match = true, 7817 }, 
7818 }; 7819 7820 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = { 7821 { 7822 .limits = ath10k_10x_if_limits, 7823 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits), 7824 .max_interfaces = 8, 7825 .num_different_channels = 1, 7826 .beacon_int_infra_match = true, 7827 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 7828 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 7829 BIT(NL80211_CHAN_WIDTH_20) | 7830 BIT(NL80211_CHAN_WIDTH_40) | 7831 BIT(NL80211_CHAN_WIDTH_80), 7832 #endif 7833 }, 7834 }; 7835 7836 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = { 7837 { 7838 .max = 2, 7839 .types = BIT(NL80211_IFTYPE_STATION), 7840 }, 7841 { 7842 .max = 2, 7843 .types = BIT(NL80211_IFTYPE_AP) | 7844 #ifdef CONFIG_MAC80211_MESH 7845 BIT(NL80211_IFTYPE_MESH_POINT) | 7846 #endif 7847 BIT(NL80211_IFTYPE_P2P_CLIENT) | 7848 BIT(NL80211_IFTYPE_P2P_GO), 7849 }, 7850 { 7851 .max = 1, 7852 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 7853 }, 7854 }; 7855 7856 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = { 7857 { 7858 .max = 2, 7859 .types = BIT(NL80211_IFTYPE_STATION), 7860 }, 7861 { 7862 .max = 2, 7863 .types = BIT(NL80211_IFTYPE_P2P_CLIENT), 7864 }, 7865 { 7866 .max = 1, 7867 .types = BIT(NL80211_IFTYPE_AP) | 7868 #ifdef CONFIG_MAC80211_MESH 7869 BIT(NL80211_IFTYPE_MESH_POINT) | 7870 #endif 7871 BIT(NL80211_IFTYPE_P2P_GO), 7872 }, 7873 { 7874 .max = 1, 7875 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 7876 }, 7877 }; 7878 7879 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = { 7880 { 7881 .max = 1, 7882 .types = BIT(NL80211_IFTYPE_STATION), 7883 }, 7884 { 7885 .max = 1, 7886 .types = BIT(NL80211_IFTYPE_ADHOC), 7887 }, 7888 }; 7889 7890 /* FIXME: This is not thoroughly tested. These combinations may over- or 7891 * underestimate hw/fw capabilities.
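* The QCS combinations (which allow two different channels) are only registered when the firmware advertises WMI_SERVICE_ADAPTIVE_OCS.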
7892 */ 7893 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { 7894 { 7895 .limits = ath10k_tlv_if_limit, 7896 .num_different_channels = 1, 7897 .max_interfaces = 4, 7898 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 7899 }, 7900 { 7901 .limits = ath10k_tlv_if_limit_ibss, 7902 .num_different_channels = 1, 7903 .max_interfaces = 2, 7904 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 7905 }, 7906 }; 7907 7908 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { 7909 { 7910 .limits = ath10k_tlv_if_limit, 7911 .num_different_channels = 1, 7912 .max_interfaces = 4, 7913 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 7914 }, 7915 { 7916 .limits = ath10k_tlv_qcs_if_limit, 7917 .num_different_channels = 2, 7918 .max_interfaces = 4, 7919 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit), 7920 }, 7921 { 7922 .limits = ath10k_tlv_if_limit_ibss, 7923 .num_different_channels = 1, 7924 .max_interfaces = 2, 7925 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 7926 }, 7927 }; 7928 7929 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = { 7930 { 7931 .max = 1, 7932 .types = BIT(NL80211_IFTYPE_STATION), 7933 }, 7934 { 7935 .max = 16, 7936 .types = BIT(NL80211_IFTYPE_AP) 7937 #ifdef CONFIG_MAC80211_MESH 7938 | BIT(NL80211_IFTYPE_MESH_POINT) 7939 #endif 7940 }, 7941 }; 7942 7943 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { 7944 { 7945 .limits = ath10k_10_4_if_limits, 7946 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), 7947 .max_interfaces = 16, 7948 .num_different_channels = 1, 7949 .beacon_int_infra_match = true, 7950 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 7951 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 7952 BIT(NL80211_CHAN_WIDTH_20) | 7953 BIT(NL80211_CHAN_WIDTH_40) | 7954 BIT(NL80211_CHAN_WIDTH_80), 7955 #endif 7956 }, 7957 }; 7958 7959 static void ath10k_get_arvif_iter(void *data, u8 *mac, 7960 struct ieee80211_vif *vif) 7961 { 7962 struct ath10k_vif_iter *arvif_iter = data; 7963 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7964 7965 if (arvif->vdev_id == arvif_iter->vdev_id) 7966 arvif_iter->arvif = arvif; 7967 } 7968 7969 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) 7970 { 7971 struct ath10k_vif_iter arvif_iter; 7972 u32 flags; 7973 7974 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); 7975 arvif_iter.vdev_id = vdev_id; 7976 7977 flags = IEEE80211_IFACE_ITER_RESUME_ALL; 7978 ieee80211_iterate_active_interfaces_atomic(ar->hw, 7979 flags, 7980 ath10k_get_arvif_iter, 7981 &arvif_iter); 7982 if (!arvif_iter.arvif) { 7983 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); 7984 return NULL; 7985 } 7986 7987 return arvif_iter.arvif; 7988 } 7989 7990 #define WRD_METHOD "WRDD" 7991 #define WRDD_WIFI (0x07) 7992 7993 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd) 7994 { 7995 union acpi_object *mcc_pkg; 7996 union acpi_object *domain_type; 7997 union acpi_object *mcc_value; 7998 u32 i; 7999 8000 if (wrdd->type != ACPI_TYPE_PACKAGE || 8001 wrdd->package.count < 2 || 8002 wrdd->package.elements[0].type != ACPI_TYPE_INTEGER || 8003 wrdd->package.elements[0].integer.value != 0) { 8004 ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n"); 8005 return 0; 8006 } 8007 8008 for (i = 1; i < wrdd->package.count; ++i) { 8009 mcc_pkg = &wrdd->package.elements[i]; 8010 8011 if (mcc_pkg->type != ACPI_TYPE_PACKAGE) 8012 continue; 8013 if (mcc_pkg->package.count < 2) 8014 continue; 8015 if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || 8016 
mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) 8017 continue; 8018 8019 domain_type = &mcc_pkg->package.elements[0]; 8020 if (domain_type->integer.value != WRDD_WIFI) 8021 continue; 8022 8023 mcc_value = &mcc_pkg->package.elements[1]; 8024 return mcc_value->integer.value; 8025 } 8026 return 0; 8027 } 8028 8029 static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd) 8030 { 8031 struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev); 8032 acpi_handle root_handle; 8033 acpi_handle handle; 8034 struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL}; 8035 acpi_status status; 8036 u32 alpha2_code; 8037 char alpha2[3]; 8038 8039 root_handle = ACPI_HANDLE(&pdev->dev); 8040 if (!root_handle) 8041 return -EOPNOTSUPP; 8042 8043 status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle); 8044 if (ACPI_FAILURE(status)) { 8045 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8046 "failed to get wrd method %d\n", status); 8047 return -EIO; 8048 } 8049 8050 status = acpi_evaluate_object(handle, NULL, NULL, &wrdd); 8051 if (ACPI_FAILURE(status)) { 8052 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8053 "failed to call wrdd %d\n", status); 8054 return -EIO; 8055 } 8056 8057 alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer); 8058 kfree(wrdd.pointer); 8059 if (!alpha2_code) 8060 return -EIO; 8061 8062 alpha2[0] = (alpha2_code >> 8) & 0xff; 8063 alpha2[1] = (alpha2_code >> 0) & 0xff; 8064 alpha2[2] = '\0'; 8065 8066 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8067 "regulatory hint from WRDD (alpha2-code): %s\n", alpha2); 8068 8069 *rd = ath_regd_find_country_by_name(alpha2); 8070 if (*rd == 0xffff) 8071 return -EIO; 8072 8073 *rd |= COUNTRY_ERD_FLAG; 8074 return 0; 8075 } 8076 8077 static int ath10k_mac_init_rd(struct ath10k *ar) 8078 { 8079 int ret; 8080 u16 rd; 8081 8082 ret = ath10k_mac_get_wrdd_regulatory(ar, &rd); 8083 if (ret) { 8084 ath10k_dbg(ar, ATH10K_DBG_BOOT, 8085 "fallback to eeprom programmed regulatory settings\n"); 8086 rd = ar->hw_eeprom_rd; 8087 } 8088 8089 ar->ath_common.regulatory.current_rd = rd; 8090 return 0; 8091 } 8092 8093 int ath10k_mac_register(struct ath10k *ar) 8094 { 8095 static const u32 cipher_suites[] = { 8096 WLAN_CIPHER_SUITE_WEP40, 8097 WLAN_CIPHER_SUITE_WEP104, 8098 WLAN_CIPHER_SUITE_TKIP, 8099 WLAN_CIPHER_SUITE_CCMP, 8100 8101 /* Do not add hardware supported ciphers before this line. 8102 * Allow software encryption for all chips. Don't forget to 8103 * update n_cipher_suites below. 8104 */ 8105 WLAN_CIPHER_SUITE_AES_CMAC, 8106 WLAN_CIPHER_SUITE_BIP_CMAC_256, 8107 WLAN_CIPHER_SUITE_BIP_GMAC_128, 8108 WLAN_CIPHER_SUITE_BIP_GMAC_256, 8109 8110 /* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256 8111 * and CCMP-256 in hardware.
8112 */ 8113 WLAN_CIPHER_SUITE_GCMP, 8114 WLAN_CIPHER_SUITE_GCMP_256, 8115 WLAN_CIPHER_SUITE_CCMP_256, 8116 }; 8117 struct ieee80211_supported_band *band; 8118 void *channels; 8119 int ret; 8120 8121 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); 8122 8123 SET_IEEE80211_DEV(ar->hw, ar->dev); 8124 8125 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) + 8126 ARRAY_SIZE(ath10k_5ghz_channels)) != 8127 ATH10K_NUM_CHANS); 8128 8129 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 8130 channels = kmemdup(ath10k_2ghz_channels, 8131 sizeof(ath10k_2ghz_channels), 8132 GFP_KERNEL); 8133 if (!channels) { 8134 ret = -ENOMEM; 8135 goto err_free; 8136 } 8137 8138 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 8139 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); 8140 band->channels = channels; 8141 8142 if (ar->hw_params.cck_rate_map_rev2) { 8143 band->n_bitrates = ath10k_g_rates_rev2_size; 8144 band->bitrates = ath10k_g_rates_rev2; 8145 } else { 8146 band->n_bitrates = ath10k_g_rates_size; 8147 band->bitrates = ath10k_g_rates; 8148 } 8149 8150 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; 8151 } 8152 8153 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 8154 channels = kmemdup(ath10k_5ghz_channels, 8155 sizeof(ath10k_5ghz_channels), 8156 GFP_KERNEL); 8157 if (!channels) { 8158 ret = -ENOMEM; 8159 goto err_free; 8160 } 8161 8162 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 8163 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels); 8164 band->channels = channels; 8165 band->n_bitrates = ath10k_a_rates_size; 8166 band->bitrates = ath10k_a_rates; 8167 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; 8168 } 8169 8170 ath10k_mac_setup_ht_vht_cap(ar); 8171 8172 ar->hw->wiphy->interface_modes = 8173 BIT(NL80211_IFTYPE_STATION) | 8174 BIT(NL80211_IFTYPE_AP) | 8175 BIT(NL80211_IFTYPE_MESH_POINT); 8176 8177 ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask; 8178 ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask; 8179 8180 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features)) 8181 ar->hw->wiphy->interface_modes |= 8182 BIT(NL80211_IFTYPE_P2P_DEVICE) | 8183 BIT(NL80211_IFTYPE_P2P_CLIENT) | 8184 BIT(NL80211_IFTYPE_P2P_GO); 8185 8186 ieee80211_hw_set(ar->hw, SIGNAL_DBM); 8187 8188 if (!test_bit(ATH10K_FW_FEATURE_NO_PS, 8189 ar->running_fw->fw_file.fw_features)) { 8190 ieee80211_hw_set(ar->hw, SUPPORTS_PS); 8191 ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); 8192 } 8193 8194 ieee80211_hw_set(ar->hw, MFP_CAPABLE); 8195 ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS); 8196 ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL); 8197 ieee80211_hw_set(ar->hw, AP_LINK_PS); 8198 ieee80211_hw_set(ar->hw, SPECTRUM_MGMT); 8199 ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); 8200 ieee80211_hw_set(ar->hw, CONNECTION_MONITOR); 8201 ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK); 8202 ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF); 8203 ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA); 8204 ieee80211_hw_set(ar->hw, QUEUE_CONTROL); 8205 ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG); 8206 ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK); 8207 8208 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 8209 ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); 8210 8211 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; 8212 ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 8213 8214 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) 8215 ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; 8216 8217 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) { 8218 ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION); 8219 ieee80211_hw_set(ar->hw, 
	struct ieee80211_supported_band *band;
	void *channels;
	int ret;

	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);

	SET_IEEE80211_DEV(ar->hw, ar->dev);

	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
		     ATH10K_NUM_CHANS);

	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
		channels = kmemdup(ath10k_2ghz_channels,
				   sizeof(ath10k_2ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
		band->channels = channels;

		if (ar->hw_params.cck_rate_map_rev2) {
			band->n_bitrates = ath10k_g_rates_rev2_size;
			band->bitrates = ath10k_g_rates_rev2;
		} else {
			band->n_bitrates = ath10k_g_rates_size;
			band->bitrates = ath10k_g_rates;
		}

		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
	}

	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
		channels = kmemdup(ath10k_5ghz_channels,
				   sizeof(ath10k_5ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
		band->channels = channels;
		band->n_bitrates = ath10k_a_rates_size;
		band->bitrates = ath10k_a_rates;
		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
	}

	ath10k_mac_setup_ht_vht_cap(ar);

	ar->hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;

	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
		ar->hw->wiphy->interface_modes |=
			BIT(NL80211_IFTYPE_P2P_DEVICE) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO);

	ieee80211_hw_set(ar->hw, SIGNAL_DBM);

	if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
		      ar->running_fw->fw_file.fw_features)) {
		ieee80211_hw_set(ar->hw, SUPPORTS_PS);
		ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
	}

	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(ar->hw, AP_LINK_PS);
	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);

	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;

	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
	}

	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;

	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
	ar->hw->txq_data_size = sizeof(struct ath10k_txq);

	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;

	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;

		/* Firmware delivers WPS/P2P Probe Request frames to the driver
		 * so that userspace (e.g. wpa_supplicant/hostapd) can generate
		 * correct Probe Responses. Advertising the offload here is
		 * therefore more of a formality than a real capability.
		 */
		ar->hw->wiphy->probe_resp_offload |=
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	}

	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
	    test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
		ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
	}

	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	ar->hw->wiphy->max_remain_on_channel_duration = 5000;

	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
				   NL80211_FEATURE_AP_SCAN;

	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;

	ret = ath10k_wow_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to init wow: %d\n", ret);
		goto err_free;
	}

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);

	/* On LL hardware the queues are managed entirely by the firmware, so
	 * simply advertise the full set of mac80211 queues.
	 */
	ar->hw->queues = IEEE80211_MAX_QUEUES;

	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
	 * something that vdev_ids can't reach so that we don't stop the queue
	 * accidentally.
	 */
	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
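	/* For reference: IEEE80211_MAX_QUEUES is currently 16 in mac80211, so
	 * off-channel frames go out on hw queue 15, an index the per-vdev
	 * queues (numbered by vdev_id, as noted above) are not expected to
	 * reach.
	 */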

	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_if_comb);
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_tlv_qcs_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
		} else {
			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_if_comb);
		}
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
	case ATH10K_FW_WMI_OP_VERSION_10_2:
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10x_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10_4_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		WARN_ON(1);
		ret = -EINVAL;
		goto err_free;
	}

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ar->hw->netdev_features = NETIF_F_HW_CSUM;

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
		/* Init ath dfs pattern detector */
		ar->ath_common.debug_mask = ATH_DBG_DFS;
		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
							     NL80211_DFS_UNSET);

		if (!ar->dfs_detector)
			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
	}

	/* Current wake_tx_queue implementation imposes a significant
	 * performance penalty in some setups. The tx scheduling code needs
	 * more work anyway so disable the wake_tx_queue unless firmware
	 * supports the pull-push mechanism.
	 */
	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		ar->ops->wake_tx_queue = NULL;

	ret = ath10k_mac_init_rd(ar);
	if (ret) {
		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	/* Disable set_coverage_class for chipsets that do not support it. */
	if (!ar->hw_params.hw_ops->set_coverage_class)
		ar->ops->set_coverage_class = NULL;

	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
			    ath10k_reg_notifier);
	if (ret) {
		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
		goto err_dfs_detector_exit;
	}

	ar->hw->wiphy->cipher_suites = cipher_suites;

	/* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
	 * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported
	 * from chip specific hw_param table.
	 */
	if (!ar->hw_params.n_cipher_suites ||
	    ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
		ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
			   ar->hw_params.n_cipher_suites);
		ar->hw_params.n_cipher_suites = 8;
	}
	ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(ar->hw);
	if (ret) {
		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
		ret = regulatory_hint(ar->hw->wiphy,
				      ar->ath_common.regulatory.alpha2);
		if (ret)
			goto err_unregister;
	}

	return 0;

err_unregister:
	ieee80211_unregister_hw(ar->hw);

err_dfs_detector_exit:
	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

err_free:
	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
	return ret;
}

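/* ath10k_mac_unregister() mirrors the error path above: unregister from
 * mac80211, stop the DFS pattern detector if it was created, and release the
 * duplicated per-band channel arrays.
 */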
void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}