// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2021 Intel Corporation
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <linux/rtnetlink.h>

#include <net/codel.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "sta_info.h"
#include "debugfs_sta.h"
#include "mesh.h"
#include "wme.h"

/**
 * DOC: STA information lifetime rules
 *
 * STA info structures (&struct sta_info) are managed in a hash table
 * for faster lookup and a list for iteration. They are managed using
 * RCU, i.e. access to the list and hash table is protected by RCU.
 *
 * Upon allocating a STA info structure with sta_info_alloc(), the caller
 * owns that structure. It must then insert it into the hash table using
 * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
 * case (which acquires an rcu read section but must not be called from
 * within one) will the pointer still be valid after the call. Note that
 * the caller may not do much with the STA info before inserting it, in
 * particular, it may not start any mesh peer link management or add
 * encryption keys.
 *
 * When the insertion fails (sta_info_insert() returns non-zero), the
 * structure will have been freed by sta_info_insert()!
 *
 * Station entries are added by mac80211 when you establish a link with a
 * peer. This means different things for the different types of interfaces
 * we support. For a regular station this means we add the AP sta when we
 * receive an association response from the AP. For IBSS this occurs when
 * we get to know about a peer on the same IBSS. For WDS we add the sta for
 * the peer immediately upon device open. When using AP mode we add stations
 * for each respective station upon request from userspace through nl80211.
 *
 * In order to remove a STA info structure, various sta_info_destroy_*()
 * calls are available.
 *
 * There is no concept of ownership on a STA entry, each structure is
 * owned by the global hash table/list until it is removed. All users of
 * the structure need to be RCU protected so that the structure won't be
 * freed before they are done using it.
 */
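
/*
 * Illustrative sketch of the lifetime rules above: a minimal caller that
 * allocates and inserts a station. example_add_peer() is a hypothetical
 * helper name (not part of mac80211); it must run in a context that may
 * sleep and must not already be in an RCU read section.
 *
 *	static int example_add_peer(struct ieee80211_sub_if_data *sdata,
 *				    const u8 *addr)
 *	{
 *		struct sta_info *sta;
 *
 *		sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
 *		if (!sta)
 *			return -ENOMEM;
 *
 *		// on failure, sta_info_insert() has already freed the STA
 *		return sta_info_insert(sta);
 *	}
 */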

static const struct rhashtable_params sta_rht_params = {
	.nelem_hint = 3, /* start small */
	.automatic_shrinking = true,
	.head_offset = offsetof(struct sta_info, hash_node),
	.key_offset = offsetof(struct sta_info, addr),
	.key_len = ETH_ALEN,
	.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
};

/* Caller must hold local->sta_mtx */
static int sta_info_hash_del(struct ieee80211_local *local,
			     struct sta_info *sta)
{
	return rhltable_remove(&local->sta_hash, &sta->hash_node,
			       sta_rht_params);
}

static void __cleanup_single_sta(struct sta_info *sta)
{
	int ac, i;
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;

	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
	    test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
	    test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
		if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
		    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			ps = &sdata->bss->ps;
		else if (ieee80211_vif_is_mesh(&sdata->vif))
			ps = &sdata->u.mesh.ps;
		else
			return;

		clear_sta_flag(sta, WLAN_STA_PS_STA);
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		clear_sta_flag(sta, WLAN_STA_PS_DELIVER);

		atomic_dec(&ps->num_sta_ps);
	}

	if (sta->sta.txq[0]) {
		for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
			struct txq_info *txqi;

			if (!sta->sta.txq[i])
				continue;

			txqi = to_txq_info(sta->sta.txq[i]);

			ieee80211_txq_purge(local, txqi);
		}
	}

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
		ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
		ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif))
		mesh_sta_cleanup(sta);

	cancel_work_sync(&sta->drv_deliver_wk);

	/*
	 * Destroy aggregation state here. It would be nice to wait for the
	 * driver to finish aggregation stop and then clean up, but for now
	 * drivers have to handle aggregation stop being requested, followed
	 * directly by station destruction.
	 */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		kfree(sta->ampdu_mlme.tid_start_tx[i]);
		tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
		if (!tid_tx)
			continue;
		ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
		kfree(tid_tx);
	}
}

static void cleanup_single_sta(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	__cleanup_single_sta(sta);
	sta_info_free(local, sta);
}

struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
					 const u8 *addr)
{
	return rhltable_lookup(&local->sta_hash, addr, sta_rht_params);
}

/* protected by RCU */
struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
			      const u8 *addr)
{
	struct ieee80211_local *local = sdata->local;
	struct rhlist_head *tmp;
	struct sta_info *sta;

	rcu_read_lock();
	for_each_sta_info(local, addr, sta, tmp) {
		if (sta->sdata == sdata) {
			rcu_read_unlock();
			/* this is safe as the caller must already hold
			 * another rcu read section or the mutex
			 */
			return sta;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * Get sta info either from the specified interface
 * or from one of its vlans
 */
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
				  const u8 *addr)
{
	struct ieee80211_local *local = sdata->local;
	struct rhlist_head *tmp;
	struct sta_info *sta;

	rcu_read_lock();
	for_each_sta_info(local, addr, sta, tmp) {
		if (sta->sdata == sdata ||
		    (sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
			rcu_read_unlock();
			/* this is safe as the caller must already hold
			 * another rcu read section or the mutex
			 */
			return sta;
		}
	}
	rcu_read_unlock();
	return NULL;
}

struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local,
				       const u8 *sta_addr, const u8 *vif_addr)
{
	struct rhlist_head *tmp;
	struct sta_info *sta;

	for_each_sta_info(local, sta_addr, sta, tmp) {
		if (ether_addr_equal(vif_addr, sta->sdata->vif.addr))
			return sta;
	}

	return NULL;
}

struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
				     int idx)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	int i = 0;

	list_for_each_entry_rcu(sta, &local->sta_list, list,
				lockdep_is_held(&local->sta_mtx)) {
		if (sdata != sta->sdata)
			continue;
		if (i < idx) {
			++i;
			continue;
		}
		return sta;
	}

	return NULL;
}

/**
 * sta_info_free - free STA
 *
 * @local: pointer to the global information
 * @sta: STA info to free
 *
 * This function must undo everything done by sta_info_alloc()
 * that may happen before sta_info_insert(). It may only be
 * called when sta_info_insert() has not been attempted (and
 * if that fails, the station is freed anyway.)
 */
void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
	/*
	 * If we had used sta_info_pre_move_state() then we might not
	 * have gone through the state transitions down again, so do
	 * it here now (and warn if it's inserted).
	 *
	 * This will clear state such as fast TX/RX that may have been
	 * allocated during state transitions.
	 */
	while (sta->sta_state > IEEE80211_STA_NONE) {
		int ret;

		WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED));

		ret = sta_info_move_state(sta, sta->sta_state - 1);
		if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret))
			break;
	}

	if (sta->rate_ctrl)
		rate_control_free_sta(sta);

	sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);

	if (sta->sta.txq[0])
		kfree(to_txq_info(sta->sta.txq[0]));
	kfree(rcu_dereference_raw(sta->sta.rates));
#ifdef CONFIG_MAC80211_MESH
	kfree(sta->mesh);
#endif
	free_percpu(sta->pcpu_rx_stats);
	kfree(sta);
}

/* Caller must hold local->sta_mtx */
static int sta_info_hash_add(struct ieee80211_local *local,
			     struct sta_info *sta)
{
	return rhltable_insert(&local->sta_hash, &sta->hash_node,
			       sta_rht_params);
}

static void sta_deliver_ps_frames(struct work_struct *wk)
{
	struct sta_info *sta;

	sta = container_of(wk, struct sta_info, drv_deliver_wk);

	if (sta->dead)
		return;

	local_bh_disable();
	if (!test_sta_flag(sta, WLAN_STA_PS_STA))
		ieee80211_sta_ps_deliver_wakeup(sta);
	else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL))
		ieee80211_sta_ps_deliver_poll_response(sta);
	else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD))
		ieee80211_sta_ps_deliver_uapsd(sta);
	local_bh_enable();
}

static int sta_prepare_rate_control(struct ieee80211_local *local,
				    struct sta_info *sta, gfp_t gfp)
{
	if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
		return 0;

	sta->rate_ctrl = local->rate_ctrl;
	sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
						     sta, gfp);
	if (!sta->rate_ctrl_priv)
		return -ENOMEM;

	return 0;
}

struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
				const u8 *addr, gfp_t gfp)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hw *hw = &local->hw;
	struct sta_info *sta;
	int i;

	sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
	if (!sta)
		return NULL;

	if (ieee80211_hw_check(hw, USES_RSS)) {
		sta->pcpu_rx_stats =
			alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
		if (!sta->pcpu_rx_stats)
			goto free;
	}

	spin_lock_init(&sta->lock);
	spin_lock_init(&sta->ps_lock);
	INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
	mutex_init(&sta->ampdu_mlme.mtx);
#ifdef CONFIG_MAC80211_MESH
	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
		if (!sta->mesh)
			goto free;
		sta->mesh->plink_sta = sta;
		spin_lock_init(&sta->mesh->plink_lock);
		if (ieee80211_vif_is_mesh(&sdata->vif) &&
		    !sdata->u.mesh.user_mpm)
			timer_setup(&sta->mesh->plink_timer, mesh_plink_timer,
				    0);
		sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
	}
#endif

	memcpy(sta->addr, addr, ETH_ALEN);
	memcpy(sta->sta.addr, addr, ETH_ALEN);
	sta->sta.max_rx_aggregation_subframes =
		local->hw.max_rx_aggregation_subframes;

	/* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only.
	 * The Tx path starts to use a key as soon as the key slot ptk_idx
	 * references to is not NULL. To not use the initial Rx-only key
	 * prematurely for Tx initialize ptk_idx to an impossible PTK keyid
	 * which always will refer to a NULL key.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX);
	sta->ptk_idx = INVALID_PTK_KEYIDX;

	sta->local = local;
	sta->sdata = sdata;
	sta->rx_stats.last_rx = jiffies;

	u64_stats_init(&sta->rx_stats.syncp);

	ieee80211_init_frag_cache(&sta->frags);

	sta->sta_state = IEEE80211_STA_NONE;

	/* Mark TID as unreserved */
	sta->reserved_tid = IEEE80211_TID_UNRESERVED;

	sta->last_connected = ktime_get_seconds();
	ewma_signal_init(&sta->rx_stats_avg.signal);
	ewma_avg_signal_init(&sta->status_stats.avg_ack_signal);
	for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++)
		ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]);

	if (local->ops->wake_tx_queue) {
		void *txq_data;
		int size = sizeof(struct txq_info) +
			   ALIGN(hw->txq_data_size, sizeof(void *));

		txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp);
		if (!txq_data)
			goto free;

		for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
			struct txq_info *txq = txq_data + i * size;

			/* might not do anything for the bufferable MMPDU TXQ */
			ieee80211_txq_init(sdata, sta, txq, i);
		}
	}

	if (sta_prepare_rate_control(local, sta, gfp))
		goto free_txq;


	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		skb_queue_head_init(&sta->ps_tx_buf[i]);
		skb_queue_head_init(&sta->tx_filtered[i]);
		init_airtime_info(&sta->airtime[i], &local->airtime[i]);
	}

	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);

	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		u32 mandatory = 0;
		int r;

		if (!hw->wiphy->bands[i])
			continue;

		switch (i) {
		case NL80211_BAND_2GHZ:
		case NL80211_BAND_LC:
			/*
			 * We use both here, even if we cannot really know for
			 * sure the station will support both, but the only use
			 * for this is when we don't know anything yet and send
			 * management frames, and then we'll pick the lowest
			 * possible rate anyway.
			 * If we don't include _G here, we cannot find a rate
			 * in P2P, and thus trigger the WARN_ONCE() in rate.c
			 */
			mandatory = IEEE80211_RATE_MANDATORY_B |
				    IEEE80211_RATE_MANDATORY_G;
			break;
		case NL80211_BAND_5GHZ:
			mandatory = IEEE80211_RATE_MANDATORY_A;
			break;
		case NL80211_BAND_60GHZ:
			WARN_ON(1);
			mandatory = 0;
			break;
		}

		for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) {
			struct ieee80211_rate *rate;

			rate = &hw->wiphy->bands[i]->bitrates[r];

			if (!(rate->flags & mandatory))
				continue;
			sta->sta.supp_rates[i] |= BIT(r);
		}
	}

	sta->sta.smps_mode = IEEE80211_SMPS_OFF;
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		struct ieee80211_supported_band *sband;
		u8 smps;

		sband = ieee80211_get_sband(sdata);
		if (!sband)
			goto free_txq;

		smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
			IEEE80211_HT_CAP_SM_PS_SHIFT;
		/*
		 * Assume that hostapd advertises our caps in the beacon and
		 * this is the known_smps_mode for a station that just associated
		 */
		switch (smps) {
		case WLAN_HT_SMPS_CONTROL_DISABLED:
			sta->known_smps_mode = IEEE80211_SMPS_OFF;
			break;
		case WLAN_HT_SMPS_CONTROL_STATIC:
			sta->known_smps_mode = IEEE80211_SMPS_STATIC;
			break;
		case WLAN_HT_SMPS_CONTROL_DYNAMIC:
			sta->known_smps_mode = IEEE80211_SMPS_DYNAMIC;
			break;
		default:
			WARN_ON(1);
		}
	}

	sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;

	sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD;
	sta->cparams.target = MS2TIME(20);
	sta->cparams.interval = MS2TIME(100);
	sta->cparams.ecn = true;
	sta->cparams.ce_threshold_selector = 0;
	sta->cparams.ce_threshold_mask = 0;

	sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);

	return sta;

free_txq:
	if (sta->sta.txq[0])
		kfree(to_txq_info(sta->sta.txq[0]));
free:
	free_percpu(sta->pcpu_rx_stats);
#ifdef CONFIG_MAC80211_MESH
	kfree(sta->mesh);
#endif
	kfree(sta);
	return NULL;
}

static int sta_info_insert_check(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;

	/*
	 * Can't be a WARN_ON because it can be triggered through a race:
	 * something inserts a STA (on one CPU) without holding the RTNL
	 * and another CPU turns off the net device.
	 */
	if (unlikely(!ieee80211_sdata_running(sdata)))
		return -ENETDOWN;

	if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) ||
		    !is_valid_ether_addr(sta->sta.addr)))
		return -EINVAL;

	/* The RCU read lock is required by rhashtable due to
	 * asynchronous resize/rehash. We also require the mutex
	 * for correctness.
	 */
	rcu_read_lock();
	lockdep_assert_held(&sdata->local->sta_mtx);
	if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) &&
	    ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) {
		rcu_read_unlock();
		return -ENOTUNIQ;
	}
	rcu_read_unlock();

	return 0;
}

static int sta_info_insert_drv_state(struct ieee80211_local *local,
				     struct ieee80211_sub_if_data *sdata,
				     struct sta_info *sta)
{
	enum ieee80211_sta_state state;
	int err = 0;

	for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) {
		err = drv_sta_state(local, sdata, sta, state, state + 1);
		if (err)
			break;
	}

	if (!err) {
		/*
		 * Drivers using legacy sta_add/sta_remove callbacks only
		 * get uploaded set to true after sta_add is called.
		 */
		if (!local->ops->sta_add)
			sta->uploaded = true;
		return 0;
	}

	if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		sdata_info(sdata,
			   "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n",
			   sta->sta.addr, state + 1, err);
		err = 0;
	}

	/* unwind on error */
	for (; state > IEEE80211_STA_NOTEXIST; state--)
		WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1));

	return err;
}

static void
ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	bool allow_p2p_go_ps = sdata->vif.p2p;
	struct sta_info *sta;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata ||
		    !test_sta_flag(sta, WLAN_STA_ASSOC))
			continue;
		if (!sta->sta.support_p2p_ps) {
			allow_p2p_go_ps = false;
			break;
		}
	}
	rcu_read_unlock();

	if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) {
		sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps;
		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS);
	}
}

/*
 * should be called with sta_mtx locked
 * this function replaces the mutex lock
 * with an RCU lock
 */
static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
{
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct station_info *sinfo = NULL;
	int err = 0;

	lockdep_assert_held(&local->sta_mtx);

	/* check if STA exists already */
	if (sta_info_get_bss(sdata, sta->sta.addr)) {
		err = -EEXIST;
		goto out_err;
	}

	sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
	if (!sinfo) {
		err = -ENOMEM;
		goto out_err;
	}

	local->num_sta++;
	local->sta_generation++;
	smp_mb();

	/* simplify things and don't accept BA sessions yet */
	set_sta_flag(sta, WLAN_STA_BLOCK_BA);

	/* make the station visible */
	err = sta_info_hash_add(local, sta);
	if (err)
		goto out_drop_sta;

	list_add_tail_rcu(&sta->list, &local->sta_list);

	/* notify driver */
	err = sta_info_insert_drv_state(local, sdata, sta);
	if (err)
		goto out_remove;

	set_sta_flag(sta, WLAN_STA_INSERTED);

	if (sta->sta_state >= IEEE80211_STA_ASSOC) {
		ieee80211_recalc_min_chandef(sta->sdata);
		if (!sta->sta.support_p2p_ps)
			ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
	}

	/* accept BA sessions now */
	clear_sta_flag(sta, WLAN_STA_BLOCK_BA);

	ieee80211_sta_debugfs_add(sta);
	rate_control_add_sta_debugfs(sta);

	sinfo->generation = local->sta_generation;
	cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
	kfree(sinfo);

	sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);

	/* move reference to rcu-protected */
	rcu_read_lock();
	mutex_unlock(&local->sta_mtx);

	if (ieee80211_vif_is_mesh(&sdata->vif))
		mesh_accept_plinks_update(sdata);

	return 0;
 out_remove:
	sta_info_hash_del(local, sta);
	list_del_rcu(&sta->list);
 out_drop_sta:
	local->num_sta--;
	synchronize_net();
	cleanup_single_sta(sta);
 out_err:
	mutex_unlock(&local->sta_mtx);
	kfree(sinfo);
	rcu_read_lock();
	return err;
}

int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
{
	struct ieee80211_local *local = sta->local;
	int err;

	might_sleep();

	mutex_lock(&local->sta_mtx);

	err = sta_info_insert_check(sta);
	if (err) {
		sta_info_free(local, sta);
		mutex_unlock(&local->sta_mtx);
		rcu_read_lock();
		return err;
	}

	return sta_info_insert_finish(sta);
}

int sta_info_insert(struct sta_info *sta)
{
	int err = sta_info_insert_rcu(sta);

	rcu_read_unlock();

	return err;
}

static inline void __bss_tim_set(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the __set_bit() format.
	 */
	tim[id / 8] |= (1 << (id % 8));
}

static inline void __bss_tim_clear(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the __clear_bit() format.
	 */
	tim[id / 8] &= ~(1 << (id % 8));
}

static inline bool __bss_tim_get(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the test_bit() format.
	 */
	return tim[id / 8] & (1 << (id % 8));
}

static unsigned long ieee80211_tids_for_ac(int ac)
{
	/* If we ever support TIDs > 7, this obviously needs to be adjusted */
	switch (ac) {
	case IEEE80211_AC_VO:
		return BIT(6) | BIT(7);
	case IEEE80211_AC_VI:
		return BIT(4) | BIT(5);
	case IEEE80211_AC_BE:
		return BIT(0) | BIT(3);
	case IEEE80211_AC_BK:
		return BIT(1) | BIT(2);
	default:
		WARN_ON(1);
		return 0;
	}
}

static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
{
	struct ieee80211_local *local = sta->local;
	struct ps_data *ps;
	bool indicate_tim = false;
	u8 ignore_for_tim = sta->sta.uapsd_queues;
	int ac;
	u16 id = sta->sta.aid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		if (WARN_ON_ONCE(!sta->sdata->bss))
			return;

		ps = &sta->sdata->bss->ps;
#ifdef CONFIG_MAC80211_MESH
	} else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
		ps = &sta->sdata->u.mesh.ps;
#endif
	} else {
		return;
	}

	/* No need to do anything if the driver does all */
	if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
		return;

	if (sta->dead)
		goto done;

	/*
	 * If all ACs are delivery-enabled then we should build
	 * the TIM bit for all ACs anyway; if only some are then
	 * we ignore those and build the TIM bit using only the
	 * non-enabled ones.
	 */
	if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
		ignore_for_tim = 0;

	if (ignore_pending)
		ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		unsigned long tids;

		if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac])
			continue;

		indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) ||
				!skb_queue_empty(&sta->ps_tx_buf[ac]);
		if (indicate_tim)
			break;

		tids = ieee80211_tids_for_ac(ac);

		indicate_tim |=
			sta->driver_buffered_tids & tids;
		indicate_tim |=
			sta->txq_buffered_tids & tids;
	}

 done:
	spin_lock_bh(&local->tim_lock);

	if (indicate_tim == __bss_tim_get(ps->tim, id))
		goto out_unlock;

	if (indicate_tim)
		__bss_tim_set(ps->tim, id);
	else
		__bss_tim_clear(ps->tim, id);

	if (local->ops->set_tim && !WARN_ON(sta->dead)) {
		local->tim_in_locked_section = true;
		drv_set_tim(local, &sta->sta, indicate_tim);
		local->tim_in_locked_section = false;
	}

 out_unlock:
	spin_unlock_bh(&local->tim_lock);
}

void sta_info_recalc_tim(struct sta_info *sta)
{
	__sta_info_recalc_tim(sta, false);
}

static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info;
	int timeout;

	if (!skb)
		return false;

	info = IEEE80211_SKB_CB(skb);

	/* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */
	timeout = (sta->listen_interval *
		   sta->sdata->vif.bss_conf.beacon_int *
		   32 / 15625) * HZ;
	if (timeout < STA_TX_BUFFER_EXPIRE)
		timeout = STA_TX_BUFFER_EXPIRE;
	return time_after(jiffies, info->control.jiffies + timeout);
}


static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
						struct sta_info *sta, int ac)
{
	unsigned long flags;
	struct sk_buff *skb;

	/*
	 * First check for frames that should expire on the filtered
	 * queue. Frames here were rejected by the driver and are on
	 * a separate queue to avoid reordering with normal PS-buffered
	 * frames. They also aren't accounted for right now in the
	 * total_ps_buffered counter.
	 */
	for (;;) {
		spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
		skb = skb_peek(&sta->tx_filtered[ac]);
		if (sta_info_buffer_expired(sta, skb))
			skb = __skb_dequeue(&sta->tx_filtered[ac]);
		else
			skb = NULL;
		spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);

		/*
		 * Frames are queued in order, so if this one
		 * hasn't expired yet we can stop testing. If
		 * we actually reached the end of the queue we
		 * also need to stop, of course.
		 */
		if (!skb)
			break;
		ieee80211_free_txskb(&local->hw, skb);
	}

	/*
	 * Now also check the normal PS-buffered queue, this will
	 * only find something if the filtered queue was emptied
	 * since the filtered frames are all before the normal PS
	 * buffered frames.
	 */
	for (;;) {
		spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
		skb = skb_peek(&sta->ps_tx_buf[ac]);
		if (sta_info_buffer_expired(sta, skb))
			skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
		else
			skb = NULL;
		spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);

		/*
		 * frames are queued in order, so if this one
		 * hasn't expired yet (or we reached the end of
		 * the queue) we can stop testing
		 */
		if (!skb)
			break;

		local->total_ps_buffered--;
		ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
		       sta->sta.addr);
		ieee80211_free_txskb(&local->hw, skb);
	}

	/*
	 * Finally, recalculate the TIM bit for this station -- it might
	 * now be clear because the station was too slow to retrieve its
	 * frames.
	 */
	sta_info_recalc_tim(sta);

	/*
	 * Return whether there are any frames still buffered, this is
	 * used to check whether the cleanup timer still needs to run,
	 * if there are no frames we don't need to rearm the timer.
	 */
	return !(skb_queue_empty(&sta->ps_tx_buf[ac]) &&
		 skb_queue_empty(&sta->tx_filtered[ac]));
}

static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
					     struct sta_info *sta)
{
	bool have_buffered = false;
	int ac;

	/* This is only necessary for stations on BSS/MBSS interfaces */
	if (!sta->sdata->bss &&
	    !ieee80211_vif_is_mesh(&sta->sdata->vif))
		return false;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		have_buffered |=
			sta_info_cleanup_expire_buffered_ac(local, sta, ac);

	return have_buffered;
}

static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
{
	struct ieee80211_local *local;
	struct ieee80211_sub_if_data *sdata;
	int ret;

	might_sleep();

	if (!sta)
		return -ENOENT;

	local = sta->local;
	sdata = sta->sdata;

	lockdep_assert_held(&local->sta_mtx);

	/*
	 * Before removing the station from the driver and
	 * rate control, it might still start new aggregation
	 * sessions -- block that to make sure the tear-down
	 * will be sufficient.
	 */
	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
	ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);

	/*
	 * Before removing the station from the driver there might be pending
	 * rx frames on RSS queues sent prior to the disassociation - wait for
	 * all such frames to be processed.
	 */
	drv_sync_rx_queues(local, sta);

	ret = sta_info_hash_del(local, sta);
	if (WARN_ON(ret))
		return ret;

	/*
	 * for TDLS peers, make sure to return to the base channel before
	 * removal.
	 */
	if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) {
		drv_tdls_cancel_channel_switch(local, sdata, &sta->sta);
		clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
	}

	list_del_rcu(&sta->list);
	sta->removed = true;

	drv_sta_pre_rcu_remove(local, sta->sdata, sta);

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
	    rcu_access_pointer(sdata->u.vlan.sta) == sta)
		RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);

	return 0;
}

static void __sta_info_destroy_part2(struct sta_info *sta)
{
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct station_info *sinfo;
	int ret;

	/*
	 * NOTE: This assumes at least synchronize_net() was done
	 *	 after _part1 and before _part2!
	 */

	might_sleep();
	lockdep_assert_held(&local->sta_mtx);

	if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
		ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
		WARN_ON_ONCE(ret);
	}

	/* now keys can no longer be reached */
	ieee80211_free_sta_keys(local, sta);

	/* disable TIM bit - last chance to tell driver */
	__sta_info_recalc_tim(sta, true);

	sta->dead = true;

	local->num_sta--;
	local->sta_generation++;

	while (sta->sta_state > IEEE80211_STA_NONE) {
		ret = sta_info_move_state(sta, sta->sta_state - 1);
		if (ret) {
			WARN_ON_ONCE(1);
			break;
		}
	}

	if (sta->uploaded) {
		ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE,
				    IEEE80211_STA_NOTEXIST);
		WARN_ON_ONCE(ret != 0);
	}

	sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);

	sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
	if (sinfo)
		sta_set_sinfo(sta, sinfo, true);
	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
	kfree(sinfo);

	ieee80211_sta_debugfs_remove(sta);

	ieee80211_destroy_frag_cache(&sta->frags);

	cleanup_single_sta(sta);
}

int __must_check __sta_info_destroy(struct sta_info *sta)
{
	int err = __sta_info_destroy_part1(sta);

	if (err)
		return err;

	synchronize_net();

	__sta_info_destroy_part2(sta);

	return 0;
}

int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct sta_info *sta;
	int ret;

	mutex_lock(&sdata->local->sta_mtx);
	sta = sta_info_get(sdata, addr);
	ret = __sta_info_destroy(sta);
	mutex_unlock(&sdata->local->sta_mtx);

	return ret;
}

int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
			      const u8 *addr)
{
	struct sta_info *sta;
	int ret;

	mutex_lock(&sdata->local->sta_mtx);
	sta = sta_info_get_bss(sdata, addr);
	ret = __sta_info_destroy(sta);
	mutex_unlock(&sdata->local->sta_mtx);

	return ret;
}

static void sta_info_cleanup(struct timer_list *t)
{
	struct ieee80211_local *local = from_timer(local, t, sta_cleanup);
	struct sta_info *sta;
	bool timer_needed = false;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &local->sta_list, list)
		if (sta_info_cleanup_expire_buffered(local, sta))
			timer_needed = true;
	rcu_read_unlock();

	if (local->quiescing)
		return;

	if (!timer_needed)
		return;

	mod_timer(&local->sta_cleanup,
		  round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
}

int sta_info_init(struct ieee80211_local *local)
{
	int err;

	err = rhltable_init(&local->sta_hash, &sta_rht_params);
	if (err)
		return err;

	spin_lock_init(&local->tim_lock);
	mutex_init(&local->sta_mtx);
	INIT_LIST_HEAD(&local->sta_list);

	timer_setup(&local->sta_cleanup, sta_info_cleanup, 0);
	return 0;
}

void sta_info_stop(struct ieee80211_local *local)
{
	del_timer_sync(&local->sta_cleanup);
	rhltable_destroy(&local->sta_hash);
}


int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta, *tmp;
	LIST_HEAD(free_list);
	int ret = 0;

	might_sleep();

	WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP);
	WARN_ON(vlans && !sdata->bss);

	mutex_lock(&local->sta_mtx);
	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
		if (sdata == sta->sdata ||
		    (vlans && sdata->bss == sta->sdata->bss)) {
			if (!WARN_ON(__sta_info_destroy_part1(sta)))
				list_add(&sta->free_list, &free_list);
			ret++;
		}
	}

	if (!list_empty(&free_list)) {
		synchronize_net();
		list_for_each_entry_safe(sta, tmp, &free_list, free_list)
			__sta_info_destroy_part2(sta);
	}
	mutex_unlock(&local->sta_mtx);

	return ret;
}

void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
			  unsigned long exp_time)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta, *tmp;

	mutex_lock(&local->sta_mtx);

	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
		unsigned long last_active = ieee80211_sta_last_active(sta);

		if (sdata != sta->sdata)
			continue;

		if (time_is_before_jiffies(last_active + exp_time)) {
			sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
				sta->sta.addr);

			if (ieee80211_vif_is_mesh(&sdata->vif) &&
			    test_sta_flag(sta, WLAN_STA_PS_STA))
				atomic_dec(&sdata->u.mesh.ps.num_sta_ps);

			WARN_ON(__sta_info_destroy(sta));
		}
	}

	mutex_unlock(&local->sta_mtx);
}

struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
						   const u8 *addr,
						   const u8 *localaddr)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct rhlist_head *tmp;
	struct sta_info *sta;

	/*
	 * Just return a random station if localaddr is NULL
	 * ... first in list.
	 */
	for_each_sta_info(local, addr, sta, tmp) {
		if (localaddr &&
		    !ether_addr_equal(sta->sdata->vif.addr, localaddr))
			continue;
		if (!sta->uploaded)
			return NULL;
		return &sta->sta;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr);

struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
					 const u8 *addr)
{
	struct sta_info *sta;

	if (!vif)
		return NULL;

	sta = sta_info_get_bss(vif_to_sdata(vif), addr);
	if (!sta)
		return NULL;

	if (!sta->uploaded)
		return NULL;

	return &sta->sta;
}
EXPORT_SYMBOL(ieee80211_find_sta);

/* powersave support code */
void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct sk_buff_head pending;
	int filtered = 0, buffered = 0, ac, i;
	unsigned long flags;
	struct ps_data *ps;

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
				     u.ap);

	if (sdata->vif.type == NL80211_IFTYPE_AP)
		ps = &sdata->bss->ps;
	else if (ieee80211_vif_is_mesh(&sdata->vif))
		ps = &sdata->u.mesh.ps;
	else
		return;

	clear_sta_flag(sta, WLAN_STA_SP);

	BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
	sta->driver_buffered_tids = 0;
	sta->txq_buffered_tids = 0;

	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);

	for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
		if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i]))
			continue;

		schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i]));
	}

	skb_queue_head_init(&pending);

	/* sync with ieee80211_tx_h_unicast_ps_buf */
	spin_lock(&sta->ps_lock);
	/* Send all buffered frames to the station */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		int count = skb_queue_len(&pending), tmp;

		spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
		skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
		spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
		tmp = skb_queue_len(&pending);
		filtered += tmp - count;
		count = tmp;

		spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
		skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
		spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
		tmp = skb_queue_len(&pending);
		buffered += tmp - count;
	}

	ieee80211_add_pending_skbs(local, &pending);

	/* now we're no longer in the deliver code */
	clear_sta_flag(sta, WLAN_STA_PS_DELIVER);

	/* The station might have polled and then woken up before we responded,
	 * so clear these flags now to avoid them sticking around.
	 */
	clear_sta_flag(sta, WLAN_STA_PSPOLL);
	clear_sta_flag(sta, WLAN_STA_UAPSD);
	spin_unlock(&sta->ps_lock);

	atomic_dec(&ps->num_sta_ps);

	local->total_ps_buffered -= buffered;

	sta_info_recalc_tim(sta);

	ps_dbg(sdata,
	       "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n",
	       sta->sta.addr, sta->sta.aid, filtered, buffered);

	ieee80211_check_fast_xmit(sta);
}

static void ieee80211_send_null_response(struct sta_info *sta, int tid,
					 enum ieee80211_frame_release_type reason,
					 bool call_driver, bool more_data)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_qos_hdr *nullfunc;
	struct sk_buff *skb;
	int size = sizeof(*nullfunc);
	__le16 fc;
	bool qos = sta->sta.wme;
	struct ieee80211_tx_info *info;
	struct ieee80211_chanctx_conf *chanctx_conf;

	if (qos) {
		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
				 IEEE80211_STYPE_QOS_NULLFUNC |
				 IEEE80211_FCTL_FROMDS);
	} else {
		size -= 2;
		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
				 IEEE80211_STYPE_NULLFUNC |
				 IEEE80211_FCTL_FROMDS);
	}

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);

	nullfunc = skb_put(skb, size);
	nullfunc->frame_control = fc;
	nullfunc->duration_id = 0;
	memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
	memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
	memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
	nullfunc->seq_ctrl = 0;

	skb->priority = tid;
	skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
	if (qos) {
		nullfunc->qos_ctrl = cpu_to_le16(tid);

		if (reason == IEEE80211_FRAME_RELEASE_UAPSD) {
			nullfunc->qos_ctrl |=
				cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
			if (more_data)
				nullfunc->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		}
	}

	info = IEEE80211_SKB_CB(skb);

	/*
	 * Tell TX path to send this frame even though the
	 * STA may still remain in PS mode after this frame
	 * exchange. Also set EOSP to indicate this packet
	 * ends the poll/service period.
	 */
	info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
		       IEEE80211_TX_STATUS_EOSP |
		       IEEE80211_TX_CTL_REQ_TX_STATUS;

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;

	if (call_driver)
		drv_allow_buffered_frames(local, sta, BIT(tid), 1,
					  reason, false);

	skb->dev = sdata->dev;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (WARN_ON(!chanctx_conf)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return;
	}

	info->band = chanctx_conf->def.chan->band;
	ieee80211_xmit(sdata, sta, skb);
	rcu_read_unlock();
}

static int find_highest_prio_tid(unsigned long tids)
{
	/* lower 3 TIDs aren't ordered perfectly */
	if (tids & 0xF8)
		return fls(tids) - 1;
	/* TID 0 is BE just like TID 3 */
	if (tids & BIT(0))
		return 0;
	return fls(tids) - 1;
}

/* Indicates if the MORE_DATA bit should be set in the last
 * frame obtained by ieee80211_sta_ps_get_frames.
 * Note that driver_release_tids is relevant only if
 * reason = IEEE80211_FRAME_RELEASE_PSPOLL
 */
static bool
ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs,
			   enum ieee80211_frame_release_type reason,
			   unsigned long driver_release_tids)
{
	int ac;

	/* If the driver has data on more than one TID then
	 * certainly there's more data if we release just a
	 * single frame now (from a single TID). This will
	 * only happen for PS-Poll.
	 */
	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
	    hweight16(driver_release_tids) > 1)
		return true;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
			continue;

		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
			return true;
	}

	return false;
}

static void
ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs,
			    enum ieee80211_frame_release_type reason,
			    struct sk_buff_head *frames,
			    unsigned long *driver_release_tids)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	int ac;

	/* Get response frame(s) and more data bit for the last one. */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		unsigned long tids;

		if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
			continue;

		tids = ieee80211_tids_for_ac(ac);

		/* if we already have frames from software, then we can't also
		 * release from hardware queues
		 */
		if (skb_queue_empty(frames)) {
			*driver_release_tids |=
				sta->driver_buffered_tids & tids;
			*driver_release_tids |= sta->txq_buffered_tids & tids;
		}

		if (!*driver_release_tids) {
			struct sk_buff *skb;

			while (n_frames > 0) {
				skb = skb_dequeue(&sta->tx_filtered[ac]);
				if (!skb) {
					skb = skb_dequeue(
						&sta->ps_tx_buf[ac]);
					if (skb)
						local->total_ps_buffered--;
				}
				if (!skb)
					break;
				n_frames--;
				__skb_queue_tail(frames, skb);
			}
		}

		/* If we have more frames buffered on this AC, then abort the
		 * loop since we can't send more data from other ACs before
		 * the buffered frames from this.
		 */
		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
			break;
	}
}

static void
ieee80211_sta_ps_deliver_response(struct sta_info *sta,
				  int n_frames, u8 ignored_acs,
				  enum ieee80211_frame_release_type reason)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	unsigned long driver_release_tids = 0;
	struct sk_buff_head frames;
	bool more_data;

	/* Service or PS-Poll period starts */
	set_sta_flag(sta, WLAN_STA_SP);

	__skb_queue_head_init(&frames);

	ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason,
				    &frames, &driver_release_tids);

	more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason,
					       driver_release_tids);

	if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL)
		driver_release_tids =
			BIT(find_highest_prio_tid(driver_release_tids));

	if (skb_queue_empty(&frames) && !driver_release_tids) {
		int tid, ac;

		/*
		 * For PS-Poll, this can only happen due to a race condition
		 * when we set the TIM bit and the station notices it, but
		 * before it can poll for the frame we expire it.
		 *
		 * For uAPSD, this is said in the standard (11.2.1.5 h):
		 *	At each unscheduled SP for a non-AP STA, the AP shall
		 *	attempt to transmit at least one MSDU or MMPDU, but no
		 *	more than the value specified in the Max SP Length field
		 *	in the QoS Capability element from delivery-enabled ACs,
		 *	that are destined for the non-AP STA.
		 *
		 * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
		 */

		/* This will evaluate to 1, 3, 5 or 7. */
		for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
			if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
				break;
		tid = 7 - 2 * ac;

		ieee80211_send_null_response(sta, tid, reason, true, false);
	} else if (!driver_release_tids) {
		struct sk_buff_head pending;
		struct sk_buff *skb;
		int num = 0;
		u16 tids = 0;
		bool need_null = false;

		skb_queue_head_init(&pending);

		while ((skb = __skb_dequeue(&frames))) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
			struct ieee80211_hdr *hdr = (void *) skb->data;
			u8 *qoshdr = NULL;

			num++;

			/*
			 * Tell TX path to send this frame even though the
			 * STA may still remain in PS mode after this frame
			 * exchange.
			 */
			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
			info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;

			/*
			 * Use MoreData flag to indicate whether there are
			 * more buffered frames for this STA
			 */
			if (more_data || !skb_queue_empty(&frames))
				hdr->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
			else
				hdr->frame_control &=
					cpu_to_le16(~IEEE80211_FCTL_MOREDATA);

			if (ieee80211_is_data_qos(hdr->frame_control) ||
			    ieee80211_is_qos_nullfunc(hdr->frame_control))
				qoshdr = ieee80211_get_qos_ctl(hdr);

			tids |= BIT(skb->priority);

			__skb_queue_tail(&pending, skb);

			/* end service period after last frame or add one */
			if (!skb_queue_empty(&frames))
				continue;

			if (reason != IEEE80211_FRAME_RELEASE_UAPSD) {
				/* for PS-Poll, there's only one frame */
				info->flags |= IEEE80211_TX_STATUS_EOSP |
					       IEEE80211_TX_CTL_REQ_TX_STATUS;
				break;
			}

			/* For uAPSD, things are a bit more complicated. If the
			 * last frame has a QoS header (i.e. is a QoS-data or
			 * QoS-nulldata frame) then just set the EOSP bit there
			 * and be done.
			 * If the frame doesn't have a QoS header (which means
			 * it should be a bufferable MMPDU) then we can't set
			 * the EOSP bit in the QoS header; add a QoS-nulldata
			 * frame to the list to send it after the MMPDU.
			 *
			 * Note that this code is only in the mac80211-release
			 * code path, we assume that the driver will not buffer
			 * anything but QoS-data frames, or if it does, will
			 * create the QoS-nulldata frame by itself if needed.
			 *
			 * Cf. 802.11-2012 10.2.1.10 (c).
			 */
			if (qoshdr) {
				*qoshdr |= IEEE80211_QOS_CTL_EOSP;

				info->flags |= IEEE80211_TX_STATUS_EOSP |
					       IEEE80211_TX_CTL_REQ_TX_STATUS;
			} else {
				/* The standard isn't completely clear on this
				 * as it says the more-data bit should be set
				 * if there are more BUs. The QoS-Null frame
				 * we're about to send isn't buffered yet, we
				 * only create it below, but let's pretend it
				 * was buffered just in case some clients only
				 * expect more-data=0 when eosp=1.
				 */
				hdr->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
				need_null = true;
				num++;
			}
			break;
		}

		drv_allow_buffered_frames(local, sta, tids, num,
					  reason, more_data);

		ieee80211_add_pending_skbs(local, &pending);

		if (need_null)
			ieee80211_send_null_response(
				sta, find_highest_prio_tid(tids),
				reason, false, false);

		sta_info_recalc_tim(sta);
	} else {
		int tid;

		/*
		 * We need to release a frame that is buffered somewhere in the
		 * driver ... it'll have to handle that.
		 * Note that the driver also has to check the number of frames
		 * on the TIDs we're releasing from - if there are more than
		 * n_frames it has to set the more-data bit (if we didn't ask
		 * it to set it anyway due to other buffered frames); if there
		 * are fewer than n_frames it has to make sure to adjust that
		 * to allow the service period to end properly.
		 */
		drv_release_buffered_frames(local, sta, driver_release_tids,
					    n_frames, reason, more_data);

		/*
		 * Note that we don't recalculate the TIM bit here as it would
		 * most likely have no effect at all unless the driver told us
		 * that the TID(s) became empty before returning here from the
		 * release function.
		 * Either way, however, when the driver tells us that the TID(s)
		 * became empty or we find that a txq became empty, we'll do the
		 * TIM recalculation.
		 */

		if (!sta->sta.txq[0])
			return;

		for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
			if (!sta->sta.txq[tid] ||
			    !(driver_release_tids & BIT(tid)) ||
			    txq_has_queue(sta->sta.txq[tid]))
				continue;

			sta_info_recalc_tim(sta);
			break;
		}
	}
}

void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
{
	u8 ignore_for_response = sta->sta.uapsd_queues;

	/*
	 * If all ACs are delivery-enabled then we should reply
	 * from any of them, if only some are enabled we reply
	 * only from the non-enabled ones.
	 */
	if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1)
		ignore_for_response = 0;

	ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response,
					  IEEE80211_FRAME_RELEASE_PSPOLL);
}

void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta)
{
	int n_frames = sta->sta.max_sp;
	u8 delivery_enabled = sta->sta.uapsd_queues;

	/*
	 * If we ever grow support for TSPEC this might happen if
	 * the TSPEC update from hostapd comes in between a trigger
	 * frame setting WLAN_STA_UAPSD in the RX path and this
	 * actually getting called.
	 */
	if (!delivery_enabled)
		return;

	switch (sta->sta.max_sp) {
	case 1:
		n_frames = 2;
		break;
	case 2:
		n_frames = 4;
		break;
	case 3:
		n_frames = 6;
		break;
	case 0:
		/* XXX: what is a good value? */
		n_frames = 128;
		break;
	}

	ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled,
					  IEEE80211_FRAME_RELEASE_UAPSD);
}

void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
			       struct ieee80211_sta *pubsta, bool block)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	trace_api_sta_block_awake(sta->local, pubsta, block);

	if (block) {
		set_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_clear_fast_xmit(sta);
		return;
	}

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
		set_sta_flag(sta, WLAN_STA_PS_DELIVER);
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_queue_work(hw, &sta->drv_deliver_wk);
	} else if (test_sta_flag(sta, WLAN_STA_PSPOLL) ||
		   test_sta_flag(sta, WLAN_STA_UAPSD)) {
		/* must be asleep in this case */
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_queue_work(hw, &sta->drv_deliver_wk);
	} else {
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_check_fast_xmit(sta);
	}
}
EXPORT_SYMBOL(ieee80211_sta_block_awake);

void ieee80211_sta_eosp(struct ieee80211_sta *pubsta)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_local *local = sta->local;

	trace_api_eosp(local, pubsta);

	clear_sta_flag(sta, WLAN_STA_SP);
}
EXPORT_SYMBOL(ieee80211_sta_eosp);

void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	enum ieee80211_frame_release_type reason;
	bool more_data;

	trace_api_send_eosp_nullfunc(sta->local, pubsta, tid);

	reason = IEEE80211_FRAME_RELEASE_UAPSD;
	more_data = ieee80211_sta_ps_more_data(sta, ~sta->sta.uapsd_queues,
					       reason, 0);

	ieee80211_send_null_response(sta, tid, reason, false, more_data);
}
EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc);

void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
				u8 tid, bool buffered)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
		return;

	trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered);

	if (buffered)
		set_bit(tid, &sta->driver_buffered_tids);
	else
		clear_bit(tid, &sta->driver_buffered_tids);

	sta_info_recalc_tim(sta);
}
EXPORT_SYMBOL(ieee80211_sta_set_buffered);

void ieee80211_register_airtime(struct ieee80211_txq *txq,
				u32 tx_airtime, u32 rx_airtime)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
	struct ieee80211_local *local = sdata->local;
	u64 weight_sum, weight_sum_reciprocal;
	struct airtime_sched_info *air_sched;
	struct airtime_info *air_info;
	u32 airtime = 0;

	air_sched = &local->airtime[txq->ac];
	air_info = to_airtime_info(txq);

	if (local->airtime_flags & AIRTIME_USE_TX)
		airtime += tx_airtime;
	if (local->airtime_flags & AIRTIME_USE_RX)
		airtime += rx_airtime;

	/* Weights scale so the unit weight is 256 */
	airtime <<= 8;

	spin_lock_bh(&air_sched->lock);

	air_info->tx_airtime += tx_airtime;
	air_info->rx_airtime += rx_airtime;

	if (air_sched->weight_sum) {
		weight_sum = air_sched->weight_sum;
		weight_sum_reciprocal = air_sched->weight_sum_reciprocal;
	} else {
		weight_sum = air_info->weight;
		weight_sum_reciprocal = air_info->weight_reciprocal;
	}

	/* Round the calculation of global vt */
	air_sched->v_t += (u64)((airtime + (weight_sum >> 1)) *
				weight_sum_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_64;
	air_info->v_t += (u32)((airtime + (air_info->weight >> 1)) *
			       air_info->weight_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_32;
	ieee80211_resort_txq(&local->hw, txq);

	spin_unlock_bh(&air_sched->lock);
}

void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
				    u32 tx_airtime, u32 rx_airtime)
{
	struct ieee80211_txq *txq = pubsta->txq[tid];

	if (!txq)
		return;

	ieee80211_register_airtime(txq, tx_airtime, rx_airtime);
}
EXPORT_SYMBOL(ieee80211_sta_register_airtime);

void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
					  struct sta_info *sta, u8 ac,
					  u16 tx_airtime, bool tx_completed)
{
	int tx_pending;

	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
		return;

	if (!tx_completed) {
		if (sta)
			atomic_add(tx_airtime,
				   &sta->airtime[ac].aql_tx_pending);

		atomic_add(tx_airtime, &local->aql_total_pending_airtime);
		return;
	}

	if (sta) {
		tx_pending = atomic_sub_return(tx_airtime,
					       &sta->airtime[ac].aql_tx_pending);
		if (tx_pending < 0)
			atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending,
				       tx_pending, 0);
	}

	tx_pending = atomic_sub_return(tx_airtime,
				       &local->aql_total_pending_airtime);
	if (WARN_ONCE(tx_pending < 0,
		      "Device %s AC %d pending airtime underflow: %u, %u",
		      wiphy_name(local->hw.wiphy), ac, tx_pending,
		      tx_airtime))
atomic_cmpxchg(&local->aql_total_pending_airtime, 1983 tx_pending, 0); 1984 } 1985 1986 int sta_info_move_state(struct sta_info *sta, 1987 enum ieee80211_sta_state new_state) 1988 { 1989 might_sleep(); 1990 1991 if (sta->sta_state == new_state) 1992 return 0; 1993 1994 /* check allowed transitions first */ 1995 1996 switch (new_state) { 1997 case IEEE80211_STA_NONE: 1998 if (sta->sta_state != IEEE80211_STA_AUTH) 1999 return -EINVAL; 2000 break; 2001 case IEEE80211_STA_AUTH: 2002 if (sta->sta_state != IEEE80211_STA_NONE && 2003 sta->sta_state != IEEE80211_STA_ASSOC) 2004 return -EINVAL; 2005 break; 2006 case IEEE80211_STA_ASSOC: 2007 if (sta->sta_state != IEEE80211_STA_AUTH && 2008 sta->sta_state != IEEE80211_STA_AUTHORIZED) 2009 return -EINVAL; 2010 break; 2011 case IEEE80211_STA_AUTHORIZED: 2012 if (sta->sta_state != IEEE80211_STA_ASSOC) 2013 return -EINVAL; 2014 break; 2015 default: 2016 WARN(1, "invalid state %d", new_state); 2017 return -EINVAL; 2018 } 2019 2020 sta_dbg(sta->sdata, "moving STA %pM to state %d\n", 2021 sta->sta.addr, new_state); 2022 2023 /* 2024 * notify the driver before the actual changes so it can 2025 * fail the transition 2026 */ 2027 if (test_sta_flag(sta, WLAN_STA_INSERTED)) { 2028 int err = drv_sta_state(sta->local, sta->sdata, sta, 2029 sta->sta_state, new_state); 2030 if (err) 2031 return err; 2032 } 2033 2034 /* reflect the change in all state variables */ 2035 2036 switch (new_state) { 2037 case IEEE80211_STA_NONE: 2038 if (sta->sta_state == IEEE80211_STA_AUTH) 2039 clear_bit(WLAN_STA_AUTH, &sta->_flags); 2040 break; 2041 case IEEE80211_STA_AUTH: 2042 if (sta->sta_state == IEEE80211_STA_NONE) { 2043 set_bit(WLAN_STA_AUTH, &sta->_flags); 2044 } else if (sta->sta_state == IEEE80211_STA_ASSOC) { 2045 clear_bit(WLAN_STA_ASSOC, &sta->_flags); 2046 ieee80211_recalc_min_chandef(sta->sdata); 2047 if (!sta->sta.support_p2p_ps) 2048 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 2049 } 2050 break; 2051 case IEEE80211_STA_ASSOC: 2052 if (sta->sta_state == IEEE80211_STA_AUTH) { 2053 set_bit(WLAN_STA_ASSOC, &sta->_flags); 2054 sta->assoc_at = ktime_get_boottime_ns(); 2055 ieee80211_recalc_min_chandef(sta->sdata); 2056 if (!sta->sta.support_p2p_ps) 2057 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 2058 } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 2059 ieee80211_vif_dec_num_mcast(sta->sdata); 2060 clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 2061 ieee80211_clear_fast_xmit(sta); 2062 ieee80211_clear_fast_rx(sta); 2063 } 2064 break; 2065 case IEEE80211_STA_AUTHORIZED: 2066 if (sta->sta_state == IEEE80211_STA_ASSOC) { 2067 ieee80211_vif_inc_num_mcast(sta->sdata); 2068 set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 2069 ieee80211_check_fast_xmit(sta); 2070 ieee80211_check_fast_rx(sta); 2071 } 2072 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 2073 sta->sdata->vif.type == NL80211_IFTYPE_AP) 2074 cfg80211_send_layer2_update(sta->sdata->dev, 2075 sta->sta.addr); 2076 break; 2077 default: 2078 break; 2079 } 2080 2081 sta->sta_state = new_state; 2082 2083 return 0; 2084 } 2085 2086 u8 sta_info_tx_streams(struct sta_info *sta) 2087 { 2088 struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.ht_cap; 2089 u8 rx_streams; 2090 2091 if (!sta->sta.ht_cap.ht_supported) 2092 return 1; 2093 2094 if (sta->sta.vht_cap.vht_supported) { 2095 int i; 2096 u16 tx_mcs_map = 2097 le16_to_cpu(sta->sta.vht_cap.vht_mcs.tx_mcs_map); 2098 2099 for (i = 7; i >= 0; i--) 2100 if ((tx_mcs_map & (0x3 << (i * 2))) != 2101 IEEE80211_VHT_MCS_NOT_SUPPORTED) 2102 return i + 1; 2103 } 2104 2105 if 
(ht_cap->mcs.rx_mask[3]) 2106 rx_streams = 4; 2107 else if (ht_cap->mcs.rx_mask[2]) 2108 rx_streams = 3; 2109 else if (ht_cap->mcs.rx_mask[1]) 2110 rx_streams = 2; 2111 else 2112 rx_streams = 1; 2113 2114 if (!(ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_RX_DIFF)) 2115 return rx_streams; 2116 2117 return ((ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) 2118 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1; 2119 } 2120 2121 static struct ieee80211_sta_rx_stats * 2122 sta_get_last_rx_stats(struct sta_info *sta) 2123 { 2124 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; 2125 int cpu; 2126 2127 if (!sta->pcpu_rx_stats) 2128 return stats; 2129 2130 for_each_possible_cpu(cpu) { 2131 struct ieee80211_sta_rx_stats *cpustats; 2132 2133 cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2134 2135 if (time_after(cpustats->last_rx, stats->last_rx)) 2136 stats = cpustats; 2137 } 2138 2139 return stats; 2140 } 2141 2142 static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, 2143 struct rate_info *rinfo) 2144 { 2145 rinfo->bw = STA_STATS_GET(BW, rate); 2146 2147 switch (STA_STATS_GET(TYPE, rate)) { 2148 case STA_STATS_RATE_TYPE_VHT: 2149 rinfo->flags = RATE_INFO_FLAGS_VHT_MCS; 2150 rinfo->mcs = STA_STATS_GET(VHT_MCS, rate); 2151 rinfo->nss = STA_STATS_GET(VHT_NSS, rate); 2152 if (STA_STATS_GET(SGI, rate)) 2153 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 2154 break; 2155 case STA_STATS_RATE_TYPE_HT: 2156 rinfo->flags = RATE_INFO_FLAGS_MCS; 2157 rinfo->mcs = STA_STATS_GET(HT_MCS, rate); 2158 if (STA_STATS_GET(SGI, rate)) 2159 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 2160 break; 2161 case STA_STATS_RATE_TYPE_LEGACY: { 2162 struct ieee80211_supported_band *sband; 2163 u16 brate; 2164 unsigned int shift; 2165 int band = STA_STATS_GET(LEGACY_BAND, rate); 2166 int rate_idx = STA_STATS_GET(LEGACY_IDX, rate); 2167 2168 sband = local->hw.wiphy->bands[band]; 2169 2170 if (WARN_ON_ONCE(!sband->bitrates)) 2171 break; 2172 2173 brate = sband->bitrates[rate_idx].bitrate; 2174 if (rinfo->bw == RATE_INFO_BW_5) 2175 shift = 2; 2176 else if (rinfo->bw == RATE_INFO_BW_10) 2177 shift = 1; 2178 else 2179 shift = 0; 2180 rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); 2181 break; 2182 } 2183 case STA_STATS_RATE_TYPE_HE: 2184 rinfo->flags = RATE_INFO_FLAGS_HE_MCS; 2185 rinfo->mcs = STA_STATS_GET(HE_MCS, rate); 2186 rinfo->nss = STA_STATS_GET(HE_NSS, rate); 2187 rinfo->he_gi = STA_STATS_GET(HE_GI, rate); 2188 rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate); 2189 rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate); 2190 break; 2191 } 2192 } 2193 2194 static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) 2195 { 2196 u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); 2197 2198 if (rate == STA_STATS_RATE_INVALID) 2199 return -EINVAL; 2200 2201 sta_stats_decode_rate(sta->local, rate, rinfo); 2202 return 0; 2203 } 2204 2205 static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats, 2206 int tid) 2207 { 2208 unsigned int start; 2209 u64 value; 2210 2211 do { 2212 start = u64_stats_fetch_begin(&rxstats->syncp); 2213 value = rxstats->msdu[tid]; 2214 } while (u64_stats_fetch_retry(&rxstats->syncp, start)); 2215 2216 return value; 2217 } 2218 2219 static void sta_set_tidstats(struct sta_info *sta, 2220 struct cfg80211_tid_stats *tidstats, 2221 int tid) 2222 { 2223 struct ieee80211_local *local = sta->local; 2224 int cpu; 2225 2226 if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) { 2227 tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->rx_stats, 
tid); 2228 2229 if (sta->pcpu_rx_stats) { 2230 for_each_possible_cpu(cpu) { 2231 struct ieee80211_sta_rx_stats *cpurxs; 2232 2233 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2234 tidstats->rx_msdu += 2235 sta_get_tidstats_msdu(cpurxs, tid); 2236 } 2237 } 2238 2239 tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU); 2240 } 2241 2242 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { 2243 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU); 2244 tidstats->tx_msdu = sta->tx_stats.msdu[tid]; 2245 } 2246 2247 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) && 2248 ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { 2249 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); 2250 tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid]; 2251 } 2252 2253 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) && 2254 ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { 2255 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED); 2256 tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid]; 2257 } 2258 2259 if (local->ops->wake_tx_queue && tid < IEEE80211_NUM_TIDS) { 2260 spin_lock_bh(&local->fq.lock); 2261 rcu_read_lock(); 2262 2263 tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS); 2264 ieee80211_fill_txq_stats(&tidstats->txq_stats, 2265 to_txq_info(sta->sta.txq[tid])); 2266 2267 rcu_read_unlock(); 2268 spin_unlock_bh(&local->fq.lock); 2269 } 2270 } 2271 2272 static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats) 2273 { 2274 unsigned int start; 2275 u64 value; 2276 2277 do { 2278 start = u64_stats_fetch_begin(&rxstats->syncp); 2279 value = rxstats->bytes; 2280 } while (u64_stats_fetch_retry(&rxstats->syncp, start)); 2281 2282 return value; 2283 } 2284 2285 void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, 2286 bool tidstats) 2287 { 2288 struct ieee80211_sub_if_data *sdata = sta->sdata; 2289 struct ieee80211_local *local = sdata->local; 2290 u32 thr = 0; 2291 int i, ac, cpu; 2292 struct ieee80211_sta_rx_stats *last_rxstats; 2293 2294 last_rxstats = sta_get_last_rx_stats(sta); 2295 2296 sinfo->generation = sdata->local->sta_generation; 2297 2298 /* do before driver, so beacon filtering drivers have a 2299 * chance to e.g. 
just add the number of filtered beacons 2300 * (or just modify the value entirely, of course) 2301 */ 2302 if (sdata->vif.type == NL80211_IFTYPE_STATION) 2303 sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal; 2304 2305 drv_sta_statistics(local, sdata, &sta->sta, sinfo); 2306 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | 2307 BIT_ULL(NL80211_STA_INFO_STA_FLAGS) | 2308 BIT_ULL(NL80211_STA_INFO_BSS_PARAM) | 2309 BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) | 2310 BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) | 2311 BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); 2312 2313 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 2314 sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count; 2315 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS); 2316 } 2317 2318 sinfo->connected_time = ktime_get_seconds() - sta->last_connected; 2319 sinfo->assoc_at = sta->assoc_at; 2320 sinfo->inactive_time = 2321 jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta)); 2322 2323 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | 2324 BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { 2325 sinfo->tx_bytes = 0; 2326 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2327 sinfo->tx_bytes += sta->tx_stats.bytes[ac]; 2328 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); 2329 } 2330 2331 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { 2332 sinfo->tx_packets = 0; 2333 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2334 sinfo->tx_packets += sta->tx_stats.packets[ac]; 2335 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); 2336 } 2337 2338 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) | 2339 BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) { 2340 sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats); 2341 2342 if (sta->pcpu_rx_stats) { 2343 for_each_possible_cpu(cpu) { 2344 struct ieee80211_sta_rx_stats *cpurxs; 2345 2346 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2347 sinfo->rx_bytes += sta_get_stats_bytes(cpurxs); 2348 } 2349 } 2350 2351 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); 2352 } 2353 2354 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { 2355 sinfo->rx_packets = sta->rx_stats.packets; 2356 if (sta->pcpu_rx_stats) { 2357 for_each_possible_cpu(cpu) { 2358 struct ieee80211_sta_rx_stats *cpurxs; 2359 2360 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2361 sinfo->rx_packets += cpurxs->packets; 2362 } 2363 } 2364 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); 2365 } 2366 2367 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) { 2368 sinfo->tx_retries = sta->status_stats.retry_count; 2369 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); 2370 } 2371 2372 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) { 2373 sinfo->tx_failed = sta->status_stats.retry_failed; 2374 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); 2375 } 2376 2377 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) { 2378 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2379 sinfo->rx_duration += sta->airtime[ac].rx_airtime; 2380 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); 2381 } 2382 2383 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) { 2384 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2385 sinfo->tx_duration += sta->airtime[ac].tx_airtime; 2386 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); 2387 } 2388 2389 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) { 2390 sinfo->airtime_weight = sta->airtime[0].weight; 2391 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT); 2392 } 2393 2394 
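	/*
	 * rx_dropped_misc was already marked as filled above; as with
	 * rx_bytes and rx_packets, sum the per-CPU counters when the
	 * driver uses per-CPU RX statistics.
	 */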
sinfo->rx_dropped_misc = sta->rx_stats.dropped; 2395 if (sta->pcpu_rx_stats) { 2396 for_each_possible_cpu(cpu) { 2397 struct ieee80211_sta_rx_stats *cpurxs; 2398 2399 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2400 sinfo->rx_dropped_misc += cpurxs->dropped; 2401 } 2402 } 2403 2404 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2405 !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { 2406 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) | 2407 BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); 2408 sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif); 2409 } 2410 2411 if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || 2412 ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { 2413 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) { 2414 sinfo->signal = (s8)last_rxstats->last_signal; 2415 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); 2416 } 2417 2418 if (!sta->pcpu_rx_stats && 2419 !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) { 2420 sinfo->signal_avg = 2421 -ewma_signal_read(&sta->rx_stats_avg.signal); 2422 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 2423 } 2424 } 2425 2426 /* for the average - if pcpu_rx_stats isn't set - rxstats must point to 2427 * the sta->rx_stats struct, so the check here is fine with and without 2428 * pcpu statistics 2429 */ 2430 if (last_rxstats->chains && 2431 !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) | 2432 BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { 2433 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); 2434 if (!sta->pcpu_rx_stats) 2435 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); 2436 2437 sinfo->chains = last_rxstats->chains; 2438 2439 for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { 2440 sinfo->chain_signal[i] = 2441 last_rxstats->chain_signal_last[i]; 2442 sinfo->chain_signal_avg[i] = 2443 -ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]); 2444 } 2445 } 2446 2447 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) { 2448 sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, 2449 &sinfo->txrate); 2450 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 2451 } 2452 2453 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) { 2454 if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0) 2455 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); 2456 } 2457 2458 if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) { 2459 for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) 2460 sta_set_tidstats(sta, &sinfo->pertid[i], i); 2461 } 2462 2463 if (ieee80211_vif_is_mesh(&sdata->vif)) { 2464 #ifdef CONFIG_MAC80211_MESH 2465 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) | 2466 BIT_ULL(NL80211_STA_INFO_PLID) | 2467 BIT_ULL(NL80211_STA_INFO_PLINK_STATE) | 2468 BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | 2469 BIT_ULL(NL80211_STA_INFO_PEER_PM) | 2470 BIT_ULL(NL80211_STA_INFO_NONPEER_PM) | 2471 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) | 2472 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS); 2473 2474 sinfo->llid = sta->mesh->llid; 2475 sinfo->plid = sta->mesh->plid; 2476 sinfo->plink_state = sta->mesh->plink_state; 2477 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { 2478 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET); 2479 sinfo->t_offset = sta->mesh->t_offset; 2480 } 2481 sinfo->local_pm = sta->mesh->local_pm; 2482 sinfo->peer_pm = sta->mesh->peer_pm; 2483 sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; 2484 sinfo->connected_to_gate = sta->mesh->connected_to_gate; 2485 sinfo->connected_to_as = sta->mesh->connected_to_as; 2486 #endif 2487 } 2488 
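	/*
	 * BSS parameters and station flags are reported unconditionally;
	 * translate the internal WLAN_STA_* flags into their nl80211
	 * counterparts below.
	 */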
2489 sinfo->bss_param.flags = 0; 2490 if (sdata->vif.bss_conf.use_cts_prot) 2491 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT; 2492 if (sdata->vif.bss_conf.use_short_preamble) 2493 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; 2494 if (sdata->vif.bss_conf.use_short_slot) 2495 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; 2496 sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period; 2497 sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; 2498 2499 sinfo->sta_flags.set = 0; 2500 sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) | 2501 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | 2502 BIT(NL80211_STA_FLAG_WME) | 2503 BIT(NL80211_STA_FLAG_MFP) | 2504 BIT(NL80211_STA_FLAG_AUTHENTICATED) | 2505 BIT(NL80211_STA_FLAG_ASSOCIATED) | 2506 BIT(NL80211_STA_FLAG_TDLS_PEER); 2507 if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 2508 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED); 2509 if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE)) 2510 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE); 2511 if (sta->sta.wme) 2512 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME); 2513 if (test_sta_flag(sta, WLAN_STA_MFP)) 2514 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP); 2515 if (test_sta_flag(sta, WLAN_STA_AUTH)) 2516 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED); 2517 if (test_sta_flag(sta, WLAN_STA_ASSOC)) 2518 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED); 2519 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) 2520 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); 2521 2522 thr = sta_get_expected_throughput(sta); 2523 2524 if (thr != 0) { 2525 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT); 2526 sinfo->expected_throughput = thr; 2527 } 2528 2529 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) && 2530 sta->status_stats.ack_signal_filled) { 2531 sinfo->ack_signal = sta->status_stats.last_ack_signal; 2532 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); 2533 } 2534 2535 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) && 2536 sta->status_stats.ack_signal_filled) { 2537 sinfo->avg_ack_signal = 2538 -(s8)ewma_avg_signal_read( 2539 &sta->status_stats.avg_ack_signal); 2540 sinfo->filled |= 2541 BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); 2542 } 2543 2544 if (ieee80211_vif_is_mesh(&sdata->vif)) { 2545 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC); 2546 sinfo->airtime_link_metric = 2547 airtime_link_metric_get(local, sta); 2548 } 2549 } 2550 2551 u32 sta_get_expected_throughput(struct sta_info *sta) 2552 { 2553 struct ieee80211_sub_if_data *sdata = sta->sdata; 2554 struct ieee80211_local *local = sdata->local; 2555 struct rate_control_ref *ref = NULL; 2556 u32 thr = 0; 2557 2558 if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) 2559 ref = local->rate_ctrl; 2560 2561 /* check if the driver has a SW RC implementation */ 2562 if (ref && ref->ops->get_expected_throughput) 2563 thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv); 2564 else 2565 thr = drv_get_expected_throughput(local, sta); 2566 2567 return thr; 2568 } 2569 2570 unsigned long ieee80211_sta_last_active(struct sta_info *sta) 2571 { 2572 struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); 2573 2574 if (!sta->status_stats.last_ack || 2575 time_after(stats->last_rx, sta->status_stats.last_ack)) 2576 return stats->last_rx; 2577 return sta->status_stats.last_ack; 2578 } 2579 2580 static void sta_update_codel_params(struct sta_info *sta, u32 thr) 2581 { 2582 if 
(!sta->sdata->local->ops->wake_tx_queue) 2583 return; 2584 2585 if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) { 2586 sta->cparams.target = MS2TIME(50); 2587 sta->cparams.interval = MS2TIME(300); 2588 sta->cparams.ecn = false; 2589 } else { 2590 sta->cparams.target = MS2TIME(20); 2591 sta->cparams.interval = MS2TIME(100); 2592 sta->cparams.ecn = true; 2593 } 2594 } 2595 2596 void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta, 2597 u32 thr) 2598 { 2599 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2600 2601 sta_update_codel_params(sta, thr); 2602 } 2603
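/*
 * Illustrative sketch (not part of mac80211): one way a driver's TX-status
 * path might feed the airtime and power-save bookkeeping implemented in
 * this file. The drv_report_tx_status() wrapper and its parameters are
 * hypothetical; the mac80211 calls it uses, ieee80211_sta_register_airtime()
 * and ieee80211_sta_set_buffered(), are the ones exported above.
 *
 *	static void drv_report_tx_status(struct ieee80211_sta *pubsta, u8 tid,
 *					 u32 airtime_us, bool frames_pending)
 *	{
 *		// charge the measured TX airtime to this station's
 *		// airtime-fairness accounting (no RX airtime here)
 *		ieee80211_sta_register_airtime(pubsta, tid, airtime_us, 0);
 *
 *		// let mac80211 recalculate the TIM bit based on whether the
 *		// device still holds buffered frames for this TID
 *		ieee80211_sta_set_buffered(pubsta, tid, frames_pending);
 *	}
 */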