// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <linux/rtnetlink.h>

#include <net/codel.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "sta_info.h"
#include "debugfs_sta.h"
#include "mesh.h"
#include "wme.h"

/**
 * DOC: STA information lifetime rules
 *
 * STA info structures (&struct sta_info) are managed in a hash table
 * for faster lookup and a list for iteration. They are managed using
 * RCU, i.e. access to the list and hash table is protected by RCU.
 *
 * Upon allocating a STA info structure with sta_info_alloc(), the caller
 * owns that structure. It must then insert it into the hash table using
 * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
 * case (which acquires an rcu read section but must not be called from
 * within one) will the pointer still be valid after the call. Note that
 * the caller may not do much with the STA info before inserting it, in
 * particular, it may not start any mesh peer link management or add
 * encryption keys.
 *
 * When the insertion fails (sta_info_insert() returns non-zero), the
 * structure will have been freed by sta_info_insert()!
 *
 * Station entries are added by mac80211 when you establish a link with a
 * peer. This means different things for the different types of interfaces
 * we support. For a regular station this means we add the AP sta when we
 * receive an association response from the AP. For IBSS this occurs when
 * we get to know about a peer on the same IBSS. For WDS we add the sta for
 * the peer immediately upon device open. When using AP mode we add stations
 * upon request from userspace through nl80211.
 *
 * In order to remove a STA info structure, various sta_info_destroy_*()
 * calls are available.
 *
 * There is no concept of ownership on a STA entry, each structure is
 * owned by the global hash table/list until it is removed. All users of
 * the structure need to be RCU protected so that the structure won't be
 * freed before they are done using it.
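 *
 * For illustration only (a sketch, not a verbatim snippet from any
 * particular caller), the typical sequence on an AP interface roughly
 * looks like:
 *
 *	sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
 *	if (!sta)
 *		return -ENOMEM;
 *	... fill in rates, capabilities, etc. ...
 *	err = sta_info_insert(sta);
 *	if (err)
 *		return err;	(sta was already freed by the failed insert)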
 */

static const struct rhashtable_params sta_rht_params = {
	.nelem_hint = 3, /* start small */
	.automatic_shrinking = true,
	.head_offset = offsetof(struct sta_info, hash_node),
	.key_offset = offsetof(struct sta_info, addr),
	.key_len = ETH_ALEN,
	.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
};

/* Caller must hold local->sta_mtx */
static int sta_info_hash_del(struct ieee80211_local *local,
			     struct sta_info *sta)
{
	return rhltable_remove(&local->sta_hash, &sta->hash_node,
			       sta_rht_params);
}

static void __cleanup_single_sta(struct sta_info *sta)
{
	int ac, i;
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;

	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
	    test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
	    test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
		if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
		    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			ps = &sdata->bss->ps;
		else if (ieee80211_vif_is_mesh(&sdata->vif))
			ps = &sdata->u.mesh.ps;
		else
			return;

		clear_sta_flag(sta, WLAN_STA_PS_STA);
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		clear_sta_flag(sta, WLAN_STA_PS_DELIVER);

		atomic_dec(&ps->num_sta_ps);
	}

	if (sta->sta.txq[0]) {
		for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
			struct txq_info *txqi;

			if (!sta->sta.txq[i])
				continue;

			txqi = to_txq_info(sta->sta.txq[i]);

			ieee80211_txq_purge(local, txqi);
		}
	}

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
		ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
		ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif))
		mesh_sta_cleanup(sta);

	cancel_work_sync(&sta->drv_deliver_wk);

	/*
	 * Destroy aggregation state here. It would be nice to wait for the
	 * driver to finish aggregation stop and then clean up, but for now
	 * drivers have to handle aggregation stop being requested, followed
	 * directly by station destruction.
	 */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		kfree(sta->ampdu_mlme.tid_start_tx[i]);
		tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
		if (!tid_tx)
			continue;
		ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
		kfree(tid_tx);
	}
}

static void cleanup_single_sta(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	__cleanup_single_sta(sta);
	sta_info_free(local, sta);
}

struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
					 const u8 *addr)
{
	return rhltable_lookup(&local->sta_hash, addr, sta_rht_params);
}

/* protected by RCU */
struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
			      const u8 *addr)
{
	struct ieee80211_local *local = sdata->local;
	struct rhlist_head *tmp;
	struct sta_info *sta;

	rcu_read_lock();
	for_each_sta_info(local, addr, sta, tmp) {
		if (sta->sdata == sdata) {
			rcu_read_unlock();
			/* this is safe as the caller must already hold
			 * another rcu read section or the mutex
			 */
			return sta;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * Get sta info either from the specified interface
 * or from one of its vlans
 */
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
				  const u8 *addr)
{
	struct ieee80211_local *local = sdata->local;
	struct rhlist_head *tmp;
	struct sta_info *sta;

	rcu_read_lock();
	for_each_sta_info(local, addr, sta, tmp) {
		if (sta->sdata == sdata ||
		    (sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
			rcu_read_unlock();
			/* this is safe as the caller must already hold
			 * another rcu read section or the mutex
			 */
			return sta;
		}
	}
	rcu_read_unlock();
	return NULL;
}

struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local,
				       const u8 *sta_addr, const u8 *vif_addr)
{
	struct rhlist_head *tmp;
	struct sta_info *sta;

	for_each_sta_info(local, sta_addr, sta, tmp) {
		if (ether_addr_equal(vif_addr, sta->sdata->vif.addr))
			return sta;
	}

	return NULL;
}

struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
				     int idx)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	int i = 0;

	list_for_each_entry_rcu(sta, &local->sta_list, list,
				lockdep_is_held(&local->sta_mtx)) {
		if (sdata != sta->sdata)
			continue;
		if (i < idx) {
			++i;
			continue;
		}
		return sta;
	}

	return NULL;
}

/**
 * sta_info_free - free STA
 *
 * @local: pointer to the global information
 * @sta: STA info to free
 *
 * This function must undo everything done by sta_info_alloc()
 * that may happen before sta_info_insert(). It may only be
 * called when sta_info_insert() has not been attempted (and
 * if that fails, the station is freed anyway.)
 */
void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
	if (sta->rate_ctrl)
		rate_control_free_sta(sta);

	sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);

	if (sta->sta.txq[0])
		kfree(to_txq_info(sta->sta.txq[0]));
	kfree(rcu_dereference_raw(sta->sta.rates));
#ifdef CONFIG_MAC80211_MESH
	kfree(sta->mesh);
#endif
	free_percpu(sta->pcpu_rx_stats);
	kfree(sta);
}

/* Caller must hold local->sta_mtx */
static int sta_info_hash_add(struct ieee80211_local *local,
			     struct sta_info *sta)
{
	return rhltable_insert(&local->sta_hash, &sta->hash_node,
			       sta_rht_params);
}

static void sta_deliver_ps_frames(struct work_struct *wk)
{
	struct sta_info *sta;

	sta = container_of(wk, struct sta_info, drv_deliver_wk);

	if (sta->dead)
		return;

	local_bh_disable();
	if (!test_sta_flag(sta, WLAN_STA_PS_STA))
		ieee80211_sta_ps_deliver_wakeup(sta);
	else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL))
		ieee80211_sta_ps_deliver_poll_response(sta);
	else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD))
		ieee80211_sta_ps_deliver_uapsd(sta);
	local_bh_enable();
}

static int sta_prepare_rate_control(struct ieee80211_local *local,
				    struct sta_info *sta, gfp_t gfp)
{
	if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
		return 0;

	sta->rate_ctrl = local->rate_ctrl;
	sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
						     sta, gfp);
	if (!sta->rate_ctrl_priv)
		return -ENOMEM;

	return 0;
}

struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
				const u8 *addr, gfp_t gfp)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hw *hw = &local->hw;
	struct sta_info *sta;
	int i;

	sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
	if (!sta)
		return NULL;

	if (ieee80211_hw_check(hw, USES_RSS)) {
		sta->pcpu_rx_stats =
			alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
		if (!sta->pcpu_rx_stats)
			goto free;
	}

	spin_lock_init(&sta->lock);
	spin_lock_init(&sta->ps_lock);
	INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
	mutex_init(&sta->ampdu_mlme.mtx);
#ifdef CONFIG_MAC80211_MESH
	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
		if (!sta->mesh)
			goto free;
		sta->mesh->plink_sta = sta;
		spin_lock_init(&sta->mesh->plink_lock);
		if (ieee80211_vif_is_mesh(&sdata->vif) &&
		    !sdata->u.mesh.user_mpm)
			timer_setup(&sta->mesh->plink_timer, mesh_plink_timer,
				    0);
		sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
	}
#endif

	memcpy(sta->addr, addr, ETH_ALEN);
	memcpy(sta->sta.addr, addr, ETH_ALEN);
	sta->sta.max_rx_aggregation_subframes =
		local->hw.max_rx_aggregation_subframes;

	/* Extended Key ID needs to install keys for key ID 0 and 1 Rx-only.
	 * The Tx path starts to use a key as soon as the key slot ptk_idx
	 * references to is not NULL. To avoid using the initial Rx-only key
	 * for Tx prematurely, initialize ptk_idx to an impossible PTK key ID,
	 * which will always refer to a NULL key.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX);
	sta->ptk_idx = INVALID_PTK_KEYIDX;

	sta->local = local;
	sta->sdata = sdata;
	sta->rx_stats.last_rx = jiffies;

	u64_stats_init(&sta->rx_stats.syncp);

	sta->sta_state = IEEE80211_STA_NONE;

	/* Mark TID as unreserved */
	sta->reserved_tid = IEEE80211_TID_UNRESERVED;

	sta->last_connected = ktime_get_seconds();
	ewma_signal_init(&sta->rx_stats_avg.signal);
	ewma_avg_signal_init(&sta->status_stats.avg_ack_signal);
	for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++)
		ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]);

	if (local->ops->wake_tx_queue) {
		void *txq_data;
		int size = sizeof(struct txq_info) +
			   ALIGN(hw->txq_data_size, sizeof(void *));

		txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp);
		if (!txq_data)
			goto free;

		for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
			struct txq_info *txq = txq_data + i * size;

			/* might not do anything for the bufferable MMPDU TXQ */
			ieee80211_txq_init(sdata, sta, txq, i);
		}
	}

	if (sta_prepare_rate_control(local, sta, gfp))
		goto free_txq;

	sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		skb_queue_head_init(&sta->ps_tx_buf[i]);
		skb_queue_head_init(&sta->tx_filtered[i]);
		sta->airtime[i].deficit = sta->airtime_weight;
		atomic_set(&sta->airtime[i].aql_tx_pending, 0);
		sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
		sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
	}

	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);

	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		u32 mandatory = 0;
		int r;

		if (!hw->wiphy->bands[i])
			continue;

		switch (i) {
		case NL80211_BAND_2GHZ:
			/*
			 * We use both B and G rates here, even though we
			 * cannot really know for sure that the station will
			 * support both. The only use for this is when we
			 * don't know anything about the station yet and send
			 * management frames, and then we'll pick the lowest
			 * possible rate anyway.
			 * If we don't include _G here, we cannot find a rate
			 * in P2P, and thus trigger the WARN_ONCE() in rate.c
			 */
			mandatory = IEEE80211_RATE_MANDATORY_B |
				    IEEE80211_RATE_MANDATORY_G;
			break;
		case NL80211_BAND_5GHZ:
			mandatory = IEEE80211_RATE_MANDATORY_A;
			break;
		case NL80211_BAND_60GHZ:
			WARN_ON(1);
			mandatory = 0;
			break;
		}

		for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) {
			struct ieee80211_rate *rate;

			rate = &hw->wiphy->bands[i]->bitrates[r];

			if (!(rate->flags & mandatory))
				continue;
			sta->sta.supp_rates[i] |= BIT(r);
		}
	}

	sta->sta.smps_mode = IEEE80211_SMPS_OFF;
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		struct ieee80211_supported_band *sband;
		u8 smps;

		sband = ieee80211_get_sband(sdata);
		if (!sband)
			goto free_txq;

		smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
			IEEE80211_HT_CAP_SM_PS_SHIFT;
		/*
		 * Assume that hostapd advertises our caps in the beacon and
		 * this is the known_smps_mode for a station that just
		 * associated
		 */
		switch (smps) {
		case WLAN_HT_SMPS_CONTROL_DISABLED:
			sta->known_smps_mode = IEEE80211_SMPS_OFF;
			break;
		case WLAN_HT_SMPS_CONTROL_STATIC:
			sta->known_smps_mode = IEEE80211_SMPS_STATIC;
			break;
		case WLAN_HT_SMPS_CONTROL_DYNAMIC:
			sta->known_smps_mode = IEEE80211_SMPS_DYNAMIC;
			break;
		default:
			WARN_ON(1);
		}
	}

	sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;

	sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD;
	sta->cparams.target = MS2TIME(20);
	sta->cparams.interval = MS2TIME(100);
	sta->cparams.ecn = true;

	sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);

	return sta;

free_txq:
	if (sta->sta.txq[0])
		kfree(to_txq_info(sta->sta.txq[0]));
free:
	free_percpu(sta->pcpu_rx_stats);
#ifdef CONFIG_MAC80211_MESH
	kfree(sta->mesh);
#endif
	kfree(sta);
	return NULL;
}

static int sta_info_insert_check(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;

	/*
	 * Can't be a WARN_ON because it can be triggered through a race:
	 * something inserts a STA (on one CPU) without holding the RTNL
	 * and another CPU turns off the net device.
	 */
	if (unlikely(!ieee80211_sdata_running(sdata)))
		return -ENETDOWN;

	if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) ||
		    is_multicast_ether_addr(sta->sta.addr)))
		return -EINVAL;

	/* The RCU read lock is required by rhashtable due to
	 * asynchronous resize/rehash. We also require the mutex
	 * for correctness.
	 */
	rcu_read_lock();
	lockdep_assert_held(&sdata->local->sta_mtx);
	if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) &&
	    ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) {
		rcu_read_unlock();
		return -ENOTUNIQ;
	}
	rcu_read_unlock();

	return 0;
}

static int sta_info_insert_drv_state(struct ieee80211_local *local,
				     struct ieee80211_sub_if_data *sdata,
				     struct sta_info *sta)
{
	enum ieee80211_sta_state state;
	int err = 0;

	for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) {
		err = drv_sta_state(local, sdata, sta, state, state + 1);
		if (err)
			break;
	}

	if (!err) {
		/*
		 * Drivers using legacy sta_add/sta_remove callbacks only
		 * get uploaded set to true after sta_add is called.
		 */
		if (!local->ops->sta_add)
			sta->uploaded = true;
		return 0;
	}

	if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		sdata_info(sdata,
			   "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n",
			   sta->sta.addr, state + 1, err);
		err = 0;
	}

	/* unwind on error */
	for (; state > IEEE80211_STA_NOTEXIST; state--)
		WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1));

	return err;
}

static void
ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	bool allow_p2p_go_ps = sdata->vif.p2p;
	struct sta_info *sta;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata ||
		    !test_sta_flag(sta, WLAN_STA_ASSOC))
			continue;
		if (!sta->sta.support_p2p_ps) {
			allow_p2p_go_ps = false;
			break;
		}
	}
	rcu_read_unlock();

	if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) {
		sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps;
		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS);
	}
}

/*
 * should be called with sta_mtx locked
 * this function replaces the mutex lock
 * with an RCU read lock
 */
static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
{
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct station_info *sinfo = NULL;
	int err = 0;

	lockdep_assert_held(&local->sta_mtx);

	/* check if STA exists already */
	if (sta_info_get_bss(sdata, sta->sta.addr)) {
		err = -EEXIST;
		goto out_err;
	}

	sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
	if (!sinfo) {
		err = -ENOMEM;
		goto out_err;
	}

	local->num_sta++;
	local->sta_generation++;
	smp_mb();

	/* simplify things and don't accept BA sessions yet */
	set_sta_flag(sta, WLAN_STA_BLOCK_BA);

	/* make the station visible */
	err = sta_info_hash_add(local, sta);
	if (err)
		goto out_drop_sta;

	list_add_tail_rcu(&sta->list, &local->sta_list);

	/* notify driver */
	err = sta_info_insert_drv_state(local, sdata, sta);
	if (err)
		goto out_remove;

	set_sta_flag(sta, WLAN_STA_INSERTED);

	if (sta->sta_state >= IEEE80211_STA_ASSOC) {
		ieee80211_recalc_min_chandef(sta->sdata);
		if (!sta->sta.support_p2p_ps)
			ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
	}

	/* accept BA sessions now */
	clear_sta_flag(sta, WLAN_STA_BLOCK_BA);

	ieee80211_sta_debugfs_add(sta);
	rate_control_add_sta_debugfs(sta);

	sinfo->generation = local->sta_generation;
	cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
	kfree(sinfo);

	sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);

	/* move reference to rcu-protected */
	rcu_read_lock();
	mutex_unlock(&local->sta_mtx);

	if (ieee80211_vif_is_mesh(&sdata->vif))
		mesh_accept_plinks_update(sdata);

	return 0;
 out_remove:
	sta_info_hash_del(local, sta);
	list_del_rcu(&sta->list);
 out_drop_sta:
	local->num_sta--;
	synchronize_net();
	__cleanup_single_sta(sta);
 out_err:
	mutex_unlock(&local->sta_mtx);
	kfree(sinfo);
	rcu_read_lock();
	return err;
}

int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
{
	struct ieee80211_local *local = sta->local;
	int err;

	might_sleep();

	mutex_lock(&local->sta_mtx);

	err = sta_info_insert_check(sta);
	if (err) {
		mutex_unlock(&local->sta_mtx);
		rcu_read_lock();
		goto out_free;
	}

	err = sta_info_insert_finish(sta);
	if (err)
		goto out_free;

	return 0;
 out_free:
	sta_info_free(local, sta);
	return err;
}

int sta_info_insert(struct sta_info *sta)
{
	int err = sta_info_insert_rcu(sta);

	rcu_read_unlock();

	return err;
}

static inline void __bss_tim_set(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the __set_bit() format.
	 */
	tim[id / 8] |= (1 << (id % 8));
}

static inline void __bss_tim_clear(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the __clear_bit() format.
	 */
	tim[id / 8] &= ~(1 << (id % 8));
}

static inline bool __bss_tim_get(u8 *tim, u16 id)
{
	/*
	 * This format has been mandated by the IEEE specifications,
	 * so this line may not be changed to use the test_bit() format.
	 */
	return tim[id / 8] & (1 << (id % 8));
}

static unsigned long ieee80211_tids_for_ac(int ac)
{
	/* If we ever support TIDs > 7, this obviously needs to be adjusted */
	switch (ac) {
	case IEEE80211_AC_VO:
		return BIT(6) | BIT(7);
	case IEEE80211_AC_VI:
		return BIT(4) | BIT(5);
	case IEEE80211_AC_BE:
		return BIT(0) | BIT(3);
	case IEEE80211_AC_BK:
		return BIT(1) | BIT(2);
	default:
		WARN_ON(1);
		return 0;
	}
}

static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
{
	struct ieee80211_local *local = sta->local;
	struct ps_data *ps;
	bool indicate_tim = false;
	u8 ignore_for_tim = sta->sta.uapsd_queues;
	int ac;
	u16 id = sta->sta.aid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		if (WARN_ON_ONCE(!sta->sdata->bss))
			return;

		ps = &sta->sdata->bss->ps;
#ifdef CONFIG_MAC80211_MESH
	} else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
		ps = &sta->sdata->u.mesh.ps;
#endif
	} else {
		return;
	}

	/* No need to do anything if the driver does all */
	if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
		return;

	if (sta->dead)
		goto done;

	/*
	 * If all ACs are delivery-enabled then we should build
	 * the TIM bit for all ACs anyway; if only some are then
	 * we ignore those and build the TIM bit using only the
	 * non-enabled ones.
	 */
	if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
		ignore_for_tim = 0;

	if (ignore_pending)
		ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		unsigned long tids;

		if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac])
			continue;

		indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) ||
				!skb_queue_empty(&sta->ps_tx_buf[ac]);
		if (indicate_tim)
			break;

		tids = ieee80211_tids_for_ac(ac);

		indicate_tim |=
			sta->driver_buffered_tids & tids;
		indicate_tim |=
			sta->txq_buffered_tids & tids;
	}

 done:
	spin_lock_bh(&local->tim_lock);

	if (indicate_tim == __bss_tim_get(ps->tim, id))
		goto out_unlock;

	if (indicate_tim)
		__bss_tim_set(ps->tim, id);
	else
		__bss_tim_clear(ps->tim, id);

	if (local->ops->set_tim && !WARN_ON(sta->dead)) {
		local->tim_in_locked_section = true;
		drv_set_tim(local, &sta->sta, indicate_tim);
		local->tim_in_locked_section = false;
	}

 out_unlock:
	spin_unlock_bh(&local->tim_lock);
}

void sta_info_recalc_tim(struct sta_info *sta)
{
	__sta_info_recalc_tim(sta, false);
}

static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info;
	int timeout;

	if (!skb)
		return false;

	info = IEEE80211_SKB_CB(skb);

	/* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec,
	 * i.e. listen_interval * beacon_int * 2048 / 1000000, which the
	 * expression below reduces to * 32 / 15625.
	 */
	timeout = (sta->listen_interval *
		   sta->sdata->vif.bss_conf.beacon_int *
		   32 / 15625) * HZ;
	if (timeout < STA_TX_BUFFER_EXPIRE)
		timeout = STA_TX_BUFFER_EXPIRE;
	return time_after(jiffies, info->control.jiffies + timeout);
}


static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
						struct sta_info *sta, int ac)
{
	unsigned long flags;
	struct sk_buff *skb;

	/*
	 * First check for frames that should expire on the filtered
	 * queue. Frames here were rejected by the driver and are on
	 * a separate queue to avoid reordering with normal PS-buffered
	 * frames. They also aren't accounted for right now in the
	 * total_ps_buffered counter.
	 */
	for (;;) {
		spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
		skb = skb_peek(&sta->tx_filtered[ac]);
		if (sta_info_buffer_expired(sta, skb))
			skb = __skb_dequeue(&sta->tx_filtered[ac]);
		else
			skb = NULL;
		spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);

		/*
		 * Frames are queued in order, so if this one
		 * hasn't expired yet we can stop testing. If
		 * we actually reached the end of the queue we
		 * also need to stop, of course.
		 */
		if (!skb)
			break;
		ieee80211_free_txskb(&local->hw, skb);
	}

	/*
	 * Now also check the normal PS-buffered queue, this will
	 * only find something if the filtered queue was emptied
	 * since the filtered frames are all before the normal PS
	 * buffered frames.
	 */
	for (;;) {
		spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
		skb = skb_peek(&sta->ps_tx_buf[ac]);
		if (sta_info_buffer_expired(sta, skb))
			skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
		else
			skb = NULL;
		spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);

		/*
		 * frames are queued in order, so if this one
		 * hasn't expired yet (or we reached the end of
		 * the queue) we can stop testing
		 */
		if (!skb)
			break;

		local->total_ps_buffered--;
		ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
		       sta->sta.addr);
		ieee80211_free_txskb(&local->hw, skb);
	}

	/*
	 * Finally, recalculate the TIM bit for this station -- it might
	 * now be clear because the station was too slow to retrieve its
	 * frames.
	 */
	sta_info_recalc_tim(sta);

	/*
	 * Return whether there are any frames still buffered, this is
	 * used to check whether the cleanup timer still needs to run,
	 * if there are no frames we don't need to rearm the timer.
	 */
	return !(skb_queue_empty(&sta->ps_tx_buf[ac]) &&
		 skb_queue_empty(&sta->tx_filtered[ac]));
}

static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
					     struct sta_info *sta)
{
	bool have_buffered = false;
	int ac;

	/* This is only necessary for stations on BSS/MBSS interfaces */
	if (!sta->sdata->bss &&
	    !ieee80211_vif_is_mesh(&sta->sdata->vif))
		return false;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		have_buffered |=
			sta_info_cleanup_expire_buffered_ac(local, sta, ac);

	return have_buffered;
}

static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
{
	struct ieee80211_local *local;
	struct ieee80211_sub_if_data *sdata;
	int ret;

	might_sleep();

	if (!sta)
		return -ENOENT;

	local = sta->local;
	sdata = sta->sdata;

	lockdep_assert_held(&local->sta_mtx);

	/*
	 * Before removing the station from the driver and
	 * rate control, it might still start new aggregation
	 * sessions -- block that to make sure the tear-down
	 * will be sufficient.
	 */
	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
	ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);

	/*
	 * Before removing the station from the driver there might be pending
	 * rx frames on RSS queues sent prior to the disassociation - wait for
	 * all such frames to be processed.
	 */
	drv_sync_rx_queues(local, sta);

	ret = sta_info_hash_del(local, sta);
	if (WARN_ON(ret))
		return ret;

	/*
	 * for TDLS peers, make sure to return to the base channel before
	 * removal.
	 */
	if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) {
		drv_tdls_cancel_channel_switch(local, sdata, &sta->sta);
		clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
	}

	list_del_rcu(&sta->list);
	sta->removed = true;

	drv_sta_pre_rcu_remove(local, sta->sdata, sta);

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
	    rcu_access_pointer(sdata->u.vlan.sta) == sta)
		RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);

	return 0;
}

static void __sta_info_destroy_part2(struct sta_info *sta)
{
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct station_info *sinfo;
	int ret;

	/*
	 * NOTE: This assumes at least synchronize_net() was done
	 *	 after _part1 and before _part2!
	 */

	might_sleep();
	lockdep_assert_held(&local->sta_mtx);

	if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
		ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
		WARN_ON_ONCE(ret);
	}

	/* now keys can no longer be reached */
	ieee80211_free_sta_keys(local, sta);

	/* disable TIM bit - last chance to tell driver */
	__sta_info_recalc_tim(sta, true);

	sta->dead = true;

	local->num_sta--;
	local->sta_generation++;

	while (sta->sta_state > IEEE80211_STA_NONE) {
		ret = sta_info_move_state(sta, sta->sta_state - 1);
		if (ret) {
			WARN_ON_ONCE(1);
			break;
		}
	}

	if (sta->uploaded) {
		ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE,
				    IEEE80211_STA_NOTEXIST);
		WARN_ON_ONCE(ret != 0);
	}

	sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);

	sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
	if (sinfo)
		sta_set_sinfo(sta, sinfo, true);
	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
	kfree(sinfo);

	ieee80211_sta_debugfs_remove(sta);

	cleanup_single_sta(sta);
}

int __must_check __sta_info_destroy(struct sta_info *sta)
{
	int err = __sta_info_destroy_part1(sta);

	if (err)
		return err;

	synchronize_net();

	__sta_info_destroy_part2(sta);

	return 0;
}

int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct sta_info *sta;
	int ret;

	mutex_lock(&sdata->local->sta_mtx);
	sta = sta_info_get(sdata, addr);
	ret = __sta_info_destroy(sta);
	mutex_unlock(&sdata->local->sta_mtx);

	return ret;
}

int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
			      const u8 *addr)
{
	struct sta_info *sta;
	int ret;

	mutex_lock(&sdata->local->sta_mtx);
	sta = sta_info_get_bss(sdata, addr);
	ret = __sta_info_destroy(sta);
	mutex_unlock(&sdata->local->sta_mtx);

	return ret;
}

static void sta_info_cleanup(struct timer_list *t)
{
	struct ieee80211_local *local = from_timer(local, t, sta_cleanup);
	struct sta_info *sta;
	bool timer_needed = false;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &local->sta_list, list)
		if (sta_info_cleanup_expire_buffered(local, sta))
			timer_needed = true;
	rcu_read_unlock();

	if (local->quiescing)
		return;

	if (!timer_needed)
		return;

	mod_timer(&local->sta_cleanup,
		  round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
}

int sta_info_init(struct ieee80211_local *local)
{
	int err;

	err = rhltable_init(&local->sta_hash, &sta_rht_params);
	if (err)
		return err;

	spin_lock_init(&local->tim_lock);
	mutex_init(&local->sta_mtx);
	INIT_LIST_HEAD(&local->sta_list);

	timer_setup(&local->sta_cleanup, sta_info_cleanup, 0);
	return 0;
}

void sta_info_stop(struct ieee80211_local *local)
{
	del_timer_sync(&local->sta_cleanup);
	rhltable_destroy(&local->sta_hash);
}


int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta, *tmp;
	LIST_HEAD(free_list);
	int ret = 0;

	might_sleep();

	WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP);
	WARN_ON(vlans && !sdata->bss);

	mutex_lock(&local->sta_mtx);
	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
		if (sdata == sta->sdata ||
		    (vlans && sdata->bss == sta->sdata->bss)) {
			if (!WARN_ON(__sta_info_destroy_part1(sta)))
				list_add(&sta->free_list, &free_list);
			ret++;
		}
	}

	if (!list_empty(&free_list)) {
		synchronize_net();
		list_for_each_entry_safe(sta, tmp, &free_list, free_list)
			__sta_info_destroy_part2(sta);
	}
	mutex_unlock(&local->sta_mtx);

	return ret;
}

void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
			  unsigned long exp_time)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta, *tmp;

	mutex_lock(&local->sta_mtx);

	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
		unsigned long last_active = ieee80211_sta_last_active(sta);

		if (sdata != sta->sdata)
			continue;

		if (time_is_before_jiffies(last_active + exp_time)) {
			sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
				sta->sta.addr);

			if (ieee80211_vif_is_mesh(&sdata->vif) &&
			    test_sta_flag(sta, WLAN_STA_PS_STA))
				atomic_dec(&sdata->u.mesh.ps.num_sta_ps);

			WARN_ON(__sta_info_destroy(sta));
		}
	}

	mutex_unlock(&local->sta_mtx);
}

struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
						   const u8 *addr,
						   const u8 *localaddr)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct rhlist_head *tmp;
	struct sta_info *sta;

	/*
	 * Just return a random station if localaddr is NULL
	 * ... first in list.
	 */
	for_each_sta_info(local, addr, sta, tmp) {
		if (localaddr &&
		    !ether_addr_equal(sta->sdata->vif.addr, localaddr))
			continue;
		if (!sta->uploaded)
			return NULL;
		return &sta->sta;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr);

struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
					 const u8 *addr)
{
	struct sta_info *sta;

	if (!vif)
		return NULL;

	sta = sta_info_get_bss(vif_to_sdata(vif), addr);
	if (!sta)
		return NULL;

	if (!sta->uploaded)
		return NULL;

	return &sta->sta;
}
EXPORT_SYMBOL(ieee80211_find_sta);

/* powersave support code */
void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct sk_buff_head pending;
	int filtered = 0, buffered = 0, ac, i;
	unsigned long flags;
	struct ps_data *ps;

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
				     u.ap);

	if (sdata->vif.type == NL80211_IFTYPE_AP)
		ps = &sdata->bss->ps;
	else if (ieee80211_vif_is_mesh(&sdata->vif))
		ps = &sdata->u.mesh.ps;
	else
		return;

	clear_sta_flag(sta, WLAN_STA_SP);

	BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
	sta->driver_buffered_tids = 0;
	sta->txq_buffered_tids = 0;

	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);

	for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
		if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i]))
			continue;

		schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i]));
	}

	skb_queue_head_init(&pending);

	/* sync with ieee80211_tx_h_unicast_ps_buf */
	spin_lock(&sta->ps_lock);
	/* Send all buffered frames to the station */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		int count = skb_queue_len(&pending), tmp;

		spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
		skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
		spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
		tmp = skb_queue_len(&pending);
		filtered += tmp - count;
		count = tmp;

		spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
		skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
		spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
		tmp = skb_queue_len(&pending);
		buffered += tmp - count;
	}

	ieee80211_add_pending_skbs(local, &pending);

	/* now we're no longer in the deliver code */
	clear_sta_flag(sta, WLAN_STA_PS_DELIVER);

	/* The station might have polled and then woken up before we responded,
	 * so clear these flags now to avoid them sticking around.
	 */
	clear_sta_flag(sta, WLAN_STA_PSPOLL);
	clear_sta_flag(sta, WLAN_STA_UAPSD);
	spin_unlock(&sta->ps_lock);

	atomic_dec(&ps->num_sta_ps);

	local->total_ps_buffered -= buffered;

	sta_info_recalc_tim(sta);

	ps_dbg(sdata,
	       "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n",
	       sta->sta.addr, sta->sta.aid, filtered, buffered);

	ieee80211_check_fast_xmit(sta);
}

static void ieee80211_send_null_response(struct sta_info *sta, int tid,
					 enum ieee80211_frame_release_type reason,
					 bool call_driver, bool more_data)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_qos_hdr *nullfunc;
	struct sk_buff *skb;
	int size = sizeof(*nullfunc);
	__le16 fc;
	bool qos = sta->sta.wme;
	struct ieee80211_tx_info *info;
	struct ieee80211_chanctx_conf *chanctx_conf;

	/* Don't send NDPs when STA is connected HE */
	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
	    !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
		return;

	if (qos) {
		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
				 IEEE80211_STYPE_QOS_NULLFUNC |
				 IEEE80211_FCTL_FROMDS);
	} else {
		size -= 2;
		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
				 IEEE80211_STYPE_NULLFUNC |
				 IEEE80211_FCTL_FROMDS);
	}

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);

	nullfunc = skb_put(skb, size);
	nullfunc->frame_control = fc;
	nullfunc->duration_id = 0;
	memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
	memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
	memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
	nullfunc->seq_ctrl = 0;

	skb->priority = tid;
	skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
	if (qos) {
		nullfunc->qos_ctrl = cpu_to_le16(tid);

		if (reason == IEEE80211_FRAME_RELEASE_UAPSD) {
			nullfunc->qos_ctrl |=
				cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
			if (more_data)
				nullfunc->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		}
	}

	info = IEEE80211_SKB_CB(skb);

	/*
	 * Tell TX path to send this frame even though the
	 * STA may still remain in PS mode after this frame
	 * exchange. Also set EOSP to indicate this packet
	 * ends the poll/service period.
	 */
	info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
		       IEEE80211_TX_STATUS_EOSP |
		       IEEE80211_TX_CTL_REQ_TX_STATUS;

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;

	if (call_driver)
		drv_allow_buffered_frames(local, sta, BIT(tid), 1,
					  reason, false);

	skb->dev = sdata->dev;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (WARN_ON(!chanctx_conf)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return;
	}

	info->band = chanctx_conf->def.chan->band;
	ieee80211_xmit(sdata, sta, skb);
	rcu_read_unlock();
}

static int find_highest_prio_tid(unsigned long tids)
{
	/* lower 3 TIDs aren't ordered perfectly */
	if (tids & 0xF8)
		return fls(tids) - 1;
	/* TID 0 is BE just like TID 3 */
	if (tids & BIT(0))
		return 0;
	return fls(tids) - 1;
}

/* Indicates if the MORE_DATA bit should be set in the last
 * frame obtained by ieee80211_sta_ps_get_frames.
 * Note that driver_release_tids is relevant only if
 * reason = IEEE80211_FRAME_RELEASE_PSPOLL
 */
static bool
ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs,
			   enum ieee80211_frame_release_type reason,
			   unsigned long driver_release_tids)
{
	int ac;

	/* If the driver has data on more than one TID then
	 * certainly there's more data if we release just a
	 * single frame now (from a single TID). This will
	 * only happen for PS-Poll.
	 */
	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
	    hweight16(driver_release_tids) > 1)
		return true;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
			continue;

		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
			return true;
	}

	return false;
}

static void
ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs,
			    enum ieee80211_frame_release_type reason,
			    struct sk_buff_head *frames,
			    unsigned long *driver_release_tids)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	int ac;

	/* Get response frame(s) and more data bit for the last one. */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		unsigned long tids;

		if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
			continue;

		tids = ieee80211_tids_for_ac(ac);

		/* if we already have frames from software, then we can't also
		 * release from hardware queues
		 */
		if (skb_queue_empty(frames)) {
			*driver_release_tids |=
				sta->driver_buffered_tids & tids;
			*driver_release_tids |= sta->txq_buffered_tids & tids;
		}

		if (!*driver_release_tids) {
			struct sk_buff *skb;

			while (n_frames > 0) {
				skb = skb_dequeue(&sta->tx_filtered[ac]);
				if (!skb) {
					skb = skb_dequeue(
						&sta->ps_tx_buf[ac]);
					if (skb)
						local->total_ps_buffered--;
				}
				if (!skb)
					break;
				n_frames--;
				__skb_queue_tail(frames, skb);
			}
		}

		/* If we have more frames buffered on this AC, then abort the
		 * loop since we can't send more data from other ACs before
		 * the buffered frames from this.
		 */
		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
			break;
	}
}

static void
ieee80211_sta_ps_deliver_response(struct sta_info *sta,
				  int n_frames, u8 ignored_acs,
				  enum ieee80211_frame_release_type reason)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	unsigned long driver_release_tids = 0;
	struct sk_buff_head frames;
	bool more_data;

	/* Service or PS-Poll period starts */
	set_sta_flag(sta, WLAN_STA_SP);

	__skb_queue_head_init(&frames);

	ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason,
				    &frames, &driver_release_tids);

	more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason,
					       driver_release_tids);

	if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL)
		driver_release_tids =
			BIT(find_highest_prio_tid(driver_release_tids));

	if (skb_queue_empty(&frames) && !driver_release_tids) {
		int tid, ac;

		/*
		 * For PS-Poll, this can only happen due to a race condition
		 * when we set the TIM bit and the station notices it, but
		 * before it can poll for the frame we expire it.
		 *
		 * For uAPSD, this is said in the standard (11.2.1.5 h):
		 *	At each unscheduled SP for a non-AP STA, the AP shall
		 *	attempt to transmit at least one MSDU or MMPDU, but no
		 *	more than the value specified in the Max SP Length field
		 *	in the QoS Capability element from delivery-enabled ACs,
		 *	that are destined for the non-AP STA.
		 *
		 * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
		 */

		/* This will evaluate to 1, 3, 5 or 7. */
		for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
			if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
				break;
		tid = 7 - 2 * ac;

		ieee80211_send_null_response(sta, tid, reason, true, false);
	} else if (!driver_release_tids) {
		struct sk_buff_head pending;
		struct sk_buff *skb;
		int num = 0;
		u16 tids = 0;
		bool need_null = false;

		skb_queue_head_init(&pending);

		while ((skb = __skb_dequeue(&frames))) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
			struct ieee80211_hdr *hdr = (void *) skb->data;
			u8 *qoshdr = NULL;

			num++;

			/*
			 * Tell TX path to send this frame even though the
			 * STA may still remain in PS mode after this frame
			 * exchange.
			 */
			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
			info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;

			/*
			 * Use MoreData flag to indicate whether there are
			 * more buffered frames for this STA
			 */
			if (more_data || !skb_queue_empty(&frames))
				hdr->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
			else
				hdr->frame_control &=
					cpu_to_le16(~IEEE80211_FCTL_MOREDATA);

			if (ieee80211_is_data_qos(hdr->frame_control) ||
			    ieee80211_is_qos_nullfunc(hdr->frame_control))
				qoshdr = ieee80211_get_qos_ctl(hdr);

			tids |= BIT(skb->priority);

			__skb_queue_tail(&pending, skb);

			/* end service period after last frame or add one */
			if (!skb_queue_empty(&frames))
				continue;

			if (reason != IEEE80211_FRAME_RELEASE_UAPSD) {
				/* for PS-Poll, there's only one frame */
				info->flags |= IEEE80211_TX_STATUS_EOSP |
					       IEEE80211_TX_CTL_REQ_TX_STATUS;
				break;
			}

			/* For uAPSD, things are a bit more complicated. If the
			 * last frame has a QoS header (i.e. is a QoS-data or
			 * QoS-nulldata frame) then just set the EOSP bit there
			 * and be done.
			 * If the frame doesn't have a QoS header (which means
			 * it should be a bufferable MMPDU) then we can't set
			 * the EOSP bit in the QoS header; add a QoS-nulldata
			 * frame to the list to send it after the MMPDU.
			 *
			 * Note that this code is only in the mac80211-release
			 * code path, we assume that the driver will not buffer
			 * anything but QoS-data frames, or if it does, will
			 * create the QoS-nulldata frame by itself if needed.
			 *
			 * Cf. 802.11-2012 10.2.1.10 (c).
			 */
			if (qoshdr) {
				*qoshdr |= IEEE80211_QOS_CTL_EOSP;

				info->flags |= IEEE80211_TX_STATUS_EOSP |
					       IEEE80211_TX_CTL_REQ_TX_STATUS;
			} else {
				/* The standard isn't completely clear on this
				 * as it says the more-data bit should be set
				 * if there are more BUs. The QoS-Null frame
				 * we're about to send isn't buffered yet, we
				 * only create it below, but let's pretend it
				 * was buffered just in case some clients only
				 * expect more-data=0 when eosp=1.
				 */
				hdr->frame_control |=
					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
				need_null = true;
				num++;
			}
			break;
		}

		drv_allow_buffered_frames(local, sta, tids, num,
					  reason, more_data);

		ieee80211_add_pending_skbs(local, &pending);

		if (need_null)
			ieee80211_send_null_response(
				sta, find_highest_prio_tid(tids),
				reason, false, false);

		sta_info_recalc_tim(sta);
	} else {
		int tid;

		/*
		 * We need to release a frame that is buffered somewhere in the
		 * driver ... it'll have to handle that.
		 * Note that the driver also has to check the number of frames
		 * on the TIDs we're releasing from - if there are more than
		 * n_frames it has to set the more-data bit (if we didn't ask
		 * it to set it anyway due to other buffered frames); if there
		 * are fewer than n_frames it has to make sure to adjust that
		 * to allow the service period to end properly.
		 */
		drv_release_buffered_frames(local, sta, driver_release_tids,
					    n_frames, reason, more_data);

		/*
		 * Note that we don't recalculate the TIM bit here as it would
		 * most likely have no effect at all unless the driver told us
		 * that the TID(s) became empty before returning here from the
		 * release function.
		 * Either way, however, when the driver tells us that the TID(s)
		 * became empty or we find that a txq became empty, we'll do the
		 * TIM recalculation.
		 */

		if (!sta->sta.txq[0])
			return;

		for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
			if (!sta->sta.txq[tid] ||
			    !(driver_release_tids & BIT(tid)) ||
			    txq_has_queue(sta->sta.txq[tid]))
				continue;

			sta_info_recalc_tim(sta);
			break;
		}
	}
}

void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
{
	u8 ignore_for_response = sta->sta.uapsd_queues;

	/*
	 * If all ACs are delivery-enabled then we should reply
	 * from any of them, if only some are enabled we reply
	 * only from the non-enabled ones.
	 */
	if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1)
		ignore_for_response = 0;

	ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response,
					  IEEE80211_FRAME_RELEASE_PSPOLL);
}

void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta)
{
	int n_frames = sta->sta.max_sp;
	u8 delivery_enabled = sta->sta.uapsd_queues;

	/*
	 * If we ever grow support for TSPEC this might happen if
	 * the TSPEC update from hostapd comes in between a trigger
	 * frame setting WLAN_STA_UAPSD in the RX path and this
	 * actually getting called.
	 */
	if (!delivery_enabled)
		return;

	switch (sta->sta.max_sp) {
	case 1:
		n_frames = 2;
		break;
	case 2:
		n_frames = 4;
		break;
	case 3:
		n_frames = 6;
		break;
	case 0:
		/* XXX: what is a good value? */
		n_frames = 128;
		break;
	}

	ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled,
					  IEEE80211_FRAME_RELEASE_UAPSD);
}

void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
			       struct ieee80211_sta *pubsta, bool block)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	trace_api_sta_block_awake(sta->local, pubsta, block);

	if (block) {
		set_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_clear_fast_xmit(sta);
		return;
	}

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
		set_sta_flag(sta, WLAN_STA_PS_DELIVER);
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_queue_work(hw, &sta->drv_deliver_wk);
	} else if (test_sta_flag(sta, WLAN_STA_PSPOLL) ||
		   test_sta_flag(sta, WLAN_STA_UAPSD)) {
		/* must be asleep in this case */
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_queue_work(hw, &sta->drv_deliver_wk);
	} else {
		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
		ieee80211_check_fast_xmit(sta);
	}
}
EXPORT_SYMBOL(ieee80211_sta_block_awake);

void ieee80211_sta_eosp(struct ieee80211_sta *pubsta)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_local *local = sta->local;

	trace_api_eosp(local, pubsta);

	clear_sta_flag(sta, WLAN_STA_SP);
}
EXPORT_SYMBOL(ieee80211_sta_eosp);

void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	enum ieee80211_frame_release_type reason;
	bool more_data;

	trace_api_send_eosp_nullfunc(sta->local, pubsta, tid);

	reason = IEEE80211_FRAME_RELEASE_UAPSD;
	more_data =
		ieee80211_sta_ps_more_data(sta, ~sta->sta.uapsd_queues,
					   reason, 0);

	ieee80211_send_null_response(sta, tid, reason, false, more_data);
}
EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc);

void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
				u8 tid, bool buffered)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
		return;

	trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered);

	if (buffered)
		set_bit(tid, &sta->driver_buffered_tids);
	else
		clear_bit(tid, &sta->driver_buffered_tids);

	sta_info_recalc_tim(sta);
}
EXPORT_SYMBOL(ieee80211_sta_set_buffered);

void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
				    u32 tx_airtime, u32 rx_airtime)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_local *local = sta->sdata->local;
	u8 ac = ieee80211_ac_from_tid(tid);
	u32 airtime = 0;

	if (sta->local->airtime_flags & AIRTIME_USE_TX)
		airtime += tx_airtime;
	if (sta->local->airtime_flags & AIRTIME_USE_RX)
		airtime += rx_airtime;

	spin_lock_bh(&local->active_txq_lock[ac]);
	sta->airtime[ac].tx_airtime += tx_airtime;
	sta->airtime[ac].rx_airtime += rx_airtime;
	sta->airtime[ac].deficit -= airtime;
	spin_unlock_bh(&local->active_txq_lock[ac]);
}
EXPORT_SYMBOL(ieee80211_sta_register_airtime);

void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
					  struct sta_info *sta, u8 ac,
					  u16 tx_airtime, bool tx_completed)
{
	int tx_pending;

	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
		return;

	if (!tx_completed) {
		if (sta)
			atomic_add(tx_airtime,
				   &sta->airtime[ac].aql_tx_pending);

		atomic_add(tx_airtime, &local->aql_total_pending_airtime);
		return;
	}

	if (sta) {
		tx_pending = atomic_sub_return(tx_airtime,
					       &sta->airtime[ac].aql_tx_pending);
		if (tx_pending < 0)
			atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending,
				       tx_pending, 0);
	}

	tx_pending = atomic_sub_return(tx_airtime,
				       &local->aql_total_pending_airtime);
	if (WARN_ONCE(tx_pending < 0,
		      "Device %s AC %d pending airtime underflow: %u, %u",
		      wiphy_name(local->hw.wiphy), ac, tx_pending,
		      tx_airtime))
		atomic_cmpxchg(&local->aql_total_pending_airtime,
			       tx_pending, 0);
}

int sta_info_move_state(struct sta_info *sta,
			enum ieee80211_sta_state new_state)
{
	might_sleep();

	if (sta->sta_state == new_state)
		return 0;

	/* check allowed transitions first */

	switch (new_state) {
	case IEEE80211_STA_NONE:
		if (sta->sta_state != IEEE80211_STA_AUTH)
			return -EINVAL;
		break;
	case IEEE80211_STA_AUTH:
		if (sta->sta_state != IEEE80211_STA_NONE &&
		    sta->sta_state != IEEE80211_STA_ASSOC)
			return -EINVAL;
		break;
	case IEEE80211_STA_ASSOC:
		if (sta->sta_state != IEEE80211_STA_AUTH &&
		    sta->sta_state != IEEE80211_STA_AUTHORIZED)
			return -EINVAL;
		break;
	case IEEE80211_STA_AUTHORIZED:
		if (sta->sta_state != IEEE80211_STA_ASSOC)
			return -EINVAL;
		break;
	default:
		WARN(1, "invalid state %d", new_state);
		return -EINVAL;
	}

	sta_dbg(sta->sdata, "moving STA %pM to state %d\n",
		sta->sta.addr, new_state);
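
	/* At this point new_state is guaranteed by the checks above to be
	 * adjacent to the current sta_state, so only single-step transitions
	 * need to be handled below.
	 */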
int sta_info_move_state(struct sta_info *sta,
			enum ieee80211_sta_state new_state)
{
	might_sleep();

	if (sta->sta_state == new_state)
		return 0;

	/* check allowed transitions first */

	switch (new_state) {
	case IEEE80211_STA_NONE:
		if (sta->sta_state != IEEE80211_STA_AUTH)
			return -EINVAL;
		break;
	case IEEE80211_STA_AUTH:
		if (sta->sta_state != IEEE80211_STA_NONE &&
		    sta->sta_state != IEEE80211_STA_ASSOC)
			return -EINVAL;
		break;
	case IEEE80211_STA_ASSOC:
		if (sta->sta_state != IEEE80211_STA_AUTH &&
		    sta->sta_state != IEEE80211_STA_AUTHORIZED)
			return -EINVAL;
		break;
	case IEEE80211_STA_AUTHORIZED:
		if (sta->sta_state != IEEE80211_STA_ASSOC)
			return -EINVAL;
		break;
	default:
		WARN(1, "invalid state %d", new_state);
		return -EINVAL;
	}

	sta_dbg(sta->sdata, "moving STA %pM to state %d\n",
		sta->sta.addr, new_state);

	/*
	 * notify the driver before the actual changes so it can
	 * fail the transition
	 */
	if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
		int err = drv_sta_state(sta->local, sta->sdata, sta,
					sta->sta_state, new_state);
		if (err)
			return err;
	}

	/* reflect the change in all state variables */

	switch (new_state) {
	case IEEE80211_STA_NONE:
		if (sta->sta_state == IEEE80211_STA_AUTH)
			clear_bit(WLAN_STA_AUTH, &sta->_flags);
		break;
	case IEEE80211_STA_AUTH:
		if (sta->sta_state == IEEE80211_STA_NONE) {
			set_bit(WLAN_STA_AUTH, &sta->_flags);
		} else if (sta->sta_state == IEEE80211_STA_ASSOC) {
			clear_bit(WLAN_STA_ASSOC, &sta->_flags);
			ieee80211_recalc_min_chandef(sta->sdata);
			if (!sta->sta.support_p2p_ps)
				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
		}
		break;
	case IEEE80211_STA_ASSOC:
		if (sta->sta_state == IEEE80211_STA_AUTH) {
			set_bit(WLAN_STA_ASSOC, &sta->_flags);
			sta->assoc_at = ktime_get_boottime_ns();
			ieee80211_recalc_min_chandef(sta->sdata);
			if (!sta->sta.support_p2p_ps)
				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
		} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
			ieee80211_vif_dec_num_mcast(sta->sdata);
			clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
			ieee80211_clear_fast_xmit(sta);
			ieee80211_clear_fast_rx(sta);
		}
		break;
	case IEEE80211_STA_AUTHORIZED:
		if (sta->sta_state == IEEE80211_STA_ASSOC) {
			ieee80211_vif_inc_num_mcast(sta->sdata);
			set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
			ieee80211_check_fast_xmit(sta);
			ieee80211_check_fast_rx(sta);
		}
		if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
		    sta->sdata->vif.type == NL80211_IFTYPE_AP)
			cfg80211_send_layer2_update(sta->sdata->dev,
						    sta->sta.addr);
		break;
	default:
		break;
	}

	sta->sta_state = new_state;

	return 0;
}

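/*
 * Number of spatial streams the peer can transmit with: taken from the
 * VHT TX MCS map when VHT is supported, otherwise derived from the HT RX
 * MCS mask (or the advertised TX maximum when TX and RX configurations
 * differ). Non-HT stations default to a single stream.
 */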
u8 sta_info_tx_streams(struct sta_info *sta)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.ht_cap;
	u8 rx_streams;

	if (!sta->sta.ht_cap.ht_supported)
		return 1;

	if (sta->sta.vht_cap.vht_supported) {
		int i;
		u16 tx_mcs_map =
			le16_to_cpu(sta->sta.vht_cap.vht_mcs.tx_mcs_map);

		for (i = 7; i >= 0; i--)
			if ((tx_mcs_map & (0x3 << (i * 2))) !=
			    IEEE80211_VHT_MCS_NOT_SUPPORTED)
				return i + 1;
	}

	if (ht_cap->mcs.rx_mask[3])
		rx_streams = 4;
	else if (ht_cap->mcs.rx_mask[2])
		rx_streams = 3;
	else if (ht_cap->mcs.rx_mask[1])
		rx_streams = 2;
	else
		rx_streams = 1;

	if (!(ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_RX_DIFF))
		return rx_streams;

	return ((ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
			>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
}

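/*
 * Return the RX statistics bucket with the most recent activity: the shared
 * sta->rx_stats by default, or whichever per-CPU bucket saw the latest
 * frame when the driver uses RSS.
 */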
static struct ieee80211_sta_rx_stats *
sta_get_last_rx_stats(struct sta_info *sta)
{
	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
	struct ieee80211_local *local = sta->local;
	int cpu;

	if (!ieee80211_hw_check(&local->hw, USES_RSS))
		return stats;

	for_each_possible_cpu(cpu) {
		struct ieee80211_sta_rx_stats *cpustats;

		cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu);

		if (time_after(cpustats->last_rx, stats->last_rx))
			stats = cpustats;
	}

	return stats;
}

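/*
 * Expand the compact u32 rate encoding used in the RX statistics (see the
 * STA_STATS_* helpers) into a struct rate_info that cfg80211 understands,
 * covering legacy, HT, VHT and HE rates including bandwidth, guard
 * interval and NSS where applicable.
 */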
static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
				  struct rate_info *rinfo)
{
	rinfo->bw = STA_STATS_GET(BW, rate);

	switch (STA_STATS_GET(TYPE, rate)) {
	case STA_STATS_RATE_TYPE_VHT:
		rinfo->flags = RATE_INFO_FLAGS_VHT_MCS;
		rinfo->mcs = STA_STATS_GET(VHT_MCS, rate);
		rinfo->nss = STA_STATS_GET(VHT_NSS, rate);
		if (STA_STATS_GET(SGI, rate))
			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case STA_STATS_RATE_TYPE_HT:
		rinfo->flags = RATE_INFO_FLAGS_MCS;
		rinfo->mcs = STA_STATS_GET(HT_MCS, rate);
		if (STA_STATS_GET(SGI, rate))
			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case STA_STATS_RATE_TYPE_LEGACY: {
		struct ieee80211_supported_band *sband;
		u16 brate;
		unsigned int shift;
		int band = STA_STATS_GET(LEGACY_BAND, rate);
		int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);

		sband = local->hw.wiphy->bands[band];

		if (WARN_ON_ONCE(!sband->bitrates))
			break;

		brate = sband->bitrates[rate_idx].bitrate;
		if (rinfo->bw == RATE_INFO_BW_5)
			shift = 2;
		else if (rinfo->bw == RATE_INFO_BW_10)
			shift = 1;
		else
			shift = 0;
		rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
		break;
	}
	case STA_STATS_RATE_TYPE_HE:
		rinfo->flags = RATE_INFO_FLAGS_HE_MCS;
		rinfo->mcs = STA_STATS_GET(HE_MCS, rate);
		rinfo->nss = STA_STATS_GET(HE_NSS, rate);
		rinfo->he_gi = STA_STATS_GET(HE_GI, rate);
		rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate);
		rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate);
		break;
	}
}

static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
{
	u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);

	if (rate == STA_STATS_RATE_INVALID)
		return -EINVAL;

	sta_stats_decode_rate(sta->local, rate, rinfo);
	return 0;
}

static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
					int tid)
{
	unsigned int start;
	u64 value;

	do {
		start = u64_stats_fetch_begin(&rxstats->syncp);
		value = rxstats->msdu[tid];
	} while (u64_stats_fetch_retry(&rxstats->syncp, start));

	return value;
}

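/*
 * Fill the per-TID statistics for cfg80211, skipping any field the driver
 * already provided (indicated by bits already set in tidstats->filled).
 * RX MSDU counts are summed across the per-CPU buckets when RSS is in use,
 * and TXQ stats are only reported for drivers using the wake_tx_queue path.
 */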
static void sta_set_tidstats(struct sta_info *sta,
			     struct cfg80211_tid_stats *tidstats,
			     int tid)
{
	struct ieee80211_local *local = sta->local;
	int cpu;

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
		if (!ieee80211_hw_check(&local->hw, USES_RSS))
			tidstats->rx_msdu +=
				sta_get_tidstats_msdu(&sta->rx_stats, tid);

		if (sta->pcpu_rx_stats) {
			for_each_possible_cpu(cpu) {
				struct ieee80211_sta_rx_stats *cpurxs;

				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
				tidstats->rx_msdu +=
					sta_get_tidstats_msdu(cpurxs, tid);
			}
		}

		tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
		tidstats->tx_msdu = sta->tx_stats.msdu[tid];
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
		tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid];
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
		tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid];
	}

	if (local->ops->wake_tx_queue && tid < IEEE80211_NUM_TIDS) {
		spin_lock_bh(&local->fq.lock);
		rcu_read_lock();

		tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS);
		ieee80211_fill_txq_stats(&tidstats->txq_stats,
					 to_txq_info(sta->sta.txq[tid]));

		rcu_read_unlock();
		spin_unlock_bh(&local->fq.lock);
	}
}

static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
{
	unsigned int start;
	u64 value;

	do {
		start = u64_stats_fetch_begin(&rxstats->syncp);
		value = rxstats->bytes;
	} while (u64_stats_fetch_retry(&rxstats->syncp, start));

	return value;
}

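/*
 * Fill a cfg80211 station_info structure. The driver gets a chance to fill
 * fields first via drv_sta_statistics(); anything whose bit is already set
 * in sinfo->filled is then left untouched, so drivers can override the
 * mac80211-maintained counters where they keep better statistics.
 */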
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
		   bool tidstats)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	u32 thr = 0;
	int i, ac, cpu;
	struct ieee80211_sta_rx_stats *last_rxstats;

	last_rxstats = sta_get_last_rx_stats(sta);

	sinfo->generation = sdata->local->sta_generation;

	/* do before driver, so beacon filtering drivers have a
	 * chance to e.g. just add the number of filtered beacons
	 * (or just modify the value entirely, of course)
	 */
	if (sdata->vif.type == NL80211_IFTYPE_STATION)
		sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;

	drv_sta_statistics(local, sdata, &sta->sta, sinfo);

	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
			 BIT_ULL(NL80211_STA_INFO_STA_FLAGS) |
			 BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
			 BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) |
			 BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) |
			 BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);

	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
		sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
	}

	sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
	sinfo->assoc_at = sta->assoc_at;
	sinfo->inactive_time =
		jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));

	if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) |
			       BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
		sinfo->tx_bytes = 0;
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_bytes += sta->tx_stats.bytes[ac];
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
		sinfo->tx_packets = 0;
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_packets += sta->tx_stats.packets[ac];
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
	}

	if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
			       BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
		if (!ieee80211_hw_check(&local->hw, USES_RSS))
			sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);

		if (sta->pcpu_rx_stats) {
			for_each_possible_cpu(cpu) {
				struct ieee80211_sta_rx_stats *cpurxs;

				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
				sinfo->rx_bytes += sta_get_stats_bytes(cpurxs);
			}
		}

		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
		sinfo->rx_packets = sta->rx_stats.packets;
		if (sta->pcpu_rx_stats) {
			for_each_possible_cpu(cpu) {
				struct ieee80211_sta_rx_stats *cpurxs;

				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
				sinfo->rx_packets += cpurxs->packets;
			}
		}
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) {
		sinfo->tx_retries = sta->status_stats.retry_count;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) {
		sinfo->tx_failed = sta->status_stats.retry_failed;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) {
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->rx_duration += sta->airtime[ac].rx_airtime;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) {
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_duration += sta->airtime[ac].tx_airtime;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
		sinfo->airtime_weight = sta->airtime_weight;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
	}

	sinfo->rx_dropped_misc = sta->rx_stats.dropped;
	if (sta->pcpu_rx_stats) {
		for_each_possible_cpu(cpu) {
			struct ieee80211_sta_rx_stats *cpurxs;

			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
			sinfo->rx_dropped_misc += cpurxs->dropped;
		}
	}

	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
	    !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) |
				 BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
		sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
	}

	if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
	    ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
		if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) {
			sinfo->signal = (s8)last_rxstats->last_signal;
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
		}

		if (!sta->pcpu_rx_stats &&
		    !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) {
			sinfo->signal_avg =
				-ewma_signal_read(&sta->rx_stats_avg.signal);
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
		}
	}

	/* for the average - if pcpu_rx_stats isn't set - rxstats must point to
	 * the sta->rx_stats struct, so the check here is fine with and without
	 * pcpu statistics
	 */
	if (last_rxstats->chains &&
	    !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) |
			       BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
		if (!sta->pcpu_rx_stats)
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);

		sinfo->chains = last_rxstats->chains;

		for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
			sinfo->chain_signal[i] =
				last_rxstats->chain_signal_last[i];
			sinfo->chain_signal_avg[i] =
				-ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]);
		}
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
		sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate,
				     &sinfo->txrate);
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) {
		if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0)
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
	}

	if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) {
		for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
			sta_set_tidstats(sta, &sinfo->pertid[i], i);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
#ifdef CONFIG_MAC80211_MESH
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) |
				 BIT_ULL(NL80211_STA_INFO_PLID) |
				 BIT_ULL(NL80211_STA_INFO_PLINK_STATE) |
				 BIT_ULL(NL80211_STA_INFO_LOCAL_PM) |
				 BIT_ULL(NL80211_STA_INFO_PEER_PM) |
				 BIT_ULL(NL80211_STA_INFO_NONPEER_PM) |
				 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) |
				 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS);

		sinfo->llid = sta->mesh->llid;
		sinfo->plid = sta->mesh->plid;
		sinfo->plink_state = sta->mesh->plink_state;
		if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET);
			sinfo->t_offset = sta->mesh->t_offset;
		}
		sinfo->local_pm = sta->mesh->local_pm;
		sinfo->peer_pm = sta->mesh->peer_pm;
		sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
		sinfo->connected_to_gate = sta->mesh->connected_to_gate;
		sinfo->connected_to_as = sta->mesh->connected_to_as;
#endif
	}

	sinfo->bss_param.flags = 0;
	if (sdata->vif.bss_conf.use_cts_prot)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
	if (sdata->vif.bss_conf.use_short_preamble)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
	if (sdata->vif.bss_conf.use_short_slot)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
	sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period;
	sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;

	sinfo->sta_flags.set = 0;
	sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
				BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
				BIT(NL80211_STA_FLAG_WME) |
				BIT(NL80211_STA_FLAG_MFP) |
				BIT(NL80211_STA_FLAG_AUTHENTICATED) |
				BIT(NL80211_STA_FLAG_ASSOCIATED) |
				BIT(NL80211_STA_FLAG_TDLS_PEER);
	if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
	if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
	if (sta->sta.wme)
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
	if (test_sta_flag(sta, WLAN_STA_MFP))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
	if (test_sta_flag(sta, WLAN_STA_AUTH))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
	if (test_sta_flag(sta, WLAN_STA_ASSOC))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);

	thr = sta_get_expected_throughput(sta);

	if (thr != 0) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
		sinfo->expected_throughput = thr;
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) &&
	    sta->status_stats.ack_signal_filled) {
		sinfo->ack_signal = sta->status_stats.last_ack_signal;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) &&
	    sta->status_stats.ack_signal_filled) {
		sinfo->avg_ack_signal =
			-(s8)ewma_avg_signal_read(
				&sta->status_stats.avg_ack_signal);
		sinfo->filled |=
			BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC);
		sinfo->airtime_link_metric =
			airtime_link_metric_get(local, sta);
	}
}

u32 sta_get_expected_throughput(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct rate_control_ref *ref = NULL;
	u32 thr = 0;

	if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
		ref = local->rate_ctrl;

	/* check if the driver has a SW RC implementation */
	if (ref && ref->ops->get_expected_throughput)
		thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
	else
		thr = drv_get_expected_throughput(local, sta);

	return thr;
}

unsigned long ieee80211_sta_last_active(struct sta_info *sta)
{
	struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);

	if (!sta->status_stats.last_ack ||
	    time_after(stats->last_rx, sta->status_stats.last_ack))
		return stats->last_rx;
	return sta->status_stats.last_ack;
}

static void sta_update_codel_params(struct sta_info *sta, u32 thr)
{
	if (!sta->sdata->local->ops->wake_tx_queue)
		return;

	if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) {
		sta->cparams.target = MS2TIME(50);
		sta->cparams.interval = MS2TIME(300);
		sta->cparams.ecn = false;
	} else {
		sta->cparams.target = MS2TIME(20);
		sta->cparams.interval = MS2TIME(100);
		sta->cparams.ecn = true;
	}
}

void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
					   u32 thr)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	sta_update_codel_params(sta, thr);
}