1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2002-2005, Instant802 Networks, Inc.
4  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5  * Copyright 2013-2014  Intel Mobile Communications GmbH
6  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
7  * Copyright (C) 2018-2020 Intel Corporation
8  */
9
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/etherdevice.h>
13 #include <linux/netdevice.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/skbuff.h>
17 #include <linux/if_arp.h>
18 #include <linux/timer.h>
19 #include <linux/rtnetlink.h>
20
21 #include <net/codel.h>
22 #include <net/mac80211.h>
23 #include "ieee80211_i.h"
24 #include "driver-ops.h"
25 #include "rate.h"
26 #include "sta_info.h"
27 #include "debugfs_sta.h"
28 #include "mesh.h"
29 #include "wme.h"
30
31 /**
32  * DOC: STA information lifetime rules
33  *
34  * STA info structures (&struct sta_info) are managed in a hash table
35  * for faster lookup and a list for iteration. They are managed using
36  * RCU, i.e. access to the list and hash table is protected by RCU.
37  *
38  * Upon allocating a STA info structure with sta_info_alloc(), the caller
39  * owns that structure. It must then insert it into the hash table using
40  * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
41  * case (which acquires an rcu read section but must not be called from
42  * within one) will the pointer still be valid after the call. Note that
43  * the caller may not do much with the STA info before inserting it; in
44  * particular, it may not start any mesh peer link management or add
45  * encryption keys.
46  *
47  * When the insertion fails (sta_info_insert() returns non-zero), the
48  * structure will have been freed by sta_info_insert()!
49  *
50  * Station entries are added by mac80211 when you establish a link with a
51  * peer. This means different things for the different types of interfaces
52  * we support. For a regular station, this means we add the AP STA when we
53  * receive an association response from the AP. For IBSS, this occurs when
54  * we get to know about a peer on the same IBSS. For WDS, we add the STA for
55  * the peer immediately upon device open. In AP mode, we add a station entry
56  * for each peer STA upon request from userspace through nl80211.
57  *
58  * In order to remove a STA info structure, various sta_info_destroy_*()
59  * calls are available.
60  *
61  * There is no concept of ownership on a STA entry; each structure is
62  * owned by the global hash table/list until it is removed. All users of
63  * the structure need to be RCU protected so that the structure won't be
64  * freed before they are done using it.
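 *
 * As a rough usage sketch (illustrative only, not a verbatim mac80211
 * code path; "sdata" is assumed to be a valid interface and "peer_addr"
 * the peer's MAC address), the owner allocates the entry, fills in
 * &struct ieee80211_sta fields such as supported rates, and then hands
 * it over:
 *
 *	sta = sta_info_alloc(sdata, peer_addr, GFP_KERNEL);
 *	if (!sta)
 *		return -ENOMEM;
 *	err = sta_info_insert(sta);
 *	if (err)
 *		return err;
 *
 * On failure, sta_info_insert() has already freed the structure, so the
 * caller must not touch it again; on success, the entry is owned by the
 * hash table/list and is later removed with one of the
 * sta_info_destroy_*() helpers, e.g. sta_info_destroy_addr().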
65 */ 66 67 static const struct rhashtable_params sta_rht_params = { 68 .nelem_hint = 3, /* start small */ 69 .automatic_shrinking = true, 70 .head_offset = offsetof(struct sta_info, hash_node), 71 .key_offset = offsetof(struct sta_info, addr), 72 .key_len = ETH_ALEN, 73 .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE, 74 }; 75 76 /* Caller must hold local->sta_mtx */ 77 static int sta_info_hash_del(struct ieee80211_local *local, 78 struct sta_info *sta) 79 { 80 return rhltable_remove(&local->sta_hash, &sta->hash_node, 81 sta_rht_params); 82 } 83 84 static void __cleanup_single_sta(struct sta_info *sta) 85 { 86 int ac, i; 87 struct tid_ampdu_tx *tid_tx; 88 struct ieee80211_sub_if_data *sdata = sta->sdata; 89 struct ieee80211_local *local = sdata->local; 90 struct ps_data *ps; 91 92 if (test_sta_flag(sta, WLAN_STA_PS_STA) || 93 test_sta_flag(sta, WLAN_STA_PS_DRIVER) || 94 test_sta_flag(sta, WLAN_STA_PS_DELIVER)) { 95 if (sta->sdata->vif.type == NL80211_IFTYPE_AP || 96 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 97 ps = &sdata->bss->ps; 98 else if (ieee80211_vif_is_mesh(&sdata->vif)) 99 ps = &sdata->u.mesh.ps; 100 else 101 return; 102 103 clear_sta_flag(sta, WLAN_STA_PS_STA); 104 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 105 clear_sta_flag(sta, WLAN_STA_PS_DELIVER); 106 107 atomic_dec(&ps->num_sta_ps); 108 } 109 110 if (sta->sta.txq[0]) { 111 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 112 struct txq_info *txqi; 113 114 if (!sta->sta.txq[i]) 115 continue; 116 117 txqi = to_txq_info(sta->sta.txq[i]); 118 119 ieee80211_txq_purge(local, txqi); 120 } 121 } 122 123 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 124 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); 125 ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]); 126 ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]); 127 } 128 129 if (ieee80211_vif_is_mesh(&sdata->vif)) 130 mesh_sta_cleanup(sta); 131 132 cancel_work_sync(&sta->drv_deliver_wk); 133 134 /* 135 * Destroy aggregation state here. It would be nice to wait for the 136 * driver to finish aggregation stop and then clean up, but for now 137 * drivers have to handle aggregation stop being requested, followed 138 * directly by station destruction. 
139 */ 140 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 141 kfree(sta->ampdu_mlme.tid_start_tx[i]); 142 tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); 143 if (!tid_tx) 144 continue; 145 ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); 146 kfree(tid_tx); 147 } 148 } 149 150 static void cleanup_single_sta(struct sta_info *sta) 151 { 152 struct ieee80211_sub_if_data *sdata = sta->sdata; 153 struct ieee80211_local *local = sdata->local; 154 155 __cleanup_single_sta(sta); 156 sta_info_free(local, sta); 157 } 158 159 struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local, 160 const u8 *addr) 161 { 162 return rhltable_lookup(&local->sta_hash, addr, sta_rht_params); 163 } 164 165 /* protected by RCU */ 166 struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, 167 const u8 *addr) 168 { 169 struct ieee80211_local *local = sdata->local; 170 struct rhlist_head *tmp; 171 struct sta_info *sta; 172 173 rcu_read_lock(); 174 for_each_sta_info(local, addr, sta, tmp) { 175 if (sta->sdata == sdata) { 176 rcu_read_unlock(); 177 /* this is safe as the caller must already hold 178 * another rcu read section or the mutex 179 */ 180 return sta; 181 } 182 } 183 rcu_read_unlock(); 184 return NULL; 185 } 186 187 /* 188 * Get sta info either from the specified interface 189 * or from one of its vlans 190 */ 191 struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, 192 const u8 *addr) 193 { 194 struct ieee80211_local *local = sdata->local; 195 struct rhlist_head *tmp; 196 struct sta_info *sta; 197 198 rcu_read_lock(); 199 for_each_sta_info(local, addr, sta, tmp) { 200 if (sta->sdata == sdata || 201 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) { 202 rcu_read_unlock(); 203 /* this is safe as the caller must already hold 204 * another rcu read section or the mutex 205 */ 206 return sta; 207 } 208 } 209 rcu_read_unlock(); 210 return NULL; 211 } 212 213 struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local, 214 const u8 *sta_addr, const u8 *vif_addr) 215 { 216 struct rhlist_head *tmp; 217 struct sta_info *sta; 218 219 for_each_sta_info(local, sta_addr, sta, tmp) { 220 if (ether_addr_equal(vif_addr, sta->sdata->vif.addr)) 221 return sta; 222 } 223 224 return NULL; 225 } 226 227 struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, 228 int idx) 229 { 230 struct ieee80211_local *local = sdata->local; 231 struct sta_info *sta; 232 int i = 0; 233 234 list_for_each_entry_rcu(sta, &local->sta_list, list, 235 lockdep_is_held(&local->sta_mtx)) { 236 if (sdata != sta->sdata) 237 continue; 238 if (i < idx) { 239 ++i; 240 continue; 241 } 242 return sta; 243 } 244 245 return NULL; 246 } 247 248 /** 249 * sta_info_free - free STA 250 * 251 * @local: pointer to the global information 252 * @sta: STA info to free 253 * 254 * This function must undo everything done by sta_info_alloc() 255 * that may happen before sta_info_insert(). It may only be 256 * called when sta_info_insert() has not been attempted (and 257 * if that fails, the station is freed anyway.) 258 */ 259 void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) 260 { 261 /* 262 * If we had used sta_info_pre_move_state() then we might not 263 * have gone through the state transitions down again, so do 264 * it here now (and warn if it's inserted). 265 * 266 * This will clear state such as fast TX/RX that may have been 267 * allocated during state transitions. 
268 */ 269 while (sta->sta_state > IEEE80211_STA_NONE) { 270 int ret; 271 272 WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED)); 273 274 ret = sta_info_move_state(sta, sta->sta_state - 1); 275 if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret)) 276 break; 277 } 278 279 if (sta->rate_ctrl) 280 rate_control_free_sta(sta); 281 282 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr); 283 284 if (sta->sta.txq[0]) 285 kfree(to_txq_info(sta->sta.txq[0])); 286 kfree(rcu_dereference_raw(sta->sta.rates)); 287 #ifdef CONFIG_MAC80211_MESH 288 kfree(sta->mesh); 289 #endif 290 free_percpu(sta->pcpu_rx_stats); 291 kfree(sta); 292 } 293 294 /* Caller must hold local->sta_mtx */ 295 static int sta_info_hash_add(struct ieee80211_local *local, 296 struct sta_info *sta) 297 { 298 return rhltable_insert(&local->sta_hash, &sta->hash_node, 299 sta_rht_params); 300 } 301 302 static void sta_deliver_ps_frames(struct work_struct *wk) 303 { 304 struct sta_info *sta; 305 306 sta = container_of(wk, struct sta_info, drv_deliver_wk); 307 308 if (sta->dead) 309 return; 310 311 local_bh_disable(); 312 if (!test_sta_flag(sta, WLAN_STA_PS_STA)) 313 ieee80211_sta_ps_deliver_wakeup(sta); 314 else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) 315 ieee80211_sta_ps_deliver_poll_response(sta); 316 else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) 317 ieee80211_sta_ps_deliver_uapsd(sta); 318 local_bh_enable(); 319 } 320 321 static int sta_prepare_rate_control(struct ieee80211_local *local, 322 struct sta_info *sta, gfp_t gfp) 323 { 324 if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) 325 return 0; 326 327 sta->rate_ctrl = local->rate_ctrl; 328 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, 329 sta, gfp); 330 if (!sta->rate_ctrl_priv) 331 return -ENOMEM; 332 333 return 0; 334 } 335 336 struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, 337 const u8 *addr, gfp_t gfp) 338 { 339 struct ieee80211_local *local = sdata->local; 340 struct ieee80211_hw *hw = &local->hw; 341 struct sta_info *sta; 342 int i; 343 344 sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); 345 if (!sta) 346 return NULL; 347 348 if (ieee80211_hw_check(hw, USES_RSS)) { 349 sta->pcpu_rx_stats = 350 alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp); 351 if (!sta->pcpu_rx_stats) 352 goto free; 353 } 354 355 spin_lock_init(&sta->lock); 356 spin_lock_init(&sta->ps_lock); 357 INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames); 358 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); 359 mutex_init(&sta->ampdu_mlme.mtx); 360 #ifdef CONFIG_MAC80211_MESH 361 if (ieee80211_vif_is_mesh(&sdata->vif)) { 362 sta->mesh = kzalloc(sizeof(*sta->mesh), gfp); 363 if (!sta->mesh) 364 goto free; 365 sta->mesh->plink_sta = sta; 366 spin_lock_init(&sta->mesh->plink_lock); 367 if (ieee80211_vif_is_mesh(&sdata->vif) && 368 !sdata->u.mesh.user_mpm) 369 timer_setup(&sta->mesh->plink_timer, mesh_plink_timer, 370 0); 371 sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE; 372 } 373 #endif 374 375 memcpy(sta->addr, addr, ETH_ALEN); 376 memcpy(sta->sta.addr, addr, ETH_ALEN); 377 sta->sta.max_rx_aggregation_subframes = 378 local->hw.max_rx_aggregation_subframes; 379 380 /* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only. 381 * The Tx path starts to use a key as soon as the key slot ptk_idx 382 * references to is not NULL. To not use the initial Rx-only key 383 * prematurely for Tx initialize ptk_idx to an impossible PTK keyid 384 * which always will refer to a NULL key. 
385 */ 386 BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX); 387 sta->ptk_idx = INVALID_PTK_KEYIDX; 388 389 sta->local = local; 390 sta->sdata = sdata; 391 sta->rx_stats.last_rx = jiffies; 392 393 u64_stats_init(&sta->rx_stats.syncp); 394 395 sta->sta_state = IEEE80211_STA_NONE; 396 397 /* Mark TID as unreserved */ 398 sta->reserved_tid = IEEE80211_TID_UNRESERVED; 399 400 sta->last_connected = ktime_get_seconds(); 401 ewma_signal_init(&sta->rx_stats_avg.signal); 402 ewma_avg_signal_init(&sta->status_stats.avg_ack_signal); 403 for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++) 404 ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]); 405 406 if (local->ops->wake_tx_queue) { 407 void *txq_data; 408 int size = sizeof(struct txq_info) + 409 ALIGN(hw->txq_data_size, sizeof(void *)); 410 411 txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp); 412 if (!txq_data) 413 goto free; 414 415 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 416 struct txq_info *txq = txq_data + i * size; 417 418 /* might not do anything for the bufferable MMPDU TXQ */ 419 ieee80211_txq_init(sdata, sta, txq, i); 420 } 421 } 422 423 if (sta_prepare_rate_control(local, sta, gfp)) 424 goto free_txq; 425 426 sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT; 427 428 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 429 skb_queue_head_init(&sta->ps_tx_buf[i]); 430 skb_queue_head_init(&sta->tx_filtered[i]); 431 sta->airtime[i].deficit = sta->airtime_weight; 432 atomic_set(&sta->airtime[i].aql_tx_pending, 0); 433 sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i]; 434 sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i]; 435 } 436 437 for (i = 0; i < IEEE80211_NUM_TIDS; i++) 438 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); 439 440 for (i = 0; i < NUM_NL80211_BANDS; i++) { 441 u32 mandatory = 0; 442 int r; 443 444 if (!hw->wiphy->bands[i]) 445 continue; 446 447 switch (i) { 448 case NL80211_BAND_2GHZ: 449 /* 450 * We use both here, even if we cannot really know for 451 * sure the station will support both, but the only use 452 * for this is when we don't know anything yet and send 453 * management frames, and then we'll pick the lowest 454 * possible rate anyway. 
455              * If we don't include _G here, we cannot find a rate
456              * in P2P, and thus trigger the WARN_ONCE() in rate.c
457              */
458             mandatory = IEEE80211_RATE_MANDATORY_B |
459                         IEEE80211_RATE_MANDATORY_G;
460             break;
461         case NL80211_BAND_5GHZ:
462             mandatory = IEEE80211_RATE_MANDATORY_A;
463             break;
464         case NL80211_BAND_60GHZ:
465             WARN_ON(1);
466             mandatory = 0;
467             break;
468         }
469
470         for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) {
471             struct ieee80211_rate *rate;
472
473             rate = &hw->wiphy->bands[i]->bitrates[r];
474
475             if (!(rate->flags & mandatory))
476                 continue;
477             sta->sta.supp_rates[i] |= BIT(r);
478         }
479     }
480
481     sta->sta.smps_mode = IEEE80211_SMPS_OFF;
482     if (sdata->vif.type == NL80211_IFTYPE_AP ||
483         sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
484         struct ieee80211_supported_band *sband;
485         u8 smps;
486
487         sband = ieee80211_get_sband(sdata);
488         if (!sband)
489             goto free_txq;
490
491         smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
492                 IEEE80211_HT_CAP_SM_PS_SHIFT;
493         /*
494          * Assume that hostapd advertises our caps in the beacon and
495          * this is the known_smps_mode for a station that just associated
496          */
497         switch (smps) {
498         case WLAN_HT_SMPS_CONTROL_DISABLED:
499             sta->known_smps_mode = IEEE80211_SMPS_OFF;
500             break;
501         case WLAN_HT_SMPS_CONTROL_STATIC:
502             sta->known_smps_mode = IEEE80211_SMPS_STATIC;
503             break;
504         case WLAN_HT_SMPS_CONTROL_DYNAMIC:
505             sta->known_smps_mode = IEEE80211_SMPS_DYNAMIC;
506             break;
507         default:
508             WARN_ON(1);
509         }
510     }
511
512     sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
513
514     sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD;
515     sta->cparams.target = MS2TIME(20);
516     sta->cparams.interval = MS2TIME(100);
517     sta->cparams.ecn = true;
518
519     sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
520
521     return sta;
522
523 free_txq:
524     if (sta->sta.txq[0])
525         kfree(to_txq_info(sta->sta.txq[0]));
526 free:
527     free_percpu(sta->pcpu_rx_stats);
528 #ifdef CONFIG_MAC80211_MESH
529     kfree(sta->mesh);
530 #endif
531     kfree(sta);
532     return NULL;
533 }
534
535 static int sta_info_insert_check(struct sta_info *sta)
536 {
537     struct ieee80211_sub_if_data *sdata = sta->sdata;
538
539     /*
540      * Can't be a WARN_ON because it can be triggered through a race:
541      * something inserts a STA (on one CPU) without holding the RTNL
542      * and another CPU turns off the net device.
543      */
544     if (unlikely(!ieee80211_sdata_running(sdata)))
545         return -ENETDOWN;
546
547     if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) ||
548                 is_multicast_ether_addr(sta->sta.addr)))
549         return -EINVAL;
550
551     /* The RCU read lock is required by rhashtable due to
552      * asynchronous resize/rehash. We also require the mutex
553      * for correctness.
554 */ 555 rcu_read_lock(); 556 lockdep_assert_held(&sdata->local->sta_mtx); 557 if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) && 558 ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) { 559 rcu_read_unlock(); 560 return -ENOTUNIQ; 561 } 562 rcu_read_unlock(); 563 564 return 0; 565 } 566 567 static int sta_info_insert_drv_state(struct ieee80211_local *local, 568 struct ieee80211_sub_if_data *sdata, 569 struct sta_info *sta) 570 { 571 enum ieee80211_sta_state state; 572 int err = 0; 573 574 for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) { 575 err = drv_sta_state(local, sdata, sta, state, state + 1); 576 if (err) 577 break; 578 } 579 580 if (!err) { 581 /* 582 * Drivers using legacy sta_add/sta_remove callbacks only 583 * get uploaded set to true after sta_add is called. 584 */ 585 if (!local->ops->sta_add) 586 sta->uploaded = true; 587 return 0; 588 } 589 590 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 591 sdata_info(sdata, 592 "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n", 593 sta->sta.addr, state + 1, err); 594 err = 0; 595 } 596 597 /* unwind on error */ 598 for (; state > IEEE80211_STA_NOTEXIST; state--) 599 WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1)); 600 601 return err; 602 } 603 604 static void 605 ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata) 606 { 607 struct ieee80211_local *local = sdata->local; 608 bool allow_p2p_go_ps = sdata->vif.p2p; 609 struct sta_info *sta; 610 611 rcu_read_lock(); 612 list_for_each_entry_rcu(sta, &local->sta_list, list) { 613 if (sdata != sta->sdata || 614 !test_sta_flag(sta, WLAN_STA_ASSOC)) 615 continue; 616 if (!sta->sta.support_p2p_ps) { 617 allow_p2p_go_ps = false; 618 break; 619 } 620 } 621 rcu_read_unlock(); 622 623 if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) { 624 sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps; 625 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS); 626 } 627 } 628 629 /* 630 * should be called with sta_mtx locked 631 * this function replaces the mutex lock 632 * with a RCU lock 633 */ 634 static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) 635 { 636 struct ieee80211_local *local = sta->local; 637 struct ieee80211_sub_if_data *sdata = sta->sdata; 638 struct station_info *sinfo = NULL; 639 int err = 0; 640 641 lockdep_assert_held(&local->sta_mtx); 642 643 /* check if STA exists already */ 644 if (sta_info_get_bss(sdata, sta->sta.addr)) { 645 err = -EEXIST; 646 goto out_err; 647 } 648 649 sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL); 650 if (!sinfo) { 651 err = -ENOMEM; 652 goto out_err; 653 } 654 655 local->num_sta++; 656 local->sta_generation++; 657 smp_mb(); 658 659 /* simplify things and don't accept BA sessions yet */ 660 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 661 662 /* make the station visible */ 663 err = sta_info_hash_add(local, sta); 664 if (err) 665 goto out_drop_sta; 666 667 list_add_tail_rcu(&sta->list, &local->sta_list); 668 669 /* notify driver */ 670 err = sta_info_insert_drv_state(local, sdata, sta); 671 if (err) 672 goto out_remove; 673 674 set_sta_flag(sta, WLAN_STA_INSERTED); 675 676 if (sta->sta_state >= IEEE80211_STA_ASSOC) { 677 ieee80211_recalc_min_chandef(sta->sdata); 678 if (!sta->sta.support_p2p_ps) 679 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 680 } 681 682 /* accept BA sessions now */ 683 clear_sta_flag(sta, WLAN_STA_BLOCK_BA); 684 685 ieee80211_sta_debugfs_add(sta); 686 rate_control_add_sta_debugfs(sta); 687 688 
sinfo->generation = local->sta_generation; 689 cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); 690 kfree(sinfo); 691 692 sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr); 693 694 /* move reference to rcu-protected */ 695 rcu_read_lock(); 696 mutex_unlock(&local->sta_mtx); 697 698 if (ieee80211_vif_is_mesh(&sdata->vif)) 699 mesh_accept_plinks_update(sdata); 700 701 return 0; 702 out_remove: 703 sta_info_hash_del(local, sta); 704 list_del_rcu(&sta->list); 705 out_drop_sta: 706 local->num_sta--; 707 synchronize_net(); 708 __cleanup_single_sta(sta); 709 out_err: 710 mutex_unlock(&local->sta_mtx); 711 kfree(sinfo); 712 rcu_read_lock(); 713 return err; 714 } 715 716 int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) 717 { 718 struct ieee80211_local *local = sta->local; 719 int err; 720 721 might_sleep(); 722 723 mutex_lock(&local->sta_mtx); 724 725 err = sta_info_insert_check(sta); 726 if (err) { 727 mutex_unlock(&local->sta_mtx); 728 rcu_read_lock(); 729 goto out_free; 730 } 731 732 err = sta_info_insert_finish(sta); 733 if (err) 734 goto out_free; 735 736 return 0; 737 out_free: 738 sta_info_free(local, sta); 739 return err; 740 } 741 742 int sta_info_insert(struct sta_info *sta) 743 { 744 int err = sta_info_insert_rcu(sta); 745 746 rcu_read_unlock(); 747 748 return err; 749 } 750 751 static inline void __bss_tim_set(u8 *tim, u16 id) 752 { 753 /* 754 * This format has been mandated by the IEEE specifications, 755 * so this line may not be changed to use the __set_bit() format. 756 */ 757 tim[id / 8] |= (1 << (id % 8)); 758 } 759 760 static inline void __bss_tim_clear(u8 *tim, u16 id) 761 { 762 /* 763 * This format has been mandated by the IEEE specifications, 764 * so this line may not be changed to use the __clear_bit() format. 765 */ 766 tim[id / 8] &= ~(1 << (id % 8)); 767 } 768 769 static inline bool __bss_tim_get(u8 *tim, u16 id) 770 { 771 /* 772 * This format has been mandated by the IEEE specifications, 773 * so this line may not be changed to use the test_bit() format. 
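 *
 * As a worked example (purely illustrative): for AID 13 this computes
 * tim[13 / 8] & (1 << (13 % 8)), i.e. bit 5 of octet 1 of the bitmap,
 * tim[1] & 0x20 -- the same octet/bit that __bss_tim_set() and
 * __bss_tim_clear() operate on for that AID.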
774 */ 775 return tim[id / 8] & (1 << (id % 8)); 776 } 777 778 static unsigned long ieee80211_tids_for_ac(int ac) 779 { 780 /* If we ever support TIDs > 7, this obviously needs to be adjusted */ 781 switch (ac) { 782 case IEEE80211_AC_VO: 783 return BIT(6) | BIT(7); 784 case IEEE80211_AC_VI: 785 return BIT(4) | BIT(5); 786 case IEEE80211_AC_BE: 787 return BIT(0) | BIT(3); 788 case IEEE80211_AC_BK: 789 return BIT(1) | BIT(2); 790 default: 791 WARN_ON(1); 792 return 0; 793 } 794 } 795 796 static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending) 797 { 798 struct ieee80211_local *local = sta->local; 799 struct ps_data *ps; 800 bool indicate_tim = false; 801 u8 ignore_for_tim = sta->sta.uapsd_queues; 802 int ac; 803 u16 id = sta->sta.aid; 804 805 if (sta->sdata->vif.type == NL80211_IFTYPE_AP || 806 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 807 if (WARN_ON_ONCE(!sta->sdata->bss)) 808 return; 809 810 ps = &sta->sdata->bss->ps; 811 #ifdef CONFIG_MAC80211_MESH 812 } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) { 813 ps = &sta->sdata->u.mesh.ps; 814 #endif 815 } else { 816 return; 817 } 818 819 /* No need to do anything if the driver does all */ 820 if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim) 821 return; 822 823 if (sta->dead) 824 goto done; 825 826 /* 827 * If all ACs are delivery-enabled then we should build 828 * the TIM bit for all ACs anyway; if only some are then 829 * we ignore those and build the TIM bit using only the 830 * non-enabled ones. 831 */ 832 if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1) 833 ignore_for_tim = 0; 834 835 if (ignore_pending) 836 ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1; 837 838 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 839 unsigned long tids; 840 841 if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac]) 842 continue; 843 844 indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) || 845 !skb_queue_empty(&sta->ps_tx_buf[ac]); 846 if (indicate_tim) 847 break; 848 849 tids = ieee80211_tids_for_ac(ac); 850 851 indicate_tim |= 852 sta->driver_buffered_tids & tids; 853 indicate_tim |= 854 sta->txq_buffered_tids & tids; 855 } 856 857 done: 858 spin_lock_bh(&local->tim_lock); 859 860 if (indicate_tim == __bss_tim_get(ps->tim, id)) 861 goto out_unlock; 862 863 if (indicate_tim) 864 __bss_tim_set(ps->tim, id); 865 else 866 __bss_tim_clear(ps->tim, id); 867 868 if (local->ops->set_tim && !WARN_ON(sta->dead)) { 869 local->tim_in_locked_section = true; 870 drv_set_tim(local, &sta->sta, indicate_tim); 871 local->tim_in_locked_section = false; 872 } 873 874 out_unlock: 875 spin_unlock_bh(&local->tim_lock); 876 } 877 878 void sta_info_recalc_tim(struct sta_info *sta) 879 { 880 __sta_info_recalc_tim(sta, false); 881 } 882 883 static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb) 884 { 885 struct ieee80211_tx_info *info; 886 int timeout; 887 888 if (!skb) 889 return false; 890 891 info = IEEE80211_SKB_CB(skb); 892 893 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ 894 timeout = (sta->listen_interval * 895 sta->sdata->vif.bss_conf.beacon_int * 896 32 / 15625) * HZ; 897 if (timeout < STA_TX_BUFFER_EXPIRE) 898 timeout = STA_TX_BUFFER_EXPIRE; 899 return time_after(jiffies, info->control.jiffies + timeout); 900 } 901 902 903 static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local, 904 struct sta_info *sta, int ac) 905 { 906 unsigned long flags; 907 struct sk_buff *skb; 908 909 /* 910 * First check for frames that should expire on the filtered 911 * 
queue. Frames here were rejected by the driver and are on 912 * a separate queue to avoid reordering with normal PS-buffered 913 * frames. They also aren't accounted for right now in the 914 * total_ps_buffered counter. 915 */ 916 for (;;) { 917 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); 918 skb = skb_peek(&sta->tx_filtered[ac]); 919 if (sta_info_buffer_expired(sta, skb)) 920 skb = __skb_dequeue(&sta->tx_filtered[ac]); 921 else 922 skb = NULL; 923 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); 924 925 /* 926 * Frames are queued in order, so if this one 927 * hasn't expired yet we can stop testing. If 928 * we actually reached the end of the queue we 929 * also need to stop, of course. 930 */ 931 if (!skb) 932 break; 933 ieee80211_free_txskb(&local->hw, skb); 934 } 935 936 /* 937 * Now also check the normal PS-buffered queue, this will 938 * only find something if the filtered queue was emptied 939 * since the filtered frames are all before the normal PS 940 * buffered frames. 941 */ 942 for (;;) { 943 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); 944 skb = skb_peek(&sta->ps_tx_buf[ac]); 945 if (sta_info_buffer_expired(sta, skb)) 946 skb = __skb_dequeue(&sta->ps_tx_buf[ac]); 947 else 948 skb = NULL; 949 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); 950 951 /* 952 * frames are queued in order, so if this one 953 * hasn't expired yet (or we reached the end of 954 * the queue) we can stop testing 955 */ 956 if (!skb) 957 break; 958 959 local->total_ps_buffered--; 960 ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n", 961 sta->sta.addr); 962 ieee80211_free_txskb(&local->hw, skb); 963 } 964 965 /* 966 * Finally, recalculate the TIM bit for this station -- it might 967 * now be clear because the station was too slow to retrieve its 968 * frames. 969 */ 970 sta_info_recalc_tim(sta); 971 972 /* 973 * Return whether there are any frames still buffered, this is 974 * used to check whether the cleanup timer still needs to run, 975 * if there are no frames we don't need to rearm the timer. 976 */ 977 return !(skb_queue_empty(&sta->ps_tx_buf[ac]) && 978 skb_queue_empty(&sta->tx_filtered[ac])); 979 } 980 981 static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local, 982 struct sta_info *sta) 983 { 984 bool have_buffered = false; 985 int ac; 986 987 /* This is only necessary for stations on BSS/MBSS interfaces */ 988 if (!sta->sdata->bss && 989 !ieee80211_vif_is_mesh(&sta->sdata->vif)) 990 return false; 991 992 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 993 have_buffered |= 994 sta_info_cleanup_expire_buffered_ac(local, sta, ac); 995 996 return have_buffered; 997 } 998 999 static int __must_check __sta_info_destroy_part1(struct sta_info *sta) 1000 { 1001 struct ieee80211_local *local; 1002 struct ieee80211_sub_if_data *sdata; 1003 int ret; 1004 1005 might_sleep(); 1006 1007 if (!sta) 1008 return -ENOENT; 1009 1010 local = sta->local; 1011 sdata = sta->sdata; 1012 1013 lockdep_assert_held(&local->sta_mtx); 1014 1015 /* 1016 * Before removing the station from the driver and 1017 * rate control, it might still start new aggregation 1018 * sessions -- block that to make sure the tear-down 1019 * will be sufficient. 1020 */ 1021 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 1022 ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA); 1023 1024 /* 1025 * Before removing the station from the driver there might be pending 1026 * rx frames on RSS queues sent prior to the disassociation - wait for 1027 * all such frames to be processed. 
1028 */ 1029 drv_sync_rx_queues(local, sta); 1030 1031 ret = sta_info_hash_del(local, sta); 1032 if (WARN_ON(ret)) 1033 return ret; 1034 1035 /* 1036 * for TDLS peers, make sure to return to the base channel before 1037 * removal. 1038 */ 1039 if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) { 1040 drv_tdls_cancel_channel_switch(local, sdata, &sta->sta); 1041 clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); 1042 } 1043 1044 list_del_rcu(&sta->list); 1045 sta->removed = true; 1046 1047 drv_sta_pre_rcu_remove(local, sta->sdata, sta); 1048 1049 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1050 rcu_access_pointer(sdata->u.vlan.sta) == sta) 1051 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); 1052 1053 return 0; 1054 } 1055 1056 static void __sta_info_destroy_part2(struct sta_info *sta) 1057 { 1058 struct ieee80211_local *local = sta->local; 1059 struct ieee80211_sub_if_data *sdata = sta->sdata; 1060 struct station_info *sinfo; 1061 int ret; 1062 1063 /* 1064 * NOTE: This assumes at least synchronize_net() was done 1065 * after _part1 and before _part2! 1066 */ 1067 1068 might_sleep(); 1069 lockdep_assert_held(&local->sta_mtx); 1070 1071 if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 1072 ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); 1073 WARN_ON_ONCE(ret); 1074 } 1075 1076 /* now keys can no longer be reached */ 1077 ieee80211_free_sta_keys(local, sta); 1078 1079 /* disable TIM bit - last chance to tell driver */ 1080 __sta_info_recalc_tim(sta, true); 1081 1082 sta->dead = true; 1083 1084 local->num_sta--; 1085 local->sta_generation++; 1086 1087 while (sta->sta_state > IEEE80211_STA_NONE) { 1088 ret = sta_info_move_state(sta, sta->sta_state - 1); 1089 if (ret) { 1090 WARN_ON_ONCE(1); 1091 break; 1092 } 1093 } 1094 1095 if (sta->uploaded) { 1096 ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE, 1097 IEEE80211_STA_NOTEXIST); 1098 WARN_ON_ONCE(ret != 0); 1099 } 1100 1101 sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr); 1102 1103 sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); 1104 if (sinfo) 1105 sta_set_sinfo(sta, sinfo, true); 1106 cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); 1107 kfree(sinfo); 1108 1109 ieee80211_sta_debugfs_remove(sta); 1110 1111 cleanup_single_sta(sta); 1112 } 1113 1114 int __must_check __sta_info_destroy(struct sta_info *sta) 1115 { 1116 int err = __sta_info_destroy_part1(sta); 1117 1118 if (err) 1119 return err; 1120 1121 synchronize_net(); 1122 1123 __sta_info_destroy_part2(sta); 1124 1125 return 0; 1126 } 1127 1128 int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr) 1129 { 1130 struct sta_info *sta; 1131 int ret; 1132 1133 mutex_lock(&sdata->local->sta_mtx); 1134 sta = sta_info_get(sdata, addr); 1135 ret = __sta_info_destroy(sta); 1136 mutex_unlock(&sdata->local->sta_mtx); 1137 1138 return ret; 1139 } 1140 1141 int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, 1142 const u8 *addr) 1143 { 1144 struct sta_info *sta; 1145 int ret; 1146 1147 mutex_lock(&sdata->local->sta_mtx); 1148 sta = sta_info_get_bss(sdata, addr); 1149 ret = __sta_info_destroy(sta); 1150 mutex_unlock(&sdata->local->sta_mtx); 1151 1152 return ret; 1153 } 1154 1155 static void sta_info_cleanup(struct timer_list *t) 1156 { 1157 struct ieee80211_local *local = from_timer(local, t, sta_cleanup); 1158 struct sta_info *sta; 1159 bool timer_needed = false; 1160 1161 rcu_read_lock(); 1162 list_for_each_entry_rcu(sta, &local->sta_list, list) 1163 if (sta_info_cleanup_expire_buffered(local, sta)) 1164 timer_needed = true; 1165 
rcu_read_unlock(); 1166 1167 if (local->quiescing) 1168 return; 1169 1170 if (!timer_needed) 1171 return; 1172 1173 mod_timer(&local->sta_cleanup, 1174 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL)); 1175 } 1176 1177 int sta_info_init(struct ieee80211_local *local) 1178 { 1179 int err; 1180 1181 err = rhltable_init(&local->sta_hash, &sta_rht_params); 1182 if (err) 1183 return err; 1184 1185 spin_lock_init(&local->tim_lock); 1186 mutex_init(&local->sta_mtx); 1187 INIT_LIST_HEAD(&local->sta_list); 1188 1189 timer_setup(&local->sta_cleanup, sta_info_cleanup, 0); 1190 return 0; 1191 } 1192 1193 void sta_info_stop(struct ieee80211_local *local) 1194 { 1195 del_timer_sync(&local->sta_cleanup); 1196 rhltable_destroy(&local->sta_hash); 1197 } 1198 1199 1200 int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans) 1201 { 1202 struct ieee80211_local *local = sdata->local; 1203 struct sta_info *sta, *tmp; 1204 LIST_HEAD(free_list); 1205 int ret = 0; 1206 1207 might_sleep(); 1208 1209 WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP); 1210 WARN_ON(vlans && !sdata->bss); 1211 1212 mutex_lock(&local->sta_mtx); 1213 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 1214 if (sdata == sta->sdata || 1215 (vlans && sdata->bss == sta->sdata->bss)) { 1216 if (!WARN_ON(__sta_info_destroy_part1(sta))) 1217 list_add(&sta->free_list, &free_list); 1218 ret++; 1219 } 1220 } 1221 1222 if (!list_empty(&free_list)) { 1223 synchronize_net(); 1224 list_for_each_entry_safe(sta, tmp, &free_list, free_list) 1225 __sta_info_destroy_part2(sta); 1226 } 1227 mutex_unlock(&local->sta_mtx); 1228 1229 return ret; 1230 } 1231 1232 void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 1233 unsigned long exp_time) 1234 { 1235 struct ieee80211_local *local = sdata->local; 1236 struct sta_info *sta, *tmp; 1237 1238 mutex_lock(&local->sta_mtx); 1239 1240 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 1241 unsigned long last_active = ieee80211_sta_last_active(sta); 1242 1243 if (sdata != sta->sdata) 1244 continue; 1245 1246 if (time_is_before_jiffies(last_active + exp_time)) { 1247 sta_dbg(sta->sdata, "expiring inactive STA %pM\n", 1248 sta->sta.addr); 1249 1250 if (ieee80211_vif_is_mesh(&sdata->vif) && 1251 test_sta_flag(sta, WLAN_STA_PS_STA)) 1252 atomic_dec(&sdata->u.mesh.ps.num_sta_ps); 1253 1254 WARN_ON(__sta_info_destroy(sta)); 1255 } 1256 } 1257 1258 mutex_unlock(&local->sta_mtx); 1259 } 1260 1261 struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, 1262 const u8 *addr, 1263 const u8 *localaddr) 1264 { 1265 struct ieee80211_local *local = hw_to_local(hw); 1266 struct rhlist_head *tmp; 1267 struct sta_info *sta; 1268 1269 /* 1270 * Just return a random station if localaddr is NULL 1271 * ... first in list. 
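 *
 * Illustrative use only (not a call site in this file; "hdr" stands in
 * for a received frame header): a driver RX path would typically do
 *
 *	rcu_read_lock();
 *	pubsta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
 *	if (pubsta)
 *		... use pubsta ...
 *	rcu_read_unlock();
 *
 * since the resulting pointer, like all sta_info lookups here, is only
 * valid under RCU protection.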
1272 */ 1273 for_each_sta_info(local, addr, sta, tmp) { 1274 if (localaddr && 1275 !ether_addr_equal(sta->sdata->vif.addr, localaddr)) 1276 continue; 1277 if (!sta->uploaded) 1278 return NULL; 1279 return &sta->sta; 1280 } 1281 1282 return NULL; 1283 } 1284 EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr); 1285 1286 struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, 1287 const u8 *addr) 1288 { 1289 struct sta_info *sta; 1290 1291 if (!vif) 1292 return NULL; 1293 1294 sta = sta_info_get_bss(vif_to_sdata(vif), addr); 1295 if (!sta) 1296 return NULL; 1297 1298 if (!sta->uploaded) 1299 return NULL; 1300 1301 return &sta->sta; 1302 } 1303 EXPORT_SYMBOL(ieee80211_find_sta); 1304 1305 /* powersave support code */ 1306 void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) 1307 { 1308 struct ieee80211_sub_if_data *sdata = sta->sdata; 1309 struct ieee80211_local *local = sdata->local; 1310 struct sk_buff_head pending; 1311 int filtered = 0, buffered = 0, ac, i; 1312 unsigned long flags; 1313 struct ps_data *ps; 1314 1315 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1316 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 1317 u.ap); 1318 1319 if (sdata->vif.type == NL80211_IFTYPE_AP) 1320 ps = &sdata->bss->ps; 1321 else if (ieee80211_vif_is_mesh(&sdata->vif)) 1322 ps = &sdata->u.mesh.ps; 1323 else 1324 return; 1325 1326 clear_sta_flag(sta, WLAN_STA_SP); 1327 1328 BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1); 1329 sta->driver_buffered_tids = 0; 1330 sta->txq_buffered_tids = 0; 1331 1332 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 1333 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); 1334 1335 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 1336 if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i])) 1337 continue; 1338 1339 schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i])); 1340 } 1341 1342 skb_queue_head_init(&pending); 1343 1344 /* sync with ieee80211_tx_h_unicast_ps_buf */ 1345 spin_lock(&sta->ps_lock); 1346 /* Send all buffered frames to the station */ 1347 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1348 int count = skb_queue_len(&pending), tmp; 1349 1350 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); 1351 skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); 1352 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); 1353 tmp = skb_queue_len(&pending); 1354 filtered += tmp - count; 1355 count = tmp; 1356 1357 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); 1358 skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); 1359 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); 1360 tmp = skb_queue_len(&pending); 1361 buffered += tmp - count; 1362 } 1363 1364 ieee80211_add_pending_skbs(local, &pending); 1365 1366 /* now we're no longer in the deliver code */ 1367 clear_sta_flag(sta, WLAN_STA_PS_DELIVER); 1368 1369 /* The station might have polled and then woken up before we responded, 1370 * so clear these flags now to avoid them sticking around. 
1371  */
1372     clear_sta_flag(sta, WLAN_STA_PSPOLL);
1373     clear_sta_flag(sta, WLAN_STA_UAPSD);
1374     spin_unlock(&sta->ps_lock);
1375
1376     atomic_dec(&ps->num_sta_ps);
1377
1378     local->total_ps_buffered -= buffered;
1379
1380     sta_info_recalc_tim(sta);
1381
1382     ps_dbg(sdata,
1383            "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n",
1384            sta->sta.addr, sta->sta.aid, filtered, buffered);
1385
1386     ieee80211_check_fast_xmit(sta);
1387 }
1388
1389 static void ieee80211_send_null_response(struct sta_info *sta, int tid,
1390                                          enum ieee80211_frame_release_type reason,
1391                                          bool call_driver, bool more_data)
1392 {
1393     struct ieee80211_sub_if_data *sdata = sta->sdata;
1394     struct ieee80211_local *local = sdata->local;
1395     struct ieee80211_qos_hdr *nullfunc;
1396     struct sk_buff *skb;
1397     int size = sizeof(*nullfunc);
1398     __le16 fc;
1399     bool qos = sta->sta.wme;
1400     struct ieee80211_tx_info *info;
1401     struct ieee80211_chanctx_conf *chanctx_conf;
1402
1403     /* Don't send NDPs when the STA is connected with HE */
1404     if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1405         !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
1406         return;
1407
1408     if (qos) {
1409         fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
1410                          IEEE80211_STYPE_QOS_NULLFUNC |
1411                          IEEE80211_FCTL_FROMDS);
1412     } else {
1413         size -= 2;
1414         fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
1415                          IEEE80211_STYPE_NULLFUNC |
1416                          IEEE80211_FCTL_FROMDS);
1417     }
1418
1419     skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
1420     if (!skb)
1421         return;
1422
1423     skb_reserve(skb, local->hw.extra_tx_headroom);
1424
1425     nullfunc = skb_put(skb, size);
1426     nullfunc->frame_control = fc;
1427     nullfunc->duration_id = 0;
1428     memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
1429     memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
1430     memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
1431     nullfunc->seq_ctrl = 0;
1432
1433     skb->priority = tid;
1434     skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
1435     if (qos) {
1436         nullfunc->qos_ctrl = cpu_to_le16(tid);
1437
1438         if (reason == IEEE80211_FRAME_RELEASE_UAPSD) {
1439             nullfunc->qos_ctrl |=
1440                 cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
1441             if (more_data)
1442                 nullfunc->frame_control |=
1443                     cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1444         }
1445     }
1446
1447     info = IEEE80211_SKB_CB(skb);
1448
1449     /*
1450      * Tell TX path to send this frame even though the
1451      * STA may still remain in PS mode after this frame
1452      * exchange. Also set EOSP to indicate this packet
1453      * ends the poll/service period.
1454 */ 1455 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | 1456 IEEE80211_TX_STATUS_EOSP | 1457 IEEE80211_TX_CTL_REQ_TX_STATUS; 1458 1459 info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; 1460 1461 if (call_driver) 1462 drv_allow_buffered_frames(local, sta, BIT(tid), 1, 1463 reason, false); 1464 1465 skb->dev = sdata->dev; 1466 1467 rcu_read_lock(); 1468 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 1469 if (WARN_ON(!chanctx_conf)) { 1470 rcu_read_unlock(); 1471 kfree_skb(skb); 1472 return; 1473 } 1474 1475 info->band = chanctx_conf->def.chan->band; 1476 ieee80211_xmit(sdata, sta, skb); 1477 rcu_read_unlock(); 1478 } 1479 1480 static int find_highest_prio_tid(unsigned long tids) 1481 { 1482 /* lower 3 TIDs aren't ordered perfectly */ 1483 if (tids & 0xF8) 1484 return fls(tids) - 1; 1485 /* TID 0 is BE just like TID 3 */ 1486 if (tids & BIT(0)) 1487 return 0; 1488 return fls(tids) - 1; 1489 } 1490 1491 /* Indicates if the MORE_DATA bit should be set in the last 1492 * frame obtained by ieee80211_sta_ps_get_frames. 1493 * Note that driver_release_tids is relevant only if 1494 * reason = IEEE80211_FRAME_RELEASE_PSPOLL 1495 */ 1496 static bool 1497 ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs, 1498 enum ieee80211_frame_release_type reason, 1499 unsigned long driver_release_tids) 1500 { 1501 int ac; 1502 1503 /* If the driver has data on more than one TID then 1504 * certainly there's more data if we release just a 1505 * single frame now (from a single TID). This will 1506 * only happen for PS-Poll. 1507 */ 1508 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL && 1509 hweight16(driver_release_tids) > 1) 1510 return true; 1511 1512 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1513 if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) 1514 continue; 1515 1516 if (!skb_queue_empty(&sta->tx_filtered[ac]) || 1517 !skb_queue_empty(&sta->ps_tx_buf[ac])) 1518 return true; 1519 } 1520 1521 return false; 1522 } 1523 1524 static void 1525 ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs, 1526 enum ieee80211_frame_release_type reason, 1527 struct sk_buff_head *frames, 1528 unsigned long *driver_release_tids) 1529 { 1530 struct ieee80211_sub_if_data *sdata = sta->sdata; 1531 struct ieee80211_local *local = sdata->local; 1532 int ac; 1533 1534 /* Get response frame(s) and more data bit for the last one. */ 1535 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1536 unsigned long tids; 1537 1538 if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) 1539 continue; 1540 1541 tids = ieee80211_tids_for_ac(ac); 1542 1543 /* if we already have frames from software, then we can't also 1544 * release from hardware queues 1545 */ 1546 if (skb_queue_empty(frames)) { 1547 *driver_release_tids |= 1548 sta->driver_buffered_tids & tids; 1549 *driver_release_tids |= sta->txq_buffered_tids & tids; 1550 } 1551 1552 if (!*driver_release_tids) { 1553 struct sk_buff *skb; 1554 1555 while (n_frames > 0) { 1556 skb = skb_dequeue(&sta->tx_filtered[ac]); 1557 if (!skb) { 1558 skb = skb_dequeue( 1559 &sta->ps_tx_buf[ac]); 1560 if (skb) 1561 local->total_ps_buffered--; 1562 } 1563 if (!skb) 1564 break; 1565 n_frames--; 1566 __skb_queue_tail(frames, skb); 1567 } 1568 } 1569 1570 /* If we have more frames buffered on this AC, then abort the 1571 * loop since we can't send more data from other ACs before 1572 * the buffered frames from this. 
1573  */
1574         if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
1575             !skb_queue_empty(&sta->ps_tx_buf[ac]))
1576             break;
1577     }
1578 }
1579
1580 static void
1581 ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1582                                   int n_frames, u8 ignored_acs,
1583                                   enum ieee80211_frame_release_type reason)
1584 {
1585     struct ieee80211_sub_if_data *sdata = sta->sdata;
1586     struct ieee80211_local *local = sdata->local;
1587     unsigned long driver_release_tids = 0;
1588     struct sk_buff_head frames;
1589     bool more_data;
1590
1591     /* Service or PS-Poll period starts */
1592     set_sta_flag(sta, WLAN_STA_SP);
1593
1594     __skb_queue_head_init(&frames);
1595
1596     ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason,
1597                                 &frames, &driver_release_tids);
1598
1599     more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids);
1600
1601     if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL)
1602         driver_release_tids =
1603             BIT(find_highest_prio_tid(driver_release_tids));
1604
1605     if (skb_queue_empty(&frames) && !driver_release_tids) {
1606         int tid, ac;
1607
1608         /*
1609          * For PS-Poll, this can only happen due to a race condition
1610          * when we set the TIM bit and the station notices it, but
1611          * before it can poll for the frame we expire it.
1612          *
1613          * For uAPSD, this is said in the standard (11.2.1.5 h):
1614          * At each unscheduled SP for a non-AP STA, the AP shall
1615          * attempt to transmit at least one MSDU or MMPDU, but no
1616          * more than the value specified in the Max SP Length field
1617          * in the QoS Capability element from delivery-enabled ACs,
1618          * that are destined for the non-AP STA.
1619          *
1620          * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
1621          */
1622
1623         /* This will evaluate to 1, 3, 5 or 7. */
1624         for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
1625             if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
1626                 break;
1627         tid = 7 - 2 * ac;
1628
1629         ieee80211_send_null_response(sta, tid, reason, true, false);
1630     } else if (!driver_release_tids) {
1631         struct sk_buff_head pending;
1632         struct sk_buff *skb;
1633         int num = 0;
1634         u16 tids = 0;
1635         bool need_null = false;
1636
1637         skb_queue_head_init(&pending);
1638
1639         while ((skb = __skb_dequeue(&frames))) {
1640             struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1641             struct ieee80211_hdr *hdr = (void *) skb->data;
1642             u8 *qoshdr = NULL;
1643
1644             num++;
1645
1646             /*
1647              * Tell TX path to send this frame even though the
1648              * STA may still remain in PS mode after this frame
1649              * exchange.
1650 */ 1651 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; 1652 info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; 1653 1654 /* 1655 * Use MoreData flag to indicate whether there are 1656 * more buffered frames for this STA 1657 */ 1658 if (more_data || !skb_queue_empty(&frames)) 1659 hdr->frame_control |= 1660 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1661 else 1662 hdr->frame_control &= 1663 cpu_to_le16(~IEEE80211_FCTL_MOREDATA); 1664 1665 if (ieee80211_is_data_qos(hdr->frame_control) || 1666 ieee80211_is_qos_nullfunc(hdr->frame_control)) 1667 qoshdr = ieee80211_get_qos_ctl(hdr); 1668 1669 tids |= BIT(skb->priority); 1670 1671 __skb_queue_tail(&pending, skb); 1672 1673 /* end service period after last frame or add one */ 1674 if (!skb_queue_empty(&frames)) 1675 continue; 1676 1677 if (reason != IEEE80211_FRAME_RELEASE_UAPSD) { 1678 /* for PS-Poll, there's only one frame */ 1679 info->flags |= IEEE80211_TX_STATUS_EOSP | 1680 IEEE80211_TX_CTL_REQ_TX_STATUS; 1681 break; 1682 } 1683 1684 /* For uAPSD, things are a bit more complicated. If the 1685 * last frame has a QoS header (i.e. is a QoS-data or 1686 * QoS-nulldata frame) then just set the EOSP bit there 1687 * and be done. 1688 * If the frame doesn't have a QoS header (which means 1689 * it should be a bufferable MMPDU) then we can't set 1690 * the EOSP bit in the QoS header; add a QoS-nulldata 1691 * frame to the list to send it after the MMPDU. 1692 * 1693 * Note that this code is only in the mac80211-release 1694 * code path, we assume that the driver will not buffer 1695 * anything but QoS-data frames, or if it does, will 1696 * create the QoS-nulldata frame by itself if needed. 1697 * 1698 * Cf. 802.11-2012 10.2.1.10 (c). 1699 */ 1700 if (qoshdr) { 1701 *qoshdr |= IEEE80211_QOS_CTL_EOSP; 1702 1703 info->flags |= IEEE80211_TX_STATUS_EOSP | 1704 IEEE80211_TX_CTL_REQ_TX_STATUS; 1705 } else { 1706 /* The standard isn't completely clear on this 1707 * as it says the more-data bit should be set 1708 * if there are more BUs. The QoS-Null frame 1709 * we're about to send isn't buffered yet, we 1710 * only create it below, but let's pretend it 1711 * was buffered just in case some clients only 1712 * expect more-data=0 when eosp=1. 1713 */ 1714 hdr->frame_control |= 1715 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1716 need_null = true; 1717 num++; 1718 } 1719 break; 1720 } 1721 1722 drv_allow_buffered_frames(local, sta, tids, num, 1723 reason, more_data); 1724 1725 ieee80211_add_pending_skbs(local, &pending); 1726 1727 if (need_null) 1728 ieee80211_send_null_response( 1729 sta, find_highest_prio_tid(tids), 1730 reason, false, false); 1731 1732 sta_info_recalc_tim(sta); 1733 } else { 1734 int tid; 1735 1736 /* 1737 * We need to release a frame that is buffered somewhere in the 1738 * driver ... it'll have to handle that. 1739 * Note that the driver also has to check the number of frames 1740 * on the TIDs we're releasing from - if there are more than 1741 * n_frames it has to set the more-data bit (if we didn't ask 1742 * it to set it anyway due to other buffered frames); if there 1743 * are fewer than n_frames it has to make sure to adjust that 1744 * to allow the service period to end properly. 1745 */ 1746 drv_release_buffered_frames(local, sta, driver_release_tids, 1747 n_frames, reason, more_data); 1748 1749 /* 1750 * Note that we don't recalculate the TIM bit here as it would 1751 * most likely have no effect at all unless the driver told us 1752 * that the TID(s) became empty before returning here from the 1753 * release function. 
1754 * Either way, however, when the driver tells us that the TID(s) 1755 * became empty or we find that a txq became empty, we'll do the 1756 * TIM recalculation. 1757 */ 1758 1759 if (!sta->sta.txq[0]) 1760 return; 1761 1762 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { 1763 if (!sta->sta.txq[tid] || 1764 !(driver_release_tids & BIT(tid)) || 1765 txq_has_queue(sta->sta.txq[tid])) 1766 continue; 1767 1768 sta_info_recalc_tim(sta); 1769 break; 1770 } 1771 } 1772 } 1773 1774 void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta) 1775 { 1776 u8 ignore_for_response = sta->sta.uapsd_queues; 1777 1778 /* 1779 * If all ACs are delivery-enabled then we should reply 1780 * from any of them, if only some are enabled we reply 1781 * only from the non-enabled ones. 1782 */ 1783 if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1) 1784 ignore_for_response = 0; 1785 1786 ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response, 1787 IEEE80211_FRAME_RELEASE_PSPOLL); 1788 } 1789 1790 void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta) 1791 { 1792 int n_frames = sta->sta.max_sp; 1793 u8 delivery_enabled = sta->sta.uapsd_queues; 1794 1795 /* 1796 * If we ever grow support for TSPEC this might happen if 1797 * the TSPEC update from hostapd comes in between a trigger 1798 * frame setting WLAN_STA_UAPSD in the RX path and this 1799 * actually getting called. 1800 */ 1801 if (!delivery_enabled) 1802 return; 1803 1804 switch (sta->sta.max_sp) { 1805 case 1: 1806 n_frames = 2; 1807 break; 1808 case 2: 1809 n_frames = 4; 1810 break; 1811 case 3: 1812 n_frames = 6; 1813 break; 1814 case 0: 1815 /* XXX: what is a good value? */ 1816 n_frames = 128; 1817 break; 1818 } 1819 1820 ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled, 1821 IEEE80211_FRAME_RELEASE_UAPSD); 1822 } 1823 1824 void ieee80211_sta_block_awake(struct ieee80211_hw *hw, 1825 struct ieee80211_sta *pubsta, bool block) 1826 { 1827 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1828 1829 trace_api_sta_block_awake(sta->local, pubsta, block); 1830 1831 if (block) { 1832 set_sta_flag(sta, WLAN_STA_PS_DRIVER); 1833 ieee80211_clear_fast_xmit(sta); 1834 return; 1835 } 1836 1837 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1838 return; 1839 1840 if (!test_sta_flag(sta, WLAN_STA_PS_STA)) { 1841 set_sta_flag(sta, WLAN_STA_PS_DELIVER); 1842 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 1843 ieee80211_queue_work(hw, &sta->drv_deliver_wk); 1844 } else if (test_sta_flag(sta, WLAN_STA_PSPOLL) || 1845 test_sta_flag(sta, WLAN_STA_UAPSD)) { 1846 /* must be asleep in this case */ 1847 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 1848 ieee80211_queue_work(hw, &sta->drv_deliver_wk); 1849 } else { 1850 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 1851 ieee80211_check_fast_xmit(sta); 1852 } 1853 } 1854 EXPORT_SYMBOL(ieee80211_sta_block_awake); 1855 1856 void ieee80211_sta_eosp(struct ieee80211_sta *pubsta) 1857 { 1858 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1859 struct ieee80211_local *local = sta->local; 1860 1861 trace_api_eosp(local, pubsta); 1862 1863 clear_sta_flag(sta, WLAN_STA_SP); 1864 } 1865 EXPORT_SYMBOL(ieee80211_sta_eosp); 1866 1867 void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid) 1868 { 1869 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1870 enum ieee80211_frame_release_type reason; 1871 bool more_data; 1872 1873 trace_api_send_eosp_nullfunc(sta->local, pubsta, tid); 1874 1875 reason = IEEE80211_FRAME_RELEASE_UAPSD; 1876 more_data = 
ieee80211_sta_ps_more_data(sta, ~sta->sta.uapsd_queues, 1877 reason, 0); 1878 1879 ieee80211_send_null_response(sta, tid, reason, false, more_data); 1880 } 1881 EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc); 1882 1883 void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, 1884 u8 tid, bool buffered) 1885 { 1886 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1887 1888 if (WARN_ON(tid >= IEEE80211_NUM_TIDS)) 1889 return; 1890 1891 trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered); 1892 1893 if (buffered) 1894 set_bit(tid, &sta->driver_buffered_tids); 1895 else 1896 clear_bit(tid, &sta->driver_buffered_tids); 1897 1898 sta_info_recalc_tim(sta); 1899 } 1900 EXPORT_SYMBOL(ieee80211_sta_set_buffered); 1901 1902 void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid, 1903 u32 tx_airtime, u32 rx_airtime) 1904 { 1905 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1906 struct ieee80211_local *local = sta->sdata->local; 1907 u8 ac = ieee80211_ac_from_tid(tid); 1908 u32 airtime = 0; 1909 1910 if (sta->local->airtime_flags & AIRTIME_USE_TX) 1911 airtime += tx_airtime; 1912 if (sta->local->airtime_flags & AIRTIME_USE_RX) 1913 airtime += rx_airtime; 1914 1915 spin_lock_bh(&local->active_txq_lock[ac]); 1916 sta->airtime[ac].tx_airtime += tx_airtime; 1917 sta->airtime[ac].rx_airtime += rx_airtime; 1918 sta->airtime[ac].deficit -= airtime; 1919 spin_unlock_bh(&local->active_txq_lock[ac]); 1920 } 1921 EXPORT_SYMBOL(ieee80211_sta_register_airtime); 1922 1923 void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, 1924 struct sta_info *sta, u8 ac, 1925 u16 tx_airtime, bool tx_completed) 1926 { 1927 int tx_pending; 1928 1929 if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) 1930 return; 1931 1932 if (!tx_completed) { 1933 if (sta) 1934 atomic_add(tx_airtime, 1935 &sta->airtime[ac].aql_tx_pending); 1936 1937 atomic_add(tx_airtime, &local->aql_total_pending_airtime); 1938 return; 1939 } 1940 1941 if (sta) { 1942 tx_pending = atomic_sub_return(tx_airtime, 1943 &sta->airtime[ac].aql_tx_pending); 1944 if (tx_pending < 0) 1945 atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending, 1946 tx_pending, 0); 1947 } 1948 1949 tx_pending = atomic_sub_return(tx_airtime, 1950 &local->aql_total_pending_airtime); 1951 if (WARN_ONCE(tx_pending < 0, 1952 "Device %s AC %d pending airtime underflow: %u, %u", 1953 wiphy_name(local->hw.wiphy), ac, tx_pending, 1954 tx_airtime)) 1955 atomic_cmpxchg(&local->aql_total_pending_airtime, 1956 tx_pending, 0); 1957 } 1958 1959 int sta_info_move_state(struct sta_info *sta, 1960 enum ieee80211_sta_state new_state) 1961 { 1962 might_sleep(); 1963 1964 if (sta->sta_state == new_state) 1965 return 0; 1966 1967 /* check allowed transitions first */ 1968 1969 switch (new_state) { 1970 case IEEE80211_STA_NONE: 1971 if (sta->sta_state != IEEE80211_STA_AUTH) 1972 return -EINVAL; 1973 break; 1974 case IEEE80211_STA_AUTH: 1975 if (sta->sta_state != IEEE80211_STA_NONE && 1976 sta->sta_state != IEEE80211_STA_ASSOC) 1977 return -EINVAL; 1978 break; 1979 case IEEE80211_STA_ASSOC: 1980 if (sta->sta_state != IEEE80211_STA_AUTH && 1981 sta->sta_state != IEEE80211_STA_AUTHORIZED) 1982 return -EINVAL; 1983 break; 1984 case IEEE80211_STA_AUTHORIZED: 1985 if (sta->sta_state != IEEE80211_STA_ASSOC) 1986 return -EINVAL; 1987 break; 1988 default: 1989 WARN(1, "invalid state %d", new_state); 1990 return -EINVAL; 1991 } 1992 1993 sta_dbg(sta->sdata, "moving STA %pM to state %d\n", 1994 sta->sta.addr, new_state); 
1995 1996 /* 1997 * notify the driver before the actual changes so it can 1998 * fail the transition 1999 */ 2000 if (test_sta_flag(sta, WLAN_STA_INSERTED)) { 2001 int err = drv_sta_state(sta->local, sta->sdata, sta, 2002 sta->sta_state, new_state); 2003 if (err) 2004 return err; 2005 } 2006 2007 /* reflect the change in all state variables */ 2008 2009 switch (new_state) { 2010 case IEEE80211_STA_NONE: 2011 if (sta->sta_state == IEEE80211_STA_AUTH) 2012 clear_bit(WLAN_STA_AUTH, &sta->_flags); 2013 break; 2014 case IEEE80211_STA_AUTH: 2015 if (sta->sta_state == IEEE80211_STA_NONE) { 2016 set_bit(WLAN_STA_AUTH, &sta->_flags); 2017 } else if (sta->sta_state == IEEE80211_STA_ASSOC) { 2018 clear_bit(WLAN_STA_ASSOC, &sta->_flags); 2019 ieee80211_recalc_min_chandef(sta->sdata); 2020 if (!sta->sta.support_p2p_ps) 2021 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 2022 } 2023 break; 2024 case IEEE80211_STA_ASSOC: 2025 if (sta->sta_state == IEEE80211_STA_AUTH) { 2026 set_bit(WLAN_STA_ASSOC, &sta->_flags); 2027 sta->assoc_at = ktime_get_boottime_ns(); 2028 ieee80211_recalc_min_chandef(sta->sdata); 2029 if (!sta->sta.support_p2p_ps) 2030 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 2031 } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 2032 ieee80211_vif_dec_num_mcast(sta->sdata); 2033 clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 2034 ieee80211_clear_fast_xmit(sta); 2035 ieee80211_clear_fast_rx(sta); 2036 } 2037 break; 2038 case IEEE80211_STA_AUTHORIZED: 2039 if (sta->sta_state == IEEE80211_STA_ASSOC) { 2040 ieee80211_vif_inc_num_mcast(sta->sdata); 2041 set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 2042 ieee80211_check_fast_xmit(sta); 2043 ieee80211_check_fast_rx(sta); 2044 } 2045 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 2046 sta->sdata->vif.type == NL80211_IFTYPE_AP) 2047 cfg80211_send_layer2_update(sta->sdata->dev, 2048 sta->sta.addr); 2049 break; 2050 default: 2051 break; 2052 } 2053 2054 sta->sta_state = new_state; 2055 2056 return 0; 2057 } 2058 2059 u8 sta_info_tx_streams(struct sta_info *sta) 2060 { 2061 struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.ht_cap; 2062 u8 rx_streams; 2063 2064 if (!sta->sta.ht_cap.ht_supported) 2065 return 1; 2066 2067 if (sta->sta.vht_cap.vht_supported) { 2068 int i; 2069 u16 tx_mcs_map = 2070 le16_to_cpu(sta->sta.vht_cap.vht_mcs.tx_mcs_map); 2071 2072 for (i = 7; i >= 0; i--) 2073 if ((tx_mcs_map & (0x3 << (i * 2))) != 2074 IEEE80211_VHT_MCS_NOT_SUPPORTED) 2075 return i + 1; 2076 } 2077 2078 if (ht_cap->mcs.rx_mask[3]) 2079 rx_streams = 4; 2080 else if (ht_cap->mcs.rx_mask[2]) 2081 rx_streams = 3; 2082 else if (ht_cap->mcs.rx_mask[1]) 2083 rx_streams = 2; 2084 else 2085 rx_streams = 1; 2086 2087 if (!(ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_RX_DIFF)) 2088 return rx_streams; 2089 2090 return ((ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) 2091 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1; 2092 } 2093 2094 static struct ieee80211_sta_rx_stats * 2095 sta_get_last_rx_stats(struct sta_info *sta) 2096 { 2097 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; 2098 struct ieee80211_local *local = sta->local; 2099 int cpu; 2100 2101 if (!ieee80211_hw_check(&local->hw, USES_RSS)) 2102 return stats; 2103 2104 for_each_possible_cpu(cpu) { 2105 struct ieee80211_sta_rx_stats *cpustats; 2106 2107 cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2108 2109 if (time_after(cpustats->last_rx, stats->last_rx)) 2110 stats = cpustats; 2111 } 2112 2113 return stats; 2114 } 2115 2116 static void sta_stats_decode_rate(struct ieee80211_local 
*local, u32 rate, 2117 struct rate_info *rinfo) 2118 { 2119 rinfo->bw = STA_STATS_GET(BW, rate); 2120 2121 switch (STA_STATS_GET(TYPE, rate)) { 2122 case STA_STATS_RATE_TYPE_VHT: 2123 rinfo->flags = RATE_INFO_FLAGS_VHT_MCS; 2124 rinfo->mcs = STA_STATS_GET(VHT_MCS, rate); 2125 rinfo->nss = STA_STATS_GET(VHT_NSS, rate); 2126 if (STA_STATS_GET(SGI, rate)) 2127 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 2128 break; 2129 case STA_STATS_RATE_TYPE_HT: 2130 rinfo->flags = RATE_INFO_FLAGS_MCS; 2131 rinfo->mcs = STA_STATS_GET(HT_MCS, rate); 2132 if (STA_STATS_GET(SGI, rate)) 2133 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 2134 break; 2135 case STA_STATS_RATE_TYPE_LEGACY: { 2136 struct ieee80211_supported_band *sband; 2137 u16 brate; 2138 unsigned int shift; 2139 int band = STA_STATS_GET(LEGACY_BAND, rate); 2140 int rate_idx = STA_STATS_GET(LEGACY_IDX, rate); 2141 2142 sband = local->hw.wiphy->bands[band]; 2143 2144 if (WARN_ON_ONCE(!sband->bitrates)) 2145 break; 2146 2147 brate = sband->bitrates[rate_idx].bitrate; 2148 if (rinfo->bw == RATE_INFO_BW_5) 2149 shift = 2; 2150 else if (rinfo->bw == RATE_INFO_BW_10) 2151 shift = 1; 2152 else 2153 shift = 0; 2154 rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); 2155 break; 2156 } 2157 case STA_STATS_RATE_TYPE_HE: 2158 rinfo->flags = RATE_INFO_FLAGS_HE_MCS; 2159 rinfo->mcs = STA_STATS_GET(HE_MCS, rate); 2160 rinfo->nss = STA_STATS_GET(HE_NSS, rate); 2161 rinfo->he_gi = STA_STATS_GET(HE_GI, rate); 2162 rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate); 2163 rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate); 2164 break; 2165 } 2166 } 2167 2168 static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) 2169 { 2170 u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); 2171 2172 if (rate == STA_STATS_RATE_INVALID) 2173 return -EINVAL; 2174 2175 sta_stats_decode_rate(sta->local, rate, rinfo); 2176 return 0; 2177 } 2178 2179 static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats, 2180 int tid) 2181 { 2182 unsigned int start; 2183 u64 value; 2184 2185 do { 2186 start = u64_stats_fetch_begin(&rxstats->syncp); 2187 value = rxstats->msdu[tid]; 2188 } while (u64_stats_fetch_retry(&rxstats->syncp, start)); 2189 2190 return value; 2191 } 2192 2193 static void sta_set_tidstats(struct sta_info *sta, 2194 struct cfg80211_tid_stats *tidstats, 2195 int tid) 2196 { 2197 struct ieee80211_local *local = sta->local; 2198 int cpu; 2199 2200 if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) { 2201 if (!ieee80211_hw_check(&local->hw, USES_RSS)) 2202 tidstats->rx_msdu += 2203 sta_get_tidstats_msdu(&sta->rx_stats, tid); 2204 2205 if (sta->pcpu_rx_stats) { 2206 for_each_possible_cpu(cpu) { 2207 struct ieee80211_sta_rx_stats *cpurxs; 2208 2209 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2210 tidstats->rx_msdu += 2211 sta_get_tidstats_msdu(cpurxs, tid); 2212 } 2213 } 2214 2215 tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU); 2216 } 2217 2218 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { 2219 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU); 2220 tidstats->tx_msdu = sta->tx_stats.msdu[tid]; 2221 } 2222 2223 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) && 2224 ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { 2225 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); 2226 tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid]; 2227 } 2228 2229 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) && 2230 ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { 2231 
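		/*
		 * Like the retry counter just above, the per-TID failed-MSDU
		 * count is only valid when the hardware reports TX ACK
		 * status, hence the same REPORTS_TX_ACK_STATUS gate.
		 */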
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED); 2232 tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid]; 2233 } 2234 2235 if (local->ops->wake_tx_queue && tid < IEEE80211_NUM_TIDS) { 2236 spin_lock_bh(&local->fq.lock); 2237 rcu_read_lock(); 2238 2239 tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS); 2240 ieee80211_fill_txq_stats(&tidstats->txq_stats, 2241 to_txq_info(sta->sta.txq[tid])); 2242 2243 rcu_read_unlock(); 2244 spin_unlock_bh(&local->fq.lock); 2245 } 2246 } 2247 2248 static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats) 2249 { 2250 unsigned int start; 2251 u64 value; 2252 2253 do { 2254 start = u64_stats_fetch_begin(&rxstats->syncp); 2255 value = rxstats->bytes; 2256 } while (u64_stats_fetch_retry(&rxstats->syncp, start)); 2257 2258 return value; 2259 } 2260 2261 void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, 2262 bool tidstats) 2263 { 2264 struct ieee80211_sub_if_data *sdata = sta->sdata; 2265 struct ieee80211_local *local = sdata->local; 2266 u32 thr = 0; 2267 int i, ac, cpu; 2268 struct ieee80211_sta_rx_stats *last_rxstats; 2269 2270 last_rxstats = sta_get_last_rx_stats(sta); 2271 2272 sinfo->generation = sdata->local->sta_generation; 2273 2274 /* do before driver, so beacon filtering drivers have a 2275 * chance to e.g. just add the number of filtered beacons 2276 * (or just modify the value entirely, of course) 2277 */ 2278 if (sdata->vif.type == NL80211_IFTYPE_STATION) 2279 sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal; 2280 2281 drv_sta_statistics(local, sdata, &sta->sta, sinfo); 2282 2283 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | 2284 BIT_ULL(NL80211_STA_INFO_STA_FLAGS) | 2285 BIT_ULL(NL80211_STA_INFO_BSS_PARAM) | 2286 BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) | 2287 BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) | 2288 BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); 2289 2290 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 2291 sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count; 2292 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS); 2293 } 2294 2295 sinfo->connected_time = ktime_get_seconds() - sta->last_connected; 2296 sinfo->assoc_at = sta->assoc_at; 2297 sinfo->inactive_time = 2298 jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta)); 2299 2300 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | 2301 BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { 2302 sinfo->tx_bytes = 0; 2303 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2304 sinfo->tx_bytes += sta->tx_stats.bytes[ac]; 2305 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); 2306 } 2307 2308 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { 2309 sinfo->tx_packets = 0; 2310 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2311 sinfo->tx_packets += sta->tx_stats.packets[ac]; 2312 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); 2313 } 2314 2315 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) | 2316 BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) { 2317 if (!ieee80211_hw_check(&local->hw, USES_RSS)) 2318 sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats); 2319 2320 if (sta->pcpu_rx_stats) { 2321 for_each_possible_cpu(cpu) { 2322 struct ieee80211_sta_rx_stats *cpurxs; 2323 2324 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2325 sinfo->rx_bytes += sta_get_stats_bytes(cpurxs); 2326 } 2327 } 2328 2329 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); 2330 } 2331 2332 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { 2333 sinfo->rx_packets = sta->rx_stats.packets; 2334 if 
(sta->pcpu_rx_stats) { 2335 for_each_possible_cpu(cpu) { 2336 struct ieee80211_sta_rx_stats *cpurxs; 2337 2338 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2339 sinfo->rx_packets += cpurxs->packets; 2340 } 2341 } 2342 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); 2343 } 2344 2345 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) { 2346 sinfo->tx_retries = sta->status_stats.retry_count; 2347 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); 2348 } 2349 2350 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) { 2351 sinfo->tx_failed = sta->status_stats.retry_failed; 2352 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); 2353 } 2354 2355 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) { 2356 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2357 sinfo->rx_duration += sta->airtime[ac].rx_airtime; 2358 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); 2359 } 2360 2361 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) { 2362 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2363 sinfo->tx_duration += sta->airtime[ac].tx_airtime; 2364 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); 2365 } 2366 2367 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) { 2368 sinfo->airtime_weight = sta->airtime_weight; 2369 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT); 2370 } 2371 2372 sinfo->rx_dropped_misc = sta->rx_stats.dropped; 2373 if (sta->pcpu_rx_stats) { 2374 for_each_possible_cpu(cpu) { 2375 struct ieee80211_sta_rx_stats *cpurxs; 2376 2377 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2378 sinfo->rx_dropped_misc += cpurxs->dropped; 2379 } 2380 } 2381 2382 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2383 !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { 2384 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) | 2385 BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); 2386 sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif); 2387 } 2388 2389 if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || 2390 ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { 2391 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) { 2392 sinfo->signal = (s8)last_rxstats->last_signal; 2393 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); 2394 } 2395 2396 if (!sta->pcpu_rx_stats && 2397 !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) { 2398 sinfo->signal_avg = 2399 -ewma_signal_read(&sta->rx_stats_avg.signal); 2400 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 2401 } 2402 } 2403 2404 /* for the average - if pcpu_rx_stats isn't set - rxstats must point to 2405 * the sta->rx_stats struct, so the check here is fine with and without 2406 * pcpu statistics 2407 */ 2408 if (last_rxstats->chains && 2409 !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) | 2410 BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { 2411 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); 2412 if (!sta->pcpu_rx_stats) 2413 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); 2414 2415 sinfo->chains = last_rxstats->chains; 2416 2417 for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { 2418 sinfo->chain_signal[i] = 2419 last_rxstats->chain_signal_last[i]; 2420 sinfo->chain_signal_avg[i] = 2421 -ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]); 2422 } 2423 } 2424 2425 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) { 2426 sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, 2427 &sinfo->txrate); 2428 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 2429 } 2430 2431 if (!(sinfo->filled & 
BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) { 2432 if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0) 2433 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); 2434 } 2435 2436 if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) { 2437 for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) 2438 sta_set_tidstats(sta, &sinfo->pertid[i], i); 2439 } 2440 2441 if (ieee80211_vif_is_mesh(&sdata->vif)) { 2442 #ifdef CONFIG_MAC80211_MESH 2443 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) | 2444 BIT_ULL(NL80211_STA_INFO_PLID) | 2445 BIT_ULL(NL80211_STA_INFO_PLINK_STATE) | 2446 BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | 2447 BIT_ULL(NL80211_STA_INFO_PEER_PM) | 2448 BIT_ULL(NL80211_STA_INFO_NONPEER_PM) | 2449 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) | 2450 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS); 2451 2452 sinfo->llid = sta->mesh->llid; 2453 sinfo->plid = sta->mesh->plid; 2454 sinfo->plink_state = sta->mesh->plink_state; 2455 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { 2456 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET); 2457 sinfo->t_offset = sta->mesh->t_offset; 2458 } 2459 sinfo->local_pm = sta->mesh->local_pm; 2460 sinfo->peer_pm = sta->mesh->peer_pm; 2461 sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; 2462 sinfo->connected_to_gate = sta->mesh->connected_to_gate; 2463 sinfo->connected_to_as = sta->mesh->connected_to_as; 2464 #endif 2465 } 2466 2467 sinfo->bss_param.flags = 0; 2468 if (sdata->vif.bss_conf.use_cts_prot) 2469 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT; 2470 if (sdata->vif.bss_conf.use_short_preamble) 2471 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; 2472 if (sdata->vif.bss_conf.use_short_slot) 2473 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; 2474 sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period; 2475 sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; 2476 2477 sinfo->sta_flags.set = 0; 2478 sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) | 2479 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | 2480 BIT(NL80211_STA_FLAG_WME) | 2481 BIT(NL80211_STA_FLAG_MFP) | 2482 BIT(NL80211_STA_FLAG_AUTHENTICATED) | 2483 BIT(NL80211_STA_FLAG_ASSOCIATED) | 2484 BIT(NL80211_STA_FLAG_TDLS_PEER); 2485 if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 2486 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED); 2487 if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE)) 2488 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE); 2489 if (sta->sta.wme) 2490 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME); 2491 if (test_sta_flag(sta, WLAN_STA_MFP)) 2492 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP); 2493 if (test_sta_flag(sta, WLAN_STA_AUTH)) 2494 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED); 2495 if (test_sta_flag(sta, WLAN_STA_ASSOC)) 2496 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED); 2497 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) 2498 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); 2499 2500 thr = sta_get_expected_throughput(sta); 2501 2502 if (thr != 0) { 2503 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT); 2504 sinfo->expected_throughput = thr; 2505 } 2506 2507 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) && 2508 sta->status_stats.ack_signal_filled) { 2509 sinfo->ack_signal = sta->status_stats.last_ack_signal; 2510 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); 2511 } 2512 2513 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) && 2514 sta->status_stats.ack_signal_filled) { 2515 sinfo->avg_ack_signal = 2516 -(s8)ewma_avg_signal_read( 
2517 &sta->status_stats.avg_ack_signal); 2518 sinfo->filled |= 2519 BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); 2520 } 2521 2522 if (ieee80211_vif_is_mesh(&sdata->vif)) { 2523 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC); 2524 sinfo->airtime_link_metric = 2525 airtime_link_metric_get(local, sta); 2526 } 2527 } 2528 2529 u32 sta_get_expected_throughput(struct sta_info *sta) 2530 { 2531 struct ieee80211_sub_if_data *sdata = sta->sdata; 2532 struct ieee80211_local *local = sdata->local; 2533 struct rate_control_ref *ref = NULL; 2534 u32 thr = 0; 2535 2536 if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) 2537 ref = local->rate_ctrl; 2538 2539 /* check if the driver has a SW RC implementation */ 2540 if (ref && ref->ops->get_expected_throughput) 2541 thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv); 2542 else 2543 thr = drv_get_expected_throughput(local, sta); 2544 2545 return thr; 2546 } 2547 2548 unsigned long ieee80211_sta_last_active(struct sta_info *sta) 2549 { 2550 struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); 2551 2552 if (!sta->status_stats.last_ack || 2553 time_after(stats->last_rx, sta->status_stats.last_ack)) 2554 return stats->last_rx; 2555 return sta->status_stats.last_ack; 2556 } 2557 2558 static void sta_update_codel_params(struct sta_info *sta, u32 thr) 2559 { 2560 if (!sta->sdata->local->ops->wake_tx_queue) 2561 return; 2562 2563 if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) { 2564 sta->cparams.target = MS2TIME(50); 2565 sta->cparams.interval = MS2TIME(300); 2566 sta->cparams.ecn = false; 2567 } else { 2568 sta->cparams.target = MS2TIME(20); 2569 sta->cparams.interval = MS2TIME(100); 2570 sta->cparams.ecn = true; 2571 } 2572 } 2573 2574 void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta, 2575 u32 thr) 2576 { 2577 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2578 2579 sta_update_codel_params(sta, thr); 2580 } 2581
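/*
 * Example (illustrative sketch only, not part of mac80211): a driver that
 * measures per-MPDU airtime in firmware could feed the exported
 * ieee80211_sta_register_airtime() hook above from its TX-status handler,
 * which keeps the per-AC airtime accounting and deficit used by the airtime
 * scheduler up to date.  The drv_tx_done() name and its parameters are
 * hypothetical.
 *
 *	static void drv_tx_done(struct ieee80211_hw *hw,
 *				struct ieee80211_sta *pubsta,
 *				u8 tid, u32 tx_airtime_usec)
 *	{
 *		// Credit the measured TX airtime against this station;
 *		// RX airtime is passed as 0 because this driver only
 *		// accounts the TX side.
 *		ieee80211_sta_register_airtime(pubsta, tid,
 *					       tx_airtime_usec, 0);
 *	}
 */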