// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2023 Intel Corporation
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <linux/rtnetlink.h>

#include <net/codel.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "sta_info.h"
#include "debugfs_sta.h"
#include "mesh.h"
#include "wme.h"

/**
 * DOC: STA information lifetime rules
 *
 * STA info structures (&struct sta_info) are managed in a hash table
 * for faster lookup and a list for iteration. They are managed using
 * RCU, i.e. access to the list and hash table is protected by RCU.
 *
 * Upon allocating a STA info structure with sta_info_alloc(), the caller
 * owns that structure. It must then insert it into the hash table using
 * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
 * case (which acquires an RCU read section but must not be called from
 * within one) will the pointer still be valid after the call. Note that
 * the caller may not do much with the STA info before inserting it; in
 * particular, it may not start any mesh peer link management or add
 * encryption keys.
 *
 * When the insertion fails (sta_info_insert() returns non-zero), the
 * structure will have been freed by sta_info_insert()!
 *
 * Station entries are added by mac80211 when you establish a link with a
 * peer. This means different things for the different types of interfaces
 * we support. For a regular station this means we add the AP sta when we
 * receive an association response from the AP. For IBSS this occurs when
 * we get to know about a peer on the same IBSS. For WDS we add the sta for
 * the peer immediately upon device open. When using AP mode we add stations
 * for each respective station upon request from userspace through nl80211.
 *
 * In order to remove a STA info structure, various sta_info_destroy_*()
 * calls are available.
 *
 * There is no concept of ownership on a STA entry; each structure is
 * owned by the global hash table/list until it is removed. All users of
 * the structure need to be RCU protected so that the structure won't be
 * freed before they are done using it.
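 *
 * As a purely illustrative sketch (not a caller that exists in this
 * file), the typical control-path usage of the functions documented
 * above is roughly:
 *
 *      sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
 *      if (!sta)
 *              return -ENOMEM;
 *      (set up rates/capabilities here, but no keys or mesh plinks yet)
 *      err = sta_info_insert(sta);
 *      if (err)
 *              return err;     (on failure, sta has already been freed)
 *
 * after which the entry may only be looked up via sta_info_get() and
 * friends under RCU protection, and is eventually torn down through the
 * sta_info_destroy_*() helpers.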
 */

struct sta_link_alloc {
        struct link_sta_info info;
        struct ieee80211_link_sta sta;
        struct rcu_head rcu_head;
};

static const struct rhashtable_params sta_rht_params = {
        .nelem_hint = 3, /* start small */
        .automatic_shrinking = true,
        .head_offset = offsetof(struct sta_info, hash_node),
        .key_offset = offsetof(struct sta_info, addr),
        .key_len = ETH_ALEN,
        .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
};

static const struct rhashtable_params link_sta_rht_params = {
        .nelem_hint = 3, /* start small */
        .automatic_shrinking = true,
        .head_offset = offsetof(struct link_sta_info, link_hash_node),
        .key_offset = offsetof(struct link_sta_info, addr),
        .key_len = ETH_ALEN,
        .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
};

/* Caller must hold local->sta_mtx */
static int sta_info_hash_del(struct ieee80211_local *local,
                             struct sta_info *sta)
{
        return rhltable_remove(&local->sta_hash, &sta->hash_node,
                               sta_rht_params);
}

static int link_sta_info_hash_add(struct ieee80211_local *local,
                                  struct link_sta_info *link_sta)
{
        lockdep_assert_held(&local->sta_mtx);
        return rhltable_insert(&local->link_sta_hash,
                               &link_sta->link_hash_node,
                               link_sta_rht_params);
}

static int link_sta_info_hash_del(struct ieee80211_local *local,
                                  struct link_sta_info *link_sta)
{
        lockdep_assert_held(&local->sta_mtx);
        return rhltable_remove(&local->link_sta_hash,
                               &link_sta->link_hash_node,
                               link_sta_rht_params);
}

static void __cleanup_single_sta(struct sta_info *sta)
{
        int ac, i;
        struct tid_ampdu_tx *tid_tx;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct ieee80211_local *local = sdata->local;
        struct ps_data *ps;

        if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
            test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
            test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
                if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
                    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                        ps = &sdata->bss->ps;
                else if (ieee80211_vif_is_mesh(&sdata->vif))
                        ps = &sdata->u.mesh.ps;
                else
                        return;

                clear_sta_flag(sta, WLAN_STA_PS_STA);
                clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
                clear_sta_flag(sta, WLAN_STA_PS_DELIVER);

                atomic_dec(&ps->num_sta_ps);
        }

        for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
                struct txq_info *txqi;

                if (!sta->sta.txq[i])
                        continue;

                txqi = to_txq_info(sta->sta.txq[i]);

                ieee80211_txq_purge(local, txqi);
        }

        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
                ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
                ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
        }

        if (ieee80211_vif_is_mesh(&sdata->vif))
                mesh_sta_cleanup(sta);

        cancel_work_sync(&sta->drv_deliver_wk);

        /*
         * Destroy aggregation state here. It would be nice to wait for the
         * driver to finish aggregation stop and then clean up, but for now
         * drivers have to handle aggregation stop being requested, followed
         * directly by station destruction.
170 */ 171 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 172 kfree(sta->ampdu_mlme.tid_start_tx[i]); 173 tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); 174 if (!tid_tx) 175 continue; 176 ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); 177 kfree(tid_tx); 178 } 179 } 180 181 static void cleanup_single_sta(struct sta_info *sta) 182 { 183 struct ieee80211_sub_if_data *sdata = sta->sdata; 184 struct ieee80211_local *local = sdata->local; 185 186 __cleanup_single_sta(sta); 187 sta_info_free(local, sta); 188 } 189 190 struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local, 191 const u8 *addr) 192 { 193 return rhltable_lookup(&local->sta_hash, addr, sta_rht_params); 194 } 195 196 /* protected by RCU */ 197 struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, 198 const u8 *addr) 199 { 200 struct ieee80211_local *local = sdata->local; 201 struct rhlist_head *tmp; 202 struct sta_info *sta; 203 204 rcu_read_lock(); 205 for_each_sta_info(local, addr, sta, tmp) { 206 if (sta->sdata == sdata) { 207 rcu_read_unlock(); 208 /* this is safe as the caller must already hold 209 * another rcu read section or the mutex 210 */ 211 return sta; 212 } 213 } 214 rcu_read_unlock(); 215 return NULL; 216 } 217 218 /* 219 * Get sta info either from the specified interface 220 * or from one of its vlans 221 */ 222 struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, 223 const u8 *addr) 224 { 225 struct ieee80211_local *local = sdata->local; 226 struct rhlist_head *tmp; 227 struct sta_info *sta; 228 229 rcu_read_lock(); 230 for_each_sta_info(local, addr, sta, tmp) { 231 if (sta->sdata == sdata || 232 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) { 233 rcu_read_unlock(); 234 /* this is safe as the caller must already hold 235 * another rcu read section or the mutex 236 */ 237 return sta; 238 } 239 } 240 rcu_read_unlock(); 241 return NULL; 242 } 243 244 struct rhlist_head *link_sta_info_hash_lookup(struct ieee80211_local *local, 245 const u8 *addr) 246 { 247 return rhltable_lookup(&local->link_sta_hash, addr, 248 link_sta_rht_params); 249 } 250 251 struct link_sta_info * 252 link_sta_info_get_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr) 253 { 254 struct ieee80211_local *local = sdata->local; 255 struct rhlist_head *tmp; 256 struct link_sta_info *link_sta; 257 258 rcu_read_lock(); 259 for_each_link_sta_info(local, addr, link_sta, tmp) { 260 struct sta_info *sta = link_sta->sta; 261 262 if (sta->sdata == sdata || 263 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) { 264 rcu_read_unlock(); 265 /* this is safe as the caller must already hold 266 * another rcu read section or the mutex 267 */ 268 return link_sta; 269 } 270 } 271 rcu_read_unlock(); 272 return NULL; 273 } 274 275 struct ieee80211_sta * 276 ieee80211_find_sta_by_link_addrs(struct ieee80211_hw *hw, 277 const u8 *addr, 278 const u8 *localaddr, 279 unsigned int *link_id) 280 { 281 struct ieee80211_local *local = hw_to_local(hw); 282 struct link_sta_info *link_sta; 283 struct rhlist_head *tmp; 284 285 for_each_link_sta_info(local, addr, link_sta, tmp) { 286 struct sta_info *sta = link_sta->sta; 287 struct ieee80211_link_data *link; 288 u8 _link_id = link_sta->link_id; 289 290 if (!localaddr) { 291 if (link_id) 292 *link_id = _link_id; 293 return &sta->sta; 294 } 295 296 link = rcu_dereference(sta->sdata->link[_link_id]); 297 if (!link) 298 continue; 299 300 if (memcmp(link->conf->addr, localaddr, ETH_ALEN)) 301 continue; 302 303 if (link_id) 304 *link_id = _link_id; 305 return 
&sta->sta; 306 } 307 308 return NULL; 309 } 310 EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_link_addrs); 311 312 struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local, 313 const u8 *sta_addr, const u8 *vif_addr) 314 { 315 struct rhlist_head *tmp; 316 struct sta_info *sta; 317 318 for_each_sta_info(local, sta_addr, sta, tmp) { 319 if (ether_addr_equal(vif_addr, sta->sdata->vif.addr)) 320 return sta; 321 } 322 323 return NULL; 324 } 325 326 struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, 327 int idx) 328 { 329 struct ieee80211_local *local = sdata->local; 330 struct sta_info *sta; 331 int i = 0; 332 333 list_for_each_entry_rcu(sta, &local->sta_list, list, 334 lockdep_is_held(&local->sta_mtx)) { 335 if (sdata != sta->sdata) 336 continue; 337 if (i < idx) { 338 ++i; 339 continue; 340 } 341 return sta; 342 } 343 344 return NULL; 345 } 346 347 static void sta_info_free_link(struct link_sta_info *link_sta) 348 { 349 free_percpu(link_sta->pcpu_rx_stats); 350 } 351 352 static void sta_remove_link(struct sta_info *sta, unsigned int link_id, 353 bool unhash) 354 { 355 struct sta_link_alloc *alloc = NULL; 356 struct link_sta_info *link_sta; 357 358 link_sta = rcu_access_pointer(sta->link[link_id]); 359 if (link_sta != &sta->deflink) 360 lockdep_assert_held(&sta->local->sta_mtx); 361 362 if (WARN_ON(!link_sta)) 363 return; 364 365 if (unhash) 366 link_sta_info_hash_del(sta->local, link_sta); 367 368 if (test_sta_flag(sta, WLAN_STA_INSERTED)) 369 ieee80211_link_sta_debugfs_remove(link_sta); 370 371 if (link_sta != &sta->deflink) 372 alloc = container_of(link_sta, typeof(*alloc), info); 373 374 sta->sta.valid_links &= ~BIT(link_id); 375 RCU_INIT_POINTER(sta->link[link_id], NULL); 376 RCU_INIT_POINTER(sta->sta.link[link_id], NULL); 377 if (alloc) { 378 sta_info_free_link(&alloc->info); 379 kfree_rcu(alloc, rcu_head); 380 } 381 382 ieee80211_sta_recalc_aggregates(&sta->sta); 383 } 384 385 /** 386 * sta_info_free - free STA 387 * 388 * @local: pointer to the global information 389 * @sta: STA info to free 390 * 391 * This function must undo everything done by sta_info_alloc() 392 * that may happen before sta_info_insert(). It may only be 393 * called when sta_info_insert() has not been attempted (and 394 * if that fails, the station is freed anyway.) 395 */ 396 void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) 397 { 398 int i; 399 400 for (i = 0; i < ARRAY_SIZE(sta->link); i++) { 401 struct link_sta_info *link_sta; 402 403 link_sta = rcu_access_pointer(sta->link[i]); 404 if (!link_sta) 405 continue; 406 407 sta_remove_link(sta, i, false); 408 } 409 410 /* 411 * If we had used sta_info_pre_move_state() then we might not 412 * have gone through the state transitions down again, so do 413 * it here now (and warn if it's inserted). 414 * 415 * This will clear state such as fast TX/RX that may have been 416 * allocated during state transitions. 
417 */ 418 while (sta->sta_state > IEEE80211_STA_NONE) { 419 int ret; 420 421 WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED)); 422 423 ret = sta_info_move_state(sta, sta->sta_state - 1); 424 if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret)) 425 break; 426 } 427 428 if (sta->rate_ctrl) 429 rate_control_free_sta(sta); 430 431 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr); 432 433 kfree(to_txq_info(sta->sta.txq[0])); 434 kfree(rcu_dereference_raw(sta->sta.rates)); 435 #ifdef CONFIG_MAC80211_MESH 436 kfree(sta->mesh); 437 #endif 438 439 sta_info_free_link(&sta->deflink); 440 kfree(sta); 441 } 442 443 /* Caller must hold local->sta_mtx */ 444 static int sta_info_hash_add(struct ieee80211_local *local, 445 struct sta_info *sta) 446 { 447 return rhltable_insert(&local->sta_hash, &sta->hash_node, 448 sta_rht_params); 449 } 450 451 static void sta_deliver_ps_frames(struct work_struct *wk) 452 { 453 struct sta_info *sta; 454 455 sta = container_of(wk, struct sta_info, drv_deliver_wk); 456 457 if (sta->dead) 458 return; 459 460 local_bh_disable(); 461 if (!test_sta_flag(sta, WLAN_STA_PS_STA)) 462 ieee80211_sta_ps_deliver_wakeup(sta); 463 else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) 464 ieee80211_sta_ps_deliver_poll_response(sta); 465 else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) 466 ieee80211_sta_ps_deliver_uapsd(sta); 467 local_bh_enable(); 468 } 469 470 static int sta_prepare_rate_control(struct ieee80211_local *local, 471 struct sta_info *sta, gfp_t gfp) 472 { 473 if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) 474 return 0; 475 476 sta->rate_ctrl = local->rate_ctrl; 477 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, 478 sta, gfp); 479 if (!sta->rate_ctrl_priv) 480 return -ENOMEM; 481 482 return 0; 483 } 484 485 static int sta_info_alloc_link(struct ieee80211_local *local, 486 struct link_sta_info *link_info, 487 gfp_t gfp) 488 { 489 struct ieee80211_hw *hw = &local->hw; 490 int i; 491 492 if (ieee80211_hw_check(hw, USES_RSS)) { 493 link_info->pcpu_rx_stats = 494 alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp); 495 if (!link_info->pcpu_rx_stats) 496 return -ENOMEM; 497 } 498 499 link_info->rx_stats.last_rx = jiffies; 500 u64_stats_init(&link_info->rx_stats.syncp); 501 502 ewma_signal_init(&link_info->rx_stats_avg.signal); 503 ewma_avg_signal_init(&link_info->status_stats.avg_ack_signal); 504 for (i = 0; i < ARRAY_SIZE(link_info->rx_stats_avg.chain_signal); i++) 505 ewma_signal_init(&link_info->rx_stats_avg.chain_signal[i]); 506 507 return 0; 508 } 509 510 static void sta_info_add_link(struct sta_info *sta, 511 unsigned int link_id, 512 struct link_sta_info *link_info, 513 struct ieee80211_link_sta *link_sta) 514 { 515 link_info->sta = sta; 516 link_info->link_id = link_id; 517 link_info->pub = link_sta; 518 link_info->pub->sta = &sta->sta; 519 link_sta->link_id = link_id; 520 rcu_assign_pointer(sta->link[link_id], link_info); 521 rcu_assign_pointer(sta->sta.link[link_id], link_sta); 522 523 link_sta->smps_mode = IEEE80211_SMPS_OFF; 524 link_sta->agg.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA; 525 } 526 527 static struct sta_info * 528 __sta_info_alloc(struct ieee80211_sub_if_data *sdata, 529 const u8 *addr, int link_id, const u8 *link_addr, 530 gfp_t gfp) 531 { 532 struct ieee80211_local *local = sdata->local; 533 struct ieee80211_hw *hw = &local->hw; 534 struct sta_info *sta; 535 void *txq_data; 536 int size; 537 int i; 538 539 sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); 540 if (!sta) 541 return NULL; 542 
543 sta->local = local; 544 sta->sdata = sdata; 545 546 if (sta_info_alloc_link(local, &sta->deflink, gfp)) 547 goto free; 548 549 if (link_id >= 0) { 550 sta_info_add_link(sta, link_id, &sta->deflink, 551 &sta->sta.deflink); 552 sta->sta.valid_links = BIT(link_id); 553 } else { 554 sta_info_add_link(sta, 0, &sta->deflink, &sta->sta.deflink); 555 } 556 557 sta->sta.cur = &sta->sta.deflink.agg; 558 559 spin_lock_init(&sta->lock); 560 spin_lock_init(&sta->ps_lock); 561 INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames); 562 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); 563 mutex_init(&sta->ampdu_mlme.mtx); 564 #ifdef CONFIG_MAC80211_MESH 565 if (ieee80211_vif_is_mesh(&sdata->vif)) { 566 sta->mesh = kzalloc(sizeof(*sta->mesh), gfp); 567 if (!sta->mesh) 568 goto free; 569 sta->mesh->plink_sta = sta; 570 spin_lock_init(&sta->mesh->plink_lock); 571 if (!sdata->u.mesh.user_mpm) 572 timer_setup(&sta->mesh->plink_timer, mesh_plink_timer, 573 0); 574 sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE; 575 } 576 #endif 577 578 memcpy(sta->addr, addr, ETH_ALEN); 579 memcpy(sta->sta.addr, addr, ETH_ALEN); 580 memcpy(sta->deflink.addr, link_addr, ETH_ALEN); 581 memcpy(sta->sta.deflink.addr, link_addr, ETH_ALEN); 582 sta->sta.max_rx_aggregation_subframes = 583 local->hw.max_rx_aggregation_subframes; 584 585 /* TODO link specific alloc and assignments for MLO Link STA */ 586 587 /* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only. 588 * The Tx path starts to use a key as soon as the key slot ptk_idx 589 * references to is not NULL. To not use the initial Rx-only key 590 * prematurely for Tx initialize ptk_idx to an impossible PTK keyid 591 * which always will refer to a NULL key. 592 */ 593 BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX); 594 sta->ptk_idx = INVALID_PTK_KEYIDX; 595 596 597 ieee80211_init_frag_cache(&sta->frags); 598 599 sta->sta_state = IEEE80211_STA_NONE; 600 601 if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 602 sta->amsdu_mesh_control = -1; 603 604 /* Mark TID as unreserved */ 605 sta->reserved_tid = IEEE80211_TID_UNRESERVED; 606 607 sta->last_connected = ktime_get_seconds(); 608 609 size = sizeof(struct txq_info) + 610 ALIGN(hw->txq_data_size, sizeof(void *)); 611 612 txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp); 613 if (!txq_data) 614 goto free; 615 616 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 617 struct txq_info *txq = txq_data + i * size; 618 619 /* might not do anything for the (bufferable) MMPDU TXQ */ 620 ieee80211_txq_init(sdata, sta, txq, i); 621 } 622 623 if (sta_prepare_rate_control(local, sta, gfp)) 624 goto free_txq; 625 626 sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT; 627 628 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 629 skb_queue_head_init(&sta->ps_tx_buf[i]); 630 skb_queue_head_init(&sta->tx_filtered[i]); 631 sta->airtime[i].deficit = sta->airtime_weight; 632 atomic_set(&sta->airtime[i].aql_tx_pending, 0); 633 sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i]; 634 sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i]; 635 } 636 637 for (i = 0; i < IEEE80211_NUM_TIDS; i++) 638 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); 639 640 for (i = 0; i < NUM_NL80211_BANDS; i++) { 641 u32 mandatory = 0; 642 int r; 643 644 if (!hw->wiphy->bands[i]) 645 continue; 646 647 switch (i) { 648 case NL80211_BAND_2GHZ: 649 case NL80211_BAND_LC: 650 /* 651 * We use both here, even if we cannot really know for 652 * sure the station will support both, but the only use 653 * for this is when we don't 
know anything yet and send 654 * management frames, and then we'll pick the lowest 655 * possible rate anyway. 656 * If we don't include _G here, we cannot find a rate 657 * in P2P, and thus trigger the WARN_ONCE() in rate.c 658 */ 659 mandatory = IEEE80211_RATE_MANDATORY_B | 660 IEEE80211_RATE_MANDATORY_G; 661 break; 662 case NL80211_BAND_5GHZ: 663 mandatory = IEEE80211_RATE_MANDATORY_A; 664 break; 665 case NL80211_BAND_60GHZ: 666 WARN_ON(1); 667 mandatory = 0; 668 break; 669 } 670 671 for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) { 672 struct ieee80211_rate *rate; 673 674 rate = &hw->wiphy->bands[i]->bitrates[r]; 675 676 if (!(rate->flags & mandatory)) 677 continue; 678 sta->sta.deflink.supp_rates[i] |= BIT(r); 679 } 680 } 681 682 sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD; 683 sta->cparams.target = MS2TIME(20); 684 sta->cparams.interval = MS2TIME(100); 685 sta->cparams.ecn = true; 686 sta->cparams.ce_threshold_selector = 0; 687 sta->cparams.ce_threshold_mask = 0; 688 689 sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); 690 691 return sta; 692 693 free_txq: 694 kfree(to_txq_info(sta->sta.txq[0])); 695 free: 696 sta_info_free_link(&sta->deflink); 697 #ifdef CONFIG_MAC80211_MESH 698 kfree(sta->mesh); 699 #endif 700 kfree(sta); 701 return NULL; 702 } 703 704 struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, 705 const u8 *addr, gfp_t gfp) 706 { 707 return __sta_info_alloc(sdata, addr, -1, addr, gfp); 708 } 709 710 struct sta_info *sta_info_alloc_with_link(struct ieee80211_sub_if_data *sdata, 711 const u8 *mld_addr, 712 unsigned int link_id, 713 const u8 *link_addr, 714 gfp_t gfp) 715 { 716 return __sta_info_alloc(sdata, mld_addr, link_id, link_addr, gfp); 717 } 718 719 static int sta_info_insert_check(struct sta_info *sta) 720 { 721 struct ieee80211_sub_if_data *sdata = sta->sdata; 722 723 /* 724 * Can't be a WARN_ON because it can be triggered through a race: 725 * something inserts a STA (on one CPU) without holding the RTNL 726 * and another CPU turns off the net device. 727 */ 728 if (unlikely(!ieee80211_sdata_running(sdata))) 729 return -ENETDOWN; 730 731 if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) || 732 !is_valid_ether_addr(sta->sta.addr))) 733 return -EINVAL; 734 735 /* The RCU read lock is required by rhashtable due to 736 * asynchronous resize/rehash. We also require the mutex 737 * for correctness. 738 */ 739 rcu_read_lock(); 740 lockdep_assert_held(&sdata->local->sta_mtx); 741 if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) && 742 ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) { 743 rcu_read_unlock(); 744 return -ENOTUNIQ; 745 } 746 rcu_read_unlock(); 747 748 return 0; 749 } 750 751 static int sta_info_insert_drv_state(struct ieee80211_local *local, 752 struct ieee80211_sub_if_data *sdata, 753 struct sta_info *sta) 754 { 755 enum ieee80211_sta_state state; 756 int err = 0; 757 758 for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) { 759 err = drv_sta_state(local, sdata, sta, state, state + 1); 760 if (err) 761 break; 762 } 763 764 if (!err) { 765 /* 766 * Drivers using legacy sta_add/sta_remove callbacks only 767 * get uploaded set to true after sta_add is called. 
768 */ 769 if (!local->ops->sta_add) 770 sta->uploaded = true; 771 return 0; 772 } 773 774 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 775 sdata_info(sdata, 776 "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n", 777 sta->sta.addr, state + 1, err); 778 err = 0; 779 } 780 781 /* unwind on error */ 782 for (; state > IEEE80211_STA_NOTEXIST; state--) 783 WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1)); 784 785 return err; 786 } 787 788 static void 789 ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata) 790 { 791 struct ieee80211_local *local = sdata->local; 792 bool allow_p2p_go_ps = sdata->vif.p2p; 793 struct sta_info *sta; 794 795 rcu_read_lock(); 796 list_for_each_entry_rcu(sta, &local->sta_list, list) { 797 if (sdata != sta->sdata || 798 !test_sta_flag(sta, WLAN_STA_ASSOC)) 799 continue; 800 if (!sta->sta.support_p2p_ps) { 801 allow_p2p_go_ps = false; 802 break; 803 } 804 } 805 rcu_read_unlock(); 806 807 if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) { 808 sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps; 809 ieee80211_link_info_change_notify(sdata, &sdata->deflink, 810 BSS_CHANGED_P2P_PS); 811 } 812 } 813 814 /* 815 * should be called with sta_mtx locked 816 * this function replaces the mutex lock 817 * with a RCU lock 818 */ 819 static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) 820 { 821 struct ieee80211_local *local = sta->local; 822 struct ieee80211_sub_if_data *sdata = sta->sdata; 823 struct station_info *sinfo = NULL; 824 int err = 0; 825 826 lockdep_assert_held(&local->sta_mtx); 827 828 /* check if STA exists already */ 829 if (sta_info_get_bss(sdata, sta->sta.addr)) { 830 err = -EEXIST; 831 goto out_cleanup; 832 } 833 834 sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL); 835 if (!sinfo) { 836 err = -ENOMEM; 837 goto out_cleanup; 838 } 839 840 local->num_sta++; 841 local->sta_generation++; 842 smp_mb(); 843 844 /* simplify things and don't accept BA sessions yet */ 845 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 846 847 /* make the station visible */ 848 err = sta_info_hash_add(local, sta); 849 if (err) 850 goto out_drop_sta; 851 852 if (sta->sta.valid_links) { 853 err = link_sta_info_hash_add(local, &sta->deflink); 854 if (err) { 855 sta_info_hash_del(local, sta); 856 goto out_drop_sta; 857 } 858 } 859 860 list_add_tail_rcu(&sta->list, &local->sta_list); 861 862 /* update channel context before notifying the driver about state 863 * change, this enables driver using the updated channel context right away. 
864 */ 865 if (sta->sta_state >= IEEE80211_STA_ASSOC) { 866 ieee80211_recalc_min_chandef(sta->sdata, -1); 867 if (!sta->sta.support_p2p_ps) 868 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 869 } 870 871 /* notify driver */ 872 err = sta_info_insert_drv_state(local, sdata, sta); 873 if (err) 874 goto out_remove; 875 876 set_sta_flag(sta, WLAN_STA_INSERTED); 877 878 /* accept BA sessions now */ 879 clear_sta_flag(sta, WLAN_STA_BLOCK_BA); 880 881 ieee80211_sta_debugfs_add(sta); 882 rate_control_add_sta_debugfs(sta); 883 if (sta->sta.valid_links) { 884 int i; 885 886 for (i = 0; i < ARRAY_SIZE(sta->link); i++) { 887 struct link_sta_info *link_sta; 888 889 link_sta = rcu_dereference_protected(sta->link[i], 890 lockdep_is_held(&local->sta_mtx)); 891 892 if (!link_sta) 893 continue; 894 895 ieee80211_link_sta_debugfs_add(link_sta); 896 if (sdata->vif.active_links & BIT(i)) 897 ieee80211_link_sta_debugfs_drv_add(link_sta); 898 } 899 } else { 900 ieee80211_link_sta_debugfs_add(&sta->deflink); 901 ieee80211_link_sta_debugfs_drv_add(&sta->deflink); 902 } 903 904 sinfo->generation = local->sta_generation; 905 cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); 906 kfree(sinfo); 907 908 sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr); 909 910 /* move reference to rcu-protected */ 911 rcu_read_lock(); 912 mutex_unlock(&local->sta_mtx); 913 914 if (ieee80211_vif_is_mesh(&sdata->vif)) 915 mesh_accept_plinks_update(sdata); 916 917 ieee80211_check_fast_xmit(sta); 918 919 return 0; 920 out_remove: 921 if (sta->sta.valid_links) 922 link_sta_info_hash_del(local, &sta->deflink); 923 sta_info_hash_del(local, sta); 924 list_del_rcu(&sta->list); 925 out_drop_sta: 926 local->num_sta--; 927 synchronize_net(); 928 out_cleanup: 929 cleanup_single_sta(sta); 930 mutex_unlock(&local->sta_mtx); 931 kfree(sinfo); 932 rcu_read_lock(); 933 return err; 934 } 935 936 int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) 937 { 938 struct ieee80211_local *local = sta->local; 939 int err; 940 941 might_sleep(); 942 943 mutex_lock(&local->sta_mtx); 944 945 err = sta_info_insert_check(sta); 946 if (err) { 947 sta_info_free(local, sta); 948 mutex_unlock(&local->sta_mtx); 949 rcu_read_lock(); 950 return err; 951 } 952 953 return sta_info_insert_finish(sta); 954 } 955 956 int sta_info_insert(struct sta_info *sta) 957 { 958 int err = sta_info_insert_rcu(sta); 959 960 rcu_read_unlock(); 961 962 return err; 963 } 964 965 static inline void __bss_tim_set(u8 *tim, u16 id) 966 { 967 /* 968 * This format has been mandated by the IEEE specifications, 969 * so this line may not be changed to use the __set_bit() format. 970 */ 971 tim[id / 8] |= (1 << (id % 8)); 972 } 973 974 static inline void __bss_tim_clear(u8 *tim, u16 id) 975 { 976 /* 977 * This format has been mandated by the IEEE specifications, 978 * so this line may not be changed to use the __clear_bit() format. 979 */ 980 tim[id / 8] &= ~(1 << (id % 8)); 981 } 982 983 static inline bool __bss_tim_get(u8 *tim, u16 id) 984 { 985 /* 986 * This format has been mandated by the IEEE specifications, 987 * so this line may not be changed to use the test_bit() format. 
988 */ 989 return tim[id / 8] & (1 << (id % 8)); 990 } 991 992 static unsigned long ieee80211_tids_for_ac(int ac) 993 { 994 /* If we ever support TIDs > 7, this obviously needs to be adjusted */ 995 switch (ac) { 996 case IEEE80211_AC_VO: 997 return BIT(6) | BIT(7); 998 case IEEE80211_AC_VI: 999 return BIT(4) | BIT(5); 1000 case IEEE80211_AC_BE: 1001 return BIT(0) | BIT(3); 1002 case IEEE80211_AC_BK: 1003 return BIT(1) | BIT(2); 1004 default: 1005 WARN_ON(1); 1006 return 0; 1007 } 1008 } 1009 1010 static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending) 1011 { 1012 struct ieee80211_local *local = sta->local; 1013 struct ps_data *ps; 1014 bool indicate_tim = false; 1015 u8 ignore_for_tim = sta->sta.uapsd_queues; 1016 int ac; 1017 u16 id = sta->sta.aid; 1018 1019 if (sta->sdata->vif.type == NL80211_IFTYPE_AP || 1020 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 1021 if (WARN_ON_ONCE(!sta->sdata->bss)) 1022 return; 1023 1024 ps = &sta->sdata->bss->ps; 1025 #ifdef CONFIG_MAC80211_MESH 1026 } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) { 1027 ps = &sta->sdata->u.mesh.ps; 1028 #endif 1029 } else { 1030 return; 1031 } 1032 1033 /* No need to do anything if the driver does all */ 1034 if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim) 1035 return; 1036 1037 if (sta->dead) 1038 goto done; 1039 1040 /* 1041 * If all ACs are delivery-enabled then we should build 1042 * the TIM bit for all ACs anyway; if only some are then 1043 * we ignore those and build the TIM bit using only the 1044 * non-enabled ones. 1045 */ 1046 if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1) 1047 ignore_for_tim = 0; 1048 1049 if (ignore_pending) 1050 ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1; 1051 1052 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1053 unsigned long tids; 1054 1055 if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac]) 1056 continue; 1057 1058 indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) || 1059 !skb_queue_empty(&sta->ps_tx_buf[ac]); 1060 if (indicate_tim) 1061 break; 1062 1063 tids = ieee80211_tids_for_ac(ac); 1064 1065 indicate_tim |= 1066 sta->driver_buffered_tids & tids; 1067 indicate_tim |= 1068 sta->txq_buffered_tids & tids; 1069 } 1070 1071 done: 1072 spin_lock_bh(&local->tim_lock); 1073 1074 if (indicate_tim == __bss_tim_get(ps->tim, id)) 1075 goto out_unlock; 1076 1077 if (indicate_tim) 1078 __bss_tim_set(ps->tim, id); 1079 else 1080 __bss_tim_clear(ps->tim, id); 1081 1082 if (local->ops->set_tim && !WARN_ON(sta->dead)) { 1083 local->tim_in_locked_section = true; 1084 drv_set_tim(local, &sta->sta, indicate_tim); 1085 local->tim_in_locked_section = false; 1086 } 1087 1088 out_unlock: 1089 spin_unlock_bh(&local->tim_lock); 1090 } 1091 1092 void sta_info_recalc_tim(struct sta_info *sta) 1093 { 1094 __sta_info_recalc_tim(sta, false); 1095 } 1096 1097 static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb) 1098 { 1099 struct ieee80211_tx_info *info; 1100 int timeout; 1101 1102 if (!skb) 1103 return false; 1104 1105 info = IEEE80211_SKB_CB(skb); 1106 1107 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ 1108 timeout = (sta->listen_interval * 1109 sta->sdata->vif.bss_conf.beacon_int * 1110 32 / 15625) * HZ; 1111 if (timeout < STA_TX_BUFFER_EXPIRE) 1112 timeout = STA_TX_BUFFER_EXPIRE; 1113 return time_after(jiffies, info->control.jiffies + timeout); 1114 } 1115 1116 1117 static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local, 1118 struct sta_info *sta, int ac) 1119 { 1120 unsigned 
long flags; 1121 struct sk_buff *skb; 1122 1123 /* 1124 * First check for frames that should expire on the filtered 1125 * queue. Frames here were rejected by the driver and are on 1126 * a separate queue to avoid reordering with normal PS-buffered 1127 * frames. They also aren't accounted for right now in the 1128 * total_ps_buffered counter. 1129 */ 1130 for (;;) { 1131 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); 1132 skb = skb_peek(&sta->tx_filtered[ac]); 1133 if (sta_info_buffer_expired(sta, skb)) 1134 skb = __skb_dequeue(&sta->tx_filtered[ac]); 1135 else 1136 skb = NULL; 1137 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); 1138 1139 /* 1140 * Frames are queued in order, so if this one 1141 * hasn't expired yet we can stop testing. If 1142 * we actually reached the end of the queue we 1143 * also need to stop, of course. 1144 */ 1145 if (!skb) 1146 break; 1147 ieee80211_free_txskb(&local->hw, skb); 1148 } 1149 1150 /* 1151 * Now also check the normal PS-buffered queue, this will 1152 * only find something if the filtered queue was emptied 1153 * since the filtered frames are all before the normal PS 1154 * buffered frames. 1155 */ 1156 for (;;) { 1157 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); 1158 skb = skb_peek(&sta->ps_tx_buf[ac]); 1159 if (sta_info_buffer_expired(sta, skb)) 1160 skb = __skb_dequeue(&sta->ps_tx_buf[ac]); 1161 else 1162 skb = NULL; 1163 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); 1164 1165 /* 1166 * frames are queued in order, so if this one 1167 * hasn't expired yet (or we reached the end of 1168 * the queue) we can stop testing 1169 */ 1170 if (!skb) 1171 break; 1172 1173 local->total_ps_buffered--; 1174 ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n", 1175 sta->sta.addr); 1176 ieee80211_free_txskb(&local->hw, skb); 1177 } 1178 1179 /* 1180 * Finally, recalculate the TIM bit for this station -- it might 1181 * now be clear because the station was too slow to retrieve its 1182 * frames. 1183 */ 1184 sta_info_recalc_tim(sta); 1185 1186 /* 1187 * Return whether there are any frames still buffered, this is 1188 * used to check whether the cleanup timer still needs to run, 1189 * if there are no frames we don't need to rearm the timer. 1190 */ 1191 return !(skb_queue_empty(&sta->ps_tx_buf[ac]) && 1192 skb_queue_empty(&sta->tx_filtered[ac])); 1193 } 1194 1195 static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local, 1196 struct sta_info *sta) 1197 { 1198 bool have_buffered = false; 1199 int ac; 1200 1201 /* This is only necessary for stations on BSS/MBSS interfaces */ 1202 if (!sta->sdata->bss && 1203 !ieee80211_vif_is_mesh(&sta->sdata->vif)) 1204 return false; 1205 1206 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 1207 have_buffered |= 1208 sta_info_cleanup_expire_buffered_ac(local, sta, ac); 1209 1210 return have_buffered; 1211 } 1212 1213 static int __must_check __sta_info_destroy_part1(struct sta_info *sta) 1214 { 1215 struct ieee80211_local *local; 1216 struct ieee80211_sub_if_data *sdata; 1217 int ret, i; 1218 1219 might_sleep(); 1220 1221 if (!sta) 1222 return -ENOENT; 1223 1224 local = sta->local; 1225 sdata = sta->sdata; 1226 1227 lockdep_assert_held(&local->sta_mtx); 1228 1229 /* 1230 * Before removing the station from the driver and 1231 * rate control, it might still start new aggregation 1232 * sessions -- block that to make sure the tear-down 1233 * will be sufficient. 
1234 */ 1235 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 1236 ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA); 1237 1238 /* 1239 * Before removing the station from the driver there might be pending 1240 * rx frames on RSS queues sent prior to the disassociation - wait for 1241 * all such frames to be processed. 1242 */ 1243 drv_sync_rx_queues(local, sta); 1244 1245 for (i = 0; i < ARRAY_SIZE(sta->link); i++) { 1246 struct link_sta_info *link_sta; 1247 1248 if (!(sta->sta.valid_links & BIT(i))) 1249 continue; 1250 1251 link_sta = rcu_dereference_protected(sta->link[i], 1252 lockdep_is_held(&local->sta_mtx)); 1253 1254 link_sta_info_hash_del(local, link_sta); 1255 } 1256 1257 ret = sta_info_hash_del(local, sta); 1258 if (WARN_ON(ret)) 1259 return ret; 1260 1261 /* 1262 * for TDLS peers, make sure to return to the base channel before 1263 * removal. 1264 */ 1265 if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) { 1266 drv_tdls_cancel_channel_switch(local, sdata, &sta->sta); 1267 clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); 1268 } 1269 1270 list_del_rcu(&sta->list); 1271 sta->removed = true; 1272 1273 if (sta->uploaded) 1274 drv_sta_pre_rcu_remove(local, sta->sdata, sta); 1275 1276 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1277 rcu_access_pointer(sdata->u.vlan.sta) == sta) 1278 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); 1279 1280 return 0; 1281 } 1282 1283 static int _sta_info_move_state(struct sta_info *sta, 1284 enum ieee80211_sta_state new_state, 1285 bool recalc) 1286 { 1287 struct ieee80211_local *local = sta->local; 1288 1289 might_sleep(); 1290 1291 if (sta->sta_state == new_state) 1292 return 0; 1293 1294 /* check allowed transitions first */ 1295 1296 switch (new_state) { 1297 case IEEE80211_STA_NONE: 1298 if (sta->sta_state != IEEE80211_STA_AUTH) 1299 return -EINVAL; 1300 break; 1301 case IEEE80211_STA_AUTH: 1302 if (sta->sta_state != IEEE80211_STA_NONE && 1303 sta->sta_state != IEEE80211_STA_ASSOC) 1304 return -EINVAL; 1305 break; 1306 case IEEE80211_STA_ASSOC: 1307 if (sta->sta_state != IEEE80211_STA_AUTH && 1308 sta->sta_state != IEEE80211_STA_AUTHORIZED) 1309 return -EINVAL; 1310 break; 1311 case IEEE80211_STA_AUTHORIZED: 1312 if (sta->sta_state != IEEE80211_STA_ASSOC) 1313 return -EINVAL; 1314 break; 1315 default: 1316 WARN(1, "invalid state %d", new_state); 1317 return -EINVAL; 1318 } 1319 1320 sta_dbg(sta->sdata, "moving STA %pM to state %d\n", 1321 sta->sta.addr, new_state); 1322 1323 /* notify the driver before the actual changes so it can 1324 * fail the transition 1325 */ 1326 if (test_sta_flag(sta, WLAN_STA_INSERTED)) { 1327 int err = drv_sta_state(sta->local, sta->sdata, sta, 1328 sta->sta_state, new_state); 1329 if (err) 1330 return err; 1331 } 1332 1333 /* reflect the change in all state variables */ 1334 1335 switch (new_state) { 1336 case IEEE80211_STA_NONE: 1337 if (sta->sta_state == IEEE80211_STA_AUTH) 1338 clear_bit(WLAN_STA_AUTH, &sta->_flags); 1339 break; 1340 case IEEE80211_STA_AUTH: 1341 if (sta->sta_state == IEEE80211_STA_NONE) { 1342 set_bit(WLAN_STA_AUTH, &sta->_flags); 1343 } else if (sta->sta_state == IEEE80211_STA_ASSOC) { 1344 clear_bit(WLAN_STA_ASSOC, &sta->_flags); 1345 if (recalc) { 1346 ieee80211_recalc_min_chandef(sta->sdata, -1); 1347 if (!sta->sta.support_p2p_ps) 1348 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 1349 } 1350 } 1351 break; 1352 case IEEE80211_STA_ASSOC: 1353 if (sta->sta_state == IEEE80211_STA_AUTH) { 1354 set_bit(WLAN_STA_ASSOC, &sta->_flags); 1355 sta->assoc_at = ktime_get_boottime_ns(); 1356 if (recalc) { 
1357 ieee80211_recalc_min_chandef(sta->sdata, -1); 1358 if (!sta->sta.support_p2p_ps) 1359 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 1360 } 1361 } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 1362 ieee80211_vif_dec_num_mcast(sta->sdata); 1363 clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 1364 1365 /* 1366 * If we have encryption offload, flush (station) queues 1367 * (after ensuring concurrent TX completed) so we won't 1368 * transmit anything later unencrypted if/when keys are 1369 * also removed, which might otherwise happen depending 1370 * on how the hardware offload works. 1371 */ 1372 if (local->ops->set_key) { 1373 synchronize_net(); 1374 if (local->ops->flush_sta) 1375 drv_flush_sta(local, sta->sdata, sta); 1376 else 1377 ieee80211_flush_queues(local, 1378 sta->sdata, 1379 false); 1380 } 1381 1382 ieee80211_clear_fast_xmit(sta); 1383 ieee80211_clear_fast_rx(sta); 1384 } 1385 break; 1386 case IEEE80211_STA_AUTHORIZED: 1387 if (sta->sta_state == IEEE80211_STA_ASSOC) { 1388 ieee80211_vif_inc_num_mcast(sta->sdata); 1389 set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 1390 ieee80211_check_fast_xmit(sta); 1391 ieee80211_check_fast_rx(sta); 1392 } 1393 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 1394 sta->sdata->vif.type == NL80211_IFTYPE_AP) 1395 cfg80211_send_layer2_update(sta->sdata->dev, 1396 sta->sta.addr); 1397 break; 1398 default: 1399 break; 1400 } 1401 1402 sta->sta_state = new_state; 1403 1404 return 0; 1405 } 1406 1407 int sta_info_move_state(struct sta_info *sta, 1408 enum ieee80211_sta_state new_state) 1409 { 1410 return _sta_info_move_state(sta, new_state, true); 1411 } 1412 1413 static void __sta_info_destroy_part2(struct sta_info *sta, bool recalc) 1414 { 1415 struct ieee80211_local *local = sta->local; 1416 struct ieee80211_sub_if_data *sdata = sta->sdata; 1417 struct station_info *sinfo; 1418 int ret; 1419 1420 /* 1421 * NOTE: This assumes at least synchronize_net() was done 1422 * after _part1 and before _part2! 1423 */ 1424 1425 /* 1426 * There's a potential race in _part1 where we set WLAN_STA_BLOCK_BA 1427 * but someone might have just gotten past a check, and not yet into 1428 * queuing the work/creating the data/etc. 1429 * 1430 * Do another round of destruction so that the worker is certainly 1431 * canceled before we later free the station. 1432 * 1433 * Since this is after synchronize_rcu()/synchronize_net() we're now 1434 * certain that nobody can actually hold a reference to the STA and 1435 * be calling e.g. ieee80211_start_tx_ba_session(). 
1436 */ 1437 ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA); 1438 1439 might_sleep(); 1440 lockdep_assert_held(&local->sta_mtx); 1441 1442 if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 1443 ret = _sta_info_move_state(sta, IEEE80211_STA_ASSOC, recalc); 1444 WARN_ON_ONCE(ret); 1445 } 1446 1447 /* now keys can no longer be reached */ 1448 ieee80211_free_sta_keys(local, sta); 1449 1450 /* disable TIM bit - last chance to tell driver */ 1451 __sta_info_recalc_tim(sta, true); 1452 1453 sta->dead = true; 1454 1455 local->num_sta--; 1456 local->sta_generation++; 1457 1458 while (sta->sta_state > IEEE80211_STA_NONE) { 1459 ret = _sta_info_move_state(sta, sta->sta_state - 1, recalc); 1460 if (ret) { 1461 WARN_ON_ONCE(1); 1462 break; 1463 } 1464 } 1465 1466 if (sta->uploaded) { 1467 ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE, 1468 IEEE80211_STA_NOTEXIST); 1469 WARN_ON_ONCE(ret != 0); 1470 } 1471 1472 sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr); 1473 1474 sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); 1475 if (sinfo) 1476 sta_set_sinfo(sta, sinfo, true); 1477 cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); 1478 kfree(sinfo); 1479 1480 ieee80211_sta_debugfs_remove(sta); 1481 1482 ieee80211_destroy_frag_cache(&sta->frags); 1483 1484 cleanup_single_sta(sta); 1485 } 1486 1487 int __must_check __sta_info_destroy(struct sta_info *sta) 1488 { 1489 int err = __sta_info_destroy_part1(sta); 1490 1491 if (err) 1492 return err; 1493 1494 synchronize_net(); 1495 1496 __sta_info_destroy_part2(sta, true); 1497 1498 return 0; 1499 } 1500 1501 int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr) 1502 { 1503 struct sta_info *sta; 1504 int ret; 1505 1506 mutex_lock(&sdata->local->sta_mtx); 1507 sta = sta_info_get(sdata, addr); 1508 ret = __sta_info_destroy(sta); 1509 mutex_unlock(&sdata->local->sta_mtx); 1510 1511 return ret; 1512 } 1513 1514 int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, 1515 const u8 *addr) 1516 { 1517 struct sta_info *sta; 1518 int ret; 1519 1520 mutex_lock(&sdata->local->sta_mtx); 1521 sta = sta_info_get_bss(sdata, addr); 1522 ret = __sta_info_destroy(sta); 1523 mutex_unlock(&sdata->local->sta_mtx); 1524 1525 return ret; 1526 } 1527 1528 static void sta_info_cleanup(struct timer_list *t) 1529 { 1530 struct ieee80211_local *local = from_timer(local, t, sta_cleanup); 1531 struct sta_info *sta; 1532 bool timer_needed = false; 1533 1534 rcu_read_lock(); 1535 list_for_each_entry_rcu(sta, &local->sta_list, list) 1536 if (sta_info_cleanup_expire_buffered(local, sta)) 1537 timer_needed = true; 1538 rcu_read_unlock(); 1539 1540 if (local->quiescing) 1541 return; 1542 1543 if (!timer_needed) 1544 return; 1545 1546 mod_timer(&local->sta_cleanup, 1547 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL)); 1548 } 1549 1550 int sta_info_init(struct ieee80211_local *local) 1551 { 1552 int err; 1553 1554 err = rhltable_init(&local->sta_hash, &sta_rht_params); 1555 if (err) 1556 return err; 1557 1558 err = rhltable_init(&local->link_sta_hash, &link_sta_rht_params); 1559 if (err) { 1560 rhltable_destroy(&local->sta_hash); 1561 return err; 1562 } 1563 1564 spin_lock_init(&local->tim_lock); 1565 mutex_init(&local->sta_mtx); 1566 INIT_LIST_HEAD(&local->sta_list); 1567 1568 timer_setup(&local->sta_cleanup, sta_info_cleanup, 0); 1569 return 0; 1570 } 1571 1572 void sta_info_stop(struct ieee80211_local *local) 1573 { 1574 del_timer_sync(&local->sta_cleanup); 1575 rhltable_destroy(&local->sta_hash); 1576 
rhltable_destroy(&local->link_sta_hash); 1577 } 1578 1579 1580 int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans) 1581 { 1582 struct ieee80211_local *local = sdata->local; 1583 struct sta_info *sta, *tmp; 1584 LIST_HEAD(free_list); 1585 int ret = 0; 1586 1587 might_sleep(); 1588 1589 WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP); 1590 WARN_ON(vlans && !sdata->bss); 1591 1592 mutex_lock(&local->sta_mtx); 1593 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 1594 if (sdata == sta->sdata || 1595 (vlans && sdata->bss == sta->sdata->bss)) { 1596 if (!WARN_ON(__sta_info_destroy_part1(sta))) 1597 list_add(&sta->free_list, &free_list); 1598 ret++; 1599 } 1600 } 1601 1602 if (!list_empty(&free_list)) { 1603 bool support_p2p_ps = true; 1604 1605 synchronize_net(); 1606 list_for_each_entry_safe(sta, tmp, &free_list, free_list) { 1607 if (!sta->sta.support_p2p_ps) 1608 support_p2p_ps = false; 1609 __sta_info_destroy_part2(sta, false); 1610 } 1611 1612 ieee80211_recalc_min_chandef(sdata, -1); 1613 if (!support_p2p_ps) 1614 ieee80211_recalc_p2p_go_ps_allowed(sdata); 1615 } 1616 mutex_unlock(&local->sta_mtx); 1617 1618 return ret; 1619 } 1620 1621 void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 1622 unsigned long exp_time) 1623 { 1624 struct ieee80211_local *local = sdata->local; 1625 struct sta_info *sta, *tmp; 1626 1627 mutex_lock(&local->sta_mtx); 1628 1629 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 1630 unsigned long last_active = ieee80211_sta_last_active(sta); 1631 1632 if (sdata != sta->sdata) 1633 continue; 1634 1635 if (time_is_before_jiffies(last_active + exp_time)) { 1636 sta_dbg(sta->sdata, "expiring inactive STA %pM\n", 1637 sta->sta.addr); 1638 1639 if (ieee80211_vif_is_mesh(&sdata->vif) && 1640 test_sta_flag(sta, WLAN_STA_PS_STA)) 1641 atomic_dec(&sdata->u.mesh.ps.num_sta_ps); 1642 1643 WARN_ON(__sta_info_destroy(sta)); 1644 } 1645 } 1646 1647 mutex_unlock(&local->sta_mtx); 1648 } 1649 1650 struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, 1651 const u8 *addr, 1652 const u8 *localaddr) 1653 { 1654 struct ieee80211_local *local = hw_to_local(hw); 1655 struct rhlist_head *tmp; 1656 struct sta_info *sta; 1657 1658 /* 1659 * Just return a random station if localaddr is NULL 1660 * ... first in list. 
1661 */ 1662 for_each_sta_info(local, addr, sta, tmp) { 1663 if (localaddr && 1664 !ether_addr_equal(sta->sdata->vif.addr, localaddr)) 1665 continue; 1666 if (!sta->uploaded) 1667 return NULL; 1668 return &sta->sta; 1669 } 1670 1671 return NULL; 1672 } 1673 EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr); 1674 1675 struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, 1676 const u8 *addr) 1677 { 1678 struct sta_info *sta; 1679 1680 if (!vif) 1681 return NULL; 1682 1683 sta = sta_info_get_bss(vif_to_sdata(vif), addr); 1684 if (!sta) 1685 return NULL; 1686 1687 if (!sta->uploaded) 1688 return NULL; 1689 1690 return &sta->sta; 1691 } 1692 EXPORT_SYMBOL(ieee80211_find_sta); 1693 1694 /* powersave support code */ 1695 void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) 1696 { 1697 struct ieee80211_sub_if_data *sdata = sta->sdata; 1698 struct ieee80211_local *local = sdata->local; 1699 struct sk_buff_head pending; 1700 int filtered = 0, buffered = 0, ac, i; 1701 unsigned long flags; 1702 struct ps_data *ps; 1703 1704 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1705 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 1706 u.ap); 1707 1708 if (sdata->vif.type == NL80211_IFTYPE_AP) 1709 ps = &sdata->bss->ps; 1710 else if (ieee80211_vif_is_mesh(&sdata->vif)) 1711 ps = &sdata->u.mesh.ps; 1712 else 1713 return; 1714 1715 clear_sta_flag(sta, WLAN_STA_SP); 1716 1717 BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1); 1718 sta->driver_buffered_tids = 0; 1719 sta->txq_buffered_tids = 0; 1720 1721 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 1722 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); 1723 1724 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 1725 if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i])) 1726 continue; 1727 1728 schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i])); 1729 } 1730 1731 skb_queue_head_init(&pending); 1732 1733 /* sync with ieee80211_tx_h_unicast_ps_buf */ 1734 spin_lock_bh(&sta->ps_lock); 1735 /* Send all buffered frames to the station */ 1736 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1737 int count = skb_queue_len(&pending), tmp; 1738 1739 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); 1740 skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); 1741 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); 1742 tmp = skb_queue_len(&pending); 1743 filtered += tmp - count; 1744 count = tmp; 1745 1746 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); 1747 skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); 1748 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); 1749 tmp = skb_queue_len(&pending); 1750 buffered += tmp - count; 1751 } 1752 1753 ieee80211_add_pending_skbs(local, &pending); 1754 1755 /* now we're no longer in the deliver code */ 1756 clear_sta_flag(sta, WLAN_STA_PS_DELIVER); 1757 1758 /* The station might have polled and then woken up before we responded, 1759 * so clear these flags now to avoid them sticking around. 
1760 */ 1761 clear_sta_flag(sta, WLAN_STA_PSPOLL); 1762 clear_sta_flag(sta, WLAN_STA_UAPSD); 1763 spin_unlock_bh(&sta->ps_lock); 1764 1765 atomic_dec(&ps->num_sta_ps); 1766 1767 local->total_ps_buffered -= buffered; 1768 1769 sta_info_recalc_tim(sta); 1770 1771 ps_dbg(sdata, 1772 "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n", 1773 sta->sta.addr, sta->sta.aid, filtered, buffered); 1774 1775 ieee80211_check_fast_xmit(sta); 1776 } 1777 1778 static void ieee80211_send_null_response(struct sta_info *sta, int tid, 1779 enum ieee80211_frame_release_type reason, 1780 bool call_driver, bool more_data) 1781 { 1782 struct ieee80211_sub_if_data *sdata = sta->sdata; 1783 struct ieee80211_local *local = sdata->local; 1784 struct ieee80211_qos_hdr *nullfunc; 1785 struct sk_buff *skb; 1786 int size = sizeof(*nullfunc); 1787 __le16 fc; 1788 bool qos = sta->sta.wme; 1789 struct ieee80211_tx_info *info; 1790 struct ieee80211_chanctx_conf *chanctx_conf; 1791 1792 if (qos) { 1793 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | 1794 IEEE80211_STYPE_QOS_NULLFUNC | 1795 IEEE80211_FCTL_FROMDS); 1796 } else { 1797 size -= 2; 1798 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | 1799 IEEE80211_STYPE_NULLFUNC | 1800 IEEE80211_FCTL_FROMDS); 1801 } 1802 1803 skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); 1804 if (!skb) 1805 return; 1806 1807 skb_reserve(skb, local->hw.extra_tx_headroom); 1808 1809 nullfunc = skb_put(skb, size); 1810 nullfunc->frame_control = fc; 1811 nullfunc->duration_id = 0; 1812 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); 1813 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); 1814 memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); 1815 nullfunc->seq_ctrl = 0; 1816 1817 skb->priority = tid; 1818 skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); 1819 if (qos) { 1820 nullfunc->qos_ctrl = cpu_to_le16(tid); 1821 1822 if (reason == IEEE80211_FRAME_RELEASE_UAPSD) { 1823 nullfunc->qos_ctrl |= 1824 cpu_to_le16(IEEE80211_QOS_CTL_EOSP); 1825 if (more_data) 1826 nullfunc->frame_control |= 1827 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1828 } 1829 } 1830 1831 info = IEEE80211_SKB_CB(skb); 1832 1833 /* 1834 * Tell TX path to send this frame even though the 1835 * STA may still remain is PS mode after this frame 1836 * exchange. Also set EOSP to indicate this packet 1837 * ends the poll/service period. 1838 */ 1839 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | 1840 IEEE80211_TX_STATUS_EOSP | 1841 IEEE80211_TX_CTL_REQ_TX_STATUS; 1842 1843 info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; 1844 1845 if (call_driver) 1846 drv_allow_buffered_frames(local, sta, BIT(tid), 1, 1847 reason, false); 1848 1849 skb->dev = sdata->dev; 1850 1851 rcu_read_lock(); 1852 chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); 1853 if (WARN_ON(!chanctx_conf)) { 1854 rcu_read_unlock(); 1855 kfree_skb(skb); 1856 return; 1857 } 1858 1859 info->band = chanctx_conf->def.chan->band; 1860 ieee80211_xmit(sdata, sta, skb); 1861 rcu_read_unlock(); 1862 } 1863 1864 static int find_highest_prio_tid(unsigned long tids) 1865 { 1866 /* lower 3 TIDs aren't ordered perfectly */ 1867 if (tids & 0xF8) 1868 return fls(tids) - 1; 1869 /* TID 0 is BE just like TID 3 */ 1870 if (tids & BIT(0)) 1871 return 0; 1872 return fls(tids) - 1; 1873 } 1874 1875 /* Indicates if the MORE_DATA bit should be set in the last 1876 * frame obtained by ieee80211_sta_ps_get_frames. 
1877 * Note that driver_release_tids is relevant only if 1878 * reason = IEEE80211_FRAME_RELEASE_PSPOLL 1879 */ 1880 static bool 1881 ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs, 1882 enum ieee80211_frame_release_type reason, 1883 unsigned long driver_release_tids) 1884 { 1885 int ac; 1886 1887 /* If the driver has data on more than one TID then 1888 * certainly there's more data if we release just a 1889 * single frame now (from a single TID). This will 1890 * only happen for PS-Poll. 1891 */ 1892 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL && 1893 hweight16(driver_release_tids) > 1) 1894 return true; 1895 1896 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1897 if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) 1898 continue; 1899 1900 if (!skb_queue_empty(&sta->tx_filtered[ac]) || 1901 !skb_queue_empty(&sta->ps_tx_buf[ac])) 1902 return true; 1903 } 1904 1905 return false; 1906 } 1907 1908 static void 1909 ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs, 1910 enum ieee80211_frame_release_type reason, 1911 struct sk_buff_head *frames, 1912 unsigned long *driver_release_tids) 1913 { 1914 struct ieee80211_sub_if_data *sdata = sta->sdata; 1915 struct ieee80211_local *local = sdata->local; 1916 int ac; 1917 1918 /* Get response frame(s) and more data bit for the last one. */ 1919 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1920 unsigned long tids; 1921 1922 if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) 1923 continue; 1924 1925 tids = ieee80211_tids_for_ac(ac); 1926 1927 /* if we already have frames from software, then we can't also 1928 * release from hardware queues 1929 */ 1930 if (skb_queue_empty(frames)) { 1931 *driver_release_tids |= 1932 sta->driver_buffered_tids & tids; 1933 *driver_release_tids |= sta->txq_buffered_tids & tids; 1934 } 1935 1936 if (!*driver_release_tids) { 1937 struct sk_buff *skb; 1938 1939 while (n_frames > 0) { 1940 skb = skb_dequeue(&sta->tx_filtered[ac]); 1941 if (!skb) { 1942 skb = skb_dequeue( 1943 &sta->ps_tx_buf[ac]); 1944 if (skb) 1945 local->total_ps_buffered--; 1946 } 1947 if (!skb) 1948 break; 1949 n_frames--; 1950 __skb_queue_tail(frames, skb); 1951 } 1952 } 1953 1954 /* If we have more frames buffered on this AC, then abort the 1955 * loop since we can't send more data from other ACs before 1956 * the buffered frames from this. 
1957 */ 1958 if (!skb_queue_empty(&sta->tx_filtered[ac]) || 1959 !skb_queue_empty(&sta->ps_tx_buf[ac])) 1960 break; 1961 } 1962 } 1963 1964 static void 1965 ieee80211_sta_ps_deliver_response(struct sta_info *sta, 1966 int n_frames, u8 ignored_acs, 1967 enum ieee80211_frame_release_type reason) 1968 { 1969 struct ieee80211_sub_if_data *sdata = sta->sdata; 1970 struct ieee80211_local *local = sdata->local; 1971 unsigned long driver_release_tids = 0; 1972 struct sk_buff_head frames; 1973 bool more_data; 1974 1975 /* Service or PS-Poll period starts */ 1976 set_sta_flag(sta, WLAN_STA_SP); 1977 1978 __skb_queue_head_init(&frames); 1979 1980 ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason, 1981 &frames, &driver_release_tids); 1982 1983 more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids); 1984 1985 if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL) 1986 driver_release_tids = 1987 BIT(find_highest_prio_tid(driver_release_tids)); 1988 1989 if (skb_queue_empty(&frames) && !driver_release_tids) { 1990 int tid, ac; 1991 1992 /* 1993 * For PS-Poll, this can only happen due to a race condition 1994 * when we set the TIM bit and the station notices it, but 1995 * before it can poll for the frame we expire it. 1996 * 1997 * For uAPSD, this is said in the standard (11.2.1.5 h): 1998 * At each unscheduled SP for a non-AP STA, the AP shall 1999 * attempt to transmit at least one MSDU or MMPDU, but no 2000 * more than the value specified in the Max SP Length field 2001 * in the QoS Capability element from delivery-enabled ACs, 2002 * that are destined for the non-AP STA. 2003 * 2004 * Since we have no other MSDU/MMPDU, transmit a QoS null frame. 2005 */ 2006 2007 /* This will evaluate to 1, 3, 5 or 7. */ 2008 for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) 2009 if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac])) 2010 break; 2011 tid = 7 - 2 * ac; 2012 2013 ieee80211_send_null_response(sta, tid, reason, true, false); 2014 } else if (!driver_release_tids) { 2015 struct sk_buff_head pending; 2016 struct sk_buff *skb; 2017 int num = 0; 2018 u16 tids = 0; 2019 bool need_null = false; 2020 2021 skb_queue_head_init(&pending); 2022 2023 while ((skb = __skb_dequeue(&frames))) { 2024 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2025 struct ieee80211_hdr *hdr = (void *) skb->data; 2026 u8 *qoshdr = NULL; 2027 2028 num++; 2029 2030 /* 2031 * Tell TX path to send this frame even though the 2032 * STA may still remain is PS mode after this frame 2033 * exchange. 
2034 */ 2035 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; 2036 info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; 2037 2038 /* 2039 * Use MoreData flag to indicate whether there are 2040 * more buffered frames for this STA 2041 */ 2042 if (more_data || !skb_queue_empty(&frames)) 2043 hdr->frame_control |= 2044 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 2045 else 2046 hdr->frame_control &= 2047 cpu_to_le16(~IEEE80211_FCTL_MOREDATA); 2048 2049 if (ieee80211_is_data_qos(hdr->frame_control) || 2050 ieee80211_is_qos_nullfunc(hdr->frame_control)) 2051 qoshdr = ieee80211_get_qos_ctl(hdr); 2052 2053 tids |= BIT(skb->priority); 2054 2055 __skb_queue_tail(&pending, skb); 2056 2057 /* end service period after last frame or add one */ 2058 if (!skb_queue_empty(&frames)) 2059 continue; 2060 2061 if (reason != IEEE80211_FRAME_RELEASE_UAPSD) { 2062 /* for PS-Poll, there's only one frame */ 2063 info->flags |= IEEE80211_TX_STATUS_EOSP | 2064 IEEE80211_TX_CTL_REQ_TX_STATUS; 2065 break; 2066 } 2067 2068 /* For uAPSD, things are a bit more complicated. If the 2069 * last frame has a QoS header (i.e. is a QoS-data or 2070 * QoS-nulldata frame) then just set the EOSP bit there 2071 * and be done. 2072 * If the frame doesn't have a QoS header (which means 2073 * it should be a bufferable MMPDU) then we can't set 2074 * the EOSP bit in the QoS header; add a QoS-nulldata 2075 * frame to the list to send it after the MMPDU. 2076 * 2077 * Note that this code is only in the mac80211-release 2078 * code path, we assume that the driver will not buffer 2079 * anything but QoS-data frames, or if it does, will 2080 * create the QoS-nulldata frame by itself if needed. 2081 * 2082 * Cf. 802.11-2012 10.2.1.10 (c). 2083 */ 2084 if (qoshdr) { 2085 *qoshdr |= IEEE80211_QOS_CTL_EOSP; 2086 2087 info->flags |= IEEE80211_TX_STATUS_EOSP | 2088 IEEE80211_TX_CTL_REQ_TX_STATUS; 2089 } else { 2090 /* The standard isn't completely clear on this 2091 * as it says the more-data bit should be set 2092 * if there are more BUs. The QoS-Null frame 2093 * we're about to send isn't buffered yet, we 2094 * only create it below, but let's pretend it 2095 * was buffered just in case some clients only 2096 * expect more-data=0 when eosp=1. 2097 */ 2098 hdr->frame_control |= 2099 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 2100 need_null = true; 2101 num++; 2102 } 2103 break; 2104 } 2105 2106 drv_allow_buffered_frames(local, sta, tids, num, 2107 reason, more_data); 2108 2109 ieee80211_add_pending_skbs(local, &pending); 2110 2111 if (need_null) 2112 ieee80211_send_null_response( 2113 sta, find_highest_prio_tid(tids), 2114 reason, false, false); 2115 2116 sta_info_recalc_tim(sta); 2117 } else { 2118 int tid; 2119 2120 /* 2121 * We need to release a frame that is buffered somewhere in the 2122 * driver ... it'll have to handle that. 2123 * Note that the driver also has to check the number of frames 2124 * on the TIDs we're releasing from - if there are more than 2125 * n_frames it has to set the more-data bit (if we didn't ask 2126 * it to set it anyway due to other buffered frames); if there 2127 * are fewer than n_frames it has to make sure to adjust that 2128 * to allow the service period to end properly. 2129 */ 2130 drv_release_buffered_frames(local, sta, driver_release_tids, 2131 n_frames, reason, more_data); 2132 2133 /* 2134 * Note that we don't recalculate the TIM bit here as it would 2135 * most likely have no effect at all unless the driver told us 2136 * that the TID(s) became empty before returning here from the 2137 * release function. 
2138 * Either way, however, when the driver tells us that the TID(s) 2139 * became empty or we find that a txq became empty, we'll do the 2140 * TIM recalculation. 2141 */ 2142 2143 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { 2144 if (!sta->sta.txq[tid] || 2145 !(driver_release_tids & BIT(tid)) || 2146 txq_has_queue(sta->sta.txq[tid])) 2147 continue; 2148 2149 sta_info_recalc_tim(sta); 2150 break; 2151 } 2152 } 2153 } 2154 2155 void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta) 2156 { 2157 u8 ignore_for_response = sta->sta.uapsd_queues; 2158 2159 /* 2160 * If all ACs are delivery-enabled then we should reply 2161 * from any of them, if only some are enabled we reply 2162 * only from the non-enabled ones. 2163 */ 2164 if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1) 2165 ignore_for_response = 0; 2166 2167 ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response, 2168 IEEE80211_FRAME_RELEASE_PSPOLL); 2169 } 2170 2171 void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta) 2172 { 2173 int n_frames = sta->sta.max_sp; 2174 u8 delivery_enabled = sta->sta.uapsd_queues; 2175 2176 /* 2177 * If we ever grow support for TSPEC this might happen if 2178 * the TSPEC update from hostapd comes in between a trigger 2179 * frame setting WLAN_STA_UAPSD in the RX path and this 2180 * actually getting called. 2181 */ 2182 if (!delivery_enabled) 2183 return; 2184 2185 switch (sta->sta.max_sp) { 2186 case 1: 2187 n_frames = 2; 2188 break; 2189 case 2: 2190 n_frames = 4; 2191 break; 2192 case 3: 2193 n_frames = 6; 2194 break; 2195 case 0: 2196 /* XXX: what is a good value? */ 2197 n_frames = 128; 2198 break; 2199 } 2200 2201 ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled, 2202 IEEE80211_FRAME_RELEASE_UAPSD); 2203 } 2204 2205 void ieee80211_sta_block_awake(struct ieee80211_hw *hw, 2206 struct ieee80211_sta *pubsta, bool block) 2207 { 2208 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2209 2210 trace_api_sta_block_awake(sta->local, pubsta, block); 2211 2212 if (block) { 2213 set_sta_flag(sta, WLAN_STA_PS_DRIVER); 2214 ieee80211_clear_fast_xmit(sta); 2215 return; 2216 } 2217 2218 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 2219 return; 2220 2221 if (!test_sta_flag(sta, WLAN_STA_PS_STA)) { 2222 set_sta_flag(sta, WLAN_STA_PS_DELIVER); 2223 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 2224 ieee80211_queue_work(hw, &sta->drv_deliver_wk); 2225 } else if (test_sta_flag(sta, WLAN_STA_PSPOLL) || 2226 test_sta_flag(sta, WLAN_STA_UAPSD)) { 2227 /* must be asleep in this case */ 2228 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 2229 ieee80211_queue_work(hw, &sta->drv_deliver_wk); 2230 } else { 2231 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 2232 ieee80211_check_fast_xmit(sta); 2233 } 2234 } 2235 EXPORT_SYMBOL(ieee80211_sta_block_awake); 2236 2237 void ieee80211_sta_eosp(struct ieee80211_sta *pubsta) 2238 { 2239 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2240 struct ieee80211_local *local = sta->local; 2241 2242 trace_api_eosp(local, pubsta); 2243 2244 clear_sta_flag(sta, WLAN_STA_SP); 2245 } 2246 EXPORT_SYMBOL(ieee80211_sta_eosp); 2247 2248 void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid) 2249 { 2250 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2251 enum ieee80211_frame_release_type reason; 2252 bool more_data; 2253 2254 trace_api_send_eosp_nullfunc(sta->local, pubsta, tid); 2255 2256 reason = IEEE80211_FRAME_RELEASE_UAPSD; 2257 more_data = ieee80211_sta_ps_more_data(sta, 
~sta->sta.uapsd_queues, 2258 reason, 0); 2259 2260 ieee80211_send_null_response(sta, tid, reason, false, more_data); 2261 } 2262 EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc); 2263 2264 void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, 2265 u8 tid, bool buffered) 2266 { 2267 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2268 2269 if (WARN_ON(tid >= IEEE80211_NUM_TIDS)) 2270 return; 2271 2272 trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered); 2273 2274 if (buffered) 2275 set_bit(tid, &sta->driver_buffered_tids); 2276 else 2277 clear_bit(tid, &sta->driver_buffered_tids); 2278 2279 sta_info_recalc_tim(sta); 2280 } 2281 EXPORT_SYMBOL(ieee80211_sta_set_buffered); 2282 2283 void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid, 2284 u32 tx_airtime, u32 rx_airtime) 2285 { 2286 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2287 struct ieee80211_local *local = sta->sdata->local; 2288 u8 ac = ieee80211_ac_from_tid(tid); 2289 u32 airtime = 0; 2290 u32 diff; 2291 2292 if (sta->local->airtime_flags & AIRTIME_USE_TX) 2293 airtime += tx_airtime; 2294 if (sta->local->airtime_flags & AIRTIME_USE_RX) 2295 airtime += rx_airtime; 2296 2297 spin_lock_bh(&local->active_txq_lock[ac]); 2298 sta->airtime[ac].tx_airtime += tx_airtime; 2299 sta->airtime[ac].rx_airtime += rx_airtime; 2300 2301 diff = (u32)jiffies - sta->airtime[ac].last_active; 2302 if (diff <= AIRTIME_ACTIVE_DURATION) 2303 sta->airtime[ac].deficit -= airtime; 2304 2305 spin_unlock_bh(&local->active_txq_lock[ac]); 2306 } 2307 EXPORT_SYMBOL(ieee80211_sta_register_airtime); 2308 2309 void __ieee80211_sta_recalc_aggregates(struct sta_info *sta, u16 active_links) 2310 { 2311 bool first = true; 2312 int link_id; 2313 2314 if (!sta->sta.valid_links || !sta->sta.mlo) { 2315 sta->sta.cur = &sta->sta.deflink.agg; 2316 return; 2317 } 2318 2319 rcu_read_lock(); 2320 for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) { 2321 struct ieee80211_link_sta *link_sta; 2322 int i; 2323 2324 if (!(active_links & BIT(link_id))) 2325 continue; 2326 2327 link_sta = rcu_dereference(sta->sta.link[link_id]); 2328 if (!link_sta) 2329 continue; 2330 2331 if (first) { 2332 sta->cur = sta->sta.deflink.agg; 2333 first = false; 2334 continue; 2335 } 2336 2337 sta->cur.max_amsdu_len = 2338 min(sta->cur.max_amsdu_len, 2339 link_sta->agg.max_amsdu_len); 2340 sta->cur.max_rc_amsdu_len = 2341 min(sta->cur.max_rc_amsdu_len, 2342 link_sta->agg.max_rc_amsdu_len); 2343 2344 for (i = 0; i < ARRAY_SIZE(sta->cur.max_tid_amsdu_len); i++) 2345 sta->cur.max_tid_amsdu_len[i] = 2346 min(sta->cur.max_tid_amsdu_len[i], 2347 link_sta->agg.max_tid_amsdu_len[i]); 2348 } 2349 rcu_read_unlock(); 2350 2351 sta->sta.cur = &sta->cur; 2352 } 2353 2354 void ieee80211_sta_recalc_aggregates(struct ieee80211_sta *pubsta) 2355 { 2356 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2357 2358 __ieee80211_sta_recalc_aggregates(sta, sta->sdata->vif.active_links); 2359 } 2360 EXPORT_SYMBOL(ieee80211_sta_recalc_aggregates); 2361 2362 void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, 2363 struct sta_info *sta, u8 ac, 2364 u16 tx_airtime, bool tx_completed) 2365 { 2366 int tx_pending; 2367 2368 if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) 2369 return; 2370 2371 if (!tx_completed) { 2372 if (sta) 2373 atomic_add(tx_airtime, 2374 &sta->airtime[ac].aql_tx_pending); 2375 2376 atomic_add(tx_airtime, &local->aql_total_pending_airtime); 2377 atomic_add(tx_airtime, 
&local->aql_ac_pending_airtime[ac]); 2378 return; 2379 } 2380 2381 if (sta) { 2382 tx_pending = atomic_sub_return(tx_airtime, 2383 &sta->airtime[ac].aql_tx_pending); 2384 if (tx_pending < 0) 2385 atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending, 2386 tx_pending, 0); 2387 } 2388 2389 atomic_sub(tx_airtime, &local->aql_total_pending_airtime); 2390 tx_pending = atomic_sub_return(tx_airtime, 2391 &local->aql_ac_pending_airtime[ac]); 2392 if (WARN_ONCE(tx_pending < 0, 2393 "Device %s AC %d pending airtime underflow: %u, %u", 2394 wiphy_name(local->hw.wiphy), ac, tx_pending, 2395 tx_airtime)) { 2396 atomic_cmpxchg(&local->aql_ac_pending_airtime[ac], 2397 tx_pending, 0); 2398 atomic_sub(tx_pending, &local->aql_total_pending_airtime); 2399 } 2400 } 2401 2402 static struct ieee80211_sta_rx_stats * 2403 sta_get_last_rx_stats(struct sta_info *sta) 2404 { 2405 struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats; 2406 int cpu; 2407 2408 if (!sta->deflink.pcpu_rx_stats) 2409 return stats; 2410 2411 for_each_possible_cpu(cpu) { 2412 struct ieee80211_sta_rx_stats *cpustats; 2413 2414 cpustats = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu); 2415 2416 if (time_after(cpustats->last_rx, stats->last_rx)) 2417 stats = cpustats; 2418 } 2419 2420 return stats; 2421 } 2422 2423 static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, 2424 struct rate_info *rinfo) 2425 { 2426 rinfo->bw = STA_STATS_GET(BW, rate); 2427 2428 switch (STA_STATS_GET(TYPE, rate)) { 2429 case STA_STATS_RATE_TYPE_VHT: 2430 rinfo->flags = RATE_INFO_FLAGS_VHT_MCS; 2431 rinfo->mcs = STA_STATS_GET(VHT_MCS, rate); 2432 rinfo->nss = STA_STATS_GET(VHT_NSS, rate); 2433 if (STA_STATS_GET(SGI, rate)) 2434 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 2435 break; 2436 case STA_STATS_RATE_TYPE_HT: 2437 rinfo->flags = RATE_INFO_FLAGS_MCS; 2438 rinfo->mcs = STA_STATS_GET(HT_MCS, rate); 2439 if (STA_STATS_GET(SGI, rate)) 2440 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 2441 break; 2442 case STA_STATS_RATE_TYPE_LEGACY: { 2443 struct ieee80211_supported_band *sband; 2444 u16 brate; 2445 unsigned int shift; 2446 int band = STA_STATS_GET(LEGACY_BAND, rate); 2447 int rate_idx = STA_STATS_GET(LEGACY_IDX, rate); 2448 2449 sband = local->hw.wiphy->bands[band]; 2450 2451 if (WARN_ON_ONCE(!sband->bitrates)) 2452 break; 2453 2454 brate = sband->bitrates[rate_idx].bitrate; 2455 if (rinfo->bw == RATE_INFO_BW_5) 2456 shift = 2; 2457 else if (rinfo->bw == RATE_INFO_BW_10) 2458 shift = 1; 2459 else 2460 shift = 0; 2461 rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); 2462 break; 2463 } 2464 case STA_STATS_RATE_TYPE_HE: 2465 rinfo->flags = RATE_INFO_FLAGS_HE_MCS; 2466 rinfo->mcs = STA_STATS_GET(HE_MCS, rate); 2467 rinfo->nss = STA_STATS_GET(HE_NSS, rate); 2468 rinfo->he_gi = STA_STATS_GET(HE_GI, rate); 2469 rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate); 2470 rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate); 2471 break; 2472 case STA_STATS_RATE_TYPE_EHT: 2473 rinfo->flags = RATE_INFO_FLAGS_EHT_MCS; 2474 rinfo->mcs = STA_STATS_GET(EHT_MCS, rate); 2475 rinfo->nss = STA_STATS_GET(EHT_NSS, rate); 2476 rinfo->eht_gi = STA_STATS_GET(EHT_GI, rate); 2477 rinfo->eht_ru_alloc = STA_STATS_GET(EHT_RU, rate); 2478 break; 2479 } 2480 } 2481 2482 static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) 2483 { 2484 u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); 2485 2486 if (rate == STA_STATS_RATE_INVALID) 2487 return -EINVAL; 2488 2489 sta_stats_decode_rate(sta->local, rate, rinfo); 2490 return 0; 2491 } 2492 2493 static inline u64 
sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats, 2494 int tid) 2495 { 2496 unsigned int start; 2497 u64 value; 2498 2499 do { 2500 start = u64_stats_fetch_begin(&rxstats->syncp); 2501 value = rxstats->msdu[tid]; 2502 } while (u64_stats_fetch_retry(&rxstats->syncp, start)); 2503 2504 return value; 2505 } 2506 2507 static void sta_set_tidstats(struct sta_info *sta, 2508 struct cfg80211_tid_stats *tidstats, 2509 int tid) 2510 { 2511 struct ieee80211_local *local = sta->local; 2512 int cpu; 2513 2514 if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) { 2515 tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->deflink.rx_stats, 2516 tid); 2517 2518 if (sta->deflink.pcpu_rx_stats) { 2519 for_each_possible_cpu(cpu) { 2520 struct ieee80211_sta_rx_stats *cpurxs; 2521 2522 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, 2523 cpu); 2524 tidstats->rx_msdu += 2525 sta_get_tidstats_msdu(cpurxs, tid); 2526 } 2527 } 2528 2529 tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU); 2530 } 2531 2532 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { 2533 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU); 2534 tidstats->tx_msdu = sta->deflink.tx_stats.msdu[tid]; 2535 } 2536 2537 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) && 2538 ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { 2539 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); 2540 tidstats->tx_msdu_retries = sta->deflink.status_stats.msdu_retries[tid]; 2541 } 2542 2543 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) && 2544 ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { 2545 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED); 2546 tidstats->tx_msdu_failed = sta->deflink.status_stats.msdu_failed[tid]; 2547 } 2548 2549 if (tid < IEEE80211_NUM_TIDS) { 2550 spin_lock_bh(&local->fq.lock); 2551 rcu_read_lock(); 2552 2553 tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS); 2554 ieee80211_fill_txq_stats(&tidstats->txq_stats, 2555 to_txq_info(sta->sta.txq[tid])); 2556 2557 rcu_read_unlock(); 2558 spin_unlock_bh(&local->fq.lock); 2559 } 2560 } 2561 2562 static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats) 2563 { 2564 unsigned int start; 2565 u64 value; 2566 2567 do { 2568 start = u64_stats_fetch_begin(&rxstats->syncp); 2569 value = rxstats->bytes; 2570 } while (u64_stats_fetch_retry(&rxstats->syncp, start)); 2571 2572 return value; 2573 } 2574 2575 void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, 2576 bool tidstats) 2577 { 2578 struct ieee80211_sub_if_data *sdata = sta->sdata; 2579 struct ieee80211_local *local = sdata->local; 2580 u32 thr = 0; 2581 int i, ac, cpu; 2582 struct ieee80211_sta_rx_stats *last_rxstats; 2583 2584 last_rxstats = sta_get_last_rx_stats(sta); 2585 2586 sinfo->generation = sdata->local->sta_generation; 2587 2588 /* do before driver, so beacon filtering drivers have a 2589 * chance to e.g. 
just add the number of filtered beacons 2590 * (or just modify the value entirely, of course) 2591 */ 2592 if (sdata->vif.type == NL80211_IFTYPE_STATION) 2593 sinfo->rx_beacon = sdata->deflink.u.mgd.count_beacon_signal; 2594 2595 drv_sta_statistics(local, sdata, &sta->sta, sinfo); 2596 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | 2597 BIT_ULL(NL80211_STA_INFO_STA_FLAGS) | 2598 BIT_ULL(NL80211_STA_INFO_BSS_PARAM) | 2599 BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) | 2600 BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) | 2601 BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); 2602 2603 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 2604 sinfo->beacon_loss_count = 2605 sdata->deflink.u.mgd.beacon_loss_count; 2606 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS); 2607 } 2608 2609 sinfo->connected_time = ktime_get_seconds() - sta->last_connected; 2610 sinfo->assoc_at = sta->assoc_at; 2611 sinfo->inactive_time = 2612 jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta)); 2613 2614 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | 2615 BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { 2616 sinfo->tx_bytes = 0; 2617 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2618 sinfo->tx_bytes += sta->deflink.tx_stats.bytes[ac]; 2619 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); 2620 } 2621 2622 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { 2623 sinfo->tx_packets = 0; 2624 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2625 sinfo->tx_packets += sta->deflink.tx_stats.packets[ac]; 2626 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); 2627 } 2628 2629 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) | 2630 BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) { 2631 sinfo->rx_bytes += sta_get_stats_bytes(&sta->deflink.rx_stats); 2632 2633 if (sta->deflink.pcpu_rx_stats) { 2634 for_each_possible_cpu(cpu) { 2635 struct ieee80211_sta_rx_stats *cpurxs; 2636 2637 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, 2638 cpu); 2639 sinfo->rx_bytes += sta_get_stats_bytes(cpurxs); 2640 } 2641 } 2642 2643 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); 2644 } 2645 2646 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { 2647 sinfo->rx_packets = sta->deflink.rx_stats.packets; 2648 if (sta->deflink.pcpu_rx_stats) { 2649 for_each_possible_cpu(cpu) { 2650 struct ieee80211_sta_rx_stats *cpurxs; 2651 2652 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, 2653 cpu); 2654 sinfo->rx_packets += cpurxs->packets; 2655 } 2656 } 2657 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); 2658 } 2659 2660 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) { 2661 sinfo->tx_retries = sta->deflink.status_stats.retry_count; 2662 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); 2663 } 2664 2665 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) { 2666 sinfo->tx_failed = sta->deflink.status_stats.retry_failed; 2667 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); 2668 } 2669 2670 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) { 2671 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2672 sinfo->rx_duration += sta->airtime[ac].rx_airtime; 2673 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); 2674 } 2675 2676 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) { 2677 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2678 sinfo->tx_duration += sta->airtime[ac].tx_airtime; 2679 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); 2680 } 2681 2682 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) { 2683 sinfo->airtime_weight = 
sta->airtime_weight; 2684 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT); 2685 } 2686 2687 sinfo->rx_dropped_misc = sta->deflink.rx_stats.dropped; 2688 if (sta->deflink.pcpu_rx_stats) { 2689 for_each_possible_cpu(cpu) { 2690 struct ieee80211_sta_rx_stats *cpurxs; 2691 2692 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu); 2693 sinfo->rx_dropped_misc += cpurxs->dropped; 2694 } 2695 } 2696 2697 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2698 !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { 2699 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) | 2700 BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); 2701 sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif); 2702 } 2703 2704 if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || 2705 ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { 2706 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) { 2707 sinfo->signal = (s8)last_rxstats->last_signal; 2708 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); 2709 } 2710 2711 if (!sta->deflink.pcpu_rx_stats && 2712 !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) { 2713 sinfo->signal_avg = 2714 -ewma_signal_read(&sta->deflink.rx_stats_avg.signal); 2715 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 2716 } 2717 } 2718 2719 /* for the average - if pcpu_rx_stats isn't set - rxstats must point to 2720 * the sta->rx_stats struct, so the check here is fine with and without 2721 * pcpu statistics 2722 */ 2723 if (last_rxstats->chains && 2724 !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) | 2725 BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { 2726 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); 2727 if (!sta->deflink.pcpu_rx_stats) 2728 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); 2729 2730 sinfo->chains = last_rxstats->chains; 2731 2732 for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { 2733 sinfo->chain_signal[i] = 2734 last_rxstats->chain_signal_last[i]; 2735 sinfo->chain_signal_avg[i] = 2736 -ewma_signal_read(&sta->deflink.rx_stats_avg.chain_signal[i]); 2737 } 2738 } 2739 2740 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) && 2741 !sta->sta.valid_links) { 2742 sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, 2743 &sinfo->txrate); 2744 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 2745 } 2746 2747 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) && 2748 !sta->sta.valid_links) { 2749 if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0) 2750 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); 2751 } 2752 2753 if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) { 2754 for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) 2755 sta_set_tidstats(sta, &sinfo->pertid[i], i); 2756 } 2757 2758 if (ieee80211_vif_is_mesh(&sdata->vif)) { 2759 #ifdef CONFIG_MAC80211_MESH 2760 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) | 2761 BIT_ULL(NL80211_STA_INFO_PLID) | 2762 BIT_ULL(NL80211_STA_INFO_PLINK_STATE) | 2763 BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | 2764 BIT_ULL(NL80211_STA_INFO_PEER_PM) | 2765 BIT_ULL(NL80211_STA_INFO_NONPEER_PM) | 2766 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) | 2767 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS); 2768 2769 sinfo->llid = sta->mesh->llid; 2770 sinfo->plid = sta->mesh->plid; 2771 sinfo->plink_state = sta->mesh->plink_state; 2772 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { 2773 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET); 2774 sinfo->t_offset = sta->mesh->t_offset; 2775 } 2776 sinfo->local_pm = sta->mesh->local_pm; 2777 
sinfo->peer_pm = sta->mesh->peer_pm; 2778 sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; 2779 sinfo->connected_to_gate = sta->mesh->connected_to_gate; 2780 sinfo->connected_to_as = sta->mesh->connected_to_as; 2781 #endif 2782 } 2783 2784 sinfo->bss_param.flags = 0; 2785 if (sdata->vif.bss_conf.use_cts_prot) 2786 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT; 2787 if (sdata->vif.bss_conf.use_short_preamble) 2788 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; 2789 if (sdata->vif.bss_conf.use_short_slot) 2790 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; 2791 sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period; 2792 sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; 2793 2794 sinfo->sta_flags.set = 0; 2795 sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) | 2796 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | 2797 BIT(NL80211_STA_FLAG_WME) | 2798 BIT(NL80211_STA_FLAG_MFP) | 2799 BIT(NL80211_STA_FLAG_AUTHENTICATED) | 2800 BIT(NL80211_STA_FLAG_ASSOCIATED) | 2801 BIT(NL80211_STA_FLAG_TDLS_PEER); 2802 if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 2803 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED); 2804 if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE)) 2805 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE); 2806 if (sta->sta.wme) 2807 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME); 2808 if (test_sta_flag(sta, WLAN_STA_MFP)) 2809 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP); 2810 if (test_sta_flag(sta, WLAN_STA_AUTH)) 2811 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED); 2812 if (test_sta_flag(sta, WLAN_STA_ASSOC)) 2813 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED); 2814 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) 2815 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); 2816 2817 thr = sta_get_expected_throughput(sta); 2818 2819 if (thr != 0) { 2820 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT); 2821 sinfo->expected_throughput = thr; 2822 } 2823 2824 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) && 2825 sta->deflink.status_stats.ack_signal_filled) { 2826 sinfo->ack_signal = sta->deflink.status_stats.last_ack_signal; 2827 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); 2828 } 2829 2830 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) && 2831 sta->deflink.status_stats.ack_signal_filled) { 2832 sinfo->avg_ack_signal = 2833 -(s8)ewma_avg_signal_read( 2834 &sta->deflink.status_stats.avg_ack_signal); 2835 sinfo->filled |= 2836 BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); 2837 } 2838 2839 if (ieee80211_vif_is_mesh(&sdata->vif)) { 2840 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC); 2841 sinfo->airtime_link_metric = 2842 airtime_link_metric_get(local, sta); 2843 } 2844 } 2845 2846 u32 sta_get_expected_throughput(struct sta_info *sta) 2847 { 2848 struct ieee80211_sub_if_data *sdata = sta->sdata; 2849 struct ieee80211_local *local = sdata->local; 2850 struct rate_control_ref *ref = NULL; 2851 u32 thr = 0; 2852 2853 if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) 2854 ref = local->rate_ctrl; 2855 2856 /* check if the driver has a SW RC implementation */ 2857 if (ref && ref->ops->get_expected_throughput) 2858 thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv); 2859 else 2860 thr = drv_get_expected_throughput(local, sta); 2861 2862 return thr; 2863 } 2864 2865 unsigned long ieee80211_sta_last_active(struct sta_info *sta) 2866 { 2867 struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); 2868 2869 if 
(!sta->deflink.status_stats.last_ack || 2870 time_after(stats->last_rx, sta->deflink.status_stats.last_ack)) 2871 return stats->last_rx; 2872 return sta->deflink.status_stats.last_ack; 2873 } 2874 2875 static void sta_update_codel_params(struct sta_info *sta, u32 thr) 2876 { 2877 if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) { 2878 sta->cparams.target = MS2TIME(50); 2879 sta->cparams.interval = MS2TIME(300); 2880 sta->cparams.ecn = false; 2881 } else { 2882 sta->cparams.target = MS2TIME(20); 2883 sta->cparams.interval = MS2TIME(100); 2884 sta->cparams.ecn = true; 2885 } 2886 } 2887 2888 void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta, 2889 u32 thr) 2890 { 2891 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2892 2893 sta_update_codel_params(sta, thr); 2894 } 2895 2896 int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id) 2897 { 2898 struct ieee80211_sub_if_data *sdata = sta->sdata; 2899 struct sta_link_alloc *alloc; 2900 int ret; 2901 2902 lockdep_assert_held(&sdata->local->sta_mtx); 2903 2904 /* must represent an MLD from the start */ 2905 if (WARN_ON(!sta->sta.valid_links)) 2906 return -EINVAL; 2907 2908 if (WARN_ON(sta->sta.valid_links & BIT(link_id) || 2909 sta->link[link_id])) 2910 return -EBUSY; 2911 2912 alloc = kzalloc(sizeof(*alloc), GFP_KERNEL); 2913 if (!alloc) 2914 return -ENOMEM; 2915 2916 ret = sta_info_alloc_link(sdata->local, &alloc->info, GFP_KERNEL); 2917 if (ret) { 2918 kfree(alloc); 2919 return ret; 2920 } 2921 2922 sta_info_add_link(sta, link_id, &alloc->info, &alloc->sta); 2923 2924 ieee80211_link_sta_debugfs_add(&alloc->info); 2925 2926 return 0; 2927 } 2928 2929 void ieee80211_sta_free_link(struct sta_info *sta, unsigned int link_id) 2930 { 2931 lockdep_assert_held(&sta->sdata->local->sta_mtx); 2932 2933 sta_remove_link(sta, link_id, false); 2934 } 2935 2936 int ieee80211_sta_activate_link(struct sta_info *sta, unsigned int link_id) 2937 { 2938 struct ieee80211_sub_if_data *sdata = sta->sdata; 2939 struct link_sta_info *link_sta; 2940 u16 old_links = sta->sta.valid_links; 2941 u16 new_links = old_links | BIT(link_id); 2942 int ret; 2943 2944 link_sta = rcu_dereference_protected(sta->link[link_id], 2945 lockdep_is_held(&sdata->local->sta_mtx)); 2946 2947 if (WARN_ON(old_links == new_links || !link_sta)) 2948 return -EINVAL; 2949 2950 rcu_read_lock(); 2951 if (link_sta_info_hash_lookup(sdata->local, link_sta->addr)) { 2952 rcu_read_unlock(); 2953 return -EALREADY; 2954 } 2955 /* we only modify under the mutex so this is fine */ 2956 rcu_read_unlock(); 2957 2958 sta->sta.valid_links = new_links; 2959 2960 if (!test_sta_flag(sta, WLAN_STA_INSERTED)) 2961 goto hash; 2962 2963 ieee80211_recalc_min_chandef(sdata, link_id); 2964 2965 /* Ensure the values are updated for the driver, 2966 * redone by sta_remove_link on failure. 
2967 */ 2968 ieee80211_sta_recalc_aggregates(&sta->sta); 2969 2970 ret = drv_change_sta_links(sdata->local, sdata, &sta->sta, 2971 old_links, new_links); 2972 if (ret) { 2973 sta->sta.valid_links = old_links; 2974 sta_remove_link(sta, link_id, false); 2975 return ret; 2976 } 2977 2978 hash: 2979 ret = link_sta_info_hash_add(sdata->local, link_sta); 2980 WARN_ON(ret); 2981 return 0; 2982 } 2983 2984 void ieee80211_sta_remove_link(struct sta_info *sta, unsigned int link_id) 2985 { 2986 struct ieee80211_sub_if_data *sdata = sta->sdata; 2987 u16 old_links = sta->sta.valid_links; 2988 2989 lockdep_assert_held(&sdata->local->sta_mtx); 2990 2991 sta->sta.valid_links &= ~BIT(link_id); 2992 2993 if (test_sta_flag(sta, WLAN_STA_INSERTED)) 2994 drv_change_sta_links(sdata->local, sdata, &sta->sta, 2995 old_links, sta->sta.valid_links); 2996 2997 sta_remove_link(sta, link_id, true); 2998 } 2999 3000 void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta, 3001 const u8 *ext_capab, 3002 unsigned int ext_capab_len) 3003 { 3004 u8 val; 3005 3006 sta->sta.max_amsdu_subframes = 0; 3007 3008 if (ext_capab_len < 8) 3009 return; 3010 3011 /* The sender might not have sent the last bit, consider it to be 0 */ 3012 val = u8_get_bits(ext_capab[7], WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB); 3013 3014 /* we did get all the bits, take the MSB as well */ 3015 if (ext_capab_len >= 9) 3016 val |= u8_get_bits(ext_capab[8], 3017 WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1; 3018 3019 if (val) 3020 sta->sta.max_amsdu_subframes = 4 << (4 - val); 3021 } 3022 3023 #ifdef CONFIG_LOCKDEP 3024 bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta) 3025 { 3026 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 3027 3028 return lockdep_is_held(&sta->local->sta_mtx); 3029 } 3030 EXPORT_SYMBOL(lockdep_sta_mutex_held); 3031 #endif 3032