// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2023 Intel Corporation
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <linux/rtnetlink.h>

#include <net/codel.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "sta_info.h"
#include "debugfs_sta.h"
#include "mesh.h"
#include "wme.h"

/**
 * DOC: STA information lifetime rules
 *
 * STA info structures (&struct sta_info) are managed in a hash table
 * for faster lookup and a list for iteration. They are managed using
 * RCU, i.e. access to the list and hash table is protected by RCU.
 *
 * Upon allocating a STA info structure with sta_info_alloc(), the caller
 * owns that structure. It must then insert it into the hash table using
 * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
 * case (which acquires an rcu read section but must not be called from
 * within one) will the pointer still be valid after the call. Note that
 * the caller may not do much with the STA info before inserting it; in
 * particular, it may not start any mesh peer link management or add
 * encryption keys.
 *
 * When the insertion fails (sta_info_insert() returns non-zero), the
 * structure will have been freed by sta_info_insert()!
 *
 * Station entries are added by mac80211 when you establish a link with a
 * peer. This means different things for the different types of interfaces
 * we support. For a regular station this means we add the AP sta when we
 * receive an association response from the AP. For IBSS this occurs when
 * we get to know about a peer on the same IBSS. For WDS we add the sta for
 * the peer immediately upon device open. When using AP mode we add a
 * station entry for each peer station upon request from userspace through
 * nl80211.
 *
 * In order to remove a STA info structure, various sta_info_destroy_*()
 * calls are available.
 *
 * There is no concept of ownership on a STA entry; each structure is
 * owned by the global hash table/list until it is removed. All users of
 * the structure need to be RCU protected so that the structure won't be
 * freed before they are done using it.
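 *
 * As an illustrative sketch only (sta_info_alloc() and sta_info_insert()
 * are the helpers from this file, the surrounding error handling is just
 * an example), a caller in process context might do:
 *
 *	sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
 *	if (!sta)
 *		return -ENOMEM;
 *
 *	... set up supported rates, capabilities, etc. on sta ...
 *
 *	err = sta_info_insert(sta);
 *	if (err)
 *		return err;
 *
 * where, on the error path, sta has already been freed by
 * sta_info_insert() as described above.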
65 */ 66 67 struct sta_link_alloc { 68 struct link_sta_info info; 69 struct ieee80211_link_sta sta; 70 struct rcu_head rcu_head; 71 }; 72 73 static const struct rhashtable_params sta_rht_params = { 74 .nelem_hint = 3, /* start small */ 75 .automatic_shrinking = true, 76 .head_offset = offsetof(struct sta_info, hash_node), 77 .key_offset = offsetof(struct sta_info, addr), 78 .key_len = ETH_ALEN, 79 .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE, 80 }; 81 82 static const struct rhashtable_params link_sta_rht_params = { 83 .nelem_hint = 3, /* start small */ 84 .automatic_shrinking = true, 85 .head_offset = offsetof(struct link_sta_info, link_hash_node), 86 .key_offset = offsetof(struct link_sta_info, addr), 87 .key_len = ETH_ALEN, 88 .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE, 89 }; 90 91 /* Caller must hold local->sta_mtx */ 92 static int sta_info_hash_del(struct ieee80211_local *local, 93 struct sta_info *sta) 94 { 95 return rhltable_remove(&local->sta_hash, &sta->hash_node, 96 sta_rht_params); 97 } 98 99 static int link_sta_info_hash_add(struct ieee80211_local *local, 100 struct link_sta_info *link_sta) 101 { 102 lockdep_assert_held(&local->sta_mtx); 103 return rhltable_insert(&local->link_sta_hash, 104 &link_sta->link_hash_node, 105 link_sta_rht_params); 106 } 107 108 static int link_sta_info_hash_del(struct ieee80211_local *local, 109 struct link_sta_info *link_sta) 110 { 111 lockdep_assert_held(&local->sta_mtx); 112 return rhltable_remove(&local->link_sta_hash, 113 &link_sta->link_hash_node, 114 link_sta_rht_params); 115 } 116 117 static void __cleanup_single_sta(struct sta_info *sta) 118 { 119 int ac, i; 120 struct tid_ampdu_tx *tid_tx; 121 struct ieee80211_sub_if_data *sdata = sta->sdata; 122 struct ieee80211_local *local = sdata->local; 123 struct ps_data *ps; 124 125 if (test_sta_flag(sta, WLAN_STA_PS_STA) || 126 test_sta_flag(sta, WLAN_STA_PS_DRIVER) || 127 test_sta_flag(sta, WLAN_STA_PS_DELIVER)) { 128 if (sta->sdata->vif.type == NL80211_IFTYPE_AP || 129 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 130 ps = &sdata->bss->ps; 131 else if (ieee80211_vif_is_mesh(&sdata->vif)) 132 ps = &sdata->u.mesh.ps; 133 else 134 return; 135 136 clear_sta_flag(sta, WLAN_STA_PS_STA); 137 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 138 clear_sta_flag(sta, WLAN_STA_PS_DELIVER); 139 140 atomic_dec(&ps->num_sta_ps); 141 } 142 143 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 144 struct txq_info *txqi; 145 146 if (!sta->sta.txq[i]) 147 continue; 148 149 txqi = to_txq_info(sta->sta.txq[i]); 150 151 ieee80211_txq_purge(local, txqi); 152 } 153 154 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 155 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); 156 ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]); 157 ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]); 158 } 159 160 if (ieee80211_vif_is_mesh(&sdata->vif)) 161 mesh_sta_cleanup(sta); 162 163 cancel_work_sync(&sta->drv_deliver_wk); 164 165 /* 166 * Destroy aggregation state here. It would be nice to wait for the 167 * driver to finish aggregation stop and then clean up, but for now 168 * drivers have to handle aggregation stop being requested, followed 169 * directly by station destruction. 
170 */ 171 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 172 kfree(sta->ampdu_mlme.tid_start_tx[i]); 173 tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); 174 if (!tid_tx) 175 continue; 176 ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); 177 kfree(tid_tx); 178 } 179 } 180 181 static void cleanup_single_sta(struct sta_info *sta) 182 { 183 struct ieee80211_sub_if_data *sdata = sta->sdata; 184 struct ieee80211_local *local = sdata->local; 185 186 __cleanup_single_sta(sta); 187 sta_info_free(local, sta); 188 } 189 190 struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local, 191 const u8 *addr) 192 { 193 return rhltable_lookup(&local->sta_hash, addr, sta_rht_params); 194 } 195 196 /* protected by RCU */ 197 struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, 198 const u8 *addr) 199 { 200 struct ieee80211_local *local = sdata->local; 201 struct rhlist_head *tmp; 202 struct sta_info *sta; 203 204 rcu_read_lock(); 205 for_each_sta_info(local, addr, sta, tmp) { 206 if (sta->sdata == sdata) { 207 rcu_read_unlock(); 208 /* this is safe as the caller must already hold 209 * another rcu read section or the mutex 210 */ 211 return sta; 212 } 213 } 214 rcu_read_unlock(); 215 return NULL; 216 } 217 218 /* 219 * Get sta info either from the specified interface 220 * or from one of its vlans 221 */ 222 struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, 223 const u8 *addr) 224 { 225 struct ieee80211_local *local = sdata->local; 226 struct rhlist_head *tmp; 227 struct sta_info *sta; 228 229 rcu_read_lock(); 230 for_each_sta_info(local, addr, sta, tmp) { 231 if (sta->sdata == sdata || 232 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) { 233 rcu_read_unlock(); 234 /* this is safe as the caller must already hold 235 * another rcu read section or the mutex 236 */ 237 return sta; 238 } 239 } 240 rcu_read_unlock(); 241 return NULL; 242 } 243 244 struct rhlist_head *link_sta_info_hash_lookup(struct ieee80211_local *local, 245 const u8 *addr) 246 { 247 return rhltable_lookup(&local->link_sta_hash, addr, 248 link_sta_rht_params); 249 } 250 251 struct link_sta_info * 252 link_sta_info_get_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr) 253 { 254 struct ieee80211_local *local = sdata->local; 255 struct rhlist_head *tmp; 256 struct link_sta_info *link_sta; 257 258 rcu_read_lock(); 259 for_each_link_sta_info(local, addr, link_sta, tmp) { 260 struct sta_info *sta = link_sta->sta; 261 262 if (sta->sdata == sdata || 263 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) { 264 rcu_read_unlock(); 265 /* this is safe as the caller must already hold 266 * another rcu read section or the mutex 267 */ 268 return link_sta; 269 } 270 } 271 rcu_read_unlock(); 272 return NULL; 273 } 274 275 struct ieee80211_sta * 276 ieee80211_find_sta_by_link_addrs(struct ieee80211_hw *hw, 277 const u8 *addr, 278 const u8 *localaddr, 279 unsigned int *link_id) 280 { 281 struct ieee80211_local *local = hw_to_local(hw); 282 struct link_sta_info *link_sta; 283 struct rhlist_head *tmp; 284 285 for_each_link_sta_info(local, addr, link_sta, tmp) { 286 struct sta_info *sta = link_sta->sta; 287 struct ieee80211_link_data *link; 288 u8 _link_id = link_sta->link_id; 289 290 if (!localaddr) { 291 if (link_id) 292 *link_id = _link_id; 293 return &sta->sta; 294 } 295 296 link = rcu_dereference(sta->sdata->link[_link_id]); 297 if (!link) 298 continue; 299 300 if (memcmp(link->conf->addr, localaddr, ETH_ALEN)) 301 continue; 302 303 if (link_id) 304 *link_id = _link_id; 305 return 
&sta->sta; 306 } 307 308 return NULL; 309 } 310 EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_link_addrs); 311 312 struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local, 313 const u8 *sta_addr, const u8 *vif_addr) 314 { 315 struct rhlist_head *tmp; 316 struct sta_info *sta; 317 318 for_each_sta_info(local, sta_addr, sta, tmp) { 319 if (ether_addr_equal(vif_addr, sta->sdata->vif.addr)) 320 return sta; 321 } 322 323 return NULL; 324 } 325 326 struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, 327 int idx) 328 { 329 struct ieee80211_local *local = sdata->local; 330 struct sta_info *sta; 331 int i = 0; 332 333 list_for_each_entry_rcu(sta, &local->sta_list, list, 334 lockdep_is_held(&local->sta_mtx)) { 335 if (sdata != sta->sdata) 336 continue; 337 if (i < idx) { 338 ++i; 339 continue; 340 } 341 return sta; 342 } 343 344 return NULL; 345 } 346 347 static void sta_info_free_link(struct link_sta_info *link_sta) 348 { 349 free_percpu(link_sta->pcpu_rx_stats); 350 } 351 352 static void sta_remove_link(struct sta_info *sta, unsigned int link_id, 353 bool unhash) 354 { 355 struct sta_link_alloc *alloc = NULL; 356 struct link_sta_info *link_sta; 357 358 link_sta = rcu_access_pointer(sta->link[link_id]); 359 if (link_sta != &sta->deflink) 360 lockdep_assert_held(&sta->local->sta_mtx); 361 362 if (WARN_ON(!link_sta)) 363 return; 364 365 if (unhash) 366 link_sta_info_hash_del(sta->local, link_sta); 367 368 if (test_sta_flag(sta, WLAN_STA_INSERTED)) 369 ieee80211_link_sta_debugfs_remove(link_sta); 370 371 if (link_sta != &sta->deflink) 372 alloc = container_of(link_sta, typeof(*alloc), info); 373 374 sta->sta.valid_links &= ~BIT(link_id); 375 RCU_INIT_POINTER(sta->link[link_id], NULL); 376 RCU_INIT_POINTER(sta->sta.link[link_id], NULL); 377 if (alloc) { 378 sta_info_free_link(&alloc->info); 379 kfree_rcu(alloc, rcu_head); 380 } 381 382 ieee80211_sta_recalc_aggregates(&sta->sta); 383 } 384 385 /** 386 * sta_info_free - free STA 387 * 388 * @local: pointer to the global information 389 * @sta: STA info to free 390 * 391 * This function must undo everything done by sta_info_alloc() 392 * that may happen before sta_info_insert(). It may only be 393 * called when sta_info_insert() has not been attempted (and 394 * if that fails, the station is freed anyway.) 395 */ 396 void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) 397 { 398 int i; 399 400 for (i = 0; i < ARRAY_SIZE(sta->link); i++) { 401 if (!(sta->sta.valid_links & BIT(i))) 402 continue; 403 404 sta_remove_link(sta, i, false); 405 } 406 407 /* 408 * If we had used sta_info_pre_move_state() then we might not 409 * have gone through the state transitions down again, so do 410 * it here now (and warn if it's inserted). 411 * 412 * This will clear state such as fast TX/RX that may have been 413 * allocated during state transitions. 
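 *
 * For reference, the state ladder (defined by enum ieee80211_sta_state)
 * is NOTEXIST -> NONE -> AUTH -> ASSOC -> AUTHORIZED; the loop below
 * simply walks the station back down, one step at a time, until it
 * reaches NONE.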
414 */ 415 while (sta->sta_state > IEEE80211_STA_NONE) { 416 int ret; 417 418 WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED)); 419 420 ret = sta_info_move_state(sta, sta->sta_state - 1); 421 if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret)) 422 break; 423 } 424 425 if (sta->rate_ctrl) 426 rate_control_free_sta(sta); 427 428 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr); 429 430 kfree(to_txq_info(sta->sta.txq[0])); 431 kfree(rcu_dereference_raw(sta->sta.rates)); 432 #ifdef CONFIG_MAC80211_MESH 433 kfree(sta->mesh); 434 #endif 435 436 sta_info_free_link(&sta->deflink); 437 kfree(sta); 438 } 439 440 /* Caller must hold local->sta_mtx */ 441 static int sta_info_hash_add(struct ieee80211_local *local, 442 struct sta_info *sta) 443 { 444 return rhltable_insert(&local->sta_hash, &sta->hash_node, 445 sta_rht_params); 446 } 447 448 static void sta_deliver_ps_frames(struct work_struct *wk) 449 { 450 struct sta_info *sta; 451 452 sta = container_of(wk, struct sta_info, drv_deliver_wk); 453 454 if (sta->dead) 455 return; 456 457 local_bh_disable(); 458 if (!test_sta_flag(sta, WLAN_STA_PS_STA)) 459 ieee80211_sta_ps_deliver_wakeup(sta); 460 else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) 461 ieee80211_sta_ps_deliver_poll_response(sta); 462 else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) 463 ieee80211_sta_ps_deliver_uapsd(sta); 464 local_bh_enable(); 465 } 466 467 static int sta_prepare_rate_control(struct ieee80211_local *local, 468 struct sta_info *sta, gfp_t gfp) 469 { 470 if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) 471 return 0; 472 473 sta->rate_ctrl = local->rate_ctrl; 474 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, 475 sta, gfp); 476 if (!sta->rate_ctrl_priv) 477 return -ENOMEM; 478 479 return 0; 480 } 481 482 static int sta_info_alloc_link(struct ieee80211_local *local, 483 struct link_sta_info *link_info, 484 gfp_t gfp) 485 { 486 struct ieee80211_hw *hw = &local->hw; 487 int i; 488 489 if (ieee80211_hw_check(hw, USES_RSS)) { 490 link_info->pcpu_rx_stats = 491 alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp); 492 if (!link_info->pcpu_rx_stats) 493 return -ENOMEM; 494 } 495 496 link_info->rx_stats.last_rx = jiffies; 497 u64_stats_init(&link_info->rx_stats.syncp); 498 499 ewma_signal_init(&link_info->rx_stats_avg.signal); 500 ewma_avg_signal_init(&link_info->status_stats.avg_ack_signal); 501 for (i = 0; i < ARRAY_SIZE(link_info->rx_stats_avg.chain_signal); i++) 502 ewma_signal_init(&link_info->rx_stats_avg.chain_signal[i]); 503 504 return 0; 505 } 506 507 static void sta_info_add_link(struct sta_info *sta, 508 unsigned int link_id, 509 struct link_sta_info *link_info, 510 struct ieee80211_link_sta *link_sta) 511 { 512 link_info->sta = sta; 513 link_info->link_id = link_id; 514 link_info->pub = link_sta; 515 link_info->pub->sta = &sta->sta; 516 link_sta->link_id = link_id; 517 rcu_assign_pointer(sta->link[link_id], link_info); 518 rcu_assign_pointer(sta->sta.link[link_id], link_sta); 519 520 link_sta->smps_mode = IEEE80211_SMPS_OFF; 521 link_sta->agg.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA; 522 } 523 524 static struct sta_info * 525 __sta_info_alloc(struct ieee80211_sub_if_data *sdata, 526 const u8 *addr, int link_id, const u8 *link_addr, 527 gfp_t gfp) 528 { 529 struct ieee80211_local *local = sdata->local; 530 struct ieee80211_hw *hw = &local->hw; 531 struct sta_info *sta; 532 void *txq_data; 533 int size; 534 int i; 535 536 sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); 537 if (!sta) 538 return NULL; 539 
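	/*
	 * The hw->sta_data_size bytes allocated beyond struct sta_info
	 * back the driver's private per-station area exposed through
	 * sta->sta.drv_priv, which is why struct ieee80211_sta is kept
	 * as the last member of struct sta_info.
	 */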
540 sta->local = local; 541 sta->sdata = sdata; 542 543 if (sta_info_alloc_link(local, &sta->deflink, gfp)) 544 goto free; 545 546 if (link_id >= 0) { 547 sta_info_add_link(sta, link_id, &sta->deflink, 548 &sta->sta.deflink); 549 sta->sta.valid_links = BIT(link_id); 550 } else { 551 sta_info_add_link(sta, 0, &sta->deflink, &sta->sta.deflink); 552 } 553 554 sta->sta.cur = &sta->sta.deflink.agg; 555 556 spin_lock_init(&sta->lock); 557 spin_lock_init(&sta->ps_lock); 558 INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames); 559 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); 560 mutex_init(&sta->ampdu_mlme.mtx); 561 #ifdef CONFIG_MAC80211_MESH 562 if (ieee80211_vif_is_mesh(&sdata->vif)) { 563 sta->mesh = kzalloc(sizeof(*sta->mesh), gfp); 564 if (!sta->mesh) 565 goto free; 566 sta->mesh->plink_sta = sta; 567 spin_lock_init(&sta->mesh->plink_lock); 568 if (!sdata->u.mesh.user_mpm) 569 timer_setup(&sta->mesh->plink_timer, mesh_plink_timer, 570 0); 571 sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE; 572 } 573 #endif 574 575 memcpy(sta->addr, addr, ETH_ALEN); 576 memcpy(sta->sta.addr, addr, ETH_ALEN); 577 memcpy(sta->deflink.addr, link_addr, ETH_ALEN); 578 memcpy(sta->sta.deflink.addr, link_addr, ETH_ALEN); 579 sta->sta.max_rx_aggregation_subframes = 580 local->hw.max_rx_aggregation_subframes; 581 582 /* TODO link specific alloc and assignments for MLO Link STA */ 583 584 /* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only. 585 * The Tx path starts to use a key as soon as the key slot ptk_idx 586 * references to is not NULL. To not use the initial Rx-only key 587 * prematurely for Tx initialize ptk_idx to an impossible PTK keyid 588 * which always will refer to a NULL key. 589 */ 590 BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX); 591 sta->ptk_idx = INVALID_PTK_KEYIDX; 592 593 594 ieee80211_init_frag_cache(&sta->frags); 595 596 sta->sta_state = IEEE80211_STA_NONE; 597 598 if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 599 sta->amsdu_mesh_control = -1; 600 601 /* Mark TID as unreserved */ 602 sta->reserved_tid = IEEE80211_TID_UNRESERVED; 603 604 sta->last_connected = ktime_get_seconds(); 605 606 size = sizeof(struct txq_info) + 607 ALIGN(hw->txq_data_size, sizeof(void *)); 608 609 txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp); 610 if (!txq_data) 611 goto free; 612 613 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 614 struct txq_info *txq = txq_data + i * size; 615 616 /* might not do anything for the (bufferable) MMPDU TXQ */ 617 ieee80211_txq_init(sdata, sta, txq, i); 618 } 619 620 if (sta_prepare_rate_control(local, sta, gfp)) 621 goto free_txq; 622 623 sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT; 624 625 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 626 skb_queue_head_init(&sta->ps_tx_buf[i]); 627 skb_queue_head_init(&sta->tx_filtered[i]); 628 sta->airtime[i].deficit = sta->airtime_weight; 629 atomic_set(&sta->airtime[i].aql_tx_pending, 0); 630 sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i]; 631 sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i]; 632 } 633 634 for (i = 0; i < IEEE80211_NUM_TIDS; i++) 635 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); 636 637 for (i = 0; i < NUM_NL80211_BANDS; i++) { 638 u32 mandatory = 0; 639 int r; 640 641 if (!hw->wiphy->bands[i]) 642 continue; 643 644 switch (i) { 645 case NL80211_BAND_2GHZ: 646 case NL80211_BAND_LC: 647 /* 648 * We use both here, even if we cannot really know for 649 * sure the station will support both, but the only use 650 * for this is when we don't 
know anything yet and send 651 * management frames, and then we'll pick the lowest 652 * possible rate anyway. 653 * If we don't include _G here, we cannot find a rate 654 * in P2P, and thus trigger the WARN_ONCE() in rate.c 655 */ 656 mandatory = IEEE80211_RATE_MANDATORY_B | 657 IEEE80211_RATE_MANDATORY_G; 658 break; 659 case NL80211_BAND_5GHZ: 660 mandatory = IEEE80211_RATE_MANDATORY_A; 661 break; 662 case NL80211_BAND_60GHZ: 663 WARN_ON(1); 664 mandatory = 0; 665 break; 666 } 667 668 for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) { 669 struct ieee80211_rate *rate; 670 671 rate = &hw->wiphy->bands[i]->bitrates[r]; 672 673 if (!(rate->flags & mandatory)) 674 continue; 675 sta->sta.deflink.supp_rates[i] |= BIT(r); 676 } 677 } 678 679 sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD; 680 sta->cparams.target = MS2TIME(20); 681 sta->cparams.interval = MS2TIME(100); 682 sta->cparams.ecn = true; 683 sta->cparams.ce_threshold_selector = 0; 684 sta->cparams.ce_threshold_mask = 0; 685 686 sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); 687 688 return sta; 689 690 free_txq: 691 kfree(to_txq_info(sta->sta.txq[0])); 692 free: 693 sta_info_free_link(&sta->deflink); 694 #ifdef CONFIG_MAC80211_MESH 695 kfree(sta->mesh); 696 #endif 697 kfree(sta); 698 return NULL; 699 } 700 701 struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, 702 const u8 *addr, gfp_t gfp) 703 { 704 return __sta_info_alloc(sdata, addr, -1, addr, gfp); 705 } 706 707 struct sta_info *sta_info_alloc_with_link(struct ieee80211_sub_if_data *sdata, 708 const u8 *mld_addr, 709 unsigned int link_id, 710 const u8 *link_addr, 711 gfp_t gfp) 712 { 713 return __sta_info_alloc(sdata, mld_addr, link_id, link_addr, gfp); 714 } 715 716 static int sta_info_insert_check(struct sta_info *sta) 717 { 718 struct ieee80211_sub_if_data *sdata = sta->sdata; 719 720 /* 721 * Can't be a WARN_ON because it can be triggered through a race: 722 * something inserts a STA (on one CPU) without holding the RTNL 723 * and another CPU turns off the net device. 724 */ 725 if (unlikely(!ieee80211_sdata_running(sdata))) 726 return -ENETDOWN; 727 728 if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) || 729 !is_valid_ether_addr(sta->sta.addr))) 730 return -EINVAL; 731 732 /* The RCU read lock is required by rhashtable due to 733 * asynchronous resize/rehash. We also require the mutex 734 * for correctness. 735 */ 736 rcu_read_lock(); 737 lockdep_assert_held(&sdata->local->sta_mtx); 738 if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) && 739 ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) { 740 rcu_read_unlock(); 741 return -ENOTUNIQ; 742 } 743 rcu_read_unlock(); 744 745 return 0; 746 } 747 748 static int sta_info_insert_drv_state(struct ieee80211_local *local, 749 struct ieee80211_sub_if_data *sdata, 750 struct sta_info *sta) 751 { 752 enum ieee80211_sta_state state; 753 int err = 0; 754 755 for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) { 756 err = drv_sta_state(local, sdata, sta, state, state + 1); 757 if (err) 758 break; 759 } 760 761 if (!err) { 762 /* 763 * Drivers using legacy sta_add/sta_remove callbacks only 764 * get uploaded set to true after sta_add is called. 
765 */ 766 if (!local->ops->sta_add) 767 sta->uploaded = true; 768 return 0; 769 } 770 771 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 772 sdata_info(sdata, 773 "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n", 774 sta->sta.addr, state + 1, err); 775 err = 0; 776 } 777 778 /* unwind on error */ 779 for (; state > IEEE80211_STA_NOTEXIST; state--) 780 WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1)); 781 782 return err; 783 } 784 785 static void 786 ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata) 787 { 788 struct ieee80211_local *local = sdata->local; 789 bool allow_p2p_go_ps = sdata->vif.p2p; 790 struct sta_info *sta; 791 792 rcu_read_lock(); 793 list_for_each_entry_rcu(sta, &local->sta_list, list) { 794 if (sdata != sta->sdata || 795 !test_sta_flag(sta, WLAN_STA_ASSOC)) 796 continue; 797 if (!sta->sta.support_p2p_ps) { 798 allow_p2p_go_ps = false; 799 break; 800 } 801 } 802 rcu_read_unlock(); 803 804 if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) { 805 sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps; 806 ieee80211_link_info_change_notify(sdata, &sdata->deflink, 807 BSS_CHANGED_P2P_PS); 808 } 809 } 810 811 /* 812 * should be called with sta_mtx locked 813 * this function replaces the mutex lock 814 * with a RCU lock 815 */ 816 static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) 817 { 818 struct ieee80211_local *local = sta->local; 819 struct ieee80211_sub_if_data *sdata = sta->sdata; 820 struct station_info *sinfo = NULL; 821 int err = 0; 822 823 lockdep_assert_held(&local->sta_mtx); 824 825 /* check if STA exists already */ 826 if (sta_info_get_bss(sdata, sta->sta.addr)) { 827 err = -EEXIST; 828 goto out_cleanup; 829 } 830 831 sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL); 832 if (!sinfo) { 833 err = -ENOMEM; 834 goto out_cleanup; 835 } 836 837 local->num_sta++; 838 local->sta_generation++; 839 smp_mb(); 840 841 /* simplify things and don't accept BA sessions yet */ 842 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 843 844 /* make the station visible */ 845 err = sta_info_hash_add(local, sta); 846 if (err) 847 goto out_drop_sta; 848 849 if (sta->sta.valid_links) { 850 err = link_sta_info_hash_add(local, &sta->deflink); 851 if (err) { 852 sta_info_hash_del(local, sta); 853 goto out_drop_sta; 854 } 855 } 856 857 list_add_tail_rcu(&sta->list, &local->sta_list); 858 859 /* update channel context before notifying the driver about state 860 * change, this enables driver using the updated channel context right away. 
861 */ 862 if (sta->sta_state >= IEEE80211_STA_ASSOC) { 863 ieee80211_recalc_min_chandef(sta->sdata, -1); 864 if (!sta->sta.support_p2p_ps) 865 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 866 } 867 868 /* notify driver */ 869 err = sta_info_insert_drv_state(local, sdata, sta); 870 if (err) 871 goto out_remove; 872 873 set_sta_flag(sta, WLAN_STA_INSERTED); 874 875 /* accept BA sessions now */ 876 clear_sta_flag(sta, WLAN_STA_BLOCK_BA); 877 878 ieee80211_sta_debugfs_add(sta); 879 rate_control_add_sta_debugfs(sta); 880 if (sta->sta.valid_links) { 881 int i; 882 883 for (i = 0; i < ARRAY_SIZE(sta->link); i++) { 884 struct link_sta_info *link_sta; 885 886 link_sta = rcu_dereference_protected(sta->link[i], 887 lockdep_is_held(&local->sta_mtx)); 888 889 if (!link_sta) 890 continue; 891 892 ieee80211_link_sta_debugfs_add(link_sta); 893 if (sdata->vif.active_links & BIT(i)) 894 ieee80211_link_sta_debugfs_drv_add(link_sta); 895 } 896 } else { 897 ieee80211_link_sta_debugfs_add(&sta->deflink); 898 ieee80211_link_sta_debugfs_drv_add(&sta->deflink); 899 } 900 901 sinfo->generation = local->sta_generation; 902 cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); 903 kfree(sinfo); 904 905 sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr); 906 907 /* move reference to rcu-protected */ 908 rcu_read_lock(); 909 mutex_unlock(&local->sta_mtx); 910 911 if (ieee80211_vif_is_mesh(&sdata->vif)) 912 mesh_accept_plinks_update(sdata); 913 914 return 0; 915 out_remove: 916 if (sta->sta.valid_links) 917 link_sta_info_hash_del(local, &sta->deflink); 918 sta_info_hash_del(local, sta); 919 list_del_rcu(&sta->list); 920 out_drop_sta: 921 local->num_sta--; 922 synchronize_net(); 923 out_cleanup: 924 cleanup_single_sta(sta); 925 mutex_unlock(&local->sta_mtx); 926 kfree(sinfo); 927 rcu_read_lock(); 928 return err; 929 } 930 931 int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) 932 { 933 struct ieee80211_local *local = sta->local; 934 int err; 935 936 might_sleep(); 937 938 mutex_lock(&local->sta_mtx); 939 940 err = sta_info_insert_check(sta); 941 if (err) { 942 sta_info_free(local, sta); 943 mutex_unlock(&local->sta_mtx); 944 rcu_read_lock(); 945 return err; 946 } 947 948 return sta_info_insert_finish(sta); 949 } 950 951 int sta_info_insert(struct sta_info *sta) 952 { 953 int err = sta_info_insert_rcu(sta); 954 955 rcu_read_unlock(); 956 957 return err; 958 } 959 960 static inline void __bss_tim_set(u8 *tim, u16 id) 961 { 962 /* 963 * This format has been mandated by the IEEE specifications, 964 * so this line may not be changed to use the __set_bit() format. 965 */ 966 tim[id / 8] |= (1 << (id % 8)); 967 } 968 969 static inline void __bss_tim_clear(u8 *tim, u16 id) 970 { 971 /* 972 * This format has been mandated by the IEEE specifications, 973 * so this line may not be changed to use the __clear_bit() format. 974 */ 975 tim[id / 8] &= ~(1 << (id % 8)); 976 } 977 978 static inline bool __bss_tim_get(u8 *tim, u16 id) 979 { 980 /* 981 * This format has been mandated by the IEEE specifications, 982 * so this line may not be changed to use the test_bit() format. 
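 *
 * As a worked example, AID 13 lands in tim[1] as bit 5, since
 * 13 / 8 == 1 and 13 % 8 == 5.  The kernel bitops operate on
 * unsigned longs, whose in-memory layout is endian-dependent, so
 * they cannot be used for this over-the-air byte format.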
983 */ 984 return tim[id / 8] & (1 << (id % 8)); 985 } 986 987 static unsigned long ieee80211_tids_for_ac(int ac) 988 { 989 /* If we ever support TIDs > 7, this obviously needs to be adjusted */ 990 switch (ac) { 991 case IEEE80211_AC_VO: 992 return BIT(6) | BIT(7); 993 case IEEE80211_AC_VI: 994 return BIT(4) | BIT(5); 995 case IEEE80211_AC_BE: 996 return BIT(0) | BIT(3); 997 case IEEE80211_AC_BK: 998 return BIT(1) | BIT(2); 999 default: 1000 WARN_ON(1); 1001 return 0; 1002 } 1003 } 1004 1005 static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending) 1006 { 1007 struct ieee80211_local *local = sta->local; 1008 struct ps_data *ps; 1009 bool indicate_tim = false; 1010 u8 ignore_for_tim = sta->sta.uapsd_queues; 1011 int ac; 1012 u16 id = sta->sta.aid; 1013 1014 if (sta->sdata->vif.type == NL80211_IFTYPE_AP || 1015 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 1016 if (WARN_ON_ONCE(!sta->sdata->bss)) 1017 return; 1018 1019 ps = &sta->sdata->bss->ps; 1020 #ifdef CONFIG_MAC80211_MESH 1021 } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) { 1022 ps = &sta->sdata->u.mesh.ps; 1023 #endif 1024 } else { 1025 return; 1026 } 1027 1028 /* No need to do anything if the driver does all */ 1029 if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim) 1030 return; 1031 1032 if (sta->dead) 1033 goto done; 1034 1035 /* 1036 * If all ACs are delivery-enabled then we should build 1037 * the TIM bit for all ACs anyway; if only some are then 1038 * we ignore those and build the TIM bit using only the 1039 * non-enabled ones. 1040 */ 1041 if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1) 1042 ignore_for_tim = 0; 1043 1044 if (ignore_pending) 1045 ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1; 1046 1047 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1048 unsigned long tids; 1049 1050 if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac]) 1051 continue; 1052 1053 indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) || 1054 !skb_queue_empty(&sta->ps_tx_buf[ac]); 1055 if (indicate_tim) 1056 break; 1057 1058 tids = ieee80211_tids_for_ac(ac); 1059 1060 indicate_tim |= 1061 sta->driver_buffered_tids & tids; 1062 indicate_tim |= 1063 sta->txq_buffered_tids & tids; 1064 } 1065 1066 done: 1067 spin_lock_bh(&local->tim_lock); 1068 1069 if (indicate_tim == __bss_tim_get(ps->tim, id)) 1070 goto out_unlock; 1071 1072 if (indicate_tim) 1073 __bss_tim_set(ps->tim, id); 1074 else 1075 __bss_tim_clear(ps->tim, id); 1076 1077 if (local->ops->set_tim && !WARN_ON(sta->dead)) { 1078 local->tim_in_locked_section = true; 1079 drv_set_tim(local, &sta->sta, indicate_tim); 1080 local->tim_in_locked_section = false; 1081 } 1082 1083 out_unlock: 1084 spin_unlock_bh(&local->tim_lock); 1085 } 1086 1087 void sta_info_recalc_tim(struct sta_info *sta) 1088 { 1089 __sta_info_recalc_tim(sta, false); 1090 } 1091 1092 static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb) 1093 { 1094 struct ieee80211_tx_info *info; 1095 int timeout; 1096 1097 if (!skb) 1098 return false; 1099 1100 info = IEEE80211_SKB_CB(skb); 1101 1102 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ 1103 timeout = (sta->listen_interval * 1104 sta->sdata->vif.bss_conf.beacon_int * 1105 32 / 15625) * HZ; 1106 if (timeout < STA_TX_BUFFER_EXPIRE) 1107 timeout = STA_TX_BUFFER_EXPIRE; 1108 return time_after(jiffies, info->control.jiffies + timeout); 1109 } 1110 1111 1112 static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local, 1113 struct sta_info *sta, int ac) 1114 { 1115 unsigned long 
flags; 1116 struct sk_buff *skb; 1117 1118 /* 1119 * First check for frames that should expire on the filtered 1120 * queue. Frames here were rejected by the driver and are on 1121 * a separate queue to avoid reordering with normal PS-buffered 1122 * frames. They also aren't accounted for right now in the 1123 * total_ps_buffered counter. 1124 */ 1125 for (;;) { 1126 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); 1127 skb = skb_peek(&sta->tx_filtered[ac]); 1128 if (sta_info_buffer_expired(sta, skb)) 1129 skb = __skb_dequeue(&sta->tx_filtered[ac]); 1130 else 1131 skb = NULL; 1132 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); 1133 1134 /* 1135 * Frames are queued in order, so if this one 1136 * hasn't expired yet we can stop testing. If 1137 * we actually reached the end of the queue we 1138 * also need to stop, of course. 1139 */ 1140 if (!skb) 1141 break; 1142 ieee80211_free_txskb(&local->hw, skb); 1143 } 1144 1145 /* 1146 * Now also check the normal PS-buffered queue, this will 1147 * only find something if the filtered queue was emptied 1148 * since the filtered frames are all before the normal PS 1149 * buffered frames. 1150 */ 1151 for (;;) { 1152 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); 1153 skb = skb_peek(&sta->ps_tx_buf[ac]); 1154 if (sta_info_buffer_expired(sta, skb)) 1155 skb = __skb_dequeue(&sta->ps_tx_buf[ac]); 1156 else 1157 skb = NULL; 1158 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); 1159 1160 /* 1161 * frames are queued in order, so if this one 1162 * hasn't expired yet (or we reached the end of 1163 * the queue) we can stop testing 1164 */ 1165 if (!skb) 1166 break; 1167 1168 local->total_ps_buffered--; 1169 ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n", 1170 sta->sta.addr); 1171 ieee80211_free_txskb(&local->hw, skb); 1172 } 1173 1174 /* 1175 * Finally, recalculate the TIM bit for this station -- it might 1176 * now be clear because the station was too slow to retrieve its 1177 * frames. 1178 */ 1179 sta_info_recalc_tim(sta); 1180 1181 /* 1182 * Return whether there are any frames still buffered, this is 1183 * used to check whether the cleanup timer still needs to run, 1184 * if there are no frames we don't need to rearm the timer. 1185 */ 1186 return !(skb_queue_empty(&sta->ps_tx_buf[ac]) && 1187 skb_queue_empty(&sta->tx_filtered[ac])); 1188 } 1189 1190 static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local, 1191 struct sta_info *sta) 1192 { 1193 bool have_buffered = false; 1194 int ac; 1195 1196 /* This is only necessary for stations on BSS/MBSS interfaces */ 1197 if (!sta->sdata->bss && 1198 !ieee80211_vif_is_mesh(&sta->sdata->vif)) 1199 return false; 1200 1201 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 1202 have_buffered |= 1203 sta_info_cleanup_expire_buffered_ac(local, sta, ac); 1204 1205 return have_buffered; 1206 } 1207 1208 static int __must_check __sta_info_destroy_part1(struct sta_info *sta) 1209 { 1210 struct ieee80211_local *local; 1211 struct ieee80211_sub_if_data *sdata; 1212 int ret, i; 1213 1214 might_sleep(); 1215 1216 if (!sta) 1217 return -ENOENT; 1218 1219 local = sta->local; 1220 sdata = sta->sdata; 1221 1222 lockdep_assert_held(&local->sta_mtx); 1223 1224 /* 1225 * Before removing the station from the driver and 1226 * rate control, it might still start new aggregation 1227 * sessions -- block that to make sure the tear-down 1228 * will be sufficient. 
1229 */ 1230 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 1231 ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA); 1232 1233 /* 1234 * Before removing the station from the driver there might be pending 1235 * rx frames on RSS queues sent prior to the disassociation - wait for 1236 * all such frames to be processed. 1237 */ 1238 drv_sync_rx_queues(local, sta); 1239 1240 for (i = 0; i < ARRAY_SIZE(sta->link); i++) { 1241 struct link_sta_info *link_sta; 1242 1243 if (!(sta->sta.valid_links & BIT(i))) 1244 continue; 1245 1246 link_sta = rcu_dereference_protected(sta->link[i], 1247 lockdep_is_held(&local->sta_mtx)); 1248 1249 link_sta_info_hash_del(local, link_sta); 1250 } 1251 1252 ret = sta_info_hash_del(local, sta); 1253 if (WARN_ON(ret)) 1254 return ret; 1255 1256 /* 1257 * for TDLS peers, make sure to return to the base channel before 1258 * removal. 1259 */ 1260 if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) { 1261 drv_tdls_cancel_channel_switch(local, sdata, &sta->sta); 1262 clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); 1263 } 1264 1265 list_del_rcu(&sta->list); 1266 sta->removed = true; 1267 1268 if (sta->uploaded) 1269 drv_sta_pre_rcu_remove(local, sta->sdata, sta); 1270 1271 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1272 rcu_access_pointer(sdata->u.vlan.sta) == sta) 1273 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); 1274 1275 return 0; 1276 } 1277 1278 static int _sta_info_move_state(struct sta_info *sta, 1279 enum ieee80211_sta_state new_state, 1280 bool recalc) 1281 { 1282 might_sleep(); 1283 1284 if (sta->sta_state == new_state) 1285 return 0; 1286 1287 /* check allowed transitions first */ 1288 1289 switch (new_state) { 1290 case IEEE80211_STA_NONE: 1291 if (sta->sta_state != IEEE80211_STA_AUTH) 1292 return -EINVAL; 1293 break; 1294 case IEEE80211_STA_AUTH: 1295 if (sta->sta_state != IEEE80211_STA_NONE && 1296 sta->sta_state != IEEE80211_STA_ASSOC) 1297 return -EINVAL; 1298 break; 1299 case IEEE80211_STA_ASSOC: 1300 if (sta->sta_state != IEEE80211_STA_AUTH && 1301 sta->sta_state != IEEE80211_STA_AUTHORIZED) 1302 return -EINVAL; 1303 break; 1304 case IEEE80211_STA_AUTHORIZED: 1305 if (sta->sta_state != IEEE80211_STA_ASSOC) 1306 return -EINVAL; 1307 break; 1308 default: 1309 WARN(1, "invalid state %d", new_state); 1310 return -EINVAL; 1311 } 1312 1313 sta_dbg(sta->sdata, "moving STA %pM to state %d\n", 1314 sta->sta.addr, new_state); 1315 1316 /* notify the driver before the actual changes so it can 1317 * fail the transition 1318 */ 1319 if (test_sta_flag(sta, WLAN_STA_INSERTED)) { 1320 int err = drv_sta_state(sta->local, sta->sdata, sta, 1321 sta->sta_state, new_state); 1322 if (err) 1323 return err; 1324 } 1325 1326 /* reflect the change in all state variables */ 1327 1328 switch (new_state) { 1329 case IEEE80211_STA_NONE: 1330 if (sta->sta_state == IEEE80211_STA_AUTH) 1331 clear_bit(WLAN_STA_AUTH, &sta->_flags); 1332 break; 1333 case IEEE80211_STA_AUTH: 1334 if (sta->sta_state == IEEE80211_STA_NONE) { 1335 set_bit(WLAN_STA_AUTH, &sta->_flags); 1336 } else if (sta->sta_state == IEEE80211_STA_ASSOC) { 1337 clear_bit(WLAN_STA_ASSOC, &sta->_flags); 1338 if (recalc) { 1339 ieee80211_recalc_min_chandef(sta->sdata, -1); 1340 if (!sta->sta.support_p2p_ps) 1341 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 1342 } 1343 } 1344 break; 1345 case IEEE80211_STA_ASSOC: 1346 if (sta->sta_state == IEEE80211_STA_AUTH) { 1347 set_bit(WLAN_STA_ASSOC, &sta->_flags); 1348 sta->assoc_at = ktime_get_boottime_ns(); 1349 if (recalc) { 1350 ieee80211_recalc_min_chandef(sta->sdata, -1); 
1351 if (!sta->sta.support_p2p_ps) 1352 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 1353 } 1354 } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 1355 ieee80211_vif_dec_num_mcast(sta->sdata); 1356 clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 1357 ieee80211_clear_fast_xmit(sta); 1358 ieee80211_clear_fast_rx(sta); 1359 } 1360 break; 1361 case IEEE80211_STA_AUTHORIZED: 1362 if (sta->sta_state == IEEE80211_STA_ASSOC) { 1363 ieee80211_vif_inc_num_mcast(sta->sdata); 1364 set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 1365 ieee80211_check_fast_xmit(sta); 1366 ieee80211_check_fast_rx(sta); 1367 } 1368 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 1369 sta->sdata->vif.type == NL80211_IFTYPE_AP) 1370 cfg80211_send_layer2_update(sta->sdata->dev, 1371 sta->sta.addr); 1372 break; 1373 default: 1374 break; 1375 } 1376 1377 sta->sta_state = new_state; 1378 1379 return 0; 1380 } 1381 1382 int sta_info_move_state(struct sta_info *sta, 1383 enum ieee80211_sta_state new_state) 1384 { 1385 return _sta_info_move_state(sta, new_state, true); 1386 } 1387 1388 static void __sta_info_destroy_part2(struct sta_info *sta, bool recalc) 1389 { 1390 struct ieee80211_local *local = sta->local; 1391 struct ieee80211_sub_if_data *sdata = sta->sdata; 1392 struct station_info *sinfo; 1393 int ret; 1394 1395 /* 1396 * NOTE: This assumes at least synchronize_net() was done 1397 * after _part1 and before _part2! 1398 */ 1399 1400 might_sleep(); 1401 lockdep_assert_held(&local->sta_mtx); 1402 1403 if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 1404 ret = _sta_info_move_state(sta, IEEE80211_STA_ASSOC, recalc); 1405 WARN_ON_ONCE(ret); 1406 } 1407 1408 /* Flush queues before removing keys, as that might remove them 1409 * from hardware, and then depending on the offload method, any 1410 * frames sitting on hardware queues might be sent out without 1411 * any encryption at all. 
1412 */ 1413 if (local->ops->set_key) { 1414 if (local->ops->flush_sta) 1415 drv_flush_sta(local, sta->sdata, sta); 1416 else 1417 ieee80211_flush_queues(local, sta->sdata, false); 1418 } 1419 1420 /* now keys can no longer be reached */ 1421 ieee80211_free_sta_keys(local, sta); 1422 1423 /* disable TIM bit - last chance to tell driver */ 1424 __sta_info_recalc_tim(sta, true); 1425 1426 sta->dead = true; 1427 1428 local->num_sta--; 1429 local->sta_generation++; 1430 1431 while (sta->sta_state > IEEE80211_STA_NONE) { 1432 ret = _sta_info_move_state(sta, sta->sta_state - 1, recalc); 1433 if (ret) { 1434 WARN_ON_ONCE(1); 1435 break; 1436 } 1437 } 1438 1439 if (sta->uploaded) { 1440 ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE, 1441 IEEE80211_STA_NOTEXIST); 1442 WARN_ON_ONCE(ret != 0); 1443 } 1444 1445 sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr); 1446 1447 sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); 1448 if (sinfo) 1449 sta_set_sinfo(sta, sinfo, true); 1450 cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); 1451 kfree(sinfo); 1452 1453 ieee80211_sta_debugfs_remove(sta); 1454 1455 ieee80211_destroy_frag_cache(&sta->frags); 1456 1457 cleanup_single_sta(sta); 1458 } 1459 1460 int __must_check __sta_info_destroy(struct sta_info *sta) 1461 { 1462 int err = __sta_info_destroy_part1(sta); 1463 1464 if (err) 1465 return err; 1466 1467 synchronize_net(); 1468 1469 __sta_info_destroy_part2(sta, true); 1470 1471 return 0; 1472 } 1473 1474 int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr) 1475 { 1476 struct sta_info *sta; 1477 int ret; 1478 1479 mutex_lock(&sdata->local->sta_mtx); 1480 sta = sta_info_get(sdata, addr); 1481 ret = __sta_info_destroy(sta); 1482 mutex_unlock(&sdata->local->sta_mtx); 1483 1484 return ret; 1485 } 1486 1487 int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, 1488 const u8 *addr) 1489 { 1490 struct sta_info *sta; 1491 int ret; 1492 1493 mutex_lock(&sdata->local->sta_mtx); 1494 sta = sta_info_get_bss(sdata, addr); 1495 ret = __sta_info_destroy(sta); 1496 mutex_unlock(&sdata->local->sta_mtx); 1497 1498 return ret; 1499 } 1500 1501 static void sta_info_cleanup(struct timer_list *t) 1502 { 1503 struct ieee80211_local *local = from_timer(local, t, sta_cleanup); 1504 struct sta_info *sta; 1505 bool timer_needed = false; 1506 1507 rcu_read_lock(); 1508 list_for_each_entry_rcu(sta, &local->sta_list, list) 1509 if (sta_info_cleanup_expire_buffered(local, sta)) 1510 timer_needed = true; 1511 rcu_read_unlock(); 1512 1513 if (local->quiescing) 1514 return; 1515 1516 if (!timer_needed) 1517 return; 1518 1519 mod_timer(&local->sta_cleanup, 1520 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL)); 1521 } 1522 1523 int sta_info_init(struct ieee80211_local *local) 1524 { 1525 int err; 1526 1527 err = rhltable_init(&local->sta_hash, &sta_rht_params); 1528 if (err) 1529 return err; 1530 1531 err = rhltable_init(&local->link_sta_hash, &link_sta_rht_params); 1532 if (err) { 1533 rhltable_destroy(&local->sta_hash); 1534 return err; 1535 } 1536 1537 spin_lock_init(&local->tim_lock); 1538 mutex_init(&local->sta_mtx); 1539 INIT_LIST_HEAD(&local->sta_list); 1540 1541 timer_setup(&local->sta_cleanup, sta_info_cleanup, 0); 1542 return 0; 1543 } 1544 1545 void sta_info_stop(struct ieee80211_local *local) 1546 { 1547 del_timer_sync(&local->sta_cleanup); 1548 rhltable_destroy(&local->sta_hash); 1549 rhltable_destroy(&local->link_sta_hash); 1550 } 1551 1552 1553 int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool 
vlans) 1554 { 1555 struct ieee80211_local *local = sdata->local; 1556 struct sta_info *sta, *tmp; 1557 LIST_HEAD(free_list); 1558 int ret = 0; 1559 1560 might_sleep(); 1561 1562 WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP); 1563 WARN_ON(vlans && !sdata->bss); 1564 1565 mutex_lock(&local->sta_mtx); 1566 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 1567 if (sdata == sta->sdata || 1568 (vlans && sdata->bss == sta->sdata->bss)) { 1569 if (!WARN_ON(__sta_info_destroy_part1(sta))) 1570 list_add(&sta->free_list, &free_list); 1571 ret++; 1572 } 1573 } 1574 1575 if (!list_empty(&free_list)) { 1576 bool support_p2p_ps = true; 1577 1578 synchronize_net(); 1579 list_for_each_entry_safe(sta, tmp, &free_list, free_list) { 1580 if (!sta->sta.support_p2p_ps) 1581 support_p2p_ps = false; 1582 __sta_info_destroy_part2(sta, false); 1583 } 1584 1585 ieee80211_recalc_min_chandef(sdata, -1); 1586 if (!support_p2p_ps) 1587 ieee80211_recalc_p2p_go_ps_allowed(sdata); 1588 } 1589 mutex_unlock(&local->sta_mtx); 1590 1591 return ret; 1592 } 1593 1594 void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 1595 unsigned long exp_time) 1596 { 1597 struct ieee80211_local *local = sdata->local; 1598 struct sta_info *sta, *tmp; 1599 1600 mutex_lock(&local->sta_mtx); 1601 1602 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 1603 unsigned long last_active = ieee80211_sta_last_active(sta); 1604 1605 if (sdata != sta->sdata) 1606 continue; 1607 1608 if (time_is_before_jiffies(last_active + exp_time)) { 1609 sta_dbg(sta->sdata, "expiring inactive STA %pM\n", 1610 sta->sta.addr); 1611 1612 if (ieee80211_vif_is_mesh(&sdata->vif) && 1613 test_sta_flag(sta, WLAN_STA_PS_STA)) 1614 atomic_dec(&sdata->u.mesh.ps.num_sta_ps); 1615 1616 WARN_ON(__sta_info_destroy(sta)); 1617 } 1618 } 1619 1620 mutex_unlock(&local->sta_mtx); 1621 } 1622 1623 struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, 1624 const u8 *addr, 1625 const u8 *localaddr) 1626 { 1627 struct ieee80211_local *local = hw_to_local(hw); 1628 struct rhlist_head *tmp; 1629 struct sta_info *sta; 1630 1631 /* 1632 * Just return a random station if localaddr is NULL 1633 * ... first in list. 
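 *
 * Illustrative caller sketch (the names pubsta and ta are examples,
 * not from this file): the lookup and any use of the returned pointer
 * must stay inside an RCU read-side section, roughly
 *
 *	rcu_read_lock();
 *	pubsta = ieee80211_find_sta_by_ifaddr(hw, ta, vif->addr);
 *	if (pubsta)
 *		... use pubsta ...
 *	rcu_read_unlock();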
1634 */ 1635 for_each_sta_info(local, addr, sta, tmp) { 1636 if (localaddr && 1637 !ether_addr_equal(sta->sdata->vif.addr, localaddr)) 1638 continue; 1639 if (!sta->uploaded) 1640 return NULL; 1641 return &sta->sta; 1642 } 1643 1644 return NULL; 1645 } 1646 EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr); 1647 1648 struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, 1649 const u8 *addr) 1650 { 1651 struct sta_info *sta; 1652 1653 if (!vif) 1654 return NULL; 1655 1656 sta = sta_info_get_bss(vif_to_sdata(vif), addr); 1657 if (!sta) 1658 return NULL; 1659 1660 if (!sta->uploaded) 1661 return NULL; 1662 1663 return &sta->sta; 1664 } 1665 EXPORT_SYMBOL(ieee80211_find_sta); 1666 1667 /* powersave support code */ 1668 void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) 1669 { 1670 struct ieee80211_sub_if_data *sdata = sta->sdata; 1671 struct ieee80211_local *local = sdata->local; 1672 struct sk_buff_head pending; 1673 int filtered = 0, buffered = 0, ac, i; 1674 unsigned long flags; 1675 struct ps_data *ps; 1676 1677 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1678 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 1679 u.ap); 1680 1681 if (sdata->vif.type == NL80211_IFTYPE_AP) 1682 ps = &sdata->bss->ps; 1683 else if (ieee80211_vif_is_mesh(&sdata->vif)) 1684 ps = &sdata->u.mesh.ps; 1685 else 1686 return; 1687 1688 clear_sta_flag(sta, WLAN_STA_SP); 1689 1690 BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1); 1691 sta->driver_buffered_tids = 0; 1692 sta->txq_buffered_tids = 0; 1693 1694 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 1695 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); 1696 1697 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 1698 if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i])) 1699 continue; 1700 1701 schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i])); 1702 } 1703 1704 skb_queue_head_init(&pending); 1705 1706 /* sync with ieee80211_tx_h_unicast_ps_buf */ 1707 spin_lock(&sta->ps_lock); 1708 /* Send all buffered frames to the station */ 1709 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1710 int count = skb_queue_len(&pending), tmp; 1711 1712 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); 1713 skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); 1714 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); 1715 tmp = skb_queue_len(&pending); 1716 filtered += tmp - count; 1717 count = tmp; 1718 1719 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); 1720 skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); 1721 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); 1722 tmp = skb_queue_len(&pending); 1723 buffered += tmp - count; 1724 } 1725 1726 ieee80211_add_pending_skbs(local, &pending); 1727 1728 /* now we're no longer in the deliver code */ 1729 clear_sta_flag(sta, WLAN_STA_PS_DELIVER); 1730 1731 /* The station might have polled and then woken up before we responded, 1732 * so clear these flags now to avoid them sticking around. 
1733 */ 1734 clear_sta_flag(sta, WLAN_STA_PSPOLL); 1735 clear_sta_flag(sta, WLAN_STA_UAPSD); 1736 spin_unlock(&sta->ps_lock); 1737 1738 atomic_dec(&ps->num_sta_ps); 1739 1740 local->total_ps_buffered -= buffered; 1741 1742 sta_info_recalc_tim(sta); 1743 1744 ps_dbg(sdata, 1745 "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n", 1746 sta->sta.addr, sta->sta.aid, filtered, buffered); 1747 1748 ieee80211_check_fast_xmit(sta); 1749 } 1750 1751 static void ieee80211_send_null_response(struct sta_info *sta, int tid, 1752 enum ieee80211_frame_release_type reason, 1753 bool call_driver, bool more_data) 1754 { 1755 struct ieee80211_sub_if_data *sdata = sta->sdata; 1756 struct ieee80211_local *local = sdata->local; 1757 struct ieee80211_qos_hdr *nullfunc; 1758 struct sk_buff *skb; 1759 int size = sizeof(*nullfunc); 1760 __le16 fc; 1761 bool qos = sta->sta.wme; 1762 struct ieee80211_tx_info *info; 1763 struct ieee80211_chanctx_conf *chanctx_conf; 1764 1765 if (qos) { 1766 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | 1767 IEEE80211_STYPE_QOS_NULLFUNC | 1768 IEEE80211_FCTL_FROMDS); 1769 } else { 1770 size -= 2; 1771 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | 1772 IEEE80211_STYPE_NULLFUNC | 1773 IEEE80211_FCTL_FROMDS); 1774 } 1775 1776 skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); 1777 if (!skb) 1778 return; 1779 1780 skb_reserve(skb, local->hw.extra_tx_headroom); 1781 1782 nullfunc = skb_put(skb, size); 1783 nullfunc->frame_control = fc; 1784 nullfunc->duration_id = 0; 1785 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); 1786 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); 1787 memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); 1788 nullfunc->seq_ctrl = 0; 1789 1790 skb->priority = tid; 1791 skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); 1792 if (qos) { 1793 nullfunc->qos_ctrl = cpu_to_le16(tid); 1794 1795 if (reason == IEEE80211_FRAME_RELEASE_UAPSD) { 1796 nullfunc->qos_ctrl |= 1797 cpu_to_le16(IEEE80211_QOS_CTL_EOSP); 1798 if (more_data) 1799 nullfunc->frame_control |= 1800 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1801 } 1802 } 1803 1804 info = IEEE80211_SKB_CB(skb); 1805 1806 /* 1807 * Tell TX path to send this frame even though the 1808 * STA may still remain is PS mode after this frame 1809 * exchange. Also set EOSP to indicate this packet 1810 * ends the poll/service period. 1811 */ 1812 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | 1813 IEEE80211_TX_STATUS_EOSP | 1814 IEEE80211_TX_CTL_REQ_TX_STATUS; 1815 1816 info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; 1817 1818 if (call_driver) 1819 drv_allow_buffered_frames(local, sta, BIT(tid), 1, 1820 reason, false); 1821 1822 skb->dev = sdata->dev; 1823 1824 rcu_read_lock(); 1825 chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); 1826 if (WARN_ON(!chanctx_conf)) { 1827 rcu_read_unlock(); 1828 kfree_skb(skb); 1829 return; 1830 } 1831 1832 info->band = chanctx_conf->def.chan->band; 1833 ieee80211_xmit(sdata, sta, skb); 1834 rcu_read_unlock(); 1835 } 1836 1837 static int find_highest_prio_tid(unsigned long tids) 1838 { 1839 /* lower 3 TIDs aren't ordered perfectly */ 1840 if (tids & 0xF8) 1841 return fls(tids) - 1; 1842 /* TID 0 is BE just like TID 3 */ 1843 if (tids & BIT(0)) 1844 return 0; 1845 return fls(tids) - 1; 1846 } 1847 1848 /* Indicates if the MORE_DATA bit should be set in the last 1849 * frame obtained by ieee80211_sta_ps_get_frames. 
1850 * Note that driver_release_tids is relevant only if 1851 * reason = IEEE80211_FRAME_RELEASE_PSPOLL 1852 */ 1853 static bool 1854 ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs, 1855 enum ieee80211_frame_release_type reason, 1856 unsigned long driver_release_tids) 1857 { 1858 int ac; 1859 1860 /* If the driver has data on more than one TID then 1861 * certainly there's more data if we release just a 1862 * single frame now (from a single TID). This will 1863 * only happen for PS-Poll. 1864 */ 1865 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL && 1866 hweight16(driver_release_tids) > 1) 1867 return true; 1868 1869 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1870 if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) 1871 continue; 1872 1873 if (!skb_queue_empty(&sta->tx_filtered[ac]) || 1874 !skb_queue_empty(&sta->ps_tx_buf[ac])) 1875 return true; 1876 } 1877 1878 return false; 1879 } 1880 1881 static void 1882 ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs, 1883 enum ieee80211_frame_release_type reason, 1884 struct sk_buff_head *frames, 1885 unsigned long *driver_release_tids) 1886 { 1887 struct ieee80211_sub_if_data *sdata = sta->sdata; 1888 struct ieee80211_local *local = sdata->local; 1889 int ac; 1890 1891 /* Get response frame(s) and more data bit for the last one. */ 1892 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1893 unsigned long tids; 1894 1895 if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) 1896 continue; 1897 1898 tids = ieee80211_tids_for_ac(ac); 1899 1900 /* if we already have frames from software, then we can't also 1901 * release from hardware queues 1902 */ 1903 if (skb_queue_empty(frames)) { 1904 *driver_release_tids |= 1905 sta->driver_buffered_tids & tids; 1906 *driver_release_tids |= sta->txq_buffered_tids & tids; 1907 } 1908 1909 if (!*driver_release_tids) { 1910 struct sk_buff *skb; 1911 1912 while (n_frames > 0) { 1913 skb = skb_dequeue(&sta->tx_filtered[ac]); 1914 if (!skb) { 1915 skb = skb_dequeue( 1916 &sta->ps_tx_buf[ac]); 1917 if (skb) 1918 local->total_ps_buffered--; 1919 } 1920 if (!skb) 1921 break; 1922 n_frames--; 1923 __skb_queue_tail(frames, skb); 1924 } 1925 } 1926 1927 /* If we have more frames buffered on this AC, then abort the 1928 * loop since we can't send more data from other ACs before 1929 * the buffered frames from this. 
1930 */ 1931 if (!skb_queue_empty(&sta->tx_filtered[ac]) || 1932 !skb_queue_empty(&sta->ps_tx_buf[ac])) 1933 break; 1934 } 1935 } 1936 1937 static void 1938 ieee80211_sta_ps_deliver_response(struct sta_info *sta, 1939 int n_frames, u8 ignored_acs, 1940 enum ieee80211_frame_release_type reason) 1941 { 1942 struct ieee80211_sub_if_data *sdata = sta->sdata; 1943 struct ieee80211_local *local = sdata->local; 1944 unsigned long driver_release_tids = 0; 1945 struct sk_buff_head frames; 1946 bool more_data; 1947 1948 /* Service or PS-Poll period starts */ 1949 set_sta_flag(sta, WLAN_STA_SP); 1950 1951 __skb_queue_head_init(&frames); 1952 1953 ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason, 1954 &frames, &driver_release_tids); 1955 1956 more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids); 1957 1958 if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL) 1959 driver_release_tids = 1960 BIT(find_highest_prio_tid(driver_release_tids)); 1961 1962 if (skb_queue_empty(&frames) && !driver_release_tids) { 1963 int tid, ac; 1964 1965 /* 1966 * For PS-Poll, this can only happen due to a race condition 1967 * when we set the TIM bit and the station notices it, but 1968 * before it can poll for the frame we expire it. 1969 * 1970 * For uAPSD, this is said in the standard (11.2.1.5 h): 1971 * At each unscheduled SP for a non-AP STA, the AP shall 1972 * attempt to transmit at least one MSDU or MMPDU, but no 1973 * more than the value specified in the Max SP Length field 1974 * in the QoS Capability element from delivery-enabled ACs, 1975 * that are destined for the non-AP STA. 1976 * 1977 * Since we have no other MSDU/MMPDU, transmit a QoS null frame. 1978 */ 1979 1980 /* This will evaluate to 1, 3, 5 or 7. */ 1981 for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) 1982 if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac])) 1983 break; 1984 tid = 7 - 2 * ac; 1985 1986 ieee80211_send_null_response(sta, tid, reason, true, false); 1987 } else if (!driver_release_tids) { 1988 struct sk_buff_head pending; 1989 struct sk_buff *skb; 1990 int num = 0; 1991 u16 tids = 0; 1992 bool need_null = false; 1993 1994 skb_queue_head_init(&pending); 1995 1996 while ((skb = __skb_dequeue(&frames))) { 1997 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1998 struct ieee80211_hdr *hdr = (void *) skb->data; 1999 u8 *qoshdr = NULL; 2000 2001 num++; 2002 2003 /* 2004 * Tell TX path to send this frame even though the 2005 * STA may still remain is PS mode after this frame 2006 * exchange. 
2007 */ 2008 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; 2009 info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; 2010 2011 /* 2012 * Use MoreData flag to indicate whether there are 2013 * more buffered frames for this STA 2014 */ 2015 if (more_data || !skb_queue_empty(&frames)) 2016 hdr->frame_control |= 2017 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 2018 else 2019 hdr->frame_control &= 2020 cpu_to_le16(~IEEE80211_FCTL_MOREDATA); 2021 2022 if (ieee80211_is_data_qos(hdr->frame_control) || 2023 ieee80211_is_qos_nullfunc(hdr->frame_control)) 2024 qoshdr = ieee80211_get_qos_ctl(hdr); 2025 2026 tids |= BIT(skb->priority); 2027 2028 __skb_queue_tail(&pending, skb); 2029 2030 /* end service period after last frame or add one */ 2031 if (!skb_queue_empty(&frames)) 2032 continue; 2033 2034 if (reason != IEEE80211_FRAME_RELEASE_UAPSD) { 2035 /* for PS-Poll, there's only one frame */ 2036 info->flags |= IEEE80211_TX_STATUS_EOSP | 2037 IEEE80211_TX_CTL_REQ_TX_STATUS; 2038 break; 2039 } 2040 2041 /* For uAPSD, things are a bit more complicated. If the 2042 * last frame has a QoS header (i.e. is a QoS-data or 2043 * QoS-nulldata frame) then just set the EOSP bit there 2044 * and be done. 2045 * If the frame doesn't have a QoS header (which means 2046 * it should be a bufferable MMPDU) then we can't set 2047 * the EOSP bit in the QoS header; add a QoS-nulldata 2048 * frame to the list to send it after the MMPDU. 2049 * 2050 * Note that this code is only in the mac80211-release 2051 * code path, we assume that the driver will not buffer 2052 * anything but QoS-data frames, or if it does, will 2053 * create the QoS-nulldata frame by itself if needed. 2054 * 2055 * Cf. 802.11-2012 10.2.1.10 (c). 2056 */ 2057 if (qoshdr) { 2058 *qoshdr |= IEEE80211_QOS_CTL_EOSP; 2059 2060 info->flags |= IEEE80211_TX_STATUS_EOSP | 2061 IEEE80211_TX_CTL_REQ_TX_STATUS; 2062 } else { 2063 /* The standard isn't completely clear on this 2064 * as it says the more-data bit should be set 2065 * if there are more BUs. The QoS-Null frame 2066 * we're about to send isn't buffered yet, we 2067 * only create it below, but let's pretend it 2068 * was buffered just in case some clients only 2069 * expect more-data=0 when eosp=1. 2070 */ 2071 hdr->frame_control |= 2072 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 2073 need_null = true; 2074 num++; 2075 } 2076 break; 2077 } 2078 2079 drv_allow_buffered_frames(local, sta, tids, num, 2080 reason, more_data); 2081 2082 ieee80211_add_pending_skbs(local, &pending); 2083 2084 if (need_null) 2085 ieee80211_send_null_response( 2086 sta, find_highest_prio_tid(tids), 2087 reason, false, false); 2088 2089 sta_info_recalc_tim(sta); 2090 } else { 2091 int tid; 2092 2093 /* 2094 * We need to release a frame that is buffered somewhere in the 2095 * driver ... it'll have to handle that. 2096 * Note that the driver also has to check the number of frames 2097 * on the TIDs we're releasing from - if there are more than 2098 * n_frames it has to set the more-data bit (if we didn't ask 2099 * it to set it anyway due to other buffered frames); if there 2100 * are fewer than n_frames it has to make sure to adjust that 2101 * to allow the service period to end properly. 2102 */ 2103 drv_release_buffered_frames(local, sta, driver_release_tids, 2104 n_frames, reason, more_data); 2105 2106 /* 2107 * Note that we don't recalculate the TIM bit here as it would 2108 * most likely have no effect at all unless the driver told us 2109 * that the TID(s) became empty before returning here from the 2110 * release function. 
2111 * Either way, however, when the driver tells us that the TID(s) 2112 * became empty or we find that a txq became empty, we'll do the 2113 * TIM recalculation. 2114 */ 2115 2116 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { 2117 if (!sta->sta.txq[tid] || 2118 !(driver_release_tids & BIT(tid)) || 2119 txq_has_queue(sta->sta.txq[tid])) 2120 continue; 2121 2122 sta_info_recalc_tim(sta); 2123 break; 2124 } 2125 } 2126 } 2127 2128 void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta) 2129 { 2130 u8 ignore_for_response = sta->sta.uapsd_queues; 2131 2132 /* 2133 * If all ACs are delivery-enabled then we should reply 2134 * from any of them, if only some are enabled we reply 2135 * only from the non-enabled ones. 2136 */ 2137 if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1) 2138 ignore_for_response = 0; 2139 2140 ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response, 2141 IEEE80211_FRAME_RELEASE_PSPOLL); 2142 } 2143 2144 void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta) 2145 { 2146 int n_frames = sta->sta.max_sp; 2147 u8 delivery_enabled = sta->sta.uapsd_queues; 2148 2149 /* 2150 * If we ever grow support for TSPEC this might happen if 2151 * the TSPEC update from hostapd comes in between a trigger 2152 * frame setting WLAN_STA_UAPSD in the RX path and this 2153 * actually getting called. 2154 */ 2155 if (!delivery_enabled) 2156 return; 2157 2158 switch (sta->sta.max_sp) { 2159 case 1: 2160 n_frames = 2; 2161 break; 2162 case 2: 2163 n_frames = 4; 2164 break; 2165 case 3: 2166 n_frames = 6; 2167 break; 2168 case 0: 2169 /* XXX: what is a good value? */ 2170 n_frames = 128; 2171 break; 2172 } 2173 2174 ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled, 2175 IEEE80211_FRAME_RELEASE_UAPSD); 2176 } 2177 2178 void ieee80211_sta_block_awake(struct ieee80211_hw *hw, 2179 struct ieee80211_sta *pubsta, bool block) 2180 { 2181 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2182 2183 trace_api_sta_block_awake(sta->local, pubsta, block); 2184 2185 if (block) { 2186 set_sta_flag(sta, WLAN_STA_PS_DRIVER); 2187 ieee80211_clear_fast_xmit(sta); 2188 return; 2189 } 2190 2191 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 2192 return; 2193 2194 if (!test_sta_flag(sta, WLAN_STA_PS_STA)) { 2195 set_sta_flag(sta, WLAN_STA_PS_DELIVER); 2196 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 2197 ieee80211_queue_work(hw, &sta->drv_deliver_wk); 2198 } else if (test_sta_flag(sta, WLAN_STA_PSPOLL) || 2199 test_sta_flag(sta, WLAN_STA_UAPSD)) { 2200 /* must be asleep in this case */ 2201 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 2202 ieee80211_queue_work(hw, &sta->drv_deliver_wk); 2203 } else { 2204 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 2205 ieee80211_check_fast_xmit(sta); 2206 } 2207 } 2208 EXPORT_SYMBOL(ieee80211_sta_block_awake); 2209 2210 void ieee80211_sta_eosp(struct ieee80211_sta *pubsta) 2211 { 2212 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2213 struct ieee80211_local *local = sta->local; 2214 2215 trace_api_eosp(local, pubsta); 2216 2217 clear_sta_flag(sta, WLAN_STA_SP); 2218 } 2219 EXPORT_SYMBOL(ieee80211_sta_eosp); 2220 2221 void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid) 2222 { 2223 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2224 enum ieee80211_frame_release_type reason; 2225 bool more_data; 2226 2227 trace_api_send_eosp_nullfunc(sta->local, pubsta, tid); 2228 2229 reason = IEEE80211_FRAME_RELEASE_UAPSD; 2230 more_data = ieee80211_sta_ps_more_data(sta, 
~sta->sta.uapsd_queues,
2231 reason, 0);
2232
2233 ieee80211_send_null_response(sta, tid, reason, false, more_data);
2234 }
2235 EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc);
2236
2237 void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
2238 u8 tid, bool buffered)
2239 {
2240 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
2241
2242 if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
2243 return;
2244
2245 trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered);
2246
2247 if (buffered)
2248 set_bit(tid, &sta->driver_buffered_tids);
2249 else
2250 clear_bit(tid, &sta->driver_buffered_tids);
2251
2252 sta_info_recalc_tim(sta);
2253 }
2254 EXPORT_SYMBOL(ieee80211_sta_set_buffered);
2255
2256 void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
2257 u32 tx_airtime, u32 rx_airtime)
2258 {
2259 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
2260 struct ieee80211_local *local = sta->sdata->local;
2261 u8 ac = ieee80211_ac_from_tid(tid);
2262 u32 airtime = 0;
2263 u32 diff;
2264
2265 if (sta->local->airtime_flags & AIRTIME_USE_TX)
2266 airtime += tx_airtime;
2267 if (sta->local->airtime_flags & AIRTIME_USE_RX)
2268 airtime += rx_airtime;
2269
2270 spin_lock_bh(&local->active_txq_lock[ac]);
2271 sta->airtime[ac].tx_airtime += tx_airtime;
2272 sta->airtime[ac].rx_airtime += rx_airtime;
2273
2274 diff = (u32)jiffies - sta->airtime[ac].last_active;
2275 if (diff <= AIRTIME_ACTIVE_DURATION)
2276 sta->airtime[ac].deficit -= airtime;
2277
2278 spin_unlock_bh(&local->active_txq_lock[ac]);
2279 }
2280 EXPORT_SYMBOL(ieee80211_sta_register_airtime);
2281
2282 void __ieee80211_sta_recalc_aggregates(struct sta_info *sta, u16 active_links)
2283 {
2284 bool first = true;
2285 int link_id;
2286
2287 if (!sta->sta.valid_links || !sta->sta.mlo) {
2288 sta->sta.cur = &sta->sta.deflink.agg;
2289 return;
2290 }
2291
2292 rcu_read_lock();
2293 for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
2294 struct ieee80211_link_sta *link_sta;
2295 int i;
2296
2297 if (!(active_links & BIT(link_id)))
2298 continue;
2299
2300 link_sta = rcu_dereference(sta->sta.link[link_id]);
2301 if (!link_sta)
2302 continue;
2303
2304 if (first) {
2305 sta->cur = link_sta->agg;
2306 first = false;
2307 continue;
2308 }
2309
2310 sta->cur.max_amsdu_len =
2311 min(sta->cur.max_amsdu_len,
2312 link_sta->agg.max_amsdu_len);
2313 sta->cur.max_rc_amsdu_len =
2314 min(sta->cur.max_rc_amsdu_len,
2315 link_sta->agg.max_rc_amsdu_len);
2316
2317 for (i = 0; i < ARRAY_SIZE(sta->cur.max_tid_amsdu_len); i++)
2318 sta->cur.max_tid_amsdu_len[i] =
2319 min(sta->cur.max_tid_amsdu_len[i],
2320 link_sta->agg.max_tid_amsdu_len[i]);
2321 }
2322 rcu_read_unlock();
2323
2324 sta->sta.cur = &sta->cur;
2325 }
2326
2327 void ieee80211_sta_recalc_aggregates(struct ieee80211_sta *pubsta)
2328 {
2329 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
2330
2331 __ieee80211_sta_recalc_aggregates(sta, sta->sdata->vif.active_links);
2332 }
2333 EXPORT_SYMBOL(ieee80211_sta_recalc_aggregates);
2334
2335 void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
2336 struct sta_info *sta, u8 ac,
2337 u16 tx_airtime, bool tx_completed)
2338 {
2339 int tx_pending;
2340
2341 if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
2342 return;
2343
2344 if (!tx_completed) {
2345 if (sta)
2346 atomic_add(tx_airtime,
2347 &sta->airtime[ac].aql_tx_pending);
2348
2349 atomic_add(tx_airtime, &local->aql_total_pending_airtime);
2350 atomic_add(tx_airtime,
&local->aql_ac_pending_airtime[ac]); 2351 return; 2352 } 2353 2354 if (sta) { 2355 tx_pending = atomic_sub_return(tx_airtime, 2356 &sta->airtime[ac].aql_tx_pending); 2357 if (tx_pending < 0) 2358 atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending, 2359 tx_pending, 0); 2360 } 2361 2362 atomic_sub(tx_airtime, &local->aql_total_pending_airtime); 2363 tx_pending = atomic_sub_return(tx_airtime, 2364 &local->aql_ac_pending_airtime[ac]); 2365 if (WARN_ONCE(tx_pending < 0, 2366 "Device %s AC %d pending airtime underflow: %u, %u", 2367 wiphy_name(local->hw.wiphy), ac, tx_pending, 2368 tx_airtime)) { 2369 atomic_cmpxchg(&local->aql_ac_pending_airtime[ac], 2370 tx_pending, 0); 2371 atomic_sub(tx_pending, &local->aql_total_pending_airtime); 2372 } 2373 } 2374 2375 static struct ieee80211_sta_rx_stats * 2376 sta_get_last_rx_stats(struct sta_info *sta) 2377 { 2378 struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats; 2379 int cpu; 2380 2381 if (!sta->deflink.pcpu_rx_stats) 2382 return stats; 2383 2384 for_each_possible_cpu(cpu) { 2385 struct ieee80211_sta_rx_stats *cpustats; 2386 2387 cpustats = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu); 2388 2389 if (time_after(cpustats->last_rx, stats->last_rx)) 2390 stats = cpustats; 2391 } 2392 2393 return stats; 2394 } 2395 2396 static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate, 2397 struct rate_info *rinfo) 2398 { 2399 rinfo->bw = STA_STATS_GET(BW, rate); 2400 2401 switch (STA_STATS_GET(TYPE, rate)) { 2402 case STA_STATS_RATE_TYPE_VHT: 2403 rinfo->flags = RATE_INFO_FLAGS_VHT_MCS; 2404 rinfo->mcs = STA_STATS_GET(VHT_MCS, rate); 2405 rinfo->nss = STA_STATS_GET(VHT_NSS, rate); 2406 if (STA_STATS_GET(SGI, rate)) 2407 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 2408 break; 2409 case STA_STATS_RATE_TYPE_HT: 2410 rinfo->flags = RATE_INFO_FLAGS_MCS; 2411 rinfo->mcs = STA_STATS_GET(HT_MCS, rate); 2412 if (STA_STATS_GET(SGI, rate)) 2413 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 2414 break; 2415 case STA_STATS_RATE_TYPE_LEGACY: { 2416 struct ieee80211_supported_band *sband; 2417 u16 brate; 2418 unsigned int shift; 2419 int band = STA_STATS_GET(LEGACY_BAND, rate); 2420 int rate_idx = STA_STATS_GET(LEGACY_IDX, rate); 2421 2422 sband = local->hw.wiphy->bands[band]; 2423 2424 if (WARN_ON_ONCE(!sband->bitrates)) 2425 break; 2426 2427 brate = sband->bitrates[rate_idx].bitrate; 2428 if (rinfo->bw == RATE_INFO_BW_5) 2429 shift = 2; 2430 else if (rinfo->bw == RATE_INFO_BW_10) 2431 shift = 1; 2432 else 2433 shift = 0; 2434 rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift); 2435 break; 2436 } 2437 case STA_STATS_RATE_TYPE_HE: 2438 rinfo->flags = RATE_INFO_FLAGS_HE_MCS; 2439 rinfo->mcs = STA_STATS_GET(HE_MCS, rate); 2440 rinfo->nss = STA_STATS_GET(HE_NSS, rate); 2441 rinfo->he_gi = STA_STATS_GET(HE_GI, rate); 2442 rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate); 2443 rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate); 2444 break; 2445 case STA_STATS_RATE_TYPE_EHT: 2446 rinfo->flags = RATE_INFO_FLAGS_EHT_MCS; 2447 rinfo->mcs = STA_STATS_GET(EHT_MCS, rate); 2448 rinfo->nss = STA_STATS_GET(EHT_NSS, rate); 2449 rinfo->eht_gi = STA_STATS_GET(EHT_GI, rate); 2450 rinfo->eht_ru_alloc = STA_STATS_GET(EHT_RU, rate); 2451 break; 2452 } 2453 } 2454 2455 static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) 2456 { 2457 u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); 2458 2459 if (rate == STA_STATS_RATE_INVALID) 2460 return -EINVAL; 2461 2462 sta_stats_decode_rate(sta->local, rate, rinfo); 2463 return 0; 2464 } 2465 2466 static inline u64 
sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats, 2467 int tid) 2468 { 2469 unsigned int start; 2470 u64 value; 2471 2472 do { 2473 start = u64_stats_fetch_begin(&rxstats->syncp); 2474 value = rxstats->msdu[tid]; 2475 } while (u64_stats_fetch_retry(&rxstats->syncp, start)); 2476 2477 return value; 2478 } 2479 2480 static void sta_set_tidstats(struct sta_info *sta, 2481 struct cfg80211_tid_stats *tidstats, 2482 int tid) 2483 { 2484 struct ieee80211_local *local = sta->local; 2485 int cpu; 2486 2487 if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) { 2488 tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->deflink.rx_stats, 2489 tid); 2490 2491 if (sta->deflink.pcpu_rx_stats) { 2492 for_each_possible_cpu(cpu) { 2493 struct ieee80211_sta_rx_stats *cpurxs; 2494 2495 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, 2496 cpu); 2497 tidstats->rx_msdu += 2498 sta_get_tidstats_msdu(cpurxs, tid); 2499 } 2500 } 2501 2502 tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU); 2503 } 2504 2505 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { 2506 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU); 2507 tidstats->tx_msdu = sta->deflink.tx_stats.msdu[tid]; 2508 } 2509 2510 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) && 2511 ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { 2512 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); 2513 tidstats->tx_msdu_retries = sta->deflink.status_stats.msdu_retries[tid]; 2514 } 2515 2516 if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) && 2517 ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { 2518 tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED); 2519 tidstats->tx_msdu_failed = sta->deflink.status_stats.msdu_failed[tid]; 2520 } 2521 2522 if (tid < IEEE80211_NUM_TIDS) { 2523 spin_lock_bh(&local->fq.lock); 2524 rcu_read_lock(); 2525 2526 tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS); 2527 ieee80211_fill_txq_stats(&tidstats->txq_stats, 2528 to_txq_info(sta->sta.txq[tid])); 2529 2530 rcu_read_unlock(); 2531 spin_unlock_bh(&local->fq.lock); 2532 } 2533 } 2534 2535 static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats) 2536 { 2537 unsigned int start; 2538 u64 value; 2539 2540 do { 2541 start = u64_stats_fetch_begin(&rxstats->syncp); 2542 value = rxstats->bytes; 2543 } while (u64_stats_fetch_retry(&rxstats->syncp, start)); 2544 2545 return value; 2546 } 2547 2548 void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, 2549 bool tidstats) 2550 { 2551 struct ieee80211_sub_if_data *sdata = sta->sdata; 2552 struct ieee80211_local *local = sdata->local; 2553 u32 thr = 0; 2554 int i, ac, cpu; 2555 struct ieee80211_sta_rx_stats *last_rxstats; 2556 2557 last_rxstats = sta_get_last_rx_stats(sta); 2558 2559 sinfo->generation = sdata->local->sta_generation; 2560 2561 /* do before driver, so beacon filtering drivers have a 2562 * chance to e.g. 
just add the number of filtered beacons 2563 * (or just modify the value entirely, of course) 2564 */ 2565 if (sdata->vif.type == NL80211_IFTYPE_STATION) 2566 sinfo->rx_beacon = sdata->deflink.u.mgd.count_beacon_signal; 2567 2568 drv_sta_statistics(local, sdata, &sta->sta, sinfo); 2569 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | 2570 BIT_ULL(NL80211_STA_INFO_STA_FLAGS) | 2571 BIT_ULL(NL80211_STA_INFO_BSS_PARAM) | 2572 BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) | 2573 BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) | 2574 BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); 2575 2576 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 2577 sinfo->beacon_loss_count = 2578 sdata->deflink.u.mgd.beacon_loss_count; 2579 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS); 2580 } 2581 2582 sinfo->connected_time = ktime_get_seconds() - sta->last_connected; 2583 sinfo->assoc_at = sta->assoc_at; 2584 sinfo->inactive_time = 2585 jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta)); 2586 2587 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | 2588 BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { 2589 sinfo->tx_bytes = 0; 2590 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2591 sinfo->tx_bytes += sta->deflink.tx_stats.bytes[ac]; 2592 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); 2593 } 2594 2595 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { 2596 sinfo->tx_packets = 0; 2597 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2598 sinfo->tx_packets += sta->deflink.tx_stats.packets[ac]; 2599 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); 2600 } 2601 2602 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) | 2603 BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) { 2604 sinfo->rx_bytes += sta_get_stats_bytes(&sta->deflink.rx_stats); 2605 2606 if (sta->deflink.pcpu_rx_stats) { 2607 for_each_possible_cpu(cpu) { 2608 struct ieee80211_sta_rx_stats *cpurxs; 2609 2610 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, 2611 cpu); 2612 sinfo->rx_bytes += sta_get_stats_bytes(cpurxs); 2613 } 2614 } 2615 2616 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); 2617 } 2618 2619 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { 2620 sinfo->rx_packets = sta->deflink.rx_stats.packets; 2621 if (sta->deflink.pcpu_rx_stats) { 2622 for_each_possible_cpu(cpu) { 2623 struct ieee80211_sta_rx_stats *cpurxs; 2624 2625 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, 2626 cpu); 2627 sinfo->rx_packets += cpurxs->packets; 2628 } 2629 } 2630 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); 2631 } 2632 2633 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) { 2634 sinfo->tx_retries = sta->deflink.status_stats.retry_count; 2635 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); 2636 } 2637 2638 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) { 2639 sinfo->tx_failed = sta->deflink.status_stats.retry_failed; 2640 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); 2641 } 2642 2643 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) { 2644 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2645 sinfo->rx_duration += sta->airtime[ac].rx_airtime; 2646 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); 2647 } 2648 2649 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) { 2650 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 2651 sinfo->tx_duration += sta->airtime[ac].tx_airtime; 2652 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); 2653 } 2654 2655 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) { 2656 sinfo->airtime_weight = 
sta->airtime_weight; 2657 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT); 2658 } 2659 2660 sinfo->rx_dropped_misc = sta->deflink.rx_stats.dropped; 2661 if (sta->deflink.pcpu_rx_stats) { 2662 for_each_possible_cpu(cpu) { 2663 struct ieee80211_sta_rx_stats *cpurxs; 2664 2665 cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu); 2666 sinfo->rx_dropped_misc += cpurxs->dropped; 2667 } 2668 } 2669 2670 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2671 !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) { 2672 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) | 2673 BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); 2674 sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif); 2675 } 2676 2677 if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) || 2678 ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) { 2679 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) { 2680 sinfo->signal = (s8)last_rxstats->last_signal; 2681 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); 2682 } 2683 2684 if (!sta->deflink.pcpu_rx_stats && 2685 !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) { 2686 sinfo->signal_avg = 2687 -ewma_signal_read(&sta->deflink.rx_stats_avg.signal); 2688 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 2689 } 2690 } 2691 2692 /* for the average - if pcpu_rx_stats isn't set - rxstats must point to 2693 * the sta->rx_stats struct, so the check here is fine with and without 2694 * pcpu statistics 2695 */ 2696 if (last_rxstats->chains && 2697 !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) | 2698 BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { 2699 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); 2700 if (!sta->deflink.pcpu_rx_stats) 2701 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); 2702 2703 sinfo->chains = last_rxstats->chains; 2704 2705 for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { 2706 sinfo->chain_signal[i] = 2707 last_rxstats->chain_signal_last[i]; 2708 sinfo->chain_signal_avg[i] = 2709 -ewma_signal_read(&sta->deflink.rx_stats_avg.chain_signal[i]); 2710 } 2711 } 2712 2713 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) && 2714 !sta->sta.valid_links) { 2715 sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, 2716 &sinfo->txrate); 2717 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 2718 } 2719 2720 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) && 2721 !sta->sta.valid_links) { 2722 if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0) 2723 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); 2724 } 2725 2726 if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) { 2727 for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) 2728 sta_set_tidstats(sta, &sinfo->pertid[i], i); 2729 } 2730 2731 if (ieee80211_vif_is_mesh(&sdata->vif)) { 2732 #ifdef CONFIG_MAC80211_MESH 2733 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) | 2734 BIT_ULL(NL80211_STA_INFO_PLID) | 2735 BIT_ULL(NL80211_STA_INFO_PLINK_STATE) | 2736 BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | 2737 BIT_ULL(NL80211_STA_INFO_PEER_PM) | 2738 BIT_ULL(NL80211_STA_INFO_NONPEER_PM) | 2739 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) | 2740 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS); 2741 2742 sinfo->llid = sta->mesh->llid; 2743 sinfo->plid = sta->mesh->plid; 2744 sinfo->plink_state = sta->mesh->plink_state; 2745 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { 2746 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET); 2747 sinfo->t_offset = sta->mesh->t_offset; 2748 } 2749 sinfo->local_pm = sta->mesh->local_pm; 2750 
sinfo->peer_pm = sta->mesh->peer_pm; 2751 sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; 2752 sinfo->connected_to_gate = sta->mesh->connected_to_gate; 2753 sinfo->connected_to_as = sta->mesh->connected_to_as; 2754 #endif 2755 } 2756 2757 sinfo->bss_param.flags = 0; 2758 if (sdata->vif.bss_conf.use_cts_prot) 2759 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT; 2760 if (sdata->vif.bss_conf.use_short_preamble) 2761 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; 2762 if (sdata->vif.bss_conf.use_short_slot) 2763 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; 2764 sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period; 2765 sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; 2766 2767 sinfo->sta_flags.set = 0; 2768 sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) | 2769 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | 2770 BIT(NL80211_STA_FLAG_WME) | 2771 BIT(NL80211_STA_FLAG_MFP) | 2772 BIT(NL80211_STA_FLAG_AUTHENTICATED) | 2773 BIT(NL80211_STA_FLAG_ASSOCIATED) | 2774 BIT(NL80211_STA_FLAG_TDLS_PEER); 2775 if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 2776 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED); 2777 if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE)) 2778 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE); 2779 if (sta->sta.wme) 2780 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME); 2781 if (test_sta_flag(sta, WLAN_STA_MFP)) 2782 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP); 2783 if (test_sta_flag(sta, WLAN_STA_AUTH)) 2784 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED); 2785 if (test_sta_flag(sta, WLAN_STA_ASSOC)) 2786 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED); 2787 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) 2788 sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); 2789 2790 thr = sta_get_expected_throughput(sta); 2791 2792 if (thr != 0) { 2793 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT); 2794 sinfo->expected_throughput = thr; 2795 } 2796 2797 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) && 2798 sta->deflink.status_stats.ack_signal_filled) { 2799 sinfo->ack_signal = sta->deflink.status_stats.last_ack_signal; 2800 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); 2801 } 2802 2803 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) && 2804 sta->deflink.status_stats.ack_signal_filled) { 2805 sinfo->avg_ack_signal = 2806 -(s8)ewma_avg_signal_read( 2807 &sta->deflink.status_stats.avg_ack_signal); 2808 sinfo->filled |= 2809 BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); 2810 } 2811 2812 if (ieee80211_vif_is_mesh(&sdata->vif)) { 2813 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC); 2814 sinfo->airtime_link_metric = 2815 airtime_link_metric_get(local, sta); 2816 } 2817 } 2818 2819 u32 sta_get_expected_throughput(struct sta_info *sta) 2820 { 2821 struct ieee80211_sub_if_data *sdata = sta->sdata; 2822 struct ieee80211_local *local = sdata->local; 2823 struct rate_control_ref *ref = NULL; 2824 u32 thr = 0; 2825 2826 if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) 2827 ref = local->rate_ctrl; 2828 2829 /* check if the driver has a SW RC implementation */ 2830 if (ref && ref->ops->get_expected_throughput) 2831 thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv); 2832 else 2833 thr = drv_get_expected_throughput(local, sta); 2834 2835 return thr; 2836 } 2837 2838 unsigned long ieee80211_sta_last_active(struct sta_info *sta) 2839 { 2840 struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); 2841 2842 if 
(!sta->deflink.status_stats.last_ack || 2843 time_after(stats->last_rx, sta->deflink.status_stats.last_ack)) 2844 return stats->last_rx; 2845 return sta->deflink.status_stats.last_ack; 2846 } 2847 2848 static void sta_update_codel_params(struct sta_info *sta, u32 thr) 2849 { 2850 if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) { 2851 sta->cparams.target = MS2TIME(50); 2852 sta->cparams.interval = MS2TIME(300); 2853 sta->cparams.ecn = false; 2854 } else { 2855 sta->cparams.target = MS2TIME(20); 2856 sta->cparams.interval = MS2TIME(100); 2857 sta->cparams.ecn = true; 2858 } 2859 } 2860 2861 void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta, 2862 u32 thr) 2863 { 2864 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 2865 2866 sta_update_codel_params(sta, thr); 2867 } 2868 2869 int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id) 2870 { 2871 struct ieee80211_sub_if_data *sdata = sta->sdata; 2872 struct sta_link_alloc *alloc; 2873 int ret; 2874 2875 lockdep_assert_held(&sdata->local->sta_mtx); 2876 2877 /* must represent an MLD from the start */ 2878 if (WARN_ON(!sta->sta.valid_links)) 2879 return -EINVAL; 2880 2881 if (WARN_ON(sta->sta.valid_links & BIT(link_id) || 2882 sta->link[link_id])) 2883 return -EBUSY; 2884 2885 alloc = kzalloc(sizeof(*alloc), GFP_KERNEL); 2886 if (!alloc) 2887 return -ENOMEM; 2888 2889 ret = sta_info_alloc_link(sdata->local, &alloc->info, GFP_KERNEL); 2890 if (ret) { 2891 kfree(alloc); 2892 return ret; 2893 } 2894 2895 sta_info_add_link(sta, link_id, &alloc->info, &alloc->sta); 2896 2897 ieee80211_link_sta_debugfs_add(&alloc->info); 2898 2899 return 0; 2900 } 2901 2902 void ieee80211_sta_free_link(struct sta_info *sta, unsigned int link_id) 2903 { 2904 lockdep_assert_held(&sta->sdata->local->sta_mtx); 2905 2906 sta_remove_link(sta, link_id, false); 2907 } 2908 2909 int ieee80211_sta_activate_link(struct sta_info *sta, unsigned int link_id) 2910 { 2911 struct ieee80211_sub_if_data *sdata = sta->sdata; 2912 struct link_sta_info *link_sta; 2913 u16 old_links = sta->sta.valid_links; 2914 u16 new_links = old_links | BIT(link_id); 2915 int ret; 2916 2917 link_sta = rcu_dereference_protected(sta->link[link_id], 2918 lockdep_is_held(&sdata->local->sta_mtx)); 2919 2920 if (WARN_ON(old_links == new_links || !link_sta)) 2921 return -EINVAL; 2922 2923 rcu_read_lock(); 2924 if (link_sta_info_hash_lookup(sdata->local, link_sta->addr)) { 2925 rcu_read_unlock(); 2926 return -EALREADY; 2927 } 2928 /* we only modify under the mutex so this is fine */ 2929 rcu_read_unlock(); 2930 2931 sta->sta.valid_links = new_links; 2932 2933 if (!test_sta_flag(sta, WLAN_STA_INSERTED)) 2934 goto hash; 2935 2936 ieee80211_recalc_min_chandef(sdata, link_id); 2937 2938 /* Ensure the values are updated for the driver, 2939 * redone by sta_remove_link on failure. 
2940 */ 2941 ieee80211_sta_recalc_aggregates(&sta->sta); 2942 2943 ret = drv_change_sta_links(sdata->local, sdata, &sta->sta, 2944 old_links, new_links); 2945 if (ret) { 2946 sta->sta.valid_links = old_links; 2947 sta_remove_link(sta, link_id, false); 2948 return ret; 2949 } 2950 2951 hash: 2952 ret = link_sta_info_hash_add(sdata->local, link_sta); 2953 WARN_ON(ret); 2954 return 0; 2955 } 2956 2957 void ieee80211_sta_remove_link(struct sta_info *sta, unsigned int link_id) 2958 { 2959 struct ieee80211_sub_if_data *sdata = sta->sdata; 2960 u16 old_links = sta->sta.valid_links; 2961 2962 lockdep_assert_held(&sdata->local->sta_mtx); 2963 2964 sta->sta.valid_links &= ~BIT(link_id); 2965 2966 if (test_sta_flag(sta, WLAN_STA_INSERTED)) 2967 drv_change_sta_links(sdata->local, sdata, &sta->sta, 2968 old_links, sta->sta.valid_links); 2969 2970 sta_remove_link(sta, link_id, true); 2971 } 2972 2973 void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta, 2974 const u8 *ext_capab, 2975 unsigned int ext_capab_len) 2976 { 2977 u8 val; 2978 2979 sta->sta.max_amsdu_subframes = 0; 2980 2981 if (ext_capab_len < 8) 2982 return; 2983 2984 /* The sender might not have sent the last bit, consider it to be 0 */ 2985 val = u8_get_bits(ext_capab[7], WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB); 2986 2987 /* we did get all the bits, take the MSB as well */ 2988 if (ext_capab_len >= 9) 2989 val |= u8_get_bits(ext_capab[8], 2990 WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1; 2991 2992 if (val) 2993 sta->sta.max_amsdu_subframes = 4 << (4 - val); 2994 } 2995 2996 #ifdef CONFIG_LOCKDEP 2997 bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta) 2998 { 2999 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 3000 3001 return lockdep_is_held(&sta->local->sta_mtx); 3002 } 3003 EXPORT_SYMBOL(lockdep_sta_mutex_held); 3004 #endif 3005
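
/*
 * Illustrative sketch only (not part of mac80211): one way a hypothetical
 * driver might use the powersave/buffering entry points exported above.
 * The mydrv_*() names are made-up placeholders; ieee80211_sta_set_buffered(),
 * ieee80211_sta_block_awake() and ieee80211_sta_eosp() are the real
 * exported functions defined in this file. Guarded out so it is never built.
 */
#if 0
/* Firmware reports a change in its per-TID buffer occupancy. */
static void mydrv_fw_buffer_report(struct ieee80211_hw *hw,
				   struct ieee80211_sta *pubsta,
				   u8 tid, bool has_frames)
{
	/* keep mac80211's TIM handling in sync with the firmware queues */
	ieee80211_sta_set_buffered(pubsta, tid, has_frames);
}

/* Firmware starts or finishes reshuffling a station's queues. */
static void mydrv_fw_flush_sta(struct ieee80211_hw *hw,
			       struct ieee80211_sta *pubsta, bool start)
{
	/* block frame delivery to the station while the flush is ongoing */
	ieee80211_sta_block_awake(hw, pubsta, start);
}

/* Last released frame of a uAPSD service period was transmitted. */
static void mydrv_fw_sp_done(struct ieee80211_sta *pubsta)
{
	/* let mac80211 clear WLAN_STA_SP and end the service period */
	ieee80211_sta_eosp(pubsta);
}
#endif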