1 /* 2 * Atheros CARL9170 driver 3 * 4 * mac80211 interaction code 5 * 6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> 7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; either version 2 of the License, or 12 * (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * You should have received a copy of the GNU General Public License 20 * along with this program; see the file COPYING. If not, see 21 * http://www.gnu.org/licenses/. 22 * 23 * This file incorporates work covered by the following copyright and 24 * permission notice: 25 * Copyright (c) 2007-2008 Atheros Communications, Inc. 26 * 27 * Permission to use, copy, modify, and/or distribute this software for any 28 * purpose with or without fee is hereby granted, provided that the above 29 * copyright notice and this permission notice appear in all copies. 30 * 31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 38 */ 39 40 #include <linux/init.h> 41 #include <linux/slab.h> 42 #include <linux/module.h> 43 #include <linux/etherdevice.h> 44 #include <linux/random.h> 45 #include <net/mac80211.h> 46 #include <net/cfg80211.h> 47 #include "hw.h" 48 #include "carl9170.h" 49 #include "cmd.h" 50 51 static bool modparam_nohwcrypt; 52 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 53 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload."); 54 55 int modparam_noht; 56 module_param_named(noht, modparam_noht, int, S_IRUGO); 57 MODULE_PARM_DESC(noht, "Disable MPDU aggregation."); 58 59 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \ 60 .bitrate = (_bitrate), \ 61 .flags = (_flags), \ 62 .hw_value = (_hw_rate) | (_txpidx) << 4, \ 63 } 64 65 struct ieee80211_rate __carl9170_ratetable[] = { 66 RATE(10, 0, 0, 0), 67 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE), 68 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE), 69 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE), 70 RATE(60, 0xb, 0, 0), 71 RATE(90, 0xf, 0, 0), 72 RATE(120, 0xa, 0, 0), 73 RATE(180, 0xe, 0, 0), 74 RATE(240, 0x9, 0, 0), 75 RATE(360, 0xd, 1, 0), 76 RATE(480, 0x8, 2, 0), 77 RATE(540, 0xc, 3, 0), 78 }; 79 #undef RATE 80 81 #define carl9170_g_ratetable (__carl9170_ratetable + 0) 82 #define carl9170_g_ratetable_size 12 83 #define carl9170_a_ratetable (__carl9170_ratetable + 4) 84 #define carl9170_a_ratetable_size 8 85 86 /* 87 * NB: The hw_value is used as an index into the carl9170_phy_freq_params 88 * array in phy.c so that we don't have to do frequency lookups! 
89 */ 90 #define CHAN(_freq, _idx) { \ 91 .center_freq = (_freq), \ 92 .hw_value = (_idx), \ 93 .max_power = 18, /* XXX */ \ 94 } 95 96 static struct ieee80211_channel carl9170_2ghz_chantable[] = { 97 CHAN(2412, 0), 98 CHAN(2417, 1), 99 CHAN(2422, 2), 100 CHAN(2427, 3), 101 CHAN(2432, 4), 102 CHAN(2437, 5), 103 CHAN(2442, 6), 104 CHAN(2447, 7), 105 CHAN(2452, 8), 106 CHAN(2457, 9), 107 CHAN(2462, 10), 108 CHAN(2467, 11), 109 CHAN(2472, 12), 110 CHAN(2484, 13), 111 }; 112 113 static struct ieee80211_channel carl9170_5ghz_chantable[] = { 114 CHAN(4920, 14), 115 CHAN(4940, 15), 116 CHAN(4960, 16), 117 CHAN(4980, 17), 118 CHAN(5040, 18), 119 CHAN(5060, 19), 120 CHAN(5080, 20), 121 CHAN(5180, 21), 122 CHAN(5200, 22), 123 CHAN(5220, 23), 124 CHAN(5240, 24), 125 CHAN(5260, 25), 126 CHAN(5280, 26), 127 CHAN(5300, 27), 128 CHAN(5320, 28), 129 CHAN(5500, 29), 130 CHAN(5520, 30), 131 CHAN(5540, 31), 132 CHAN(5560, 32), 133 CHAN(5580, 33), 134 CHAN(5600, 34), 135 CHAN(5620, 35), 136 CHAN(5640, 36), 137 CHAN(5660, 37), 138 CHAN(5680, 38), 139 CHAN(5700, 39), 140 CHAN(5745, 40), 141 CHAN(5765, 41), 142 CHAN(5785, 42), 143 CHAN(5805, 43), 144 CHAN(5825, 44), 145 CHAN(5170, 45), 146 CHAN(5190, 46), 147 CHAN(5210, 47), 148 CHAN(5230, 48), 149 }; 150 #undef CHAN 151 152 #define CARL9170_HT_CAP \ 153 { \ 154 .ht_supported = true, \ 155 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \ 156 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \ 157 IEEE80211_HT_CAP_SGI_40 | \ 158 IEEE80211_HT_CAP_DSSSCCK40 | \ 159 IEEE80211_HT_CAP_SM_PS, \ 160 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \ 161 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \ 162 .mcs = { \ 163 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \ 164 .rx_highest = cpu_to_le16(300), \ 165 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \ 166 }, \ 167 } 168 169 static struct ieee80211_supported_band carl9170_band_2GHz = { 170 .channels = carl9170_2ghz_chantable, 171 .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable), 172 .bitrates = carl9170_g_ratetable, 173 .n_bitrates = carl9170_g_ratetable_size, 174 .ht_cap = CARL9170_HT_CAP, 175 }; 176 177 static struct ieee80211_supported_band carl9170_band_5GHz = { 178 .channels = carl9170_5ghz_chantable, 179 .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable), 180 .bitrates = carl9170_a_ratetable, 181 .n_bitrates = carl9170_a_ratetable_size, 182 .ht_cap = CARL9170_HT_CAP, 183 }; 184 185 static void carl9170_ampdu_gc(struct ar9170 *ar) 186 { 187 struct carl9170_sta_tid *tid_info; 188 LIST_HEAD(tid_gc); 189 190 rcu_read_lock(); 191 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) { 192 spin_lock_bh(&ar->tx_ampdu_list_lock); 193 if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) { 194 tid_info->state = CARL9170_TID_STATE_KILLED; 195 list_del_rcu(&tid_info->list); 196 ar->tx_ampdu_list_len--; 197 list_add_tail(&tid_info->tmp_list, &tid_gc); 198 } 199 spin_unlock_bh(&ar->tx_ampdu_list_lock); 200 201 } 202 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info); 203 rcu_read_unlock(); 204 205 synchronize_rcu(); 206 207 while (!list_empty(&tid_gc)) { 208 struct sk_buff *skb; 209 tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid, 210 tmp_list); 211 212 while ((skb = __skb_dequeue(&tid_info->queue))) 213 carl9170_tx_status(ar, skb, false); 214 215 list_del_init(&tid_info->tmp_list); 216 kfree(tid_info); 217 } 218 } 219 220 static void carl9170_flush(struct ar9170 *ar, bool drop_queued) 221 { 222 if (drop_queued) { 223 int i; 224 225 /* 226 * We can only drop frames which have not been uploaded 227 * to the device yet. 
228 */ 229 230 for (i = 0; i < ar->hw->queues; i++) { 231 struct sk_buff *skb; 232 233 while ((skb = skb_dequeue(&ar->tx_pending[i]))) { 234 struct ieee80211_tx_info *info; 235 236 info = IEEE80211_SKB_CB(skb); 237 if (info->flags & IEEE80211_TX_CTL_AMPDU) 238 atomic_dec(&ar->tx_ampdu_upload); 239 240 carl9170_tx_status(ar, skb, false); 241 } 242 } 243 } 244 245 /* Wait for all other outstanding frames to timeout. */ 246 if (atomic_read(&ar->tx_total_queued)) 247 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0); 248 } 249 250 static void carl9170_flush_ba(struct ar9170 *ar) 251 { 252 struct sk_buff_head free; 253 struct carl9170_sta_tid *tid_info; 254 struct sk_buff *skb; 255 256 __skb_queue_head_init(&free); 257 258 rcu_read_lock(); 259 spin_lock_bh(&ar->tx_ampdu_list_lock); 260 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) { 261 if (tid_info->state > CARL9170_TID_STATE_SUSPEND) { 262 tid_info->state = CARL9170_TID_STATE_SUSPEND; 263 264 spin_lock(&tid_info->lock); 265 while ((skb = __skb_dequeue(&tid_info->queue))) 266 __skb_queue_tail(&free, skb); 267 spin_unlock(&tid_info->lock); 268 } 269 } 270 spin_unlock_bh(&ar->tx_ampdu_list_lock); 271 rcu_read_unlock(); 272 273 while ((skb = __skb_dequeue(&free))) 274 carl9170_tx_status(ar, skb, false); 275 } 276 277 static void carl9170_zap_queues(struct ar9170 *ar) 278 { 279 struct carl9170_vif_info *cvif; 280 unsigned int i; 281 282 carl9170_ampdu_gc(ar); 283 284 carl9170_flush_ba(ar); 285 carl9170_flush(ar, true); 286 287 for (i = 0; i < ar->hw->queues; i++) { 288 spin_lock_bh(&ar->tx_status[i].lock); 289 while (!skb_queue_empty(&ar->tx_status[i])) { 290 struct sk_buff *skb; 291 292 skb = skb_peek(&ar->tx_status[i]); 293 carl9170_tx_get_skb(skb); 294 spin_unlock_bh(&ar->tx_status[i].lock); 295 carl9170_tx_drop(ar, skb); 296 spin_lock_bh(&ar->tx_status[i].lock); 297 carl9170_tx_put_skb(skb); 298 } 299 spin_unlock_bh(&ar->tx_status[i].lock); 300 } 301 302 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1); 303 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT); 304 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS); 305 306 /* reinitialize queues statistics */ 307 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats)); 308 for (i = 0; i < ar->hw->queues; i++) 309 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD; 310 311 for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++) 312 ar->mem_bitmap[i] = 0; 313 314 rcu_read_lock(); 315 list_for_each_entry_rcu(cvif, &ar->vif_list, list) { 316 spin_lock_bh(&ar->beacon_lock); 317 dev_kfree_skb_any(cvif->beacon); 318 cvif->beacon = NULL; 319 spin_unlock_bh(&ar->beacon_lock); 320 } 321 rcu_read_unlock(); 322 323 atomic_set(&ar->tx_ampdu_upload, 0); 324 atomic_set(&ar->tx_ampdu_scheduler, 0); 325 atomic_set(&ar->tx_total_pending, 0); 326 atomic_set(&ar->tx_total_queued, 0); 327 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks); 328 } 329 330 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \ 331 do { \ 332 queue.aifs = ai_fs; \ 333 queue.cw_min = cwmin; \ 334 queue.cw_max = cwmax; \ 335 queue.txop = _txop; \ 336 } while (0) 337 338 static int carl9170_op_start(struct ieee80211_hw *hw) 339 { 340 struct ar9170 *ar = hw->priv; 341 int err, i; 342 343 mutex_lock(&ar->mutex); 344 345 carl9170_zap_queues(ar); 346 347 /* reset QoS defaults */ 348 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47); 349 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94); 350 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0); 351 
CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0); 352 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0); 353 354 ar->current_factor = ar->current_density = -1; 355 /* "The first key is unique." */ 356 ar->usedkeys = 1; 357 ar->filter_state = 0; 358 ar->ps.last_action = jiffies; 359 ar->ps.last_slept = jiffies; 360 ar->erp_mode = CARL9170_ERP_AUTO; 361 ar->rx_software_decryption = false; 362 ar->disable_offload = false; 363 364 for (i = 0; i < ar->hw->queues; i++) { 365 ar->queue_stop_timeout[i] = jiffies; 366 ar->max_queue_stop_timeout[i] = 0; 367 } 368 369 atomic_set(&ar->mem_allocs, 0); 370 371 err = carl9170_usb_open(ar); 372 if (err) 373 goto out; 374 375 err = carl9170_init_mac(ar); 376 if (err) 377 goto out; 378 379 err = carl9170_set_qos(ar); 380 if (err) 381 goto out; 382 383 if (ar->fw.rx_filter) { 384 err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA | 385 CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD); 386 if (err) 387 goto out; 388 } 389 390 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 391 AR9170_DMA_TRIGGER_RXQ); 392 if (err) 393 goto out; 394 395 /* Clear key-cache */ 396 for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) { 397 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE, 398 0, NULL, 0); 399 if (err) 400 goto out; 401 402 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE, 403 1, NULL, 0); 404 if (err) 405 goto out; 406 407 if (i < AR9170_CAM_MAX_USER) { 408 err = carl9170_disable_key(ar, i); 409 if (err) 410 goto out; 411 } 412 } 413 414 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED); 415 416 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, 417 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK))); 418 419 ieee80211_wake_queues(ar->hw); 420 err = 0; 421 422 out: 423 mutex_unlock(&ar->mutex); 424 return err; 425 } 426 427 static void carl9170_cancel_worker(struct ar9170 *ar) 428 { 429 cancel_delayed_work_sync(&ar->stat_work); 430 cancel_delayed_work_sync(&ar->tx_janitor); 431 #ifdef CONFIG_CARL9170_LEDS 432 cancel_delayed_work_sync(&ar->led_work); 433 #endif /* CONFIG_CARL9170_LEDS */ 434 cancel_work_sync(&ar->ps_work); 435 cancel_work_sync(&ar->ping_work); 436 cancel_work_sync(&ar->ampdu_work); 437 } 438 439 static void carl9170_op_stop(struct ieee80211_hw *hw) 440 { 441 struct ar9170 *ar = hw->priv; 442 443 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE); 444 445 ieee80211_stop_queues(ar->hw); 446 447 mutex_lock(&ar->mutex); 448 if (IS_ACCEPTING_CMD(ar)) { 449 RCU_INIT_POINTER(ar->beacon_iter, NULL); 450 451 carl9170_led_set_state(ar, 0); 452 453 /* stop DMA */ 454 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0); 455 carl9170_usb_stop(ar); 456 } 457 458 carl9170_zap_queues(ar); 459 mutex_unlock(&ar->mutex); 460 461 carl9170_cancel_worker(ar); 462 } 463 464 static void carl9170_restart_work(struct work_struct *work) 465 { 466 struct ar9170 *ar = container_of(work, struct ar9170, 467 restart_work); 468 int err; 469 470 ar->usedkeys = 0; 471 ar->filter_state = 0; 472 carl9170_cancel_worker(ar); 473 474 mutex_lock(&ar->mutex); 475 err = carl9170_usb_restart(ar); 476 if (net_ratelimit()) { 477 if (err) { 478 dev_err(&ar->udev->dev, "Failed to restart device " 479 " (%d).\n", err); 480 } else { 481 dev_info(&ar->udev->dev, "device restarted " 482 "successfully.\n"); 483 } 484 } 485 486 carl9170_zap_queues(ar); 487 mutex_unlock(&ar->mutex); 488 if (!err) { 489 ar->restart_counter++; 490 atomic_set(&ar->pending_restarts, 0); 491 492 ieee80211_restart_hw(ar->hw); 493 } else { 
494 /* 495 * The reset was unsuccessful and the device seems to 496 * be dead. But there's still one option: a low-level 497 * usb subsystem reset... 498 */ 499 500 carl9170_usb_reset(ar); 501 } 502 } 503 504 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r) 505 { 506 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE); 507 508 /* 509 * Sometimes, an error can trigger several different reset events. 510 * By ignoring these *surplus* reset events, the device won't be 511 * killed again, right after it has recovered. 512 */ 513 if (atomic_inc_return(&ar->pending_restarts) > 1) { 514 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r); 515 return; 516 } 517 518 ieee80211_stop_queues(ar->hw); 519 520 dev_err(&ar->udev->dev, "restart device (%d)\n", r); 521 522 if (!WARN_ON(r == CARL9170_RR_NO_REASON) || 523 !WARN_ON(r >= __CARL9170_RR_LAST)) 524 ar->last_reason = r; 525 526 if (!ar->registered) 527 return; 528 529 if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset) 530 ieee80211_queue_work(ar->hw, &ar->restart_work); 531 else 532 carl9170_usb_reset(ar); 533 534 /* 535 * At this point, the device instance might have vanished/disabled. 536 * So, don't put any code which access the ar9170 struct 537 * without proper protection. 538 */ 539 } 540 541 static void carl9170_ping_work(struct work_struct *work) 542 { 543 struct ar9170 *ar = container_of(work, struct ar9170, ping_work); 544 int err; 545 546 if (!IS_STARTED(ar)) 547 return; 548 549 mutex_lock(&ar->mutex); 550 err = carl9170_echo_test(ar, 0xdeadbeef); 551 if (err) 552 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE); 553 mutex_unlock(&ar->mutex); 554 } 555 556 static int carl9170_init_interface(struct ar9170 *ar, 557 struct ieee80211_vif *vif) 558 { 559 struct ath_common *common = &ar->common; 560 int err; 561 562 if (!vif) { 563 WARN_ON_ONCE(IS_STARTED(ar)); 564 return 0; 565 } 566 567 memcpy(common->macaddr, vif->addr, ETH_ALEN); 568 569 if (modparam_nohwcrypt || 570 ((vif->type != NL80211_IFTYPE_STATION) && 571 (vif->type != NL80211_IFTYPE_AP))) { 572 ar->rx_software_decryption = true; 573 ar->disable_offload = true; 574 } 575 576 err = carl9170_set_operating_mode(ar); 577 return err; 578 } 579 580 static int carl9170_op_add_interface(struct ieee80211_hw *hw, 581 struct ieee80211_vif *vif) 582 { 583 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv; 584 struct ieee80211_vif *main_vif; 585 struct ar9170 *ar = hw->priv; 586 int vif_id = -1, err = 0; 587 588 mutex_lock(&ar->mutex); 589 rcu_read_lock(); 590 if (vif_priv->active) { 591 /* 592 * Skip the interface structure initialization, 593 * if the vif survived the _restart call. 
594 */ 595 vif_id = vif_priv->id; 596 vif_priv->enable_beacon = false; 597 598 spin_lock_bh(&ar->beacon_lock); 599 dev_kfree_skb_any(vif_priv->beacon); 600 vif_priv->beacon = NULL; 601 spin_unlock_bh(&ar->beacon_lock); 602 603 goto init; 604 } 605 606 main_vif = carl9170_get_main_vif(ar); 607 608 if (main_vif) { 609 switch (main_vif->type) { 610 case NL80211_IFTYPE_STATION: 611 if (vif->type == NL80211_IFTYPE_STATION) 612 break; 613 614 err = -EBUSY; 615 rcu_read_unlock(); 616 617 goto unlock; 618 619 case NL80211_IFTYPE_AP: 620 if ((vif->type == NL80211_IFTYPE_STATION) || 621 (vif->type == NL80211_IFTYPE_WDS) || 622 (vif->type == NL80211_IFTYPE_AP)) 623 break; 624 625 err = -EBUSY; 626 rcu_read_unlock(); 627 goto unlock; 628 629 default: 630 rcu_read_unlock(); 631 goto unlock; 632 } 633 } 634 635 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0); 636 637 if (vif_id < 0) { 638 rcu_read_unlock(); 639 640 err = -ENOSPC; 641 goto unlock; 642 } 643 644 BUG_ON(ar->vif_priv[vif_id].id != vif_id); 645 646 vif_priv->active = true; 647 vif_priv->id = vif_id; 648 vif_priv->enable_beacon = false; 649 ar->vifs++; 650 list_add_tail_rcu(&vif_priv->list, &ar->vif_list); 651 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif); 652 653 init: 654 if (carl9170_get_main_vif(ar) == vif) { 655 rcu_assign_pointer(ar->beacon_iter, vif_priv); 656 rcu_read_unlock(); 657 658 err = carl9170_init_interface(ar, vif); 659 if (err) 660 goto unlock; 661 } else { 662 rcu_read_unlock(); 663 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr); 664 665 if (err) 666 goto unlock; 667 } 668 669 if (ar->fw.tx_seq_table) { 670 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4, 671 0); 672 if (err) 673 goto unlock; 674 } 675 676 unlock: 677 if (err && (vif_id >= 0)) { 678 vif_priv->active = false; 679 bitmap_release_region(&ar->vif_bitmap, vif_id, 0); 680 ar->vifs--; 681 RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL); 682 list_del_rcu(&vif_priv->list); 683 mutex_unlock(&ar->mutex); 684 synchronize_rcu(); 685 } else { 686 if (ar->vifs > 1) 687 ar->ps.off_override |= PS_OFF_VIF; 688 689 mutex_unlock(&ar->mutex); 690 } 691 692 return err; 693 } 694 695 static void carl9170_op_remove_interface(struct ieee80211_hw *hw, 696 struct ieee80211_vif *vif) 697 { 698 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv; 699 struct ieee80211_vif *main_vif; 700 struct ar9170 *ar = hw->priv; 701 unsigned int id; 702 703 mutex_lock(&ar->mutex); 704 705 if (WARN_ON_ONCE(!vif_priv->active)) 706 goto unlock; 707 708 ar->vifs--; 709 710 rcu_read_lock(); 711 main_vif = carl9170_get_main_vif(ar); 712 713 id = vif_priv->id; 714 715 vif_priv->active = false; 716 WARN_ON(vif_priv->enable_beacon); 717 vif_priv->enable_beacon = false; 718 list_del_rcu(&vif_priv->list); 719 RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL); 720 721 if (vif == main_vif) { 722 rcu_read_unlock(); 723 724 if (ar->vifs) { 725 WARN_ON(carl9170_init_interface(ar, 726 carl9170_get_main_vif(ar))); 727 } else { 728 carl9170_set_operating_mode(ar); 729 } 730 } else { 731 rcu_read_unlock(); 732 733 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL)); 734 } 735 736 carl9170_update_beacon(ar, false); 737 carl9170_flush_cab(ar, id); 738 739 spin_lock_bh(&ar->beacon_lock); 740 dev_kfree_skb_any(vif_priv->beacon); 741 vif_priv->beacon = NULL; 742 spin_unlock_bh(&ar->beacon_lock); 743 744 bitmap_release_region(&ar->vif_bitmap, id, 0); 745 746 carl9170_set_beacon_timers(ar); 747 748 if (ar->vifs == 1) 749 ar->ps.off_override &= ~PS_OFF_VIF; 750 751 unlock: 752 
mutex_unlock(&ar->mutex); 753 754 synchronize_rcu(); 755 } 756 757 void carl9170_ps_check(struct ar9170 *ar) 758 { 759 ieee80211_queue_work(ar->hw, &ar->ps_work); 760 } 761 762 /* caller must hold ar->mutex */ 763 static int carl9170_ps_update(struct ar9170 *ar) 764 { 765 bool ps = false; 766 int err = 0; 767 768 if (!ar->ps.off_override) 769 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS); 770 771 if (ps != ar->ps.state) { 772 err = carl9170_powersave(ar, ps); 773 if (err) 774 return err; 775 776 if (ar->ps.state && !ps) { 777 ar->ps.sleep_ms = jiffies_to_msecs(jiffies - 778 ar->ps.last_action); 779 } 780 781 if (ps) 782 ar->ps.last_slept = jiffies; 783 784 ar->ps.last_action = jiffies; 785 ar->ps.state = ps; 786 } 787 788 return 0; 789 } 790 791 static void carl9170_ps_work(struct work_struct *work) 792 { 793 struct ar9170 *ar = container_of(work, struct ar9170, 794 ps_work); 795 mutex_lock(&ar->mutex); 796 if (IS_STARTED(ar)) 797 WARN_ON_ONCE(carl9170_ps_update(ar) != 0); 798 mutex_unlock(&ar->mutex); 799 } 800 801 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise) 802 { 803 int err; 804 805 if (noise) { 806 err = carl9170_get_noisefloor(ar); 807 if (err) 808 return err; 809 } 810 811 if (ar->fw.hw_counters) { 812 err = carl9170_collect_tally(ar); 813 if (err) 814 return err; 815 } 816 817 if (flush) 818 memset(&ar->tally, 0, sizeof(ar->tally)); 819 820 return 0; 821 } 822 823 static void carl9170_stat_work(struct work_struct *work) 824 { 825 struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work); 826 int err; 827 828 mutex_lock(&ar->mutex); 829 err = carl9170_update_survey(ar, false, true); 830 mutex_unlock(&ar->mutex); 831 832 if (err) 833 return; 834 835 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, 836 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK))); 837 } 838 839 static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed) 840 { 841 struct ar9170 *ar = hw->priv; 842 int err = 0; 843 844 mutex_lock(&ar->mutex); 845 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { 846 /* TODO */ 847 err = 0; 848 } 849 850 if (changed & IEEE80211_CONF_CHANGE_PS) { 851 err = carl9170_ps_update(ar); 852 if (err) 853 goto out; 854 } 855 856 if (changed & IEEE80211_CONF_CHANGE_SMPS) { 857 /* TODO */ 858 err = 0; 859 } 860 861 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 862 /* adjust slot time for 5 GHz */ 863 err = carl9170_set_slot_time(ar); 864 if (err) 865 goto out; 866 867 err = carl9170_update_survey(ar, true, false); 868 if (err) 869 goto out; 870 871 err = carl9170_set_channel(ar, hw->conf.channel, 872 hw->conf.channel_type, CARL9170_RFI_NONE); 873 if (err) 874 goto out; 875 876 err = carl9170_update_survey(ar, false, true); 877 if (err) 878 goto out; 879 880 err = carl9170_set_dyn_sifs_ack(ar); 881 if (err) 882 goto out; 883 884 err = carl9170_set_rts_cts_rate(ar); 885 if (err) 886 goto out; 887 } 888 889 if (changed & IEEE80211_CONF_CHANGE_POWER) { 890 err = carl9170_set_mac_tpc(ar, ar->hw->conf.channel); 891 if (err) 892 goto out; 893 } 894 895 out: 896 mutex_unlock(&ar->mutex); 897 return err; 898 } 899 900 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw, 901 struct netdev_hw_addr_list *mc_list) 902 { 903 struct netdev_hw_addr *ha; 904 u64 mchash; 905 906 /* always get broadcast frames */ 907 mchash = 1ULL << (0xff >> 2); 908 909 netdev_hw_addr_list_for_each(ha, mc_list) 910 mchash |= 1ULL << (ha->addr[5] >> 2); 911 912 return mchash; 913 } 914 915 static void carl9170_op_configure_filter(struct ieee80211_hw 
*hw, 916 unsigned int changed_flags, 917 unsigned int *new_flags, 918 u64 multicast) 919 { 920 struct ar9170 *ar = hw->priv; 921 922 /* mask supported flags */ 923 *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps; 924 925 if (!IS_ACCEPTING_CMD(ar)) 926 return; 927 928 mutex_lock(&ar->mutex); 929 930 ar->filter_state = *new_flags; 931 /* 932 * We can support more by setting the sniffer bit and 933 * then checking the error flags, later. 934 */ 935 936 if (*new_flags & FIF_ALLMULTI) 937 multicast = ~0ULL; 938 939 if (multicast != ar->cur_mc_hash) 940 WARN_ON(carl9170_update_multicast(ar, multicast)); 941 942 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) { 943 ar->sniffer_enabled = !!(*new_flags & 944 (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)); 945 946 WARN_ON(carl9170_set_operating_mode(ar)); 947 } 948 949 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) { 950 u32 rx_filter = 0; 951 952 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL))) 953 rx_filter |= CARL9170_RX_FILTER_BAD; 954 955 if (!(*new_flags & FIF_CONTROL)) 956 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER; 957 958 if (!(*new_flags & FIF_PSPOLL)) 959 rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL; 960 961 if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) { 962 rx_filter |= CARL9170_RX_FILTER_OTHER_RA; 963 rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL; 964 } 965 966 WARN_ON(carl9170_rx_filter(ar, rx_filter)); 967 } 968 969 mutex_unlock(&ar->mutex); 970 } 971 972 973 static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw, 974 struct ieee80211_vif *vif, 975 struct ieee80211_bss_conf *bss_conf, 976 u32 changed) 977 { 978 struct ar9170 *ar = hw->priv; 979 struct ath_common *common = &ar->common; 980 int err = 0; 981 struct carl9170_vif_info *vif_priv; 982 struct ieee80211_vif *main_vif; 983 984 mutex_lock(&ar->mutex); 985 vif_priv = (void *) vif->drv_priv; 986 main_vif = carl9170_get_main_vif(ar); 987 if (WARN_ON(!main_vif)) 988 goto out; 989 990 if (changed & BSS_CHANGED_BEACON_ENABLED) { 991 struct carl9170_vif_info *iter; 992 int i = 0; 993 994 vif_priv->enable_beacon = bss_conf->enable_beacon; 995 rcu_read_lock(); 996 list_for_each_entry_rcu(iter, &ar->vif_list, list) { 997 if (iter->active && iter->enable_beacon) 998 i++; 999 1000 } 1001 rcu_read_unlock(); 1002 1003 ar->beacon_enabled = i; 1004 } 1005 1006 if (changed & BSS_CHANGED_BEACON) { 1007 err = carl9170_update_beacon(ar, false); 1008 if (err) 1009 goto out; 1010 } 1011 1012 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON | 1013 BSS_CHANGED_BEACON_INT)) { 1014 1015 if (main_vif != vif) { 1016 bss_conf->beacon_int = main_vif->bss_conf.beacon_int; 1017 bss_conf->dtim_period = main_vif->bss_conf.dtim_period; 1018 } 1019 1020 /* 1021 * Therefore a hard limit for the broadcast traffic should 1022 * prevent false alarms. 1023 */ 1024 if (vif->type != NL80211_IFTYPE_STATION && 1025 (bss_conf->beacon_int * bss_conf->dtim_period >= 1026 (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) { 1027 err = -EINVAL; 1028 goto out; 1029 } 1030 1031 err = carl9170_set_beacon_timers(ar); 1032 if (err) 1033 goto out; 1034 } 1035 1036 if (changed & BSS_CHANGED_HT) { 1037 /* TODO */ 1038 err = 0; 1039 if (err) 1040 goto out; 1041 } 1042 1043 if (main_vif != vif) 1044 goto out; 1045 1046 /* 1047 * The following settings can only be changed by the 1048 * master interface. 
	 */

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		err = carl9170_set_operating_mode(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ASSOC) {
		ar->common.curaid = bss_conf->aid;
		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = carl9170_set_mac_rates(ar);
		if (err)
			goto out;
	}

out:
	WARN_ON_ONCE(err && IS_STARTED(ar));
	mutex_unlock(&ar->mutex);
}

static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_tsf_rsp tsf;
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
				0, NULL, sizeof(tsf), &tsf);
	mutex_unlock(&ar->mutex);
	if (WARN_ON(err))
		return 0;

	return le64_to_cpu(tsf.tsf_64);
}

static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	if (ar->disable_offload || !vif)
		return -EOPNOTSUPP;

	/*
	 * We have to fall back to software encryption whenever
	 * the user chooses to participate in an IBSS or is connected
	 * to more than one network.
	 *
	 * This is very unfortunate, because some machines cannot handle
	 * the high throughput of 802.11n networks.
	 */

	if (!is_main_vif(ar, vif)) {
		mutex_lock(&ar->mutex);
		goto err_softw;
	}

	/*
	 * While the hardware does support a *catch-all* key for offloading
	 * group-key en-/decryption, how the hardware decides which keyId
	 * maps to which key remains a mystery...
	 */
	if ((vif->type != NL80211_IFTYPE_STATION &&
	     vif->type != NL80211_IFTYPE_ADHOC) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		ktype = AR9170_ENC_ALG_WEP64;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		ktype = AR9170_ENC_ALG_WEP128;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (!IS_STARTED(ar)) {
			err = -EOPNOTSUPP;
			goto out;
		}

		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
			sta = NULL;

			i = 64 + key->keyidx;
		} else {
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64)
				goto err_softw;
		}

		key->hw_key_idx = i;

		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
					  ktype, 0, key->key,
					  min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			err = carl9170_upload_key(ar, i, sta ? sta->addr :
						  NULL, ktype, 1,
						  key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * The hardware is not capable of generating
			 * the MMIC for fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (!IS_STARTED(ar)) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
						  AR9170_ENC_ALG_NONE, 0,
						  NULL, 0);
			if (err)
				goto out;

			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
				err = carl9170_upload_key(ar, key->hw_key_idx,
							  NULL,
							  AR9170_ENC_ALG_NONE,
							  1, NULL, 0);
				if (err)
					goto out;
			}

		}

		err = carl9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;

err_softw:
	if (!ar->rx_software_decryption) {
		ar->rx_software_decryption = true;
		carl9170_set_operating_mode(ar);
	}
	mutex_unlock(&ar->mutex);
	return -ENOSPC;
}

static int carl9170_op_sta_add(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
{
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;

	atomic_set(&sta_info->pending_frames, 0);

	if (sta->ht_cap.ht_supported) {
		if (sta->ht_cap.ampdu_density > 6) {
			/*
			 * The hardware does not support a 16 us A-MPDU
			 * density; no HT transmission for this station.
			 */

			return 0;
		}

		for (i = 0; i < CARL9170_NUM_TID; i++)
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
		sta_info->ht_sta = true;
	}

	return 0;
}

static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;
	bool cleanup = false;

	if (sta->ht_cap.ht_supported) {

		sta_info->ht_sta = false;

		rcu_read_lock();
		for (i = 0; i < CARL9170_NUM_TID; i++) {
			struct carl9170_sta_tid *tid_info;

			tid_info = rcu_dereference(sta_info->agg[i]);
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

			if (!tid_info)
				continue;

			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
			cleanup = true;
		}
		rcu_read_unlock();

		if (cleanup)
			carl9170_ampdu_gc(ar);
	}

	return 0;
}

static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif, u16 queue,
			       const struct ieee80211_tx_queue_params *param)
{
	struct ar9170 *ar = hw->priv;
	int ret;

	mutex_lock(&ar->mutex);
	if (queue < ar->hw->queues) {
		memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
		ret = carl9170_set_qos(ar);
	} else {
		ret = -EINVAL;
	}

	mutex_unlock(&ar->mutex);
	return ret;
}

static void carl9170_ampdu_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ampdu_work);

	if (!IS_STARTED(ar))
		return;

	mutex_lock(&ar->mutex);
	carl9170_ampdu_gc(ar);
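	/*
	 * Editor's note (illustrative sketch, not original driver code):
	 * carl9170_ampdu_gc() above relies on the usual RCU unlink/wait/free
	 * idiom so that carl9170_op_ampdu_action() can tear a TID down from
	 * atomic context by only marking it SHUTDOWN and deferring the rest
	 * to this work item. The generic pattern looks like:
	 *
	 *   // writer side (what the gc does, simplified)
	 *   spin_lock_bh(&list_lock);
	 *   list_del_rcu(&obj->list);  // readers may still hold obj
	 *   spin_unlock_bh(&list_lock);
	 *   synchronize_rcu();         // wait for those readers to finish
	 *   kfree(obj);                // now it is safe to free
	 *
	 * which is why the actual kfree() of a carl9170_sta_tid only happens
	 * after the synchronize_rcu() in carl9170_ampdu_gc().
	 */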
mutex_unlock(&ar->mutex); 1339 } 1340 1341 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, 1342 struct ieee80211_vif *vif, 1343 enum ieee80211_ampdu_mlme_action action, 1344 struct ieee80211_sta *sta, 1345 u16 tid, u16 *ssn, u8 buf_size) 1346 { 1347 struct ar9170 *ar = hw->priv; 1348 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv; 1349 struct carl9170_sta_tid *tid_info; 1350 1351 if (modparam_noht) 1352 return -EOPNOTSUPP; 1353 1354 switch (action) { 1355 case IEEE80211_AMPDU_TX_START: 1356 if (!sta_info->ht_sta) 1357 return -EOPNOTSUPP; 1358 1359 rcu_read_lock(); 1360 if (rcu_dereference(sta_info->agg[tid])) { 1361 rcu_read_unlock(); 1362 return -EBUSY; 1363 } 1364 1365 tid_info = kzalloc(sizeof(struct carl9170_sta_tid), 1366 GFP_ATOMIC); 1367 if (!tid_info) { 1368 rcu_read_unlock(); 1369 return -ENOMEM; 1370 } 1371 1372 tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn); 1373 tid_info->state = CARL9170_TID_STATE_PROGRESS; 1374 tid_info->tid = tid; 1375 tid_info->max = sta_info->ampdu_max_len; 1376 1377 INIT_LIST_HEAD(&tid_info->list); 1378 INIT_LIST_HEAD(&tid_info->tmp_list); 1379 skb_queue_head_init(&tid_info->queue); 1380 spin_lock_init(&tid_info->lock); 1381 1382 spin_lock_bh(&ar->tx_ampdu_list_lock); 1383 ar->tx_ampdu_list_len++; 1384 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list); 1385 rcu_assign_pointer(sta_info->agg[tid], tid_info); 1386 spin_unlock_bh(&ar->tx_ampdu_list_lock); 1387 rcu_read_unlock(); 1388 1389 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 1390 break; 1391 1392 case IEEE80211_AMPDU_TX_STOP: 1393 rcu_read_lock(); 1394 tid_info = rcu_dereference(sta_info->agg[tid]); 1395 if (tid_info) { 1396 spin_lock_bh(&ar->tx_ampdu_list_lock); 1397 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN) 1398 tid_info->state = CARL9170_TID_STATE_SHUTDOWN; 1399 spin_unlock_bh(&ar->tx_ampdu_list_lock); 1400 } 1401 1402 RCU_INIT_POINTER(sta_info->agg[tid], NULL); 1403 rcu_read_unlock(); 1404 1405 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 1406 ieee80211_queue_work(ar->hw, &ar->ampdu_work); 1407 break; 1408 1409 case IEEE80211_AMPDU_TX_OPERATIONAL: 1410 rcu_read_lock(); 1411 tid_info = rcu_dereference(sta_info->agg[tid]); 1412 1413 sta_info->stats[tid].clear = true; 1414 sta_info->stats[tid].req = false; 1415 1416 if (tid_info) { 1417 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE); 1418 tid_info->state = CARL9170_TID_STATE_IDLE; 1419 } 1420 rcu_read_unlock(); 1421 1422 if (WARN_ON_ONCE(!tid_info)) 1423 return -EFAULT; 1424 1425 break; 1426 1427 case IEEE80211_AMPDU_RX_START: 1428 case IEEE80211_AMPDU_RX_STOP: 1429 /* Handled by hardware */ 1430 break; 1431 1432 default: 1433 return -EOPNOTSUPP; 1434 } 1435 1436 return 0; 1437 } 1438 1439 #ifdef CONFIG_CARL9170_WPC 1440 static int carl9170_register_wps_button(struct ar9170 *ar) 1441 { 1442 struct input_dev *input; 1443 int err; 1444 1445 if (!(ar->features & CARL9170_WPS_BUTTON)) 1446 return 0; 1447 1448 input = input_allocate_device(); 1449 if (!input) 1450 return -ENOMEM; 1451 1452 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button", 1453 wiphy_name(ar->hw->wiphy)); 1454 1455 snprintf(ar->wps.phys, sizeof(ar->wps.phys), 1456 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy)); 1457 1458 input->name = ar->wps.name; 1459 input->phys = ar->wps.phys; 1460 input->id.bustype = BUS_USB; 1461 input->dev.parent = &ar->hw->wiphy->dev; 1462 1463 input_set_capability(input, EV_KEY, KEY_WPS_BUTTON); 1464 1465 err = input_register_device(input); 1466 if (err) { 1467 
input_free_device(input); 1468 return err; 1469 } 1470 1471 ar->wps.pbc = input; 1472 return 0; 1473 } 1474 #endif /* CONFIG_CARL9170_WPC */ 1475 1476 #ifdef CONFIG_CARL9170_HWRNG 1477 static int carl9170_rng_get(struct ar9170 *ar) 1478 { 1479 1480 #define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32)) 1481 #define RB (CARL9170_MAX_CMD_PAYLOAD_LEN) 1482 1483 static const __le32 rng_load[RW] = { 1484 [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)}; 1485 1486 u32 buf[RW]; 1487 1488 unsigned int i, off = 0, transfer, count; 1489 int err; 1490 1491 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN); 1492 1493 if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized) 1494 return -EAGAIN; 1495 1496 count = ARRAY_SIZE(ar->rng.cache); 1497 while (count) { 1498 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG, 1499 RB, (u8 *) rng_load, 1500 RB, (u8 *) buf); 1501 if (err) 1502 return err; 1503 1504 transfer = min_t(unsigned int, count, RW); 1505 for (i = 0; i < transfer; i++) 1506 ar->rng.cache[off + i] = buf[i]; 1507 1508 off += transfer; 1509 count -= transfer; 1510 } 1511 1512 ar->rng.cache_idx = 0; 1513 1514 #undef RW 1515 #undef RB 1516 return 0; 1517 } 1518 1519 static int carl9170_rng_read(struct hwrng *rng, u32 *data) 1520 { 1521 struct ar9170 *ar = (struct ar9170 *)rng->priv; 1522 int ret = -EIO; 1523 1524 mutex_lock(&ar->mutex); 1525 if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) { 1526 ret = carl9170_rng_get(ar); 1527 if (ret) { 1528 mutex_unlock(&ar->mutex); 1529 return ret; 1530 } 1531 } 1532 1533 *data = ar->rng.cache[ar->rng.cache_idx++]; 1534 mutex_unlock(&ar->mutex); 1535 1536 return sizeof(u16); 1537 } 1538 1539 static void carl9170_unregister_hwrng(struct ar9170 *ar) 1540 { 1541 if (ar->rng.initialized) { 1542 hwrng_unregister(&ar->rng.rng); 1543 ar->rng.initialized = false; 1544 } 1545 } 1546 1547 static int carl9170_register_hwrng(struct ar9170 *ar) 1548 { 1549 int err; 1550 1551 snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name), 1552 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy)); 1553 ar->rng.rng.name = ar->rng.name; 1554 ar->rng.rng.data_read = carl9170_rng_read; 1555 ar->rng.rng.priv = (unsigned long)ar; 1556 1557 if (WARN_ON(ar->rng.initialized)) 1558 return -EALREADY; 1559 1560 err = hwrng_register(&ar->rng.rng); 1561 if (err) { 1562 dev_err(&ar->udev->dev, "Failed to register the random " 1563 "number generator (%d)\n", err); 1564 return err; 1565 } 1566 1567 ar->rng.initialized = true; 1568 1569 err = carl9170_rng_get(ar); 1570 if (err) { 1571 carl9170_unregister_hwrng(ar); 1572 return err; 1573 } 1574 1575 return 0; 1576 } 1577 #endif /* CONFIG_CARL9170_HWRNG */ 1578 1579 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx, 1580 struct survey_info *survey) 1581 { 1582 struct ar9170 *ar = hw->priv; 1583 struct ieee80211_channel *chan; 1584 struct ieee80211_supported_band *band; 1585 int err, b, i; 1586 1587 chan = ar->channel; 1588 if (!chan) 1589 return -ENODEV; 1590 1591 if (idx == chan->hw_value) { 1592 mutex_lock(&ar->mutex); 1593 err = carl9170_update_survey(ar, false, true); 1594 mutex_unlock(&ar->mutex); 1595 if (err) 1596 return err; 1597 } 1598 1599 for (b = 0; b < IEEE80211_NUM_BANDS; b++) { 1600 band = ar->hw->wiphy->bands[b]; 1601 1602 if (!band) 1603 continue; 1604 1605 for (i = 0; i < band->n_channels; i++) { 1606 if (band->channels[i].hw_value == idx) { 1607 chan = &band->channels[i]; 1608 goto found; 1609 } 1610 } 1611 } 1612 return -ENOENT; 1613 1614 found: 1615 memcpy(survey, &ar->survey[idx], sizeof(*survey)); 1616 1617 survey->channel 
= chan; 1618 survey->filled = SURVEY_INFO_NOISE_DBM; 1619 1620 if (ar->channel == chan) 1621 survey->filled |= SURVEY_INFO_IN_USE; 1622 1623 if (ar->fw.hw_counters) { 1624 survey->filled |= SURVEY_INFO_CHANNEL_TIME | 1625 SURVEY_INFO_CHANNEL_TIME_BUSY | 1626 SURVEY_INFO_CHANNEL_TIME_TX; 1627 } 1628 1629 return 0; 1630 } 1631 1632 static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop) 1633 { 1634 struct ar9170 *ar = hw->priv; 1635 unsigned int vid; 1636 1637 mutex_lock(&ar->mutex); 1638 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num) 1639 carl9170_flush_cab(ar, vid); 1640 1641 carl9170_flush(ar, drop); 1642 mutex_unlock(&ar->mutex); 1643 } 1644 1645 static int carl9170_op_get_stats(struct ieee80211_hw *hw, 1646 struct ieee80211_low_level_stats *stats) 1647 { 1648 struct ar9170 *ar = hw->priv; 1649 1650 memset(stats, 0, sizeof(*stats)); 1651 stats->dot11ACKFailureCount = ar->tx_ack_failures; 1652 stats->dot11FCSErrorCount = ar->tx_fcs_errors; 1653 return 0; 1654 } 1655 1656 static void carl9170_op_sta_notify(struct ieee80211_hw *hw, 1657 struct ieee80211_vif *vif, 1658 enum sta_notify_cmd cmd, 1659 struct ieee80211_sta *sta) 1660 { 1661 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv; 1662 1663 switch (cmd) { 1664 case STA_NOTIFY_SLEEP: 1665 sta_info->sleeping = true; 1666 if (atomic_read(&sta_info->pending_frames)) 1667 ieee80211_sta_block_awake(hw, sta, true); 1668 break; 1669 1670 case STA_NOTIFY_AWAKE: 1671 sta_info->sleeping = false; 1672 break; 1673 } 1674 } 1675 1676 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw) 1677 { 1678 struct ar9170 *ar = hw->priv; 1679 1680 return !!atomic_read(&ar->tx_total_queued); 1681 } 1682 1683 static const struct ieee80211_ops carl9170_ops = { 1684 .start = carl9170_op_start, 1685 .stop = carl9170_op_stop, 1686 .tx = carl9170_op_tx, 1687 .flush = carl9170_op_flush, 1688 .add_interface = carl9170_op_add_interface, 1689 .remove_interface = carl9170_op_remove_interface, 1690 .config = carl9170_op_config, 1691 .prepare_multicast = carl9170_op_prepare_multicast, 1692 .configure_filter = carl9170_op_configure_filter, 1693 .conf_tx = carl9170_op_conf_tx, 1694 .bss_info_changed = carl9170_op_bss_info_changed, 1695 .get_tsf = carl9170_op_get_tsf, 1696 .set_key = carl9170_op_set_key, 1697 .sta_add = carl9170_op_sta_add, 1698 .sta_remove = carl9170_op_sta_remove, 1699 .sta_notify = carl9170_op_sta_notify, 1700 .get_survey = carl9170_op_get_survey, 1701 .get_stats = carl9170_op_get_stats, 1702 .ampdu_action = carl9170_op_ampdu_action, 1703 .tx_frames_pending = carl9170_tx_frames_pending, 1704 }; 1705 1706 void *carl9170_alloc(size_t priv_size) 1707 { 1708 struct ieee80211_hw *hw; 1709 struct ar9170 *ar; 1710 struct sk_buff *skb; 1711 int i; 1712 1713 /* 1714 * this buffer is used for rx stream reconstruction. 1715 * Under heavy load this device (or the transport layer?) 1716 * tends to split the streams into separate rx descriptors. 1717 */ 1718 1719 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL); 1720 if (!skb) 1721 goto err_nomem; 1722 1723 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops); 1724 if (!hw) 1725 goto err_nomem; 1726 1727 ar = hw->priv; 1728 ar->hw = hw; 1729 ar->rx_failover = skb; 1730 1731 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head)); 1732 ar->rx_has_plcp = false; 1733 1734 /* 1735 * Here's a hidden pitfall! 1736 * 1737 * All 4 AC queues work perfectly well under _legacy_ operation. 1738 * However as soon as aggregation is enabled, the traffic flow 1739 * gets very bumpy. 
Therefore we have to _switch_ to a 1740 * software AC with a single HW queue. 1741 */ 1742 hw->queues = __AR9170_NUM_TXQ; 1743 1744 mutex_init(&ar->mutex); 1745 spin_lock_init(&ar->beacon_lock); 1746 spin_lock_init(&ar->cmd_lock); 1747 spin_lock_init(&ar->tx_stats_lock); 1748 spin_lock_init(&ar->tx_ampdu_list_lock); 1749 spin_lock_init(&ar->mem_lock); 1750 spin_lock_init(&ar->state_lock); 1751 atomic_set(&ar->pending_restarts, 0); 1752 ar->vifs = 0; 1753 for (i = 0; i < ar->hw->queues; i++) { 1754 skb_queue_head_init(&ar->tx_status[i]); 1755 skb_queue_head_init(&ar->tx_pending[i]); 1756 } 1757 INIT_WORK(&ar->ps_work, carl9170_ps_work); 1758 INIT_WORK(&ar->ping_work, carl9170_ping_work); 1759 INIT_WORK(&ar->restart_work, carl9170_restart_work); 1760 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work); 1761 INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work); 1762 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor); 1763 INIT_LIST_HEAD(&ar->tx_ampdu_list); 1764 rcu_assign_pointer(ar->tx_ampdu_iter, 1765 (struct carl9170_sta_tid *) &ar->tx_ampdu_list); 1766 1767 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num); 1768 INIT_LIST_HEAD(&ar->vif_list); 1769 init_completion(&ar->tx_flush); 1770 1771 /* firmware decides which modes we support */ 1772 hw->wiphy->interface_modes = 0; 1773 1774 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | 1775 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 1776 IEEE80211_HW_SUPPORTS_PS | 1777 IEEE80211_HW_PS_NULLFUNC_STACK | 1778 IEEE80211_HW_NEED_DTIM_PERIOD | 1779 IEEE80211_HW_SIGNAL_DBM; 1780 1781 if (!modparam_noht) { 1782 /* 1783 * see the comment above, why we allow the user 1784 * to disable HT by a module parameter. 1785 */ 1786 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; 1787 } 1788 1789 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe); 1790 hw->sta_data_size = sizeof(struct carl9170_sta_info); 1791 hw->vif_data_size = sizeof(struct carl9170_vif_info); 1792 1793 hw->max_rates = CARL9170_TX_MAX_RATES; 1794 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES; 1795 1796 for (i = 0; i < ARRAY_SIZE(ar->noise); i++) 1797 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */ 1798 1799 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 1800 1801 /* As IBSS Encryption is software-based, IBSS RSN is supported. 
*/ 1802 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 1803 return ar; 1804 1805 err_nomem: 1806 kfree_skb(skb); 1807 return ERR_PTR(-ENOMEM); 1808 } 1809 1810 static int carl9170_read_eeprom(struct ar9170 *ar) 1811 { 1812 #define RW 8 /* number of words to read at once */ 1813 #define RB (sizeof(u32) * RW) 1814 u8 *eeprom = (void *)&ar->eeprom; 1815 __le32 offsets[RW]; 1816 int i, j, err; 1817 1818 BUILD_BUG_ON(sizeof(ar->eeprom) & 3); 1819 1820 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4); 1821 #ifndef __CHECKER__ 1822 /* don't want to handle trailing remains */ 1823 BUILD_BUG_ON(sizeof(ar->eeprom) % RB); 1824 #endif 1825 1826 for (i = 0; i < sizeof(ar->eeprom) / RB; i++) { 1827 for (j = 0; j < RW; j++) 1828 offsets[j] = cpu_to_le32(AR9170_EEPROM_START + 1829 RB * i + 4 * j); 1830 1831 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG, 1832 RB, (u8 *) &offsets, 1833 RB, eeprom + RB * i); 1834 if (err) 1835 return err; 1836 } 1837 1838 #undef RW 1839 #undef RB 1840 return 0; 1841 } 1842 1843 static int carl9170_parse_eeprom(struct ar9170 *ar) 1844 { 1845 struct ath_regulatory *regulatory = &ar->common.regulatory; 1846 unsigned int rx_streams, tx_streams, tx_params = 0; 1847 int bands = 0; 1848 int chans = 0; 1849 1850 if (ar->eeprom.length == cpu_to_le16(0xffff)) 1851 return -ENODATA; 1852 1853 rx_streams = hweight8(ar->eeprom.rx_mask); 1854 tx_streams = hweight8(ar->eeprom.tx_mask); 1855 1856 if (rx_streams != tx_streams) { 1857 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF; 1858 1859 WARN_ON(!(tx_streams >= 1 && tx_streams <= 1860 IEEE80211_HT_MCS_TX_MAX_STREAMS)); 1861 1862 tx_params = (tx_streams - 1) << 1863 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; 1864 1865 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params; 1866 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params; 1867 } 1868 1869 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) { 1870 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 1871 &carl9170_band_2GHz; 1872 chans += carl9170_band_2GHz.n_channels; 1873 bands++; 1874 } 1875 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) { 1876 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 1877 &carl9170_band_5GHz; 1878 chans += carl9170_band_5GHz.n_channels; 1879 bands++; 1880 } 1881 1882 if (!bands) 1883 return -EINVAL; 1884 1885 ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL); 1886 if (!ar->survey) 1887 return -ENOMEM; 1888 ar->num_channels = chans; 1889 1890 /* 1891 * I measured this, a bandswitch takes roughly 1892 * 135 ms and a frequency switch about 80. 1893 * 1894 * FIXME: measure these values again once EEPROM settings 1895 * are used, that will influence them! 
1896 */ 1897 if (bands == 2) 1898 ar->hw->channel_change_time = 135 * 1000; 1899 else 1900 ar->hw->channel_change_time = 80 * 1000; 1901 1902 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]); 1903 1904 /* second part of wiphy init */ 1905 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address); 1906 1907 return 0; 1908 } 1909 1910 static int carl9170_reg_notifier(struct wiphy *wiphy, 1911 struct regulatory_request *request) 1912 { 1913 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 1914 struct ar9170 *ar = hw->priv; 1915 1916 return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory); 1917 } 1918 1919 int carl9170_register(struct ar9170 *ar) 1920 { 1921 struct ath_regulatory *regulatory = &ar->common.regulatory; 1922 int err = 0, i; 1923 1924 if (WARN_ON(ar->mem_bitmap)) 1925 return -EINVAL; 1926 1927 ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) * 1928 sizeof(unsigned long), GFP_KERNEL); 1929 1930 if (!ar->mem_bitmap) 1931 return -ENOMEM; 1932 1933 /* try to read EEPROM, init MAC addr */ 1934 err = carl9170_read_eeprom(ar); 1935 if (err) 1936 return err; 1937 1938 err = carl9170_parse_eeprom(ar); 1939 if (err) 1940 return err; 1941 1942 err = ath_regd_init(regulatory, ar->hw->wiphy, 1943 carl9170_reg_notifier); 1944 if (err) 1945 return err; 1946 1947 if (modparam_noht) { 1948 carl9170_band_2GHz.ht_cap.ht_supported = false; 1949 carl9170_band_5GHz.ht_cap.ht_supported = false; 1950 } 1951 1952 for (i = 0; i < ar->fw.vif_num; i++) { 1953 ar->vif_priv[i].id = i; 1954 ar->vif_priv[i].vif = NULL; 1955 } 1956 1957 err = ieee80211_register_hw(ar->hw); 1958 if (err) 1959 return err; 1960 1961 /* mac80211 interface is now registered */ 1962 ar->registered = true; 1963 1964 if (!ath_is_world_regd(regulatory)) 1965 regulatory_hint(ar->hw->wiphy, regulatory->alpha2); 1966 1967 #ifdef CONFIG_CARL9170_DEBUGFS 1968 carl9170_debugfs_register(ar); 1969 #endif /* CONFIG_CARL9170_DEBUGFS */ 1970 1971 err = carl9170_led_init(ar); 1972 if (err) 1973 goto err_unreg; 1974 1975 #ifdef CONFIG_CARL9170_LEDS 1976 err = carl9170_led_register(ar); 1977 if (err) 1978 goto err_unreg; 1979 #endif /* CONFIG_CARL9170_LEDS */ 1980 1981 #ifdef CONFIG_CARL9170_WPC 1982 err = carl9170_register_wps_button(ar); 1983 if (err) 1984 goto err_unreg; 1985 #endif /* CONFIG_CARL9170_WPC */ 1986 1987 #ifdef CONFIG_CARL9170_HWRNG 1988 err = carl9170_register_hwrng(ar); 1989 if (err) 1990 goto err_unreg; 1991 #endif /* CONFIG_CARL9170_HWRNG */ 1992 1993 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n", 1994 wiphy_name(ar->hw->wiphy)); 1995 1996 return 0; 1997 1998 err_unreg: 1999 carl9170_unregister(ar); 2000 return err; 2001 } 2002 2003 void carl9170_unregister(struct ar9170 *ar) 2004 { 2005 if (!ar->registered) 2006 return; 2007 2008 ar->registered = false; 2009 2010 #ifdef CONFIG_CARL9170_LEDS 2011 carl9170_led_unregister(ar); 2012 #endif /* CONFIG_CARL9170_LEDS */ 2013 2014 #ifdef CONFIG_CARL9170_DEBUGFS 2015 carl9170_debugfs_unregister(ar); 2016 #endif /* CONFIG_CARL9170_DEBUGFS */ 2017 2018 #ifdef CONFIG_CARL9170_WPC 2019 if (ar->wps.pbc) { 2020 input_unregister_device(ar->wps.pbc); 2021 ar->wps.pbc = NULL; 2022 } 2023 #endif /* CONFIG_CARL9170_WPC */ 2024 2025 #ifdef CONFIG_CARL9170_HWRNG 2026 carl9170_unregister_hwrng(ar); 2027 #endif /* CONFIG_CARL9170_HWRNG */ 2028 2029 carl9170_cancel_worker(ar); 2030 cancel_work_sync(&ar->restart_work); 2031 2032 ieee80211_unregister_hw(ar->hw); 2033 } 2034 2035 void carl9170_free(struct ar9170 *ar) 2036 { 2037 
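	/*
	 * Editor's note (hedged sketch, not original driver code): based on
	 * how the USB glue in usb.c is expected to drive the entry points in
	 * this file, the intended instance lifecycle is roughly:
	 *
	 *   // probe path (sketch; the exact call sites live in usb.c)
	 *   ar = carl9170_alloc(priv_size);
	 *   ...device/firmware setup, then carl9170_register(ar)...
	 *
	 *   // disconnect path
	 *   carl9170_unregister(ar);
	 *   carl9170_free(ar);
	 *
	 * The WARN_ON()s below catch out-of-order teardown: the instance must
	 * already be unregistered and de-initialized before it is freed.
	 */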
	WARN_ON(ar->registered);
	WARN_ON(IS_INITIALIZED(ar));

	kfree_skb(ar->rx_failover);
	ar->rx_failover = NULL;

	kfree(ar->mem_bitmap);
	ar->mem_bitmap = NULL;

	kfree(ar->survey);
	ar->survey = NULL;

	mutex_destroy(&ar->mutex);

	ieee80211_free_hw(ar->hw);
}
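/*
 * Editor's note -- standalone, illustrative sketch (userspace C, not part of
 * the driver): it recomputes the 64-bit multicast hash that
 * carl9170_op_prepare_multicast() builds, where each address contributes the
 * bit (addr[5] >> 2) and the broadcast address always maps to bit 63.
 *
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   static uint64_t carl9170_mc_hash(const uint8_t (*addrs)[6], int n)
 *   {
 *           // broadcast frames always pass: 0xff >> 2 == 63
 *           uint64_t mchash = 1ULL << (0xff >> 2);
 *           int i;
 *
 *           for (i = 0; i < n; i++)
 *                   mchash |= 1ULL << (addrs[i][5] >> 2);
 *           return mchash;
 *   }
 *
 *   int main(void)
 *   {
 *           // 33:33:00:00:00:16 (IPv6 ff02::16, "all MLDv2-capable
 *           // routers"): last octet 0x16 >> 2 == 5, so bit 5 is set.
 *           const uint8_t mc[][6] = {
 *                   { 0x33, 0x33, 0x00, 0x00, 0x00, 0x16 },
 *           };
 *
 *           printf("hash = 0x%016llx\n",
 *                  (unsigned long long)carl9170_mc_hash(mc, 1));
 *           return 0;
 *   }
 *
 *   // prints: hash = 0x8000000000000020 (bit 63 = broadcast, bit 5 = group)
 */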