/*
 * This file is part of wl1271
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/wl12xx.h>
#include <linux/sched.h>
#include <linux/interrupt.h>

#include "wlcore.h"
#include "debug.h"
#include "wl12xx_80211.h"
#include "io.h"
#include "event.h"
#include "tx.h"
#include "rx.h"
#include "ps.h"
#include "init.h"
#include "debugfs.h"
#include "cmd.h"
#include "boot.h"
#include "testmode.h"
#include "scan.h"
#include "hw_ops.h"

#define WL1271_BOOT_RETRIES 3

static char *fwlog_param;
static bool bug_on_recovery;
static bool no_recovery;

static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);

static int wl12xx_set_authorized(struct wl1271 *wl,
				 struct wl12xx_vif *wlvif)
{
	int ret;

	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
		return -EINVAL;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return 0;

	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
		return 0;

	ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
	if (ret < 0)
		return ret;

	wl12xx_croc(wl, wlvif->role_id);

	wl1271_info("Association completed.");
	return 0;
}

static int wl1271_reg_notify(struct wiphy *wiphy,
			     struct regulatory_request *request)
{
	struct ieee80211_supported_band *band;
	struct ieee80211_channel *ch;
	int i;

	band = wiphy->bands[IEEE80211_BAND_5GHZ];
	for (i = 0; i < band->n_channels; i++) {
		ch = &band->channels[i];
		if (ch->flags & IEEE80211_CHAN_DISABLED)
			continue;

		if (ch->flags & IEEE80211_CHAN_RADAR)
			ch->flags |= IEEE80211_CHAN_NO_IBSS |
				     IEEE80211_CHAN_PASSIVE_SCAN;

	}

	return 0;
}

static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   bool enable)
{
	int ret = 0;

	/* we should hold wl->mutex */
	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
	if (ret < 0)
		goto out;

	if (enable)
		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
	else
		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
out:
	return ret;
}

/*
 * this function is called when the rx_streaming interval
 * has been changed or
rx_streaming should be disabled 135 */ 136 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif) 137 { 138 int ret = 0; 139 int period = wl->conf.rx_streaming.interval; 140 141 /* don't reconfigure if rx_streaming is disabled */ 142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) 143 goto out; 144 145 /* reconfigure/disable according to new streaming_period */ 146 if (period && 147 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && 148 (wl->conf.rx_streaming.always || 149 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) 150 ret = wl1271_set_rx_streaming(wl, wlvif, true); 151 else { 152 ret = wl1271_set_rx_streaming(wl, wlvif, false); 153 /* don't cancel_work_sync since we might deadlock */ 154 del_timer_sync(&wlvif->rx_streaming_timer); 155 } 156 out: 157 return ret; 158 } 159 160 static void wl1271_rx_streaming_enable_work(struct work_struct *work) 161 { 162 int ret; 163 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, 164 rx_streaming_enable_work); 165 struct wl1271 *wl = wlvif->wl; 166 167 mutex_lock(&wl->mutex); 168 169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) || 170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || 171 (!wl->conf.rx_streaming.always && 172 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) 173 goto out; 174 175 if (!wl->conf.rx_streaming.interval) 176 goto out; 177 178 ret = wl1271_ps_elp_wakeup(wl); 179 if (ret < 0) 180 goto out; 181 182 ret = wl1271_set_rx_streaming(wl, wlvif, true); 183 if (ret < 0) 184 goto out_sleep; 185 186 /* stop it after some time of inactivity */ 187 mod_timer(&wlvif->rx_streaming_timer, 188 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); 189 190 out_sleep: 191 wl1271_ps_elp_sleep(wl); 192 out: 193 mutex_unlock(&wl->mutex); 194 } 195 196 static void wl1271_rx_streaming_disable_work(struct work_struct *work) 197 { 198 int ret; 199 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, 200 rx_streaming_disable_work); 201 struct wl1271 *wl = wlvif->wl; 202 203 mutex_lock(&wl->mutex); 204 205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) 206 goto out; 207 208 ret = wl1271_ps_elp_wakeup(wl); 209 if (ret < 0) 210 goto out; 211 212 ret = wl1271_set_rx_streaming(wl, wlvif, false); 213 if (ret) 214 goto out_sleep; 215 216 out_sleep: 217 wl1271_ps_elp_sleep(wl); 218 out: 219 mutex_unlock(&wl->mutex); 220 } 221 222 static void wl1271_rx_streaming_timer(unsigned long data) 223 { 224 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data; 225 struct wl1271 *wl = wlvif->wl; 226 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work); 227 } 228 229 /* wl->mutex must be taken */ 230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl) 231 { 232 /* if the watchdog is not armed, don't do anything */ 233 if (wl->tx_allocated_blocks == 0) 234 return; 235 236 cancel_delayed_work(&wl->tx_watchdog_work); 237 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work, 238 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout)); 239 } 240 241 static void wl12xx_tx_watchdog_work(struct work_struct *work) 242 { 243 struct delayed_work *dwork; 244 struct wl1271 *wl; 245 246 dwork = container_of(work, struct delayed_work, work); 247 wl = container_of(dwork, struct wl1271, tx_watchdog_work); 248 249 mutex_lock(&wl->mutex); 250 251 if (unlikely(wl->state != WLCORE_STATE_ON)) 252 goto out; 253 254 /* Tx went out in the meantime - everything is ok */ 255 if (unlikely(wl->tx_allocated_blocks == 0)) 256 goto out; 257 258 /* 259 * if a ROC is in progress, 
we might not have any Tx for a long 260 * time (e.g. pending Tx on the non-ROC channels) 261 */ 262 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) { 263 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC", 264 wl->conf.tx.tx_watchdog_timeout); 265 wl12xx_rearm_tx_watchdog_locked(wl); 266 goto out; 267 } 268 269 /* 270 * if a scan is in progress, we might not have any Tx for a long 271 * time 272 */ 273 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { 274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan", 275 wl->conf.tx.tx_watchdog_timeout); 276 wl12xx_rearm_tx_watchdog_locked(wl); 277 goto out; 278 } 279 280 /* 281 * AP might cache a frame for a long time for a sleeping station, 282 * so rearm the timer if there's an AP interface with stations. If 283 * Tx is genuinely stuck we will most hopefully discover it when all 284 * stations are removed due to inactivity. 285 */ 286 if (wl->active_sta_count) { 287 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has " 288 " %d stations", 289 wl->conf.tx.tx_watchdog_timeout, 290 wl->active_sta_count); 291 wl12xx_rearm_tx_watchdog_locked(wl); 292 goto out; 293 } 294 295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery", 296 wl->conf.tx.tx_watchdog_timeout); 297 wl12xx_queue_recovery_work(wl); 298 299 out: 300 mutex_unlock(&wl->mutex); 301 } 302 303 static void wlcore_adjust_conf(struct wl1271 *wl) 304 { 305 /* Adjust settings according to optional module parameters */ 306 if (fwlog_param) { 307 if (!strcmp(fwlog_param, "continuous")) { 308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; 309 } else if (!strcmp(fwlog_param, "ondemand")) { 310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND; 311 } else if (!strcmp(fwlog_param, "dbgpins")) { 312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; 313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS; 314 } else if (!strcmp(fwlog_param, "disable")) { 315 wl->conf.fwlog.mem_blocks = 0; 316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE; 317 } else { 318 wl1271_error("Unknown fwlog parameter %s", fwlog_param); 319 } 320 } 321 } 322 323 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, 324 struct wl12xx_vif *wlvif, 325 u8 hlid, u8 tx_pkts) 326 { 327 bool fw_ps, single_sta; 328 329 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); 330 single_sta = (wl->active_sta_count == 1); 331 332 /* 333 * Wake up from high level PS if the STA is asleep with too little 334 * packets in FW or if the STA is awake. 335 */ 336 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS) 337 wl12xx_ps_link_end(wl, wlvif, hlid); 338 339 /* 340 * Start high-level PS if the STA is asleep with enough blocks in FW. 341 * Make an exception if this is the only connected station. In this 342 * case FW-memory congestion is not a problem. 
343 */ 344 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) 345 wl12xx_ps_link_start(wl, wlvif, hlid, true); 346 } 347 348 static void wl12xx_irq_update_links_status(struct wl1271 *wl, 349 struct wl12xx_vif *wlvif, 350 struct wl_fw_status_2 *status) 351 { 352 struct wl1271_link *lnk; 353 u32 cur_fw_ps_map; 354 u8 hlid, cnt; 355 356 /* TODO: also use link_fast_bitmap here */ 357 358 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap); 359 if (wl->ap_fw_ps_map != cur_fw_ps_map) { 360 wl1271_debug(DEBUG_PSM, 361 "link ps prev 0x%x cur 0x%x changed 0x%x", 362 wl->ap_fw_ps_map, cur_fw_ps_map, 363 wl->ap_fw_ps_map ^ cur_fw_ps_map); 364 365 wl->ap_fw_ps_map = cur_fw_ps_map; 366 } 367 368 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) { 369 lnk = &wl->links[hlid]; 370 cnt = status->counters.tx_lnk_free_pkts[hlid] - 371 lnk->prev_freed_pkts; 372 373 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid]; 374 lnk->allocated_pkts -= cnt; 375 376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid, 377 lnk->allocated_pkts); 378 } 379 } 380 381 static int wlcore_fw_status(struct wl1271 *wl, 382 struct wl_fw_status_1 *status_1, 383 struct wl_fw_status_2 *status_2) 384 { 385 struct wl12xx_vif *wlvif; 386 struct timespec ts; 387 u32 old_tx_blk_count = wl->tx_blocks_available; 388 int avail, freed_blocks; 389 int i; 390 size_t status_len; 391 int ret; 392 393 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) + 394 sizeof(*status_2) + wl->fw_status_priv_len; 395 396 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1, 397 status_len, false); 398 if (ret < 0) 399 return ret; 400 401 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 402 "drv_rx_counter = %d, tx_results_counter = %d)", 403 status_1->intr, 404 status_1->fw_rx_counter, 405 status_1->drv_rx_counter, 406 status_1->tx_results_counter); 407 408 for (i = 0; i < NUM_TX_QUEUES; i++) { 409 /* prevent wrap-around in freed-packets counter */ 410 wl->tx_allocated_pkts[i] -= 411 (status_2->counters.tx_released_pkts[i] - 412 wl->tx_pkts_freed[i]) & 0xff; 413 414 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i]; 415 } 416 417 /* prevent wrap-around in total blocks counter */ 418 if (likely(wl->tx_blocks_freed <= 419 le32_to_cpu(status_2->total_released_blks))) 420 freed_blocks = le32_to_cpu(status_2->total_released_blks) - 421 wl->tx_blocks_freed; 422 else 423 freed_blocks = 0x100000000LL - wl->tx_blocks_freed + 424 le32_to_cpu(status_2->total_released_blks); 425 426 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks); 427 428 wl->tx_allocated_blocks -= freed_blocks; 429 430 /* 431 * If the FW freed some blocks: 432 * If we still have allocated blocks - re-arm the timer, Tx is 433 * not stuck. Otherwise, cancel the timer (no Tx currently). 434 */ 435 if (freed_blocks) { 436 if (wl->tx_allocated_blocks) 437 wl12xx_rearm_tx_watchdog_locked(wl); 438 else 439 cancel_delayed_work(&wl->tx_watchdog_work); 440 } 441 442 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks; 443 444 /* 445 * The FW might change the total number of TX memblocks before 446 * we get a notification about blocks being released. Thus, the 447 * available blocks calculation might yield a temporary result 448 * which is lower than the actual available blocks. Keeping in 449 * mind that only blocks that were allocated can be moved from 450 * TX to RX, tx_blocks_available should never decrease here. 
451 */ 452 wl->tx_blocks_available = max((int)wl->tx_blocks_available, 453 avail); 454 455 /* if more blocks are available now, tx work can be scheduled */ 456 if (wl->tx_blocks_available > old_tx_blk_count) 457 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 458 459 /* for AP update num of allocated TX blocks per link and ps status */ 460 wl12xx_for_each_wlvif_ap(wl, wlvif) { 461 wl12xx_irq_update_links_status(wl, wlvif, status_2); 462 } 463 464 /* update the host-chipset time offset */ 465 getnstimeofday(&ts); 466 wl->time_offset = (timespec_to_ns(&ts) >> 10) - 467 (s64)le32_to_cpu(status_2->fw_localtime); 468 469 return 0; 470 } 471 472 static void wl1271_flush_deferred_work(struct wl1271 *wl) 473 { 474 struct sk_buff *skb; 475 476 /* Pass all received frames to the network stack */ 477 while ((skb = skb_dequeue(&wl->deferred_rx_queue))) 478 ieee80211_rx_ni(wl->hw, skb); 479 480 /* Return sent skbs to the network stack */ 481 while ((skb = skb_dequeue(&wl->deferred_tx_queue))) 482 ieee80211_tx_status_ni(wl->hw, skb); 483 } 484 485 static void wl1271_netstack_work(struct work_struct *work) 486 { 487 struct wl1271 *wl = 488 container_of(work, struct wl1271, netstack_work); 489 490 do { 491 wl1271_flush_deferred_work(wl); 492 } while (skb_queue_len(&wl->deferred_rx_queue)); 493 } 494 495 #define WL1271_IRQ_MAX_LOOPS 256 496 497 static int wlcore_irq_locked(struct wl1271 *wl) 498 { 499 int ret = 0; 500 u32 intr; 501 int loopcount = WL1271_IRQ_MAX_LOOPS; 502 bool done = false; 503 unsigned int defer_count; 504 unsigned long flags; 505 506 /* 507 * In case edge triggered interrupt must be used, we cannot iterate 508 * more than once without introducing race conditions with the hardirq. 509 */ 510 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) 511 loopcount = 1; 512 513 wl1271_debug(DEBUG_IRQ, "IRQ work"); 514 515 if (unlikely(wl->state != WLCORE_STATE_ON)) 516 goto out; 517 518 ret = wl1271_ps_elp_wakeup(wl); 519 if (ret < 0) 520 goto out; 521 522 while (!done && loopcount--) { 523 /* 524 * In order to avoid a race with the hardirq, clear the flag 525 * before acknowledging the chip. Since the mutex is held, 526 * wl1271_ps_elp_wakeup cannot be called concurrently. 527 */ 528 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 529 smp_mb__after_clear_bit(); 530 531 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2); 532 if (ret < 0) 533 goto out; 534 535 wlcore_hw_tx_immediate_compl(wl); 536 537 intr = le32_to_cpu(wl->fw_status_1->intr); 538 intr &= WLCORE_ALL_INTR_MASK; 539 if (!intr) { 540 done = true; 541 continue; 542 } 543 544 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) { 545 wl1271_error("HW watchdog interrupt received! starting recovery."); 546 wl->watchdog_recovery = true; 547 ret = -EIO; 548 549 /* restarting the chip. ignore any other interrupt. */ 550 goto out; 551 } 552 553 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) { 554 wl1271_error("SW watchdog interrupt received! " 555 "starting recovery."); 556 wl->watchdog_recovery = true; 557 ret = -EIO; 558 559 /* restarting the chip. ignore any other interrupt. 
*/ 560 goto out; 561 } 562 563 if (likely(intr & WL1271_ACX_INTR_DATA)) { 564 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 565 566 ret = wlcore_rx(wl, wl->fw_status_1); 567 if (ret < 0) 568 goto out; 569 570 /* Check if any tx blocks were freed */ 571 spin_lock_irqsave(&wl->wl_lock, flags); 572 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && 573 wl1271_tx_total_queue_count(wl) > 0) { 574 spin_unlock_irqrestore(&wl->wl_lock, flags); 575 /* 576 * In order to avoid starvation of the TX path, 577 * call the work function directly. 578 */ 579 ret = wlcore_tx_work_locked(wl); 580 if (ret < 0) 581 goto out; 582 } else { 583 spin_unlock_irqrestore(&wl->wl_lock, flags); 584 } 585 586 /* check for tx results */ 587 ret = wlcore_hw_tx_delayed_compl(wl); 588 if (ret < 0) 589 goto out; 590 591 /* Make sure the deferred queues don't get too long */ 592 defer_count = skb_queue_len(&wl->deferred_tx_queue) + 593 skb_queue_len(&wl->deferred_rx_queue); 594 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT) 595 wl1271_flush_deferred_work(wl); 596 } 597 598 if (intr & WL1271_ACX_INTR_EVENT_A) { 599 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); 600 ret = wl1271_event_handle(wl, 0); 601 if (ret < 0) 602 goto out; 603 } 604 605 if (intr & WL1271_ACX_INTR_EVENT_B) { 606 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); 607 ret = wl1271_event_handle(wl, 1); 608 if (ret < 0) 609 goto out; 610 } 611 612 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 613 wl1271_debug(DEBUG_IRQ, 614 "WL1271_ACX_INTR_INIT_COMPLETE"); 615 616 if (intr & WL1271_ACX_INTR_HW_AVAILABLE) 617 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); 618 } 619 620 wl1271_ps_elp_sleep(wl); 621 622 out: 623 return ret; 624 } 625 626 static irqreturn_t wlcore_irq(int irq, void *cookie) 627 { 628 int ret; 629 unsigned long flags; 630 struct wl1271 *wl = cookie; 631 632 /* TX might be handled here, avoid redundant work */ 633 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags); 634 cancel_work_sync(&wl->tx_work); 635 636 mutex_lock(&wl->mutex); 637 638 ret = wlcore_irq_locked(wl); 639 if (ret) 640 wl12xx_queue_recovery_work(wl); 641 642 spin_lock_irqsave(&wl->wl_lock, flags); 643 /* In case TX was not handled here, queue TX work */ 644 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags); 645 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && 646 wl1271_tx_total_queue_count(wl) > 0) 647 ieee80211_queue_work(wl->hw, &wl->tx_work); 648 spin_unlock_irqrestore(&wl->wl_lock, flags); 649 650 mutex_unlock(&wl->mutex); 651 652 return IRQ_HANDLED; 653 } 654 655 struct vif_counter_data { 656 u8 counter; 657 658 struct ieee80211_vif *cur_vif; 659 bool cur_vif_running; 660 }; 661 662 static void wl12xx_vif_count_iter(void *data, u8 *mac, 663 struct ieee80211_vif *vif) 664 { 665 struct vif_counter_data *counter = data; 666 667 counter->counter++; 668 if (counter->cur_vif == vif) 669 counter->cur_vif_running = true; 670 } 671 672 /* caller must not hold wl->mutex, as it might deadlock */ 673 static void wl12xx_get_vif_count(struct ieee80211_hw *hw, 674 struct ieee80211_vif *cur_vif, 675 struct vif_counter_data *data) 676 { 677 memset(data, 0, sizeof(*data)); 678 data->cur_vif = cur_vif; 679 680 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, 681 wl12xx_vif_count_iter, data); 682 } 683 684 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt) 685 { 686 const struct firmware *fw; 687 const char *fw_name; 688 enum wl12xx_fw_type fw_type; 689 int ret; 690 691 if (plt) { 692 fw_type = WL12XX_FW_TYPE_PLT; 693 fw_name = wl->plt_fw_name; 694 } 
else {
		/*
		 * we can't call wl12xx_get_vif_count() here because
		 * wl->mutex is taken, so use the cached last_vif_count value
		 */
		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			fw_name = wl->mr_fw_name;
		} else {
			fw_type = WL12XX_FW_TYPE_NORMAL;
			fw_name = wl->sr_fw_name;
		}
	}

	if (wl->fw_type == fw_type)
		return 0;

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = request_firmware(&fw, fw_name, wl->dev);

	if (ret < 0) {
		wl1271_error("could not get firmware %s: %d", fw_name, ret);
		return ret;
	}

	if (fw->size % 4) {
		wl1271_error("firmware size is not multiple of 32 bits: %zu",
			     fw->size);
		ret = -EILSEQ;
		goto out;
	}

	vfree(wl->fw);
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);

	if (!wl->fw) {
		wl1271_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw->data, wl->fw_len);
	ret = 0;
	wl->fw_type = fw_type;
out:
	release_firmware(fw);

	return ret;
}

void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
	WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	/* Avoid a recursive recovery */
	if (wl->state == WLCORE_STATE_ON) {
		wl->state = WLCORE_STATE_RESTARTING;
		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
		wlcore_disable_interrupts_nosync(wl);
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
	}
}

size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
{
	size_t len = 0;

	/* The FW log is a length-value list; find where the log ends */
	while (len < maxlen) {
		if (memblock[len] == 0)
			break;
		if (len + memblock[len] + 1 > maxlen)
			break;
		len += memblock[len] + 1;
	}

	/* Make sure we have enough room */
	len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));

	/* Fill the FW log file, consumed by the sysfs fwlog entry */
	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
	wl->fwlog_size += len;

	return len;
}
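
/*
 * Illustration of the length-value layout that wl12xx_copy_fwlog() walks
 * (derived from the loop above, not from a separate FW spec): each log
 * entry starts with a length byte followed by that many payload bytes,
 * and a zero length byte terminates the list.  For example, a buffer
 * { 0x03, 'a', 'b', 'c', 0x02, 'x', 'y', 0x00, ... } would be copied up
 * to, but not including, the 0x00 terminator.
 */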
#define WLCORE_FW_LOG_END 0x2000000

static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	u32 addr;
	u32 offset;
	u32 end_of_log;
	u8 *block;
	int ret;

	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
	if (!block)
		return;

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * Do not send a stop fwlog command if the fw is hung.
	 */
	if (wl1271_ps_elp_wakeup(wl))
		goto out;
	if (!wl->watchdog_recovery)
		wl12xx_cmd_stop_fwlog(wl);

	/* Read the first memory block address */
	ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
	if (ret < 0)
		goto out;

	addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
	if (!addr)
		goto out;

	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
		end_of_log = WLCORE_FW_LOG_END;
	} else {
		offset = sizeof(addr);
		end_of_log = addr;
	}

	/* Traverse the memory blocks linked list */
	do {
		memset(block, 0, WL12XX_HW_BLOCK_SIZE);
		ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
					 false);
		if (ret < 0)
			goto out;

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one in
		 * on-demand mode and is equal to 0x2000000 in continuous mode.
		 */
		addr = le32_to_cpup((__le32 *)block);
		if (!wl12xx_copy_fwlog(wl, block + offset,
				       WL12XX_HW_BLOCK_SIZE - offset))
			break;
	} while (addr && (addr != end_of_log));

	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
}

static void wlcore_print_recovery(struct wl1271 *wl)
{
	u32 pc = 0;
	u32 hint_sts = 0;
	int ret;

	wl1271_info("Hardware recovery in progress. FW ver: %s",
		    wl->chip.fw_ver_str);

	/* change partitions momentarily so we can read the FW pc */
	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		return;

	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
	if (ret < 0)
		return;

	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
	if (ret < 0)
		return;

	wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);

	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
}
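
/*
 * Recovery overview (a summary of the work function below): unless the
 * restart was intentionally requested, dump the FW panic log and the FW
 * program counter; honour the bug_on_recovery/no_recovery module
 * parameters; advance the TX security sequence numbers; tear down all
 * interfaces and stop the device; then ask mac80211 to restart the HW.
 */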

static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF || wl->plt)
		goto out_unlock;

	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
		wl12xx_read_fwlog_panic(wl);
		wlcore_print_recovery(wl);
	}

	BUG_ON(bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	if (no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
		goto out_unlock;
	}

	/*
	 * Advance security sequence number to overcome potential progress
	 * in the firmware during recovery. This doesn't hurt if the network
	 * is not encrypted.
	 */
	wl12xx_for_each_wlvif(wl, wlvif) {
		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
		    test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			wlvif->tx_security_seq +=
				WL1271_TX_SQN_POST_RECOVERY_PADDING;
	}

	/* Prevent spurious TX during FW restart */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

	if (wl->sched_scanning) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_scanning = false;
	}

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
					 struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);
		__wl1271_op_remove_interface(wl, vif, false);
	}

	wlcore_op_stop_locked(wl);

	ieee80211_restart_hw(wl->hw);

	/*
	 * It's safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

out_unlock:
	wl->watchdog_recovery = false;
	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
	mutex_unlock(&wl->mutex);
}

static int wlcore_fw_wakeup(struct wl1271 *wl)
{
	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}

static int wl1271_setup(struct wl1271 *wl)
{
	wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
				  sizeof(*wl->fw_status_2) +
				  wl->fw_status_priv_len, GFP_KERNEL);
	if (!wl->fw_status_1)
		return -ENOMEM;

	wl->fw_status_2 = (struct wl_fw_status_2 *)
				(((u8 *) wl->fw_status_1) +
				WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));

	wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
	if (!wl->tx_res_if) {
		kfree(wl->fw_status_1);
		return -ENOMEM;
	}

	return 0;
}

static int wl12xx_set_power_on(struct wl1271 *wl)
{
	int ret;

	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		goto fail;

	/* ELP module wake up */
	ret = wlcore_fw_wakeup(wl);
	if (ret < 0)
		goto fail;

out:
	return ret;

fail:
	wl1271_power_off(wl);
	return ret;
}

static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 *
	 * Check if the bus supports blocksize alignment and, if it
	 * doesn't, make sure we don't have the quirk.
1025 */ 1026 if (!wl1271_set_block_size(wl)) 1027 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; 1028 1029 /* TODO: make sure the lower driver has set things up correctly */ 1030 1031 ret = wl1271_setup(wl); 1032 if (ret < 0) 1033 goto out; 1034 1035 ret = wl12xx_fetch_firmware(wl, plt); 1036 if (ret < 0) 1037 goto out; 1038 1039 out: 1040 return ret; 1041 } 1042 1043 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode) 1044 { 1045 int retries = WL1271_BOOT_RETRIES; 1046 struct wiphy *wiphy = wl->hw->wiphy; 1047 1048 static const char* const PLT_MODE[] = { 1049 "PLT_OFF", 1050 "PLT_ON", 1051 "PLT_FEM_DETECT" 1052 }; 1053 1054 int ret; 1055 1056 mutex_lock(&wl->mutex); 1057 1058 wl1271_notice("power up"); 1059 1060 if (wl->state != WLCORE_STATE_OFF) { 1061 wl1271_error("cannot go into PLT state because not " 1062 "in off state: %d", wl->state); 1063 ret = -EBUSY; 1064 goto out; 1065 } 1066 1067 /* Indicate to lower levels that we are now in PLT mode */ 1068 wl->plt = true; 1069 wl->plt_mode = plt_mode; 1070 1071 while (retries) { 1072 retries--; 1073 ret = wl12xx_chip_wakeup(wl, true); 1074 if (ret < 0) 1075 goto power_off; 1076 1077 ret = wl->ops->plt_init(wl); 1078 if (ret < 0) 1079 goto power_off; 1080 1081 wl->state = WLCORE_STATE_ON; 1082 wl1271_notice("firmware booted in PLT mode %s (%s)", 1083 PLT_MODE[plt_mode], 1084 wl->chip.fw_ver_str); 1085 1086 /* update hw/fw version info in wiphy struct */ 1087 wiphy->hw_version = wl->chip.id; 1088 strncpy(wiphy->fw_version, wl->chip.fw_ver_str, 1089 sizeof(wiphy->fw_version)); 1090 1091 goto out; 1092 1093 power_off: 1094 wl1271_power_off(wl); 1095 } 1096 1097 wl->plt = false; 1098 wl->plt_mode = PLT_OFF; 1099 1100 wl1271_error("firmware boot in PLT mode failed despite %d retries", 1101 WL1271_BOOT_RETRIES); 1102 out: 1103 mutex_unlock(&wl->mutex); 1104 1105 return ret; 1106 } 1107 1108 int wl1271_plt_stop(struct wl1271 *wl) 1109 { 1110 int ret = 0; 1111 1112 wl1271_notice("power down"); 1113 1114 /* 1115 * Interrupts must be disabled before setting the state to OFF. 1116 * Otherwise, the interrupt handler might be called and exit without 1117 * reading the interrupt status. 1118 */ 1119 wlcore_disable_interrupts(wl); 1120 mutex_lock(&wl->mutex); 1121 if (!wl->plt) { 1122 mutex_unlock(&wl->mutex); 1123 1124 /* 1125 * This will not necessarily enable interrupts as interrupts 1126 * may have been disabled when op_stop was called. It will, 1127 * however, balance the above call to disable_interrupts(). 
1128 */ 1129 wlcore_enable_interrupts(wl); 1130 1131 wl1271_error("cannot power down because not in PLT " 1132 "state: %d", wl->state); 1133 ret = -EBUSY; 1134 goto out; 1135 } 1136 1137 mutex_unlock(&wl->mutex); 1138 1139 wl1271_flush_deferred_work(wl); 1140 cancel_work_sync(&wl->netstack_work); 1141 cancel_work_sync(&wl->recovery_work); 1142 cancel_delayed_work_sync(&wl->elp_work); 1143 cancel_delayed_work_sync(&wl->tx_watchdog_work); 1144 cancel_delayed_work_sync(&wl->connection_loss_work); 1145 1146 mutex_lock(&wl->mutex); 1147 wl1271_power_off(wl); 1148 wl->flags = 0; 1149 wl->sleep_auth = WL1271_PSM_ILLEGAL; 1150 wl->state = WLCORE_STATE_OFF; 1151 wl->plt = false; 1152 wl->plt_mode = PLT_OFF; 1153 wl->rx_counter = 0; 1154 mutex_unlock(&wl->mutex); 1155 1156 out: 1157 return ret; 1158 } 1159 1160 static void wl1271_op_tx(struct ieee80211_hw *hw, 1161 struct ieee80211_tx_control *control, 1162 struct sk_buff *skb) 1163 { 1164 struct wl1271 *wl = hw->priv; 1165 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1166 struct ieee80211_vif *vif = info->control.vif; 1167 struct wl12xx_vif *wlvif = NULL; 1168 unsigned long flags; 1169 int q, mapping; 1170 u8 hlid; 1171 1172 if (vif) 1173 wlvif = wl12xx_vif_to_data(vif); 1174 1175 mapping = skb_get_queue_mapping(skb); 1176 q = wl1271_tx_get_queue(mapping); 1177 1178 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta); 1179 1180 spin_lock_irqsave(&wl->wl_lock, flags); 1181 1182 /* 1183 * drop the packet if the link is invalid or the queue is stopped 1184 * for any reason but watermark. Watermark is a "soft"-stop so we 1185 * allow these packets through. 1186 */ 1187 if (hlid == WL12XX_INVALID_LINK_ID || 1188 (wlvif && !test_bit(hlid, wlvif->links_map)) || 1189 (wlcore_is_queue_stopped(wl, q) && 1190 !wlcore_is_queue_stopped_by_reason(wl, q, 1191 WLCORE_QUEUE_STOP_REASON_WATERMARK))) { 1192 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q); 1193 ieee80211_free_txskb(hw, skb); 1194 goto out; 1195 } 1196 1197 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d", 1198 hlid, q, skb->len); 1199 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); 1200 1201 wl->tx_queue_count[q]++; 1202 1203 /* 1204 * The workqueue is slow to process the tx_queue and we need stop 1205 * the queue here, otherwise the queue will get too long. 1206 */ 1207 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK && 1208 !wlcore_is_queue_stopped_by_reason(wl, q, 1209 WLCORE_QUEUE_STOP_REASON_WATERMARK)) { 1210 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q); 1211 wlcore_stop_queue_locked(wl, q, 1212 WLCORE_QUEUE_STOP_REASON_WATERMARK); 1213 } 1214 1215 /* 1216 * The chip specific setup must run before the first TX packet - 1217 * before that, the tx_work will not be initialized! 
1218 */ 1219 1220 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && 1221 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags)) 1222 ieee80211_queue_work(wl->hw, &wl->tx_work); 1223 1224 out: 1225 spin_unlock_irqrestore(&wl->wl_lock, flags); 1226 } 1227 1228 int wl1271_tx_dummy_packet(struct wl1271 *wl) 1229 { 1230 unsigned long flags; 1231 int q; 1232 1233 /* no need to queue a new dummy packet if one is already pending */ 1234 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) 1235 return 0; 1236 1237 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet)); 1238 1239 spin_lock_irqsave(&wl->wl_lock, flags); 1240 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); 1241 wl->tx_queue_count[q]++; 1242 spin_unlock_irqrestore(&wl->wl_lock, flags); 1243 1244 /* The FW is low on RX memory blocks, so send the dummy packet asap */ 1245 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) 1246 return wlcore_tx_work_locked(wl); 1247 1248 /* 1249 * If the FW TX is busy, TX work will be scheduled by the threaded 1250 * interrupt handler function 1251 */ 1252 return 0; 1253 } 1254 1255 /* 1256 * The size of the dummy packet should be at least 1400 bytes. However, in 1257 * order to minimize the number of bus transactions, aligning it to 512 bytes 1258 * boundaries could be beneficial, performance wise 1259 */ 1260 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512)) 1261 1262 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl) 1263 { 1264 struct sk_buff *skb; 1265 struct ieee80211_hdr_3addr *hdr; 1266 unsigned int dummy_packet_size; 1267 1268 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE - 1269 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr); 1270 1271 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE); 1272 if (!skb) { 1273 wl1271_warning("Failed to allocate a dummy packet skb"); 1274 return NULL; 1275 } 1276 1277 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr)); 1278 1279 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr)); 1280 memset(hdr, 0, sizeof(*hdr)); 1281 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 1282 IEEE80211_STYPE_NULLFUNC | 1283 IEEE80211_FCTL_TODS); 1284 1285 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size); 1286 1287 /* Dummy packets require the TID to be management */ 1288 skb->priority = WL1271_TID_MGMT; 1289 1290 /* Initialize all fields that might be used */ 1291 skb_set_queue_mapping(skb, 0); 1292 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info)); 1293 1294 return skb; 1295 } 1296 1297 1298 #ifdef CONFIG_PM 1299 static int 1300 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p) 1301 { 1302 int num_fields = 0, in_field = 0, fields_size = 0; 1303 int i, pattern_len = 0; 1304 1305 if (!p->mask) { 1306 wl1271_warning("No mask in WoWLAN pattern"); 1307 return -EINVAL; 1308 } 1309 1310 /* 1311 * The pattern is broken up into segments of bytes at different offsets 1312 * that need to be checked by the FW filter. Each segment is called 1313 * a field in the FW API. We verify that the total number of fields 1314 * required for this pattern won't exceed FW limits (8) 1315 * as well as the total fields buffer won't exceed the FW limit. 1316 * Note that if there's a pattern which crosses Ethernet/IP header 1317 * boundary a new field is required. 
1318 */ 1319 for (i = 0; i < p->pattern_len; i++) { 1320 if (test_bit(i, (unsigned long *)p->mask)) { 1321 if (!in_field) { 1322 in_field = 1; 1323 pattern_len = 1; 1324 } else { 1325 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) { 1326 num_fields++; 1327 fields_size += pattern_len + 1328 RX_FILTER_FIELD_OVERHEAD; 1329 pattern_len = 1; 1330 } else 1331 pattern_len++; 1332 } 1333 } else { 1334 if (in_field) { 1335 in_field = 0; 1336 fields_size += pattern_len + 1337 RX_FILTER_FIELD_OVERHEAD; 1338 num_fields++; 1339 } 1340 } 1341 } 1342 1343 if (in_field) { 1344 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD; 1345 num_fields++; 1346 } 1347 1348 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) { 1349 wl1271_warning("RX Filter too complex. Too many segments"); 1350 return -EINVAL; 1351 } 1352 1353 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) { 1354 wl1271_warning("RX filter pattern is too big"); 1355 return -E2BIG; 1356 } 1357 1358 return 0; 1359 } 1360 1361 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void) 1362 { 1363 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL); 1364 } 1365 1366 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter) 1367 { 1368 int i; 1369 1370 if (filter == NULL) 1371 return; 1372 1373 for (i = 0; i < filter->num_fields; i++) 1374 kfree(filter->fields[i].pattern); 1375 1376 kfree(filter); 1377 } 1378 1379 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter, 1380 u16 offset, u8 flags, 1381 u8 *pattern, u8 len) 1382 { 1383 struct wl12xx_rx_filter_field *field; 1384 1385 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) { 1386 wl1271_warning("Max fields per RX filter. can't alloc another"); 1387 return -EINVAL; 1388 } 1389 1390 field = &filter->fields[filter->num_fields]; 1391 1392 field->pattern = kzalloc(len, GFP_KERNEL); 1393 if (!field->pattern) { 1394 wl1271_warning("Failed to allocate RX filter pattern"); 1395 return -ENOMEM; 1396 } 1397 1398 filter->num_fields++; 1399 1400 field->offset = cpu_to_le16(offset); 1401 field->flags = flags; 1402 field->len = len; 1403 memcpy(field->pattern, pattern, len); 1404 1405 return 0; 1406 } 1407 1408 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter) 1409 { 1410 int i, fields_size = 0; 1411 1412 for (i = 0; i < filter->num_fields; i++) 1413 fields_size += filter->fields[i].len + 1414 sizeof(struct wl12xx_rx_filter_field) - 1415 sizeof(u8 *); 1416 1417 return fields_size; 1418 } 1419 1420 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter, 1421 u8 *buf) 1422 { 1423 int i; 1424 struct wl12xx_rx_filter_field *field; 1425 1426 for (i = 0; i < filter->num_fields; i++) { 1427 field = (struct wl12xx_rx_filter_field *)buf; 1428 1429 field->offset = filter->fields[i].offset; 1430 field->flags = filter->fields[i].flags; 1431 field->len = filter->fields[i].len; 1432 1433 memcpy(&field->pattern, filter->fields[i].pattern, field->len); 1434 buf += sizeof(struct wl12xx_rx_filter_field) - 1435 sizeof(u8 *) + field->len; 1436 } 1437 } 1438 1439 /* 1440 * Allocates an RX filter returned through f 1441 * which needs to be freed using rx_filter_free() 1442 */ 1443 static int wl1271_convert_wowlan_pattern_to_rx_filter( 1444 struct cfg80211_wowlan_trig_pkt_pattern *p, 1445 struct wl12xx_rx_filter **f) 1446 { 1447 int i, j, ret = 0; 1448 struct wl12xx_rx_filter *filter; 1449 u16 offset; 1450 u8 flags, len; 1451 1452 filter = wl1271_rx_filter_alloc(); 1453 if (!filter) { 1454 wl1271_warning("Failed to alloc rx filter"); 1455 ret = -ENOMEM; 1456 goto err; 1457 } 1458 1459 
i = 0; 1460 while (i < p->pattern_len) { 1461 if (!test_bit(i, (unsigned long *)p->mask)) { 1462 i++; 1463 continue; 1464 } 1465 1466 for (j = i; j < p->pattern_len; j++) { 1467 if (!test_bit(j, (unsigned long *)p->mask)) 1468 break; 1469 1470 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE && 1471 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE) 1472 break; 1473 } 1474 1475 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) { 1476 offset = i; 1477 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER; 1478 } else { 1479 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE; 1480 flags = WL1271_RX_FILTER_FLAG_IP_HEADER; 1481 } 1482 1483 len = j - i; 1484 1485 ret = wl1271_rx_filter_alloc_field(filter, 1486 offset, 1487 flags, 1488 &p->pattern[i], len); 1489 if (ret) 1490 goto err; 1491 1492 i = j; 1493 } 1494 1495 filter->action = FILTER_SIGNAL; 1496 1497 *f = filter; 1498 return 0; 1499 1500 err: 1501 wl1271_rx_filter_free(filter); 1502 *f = NULL; 1503 1504 return ret; 1505 } 1506 1507 static int wl1271_configure_wowlan(struct wl1271 *wl, 1508 struct cfg80211_wowlan *wow) 1509 { 1510 int i, ret; 1511 1512 if (!wow || wow->any || !wow->n_patterns) { 1513 ret = wl1271_acx_default_rx_filter_enable(wl, 0, 1514 FILTER_SIGNAL); 1515 if (ret) 1516 goto out; 1517 1518 ret = wl1271_rx_filter_clear_all(wl); 1519 if (ret) 1520 goto out; 1521 1522 return 0; 1523 } 1524 1525 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS)) 1526 return -EINVAL; 1527 1528 /* Validate all incoming patterns before clearing current FW state */ 1529 for (i = 0; i < wow->n_patterns; i++) { 1530 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]); 1531 if (ret) { 1532 wl1271_warning("Bad wowlan pattern %d", i); 1533 return ret; 1534 } 1535 } 1536 1537 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL); 1538 if (ret) 1539 goto out; 1540 1541 ret = wl1271_rx_filter_clear_all(wl); 1542 if (ret) 1543 goto out; 1544 1545 /* Translate WoWLAN patterns into filters */ 1546 for (i = 0; i < wow->n_patterns; i++) { 1547 struct cfg80211_wowlan_trig_pkt_pattern *p; 1548 struct wl12xx_rx_filter *filter = NULL; 1549 1550 p = &wow->patterns[i]; 1551 1552 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter); 1553 if (ret) { 1554 wl1271_warning("Failed to create an RX filter from " 1555 "wowlan pattern %d", i); 1556 goto out; 1557 } 1558 1559 ret = wl1271_rx_filter_enable(wl, i, 1, filter); 1560 1561 wl1271_rx_filter_free(filter); 1562 if (ret) 1563 goto out; 1564 } 1565 1566 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP); 1567 1568 out: 1569 return ret; 1570 } 1571 1572 static int wl1271_configure_suspend_sta(struct wl1271 *wl, 1573 struct wl12xx_vif *wlvif, 1574 struct cfg80211_wowlan *wow) 1575 { 1576 int ret = 0; 1577 1578 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 1579 goto out; 1580 1581 ret = wl1271_ps_elp_wakeup(wl); 1582 if (ret < 0) 1583 goto out; 1584 1585 ret = wl1271_configure_wowlan(wl, wow); 1586 if (ret < 0) 1587 goto out_sleep; 1588 1589 if ((wl->conf.conn.suspend_wake_up_event == 1590 wl->conf.conn.wake_up_event) && 1591 (wl->conf.conn.suspend_listen_interval == 1592 wl->conf.conn.listen_interval)) 1593 goto out_sleep; 1594 1595 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1596 wl->conf.conn.suspend_wake_up_event, 1597 wl->conf.conn.suspend_listen_interval); 1598 1599 if (ret < 0) 1600 wl1271_error("suspend: set wake up conditions failed: %d", ret); 1601 1602 out_sleep: 1603 wl1271_ps_elp_sleep(wl); 1604 out: 1605 return ret; 1606 1607 } 1608 1609 static int wl1271_configure_suspend_ap(struct wl1271 *wl, 
1610 struct wl12xx_vif *wlvif) 1611 { 1612 int ret = 0; 1613 1614 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) 1615 goto out; 1616 1617 ret = wl1271_ps_elp_wakeup(wl); 1618 if (ret < 0) 1619 goto out; 1620 1621 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); 1622 1623 wl1271_ps_elp_sleep(wl); 1624 out: 1625 return ret; 1626 1627 } 1628 1629 static int wl1271_configure_suspend(struct wl1271 *wl, 1630 struct wl12xx_vif *wlvif, 1631 struct cfg80211_wowlan *wow) 1632 { 1633 if (wlvif->bss_type == BSS_TYPE_STA_BSS) 1634 return wl1271_configure_suspend_sta(wl, wlvif, wow); 1635 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 1636 return wl1271_configure_suspend_ap(wl, wlvif); 1637 return 0; 1638 } 1639 1640 static void wl1271_configure_resume(struct wl1271 *wl, 1641 struct wl12xx_vif *wlvif) 1642 { 1643 int ret = 0; 1644 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; 1645 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS; 1646 1647 if ((!is_ap) && (!is_sta)) 1648 return; 1649 1650 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 1651 return; 1652 1653 ret = wl1271_ps_elp_wakeup(wl); 1654 if (ret < 0) 1655 return; 1656 1657 if (is_sta) { 1658 wl1271_configure_wowlan(wl, NULL); 1659 1660 if ((wl->conf.conn.suspend_wake_up_event == 1661 wl->conf.conn.wake_up_event) && 1662 (wl->conf.conn.suspend_listen_interval == 1663 wl->conf.conn.listen_interval)) 1664 goto out_sleep; 1665 1666 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1667 wl->conf.conn.wake_up_event, 1668 wl->conf.conn.listen_interval); 1669 1670 if (ret < 0) 1671 wl1271_error("resume: wake up conditions failed: %d", 1672 ret); 1673 1674 } else if (is_ap) { 1675 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); 1676 } 1677 1678 out_sleep: 1679 wl1271_ps_elp_sleep(wl); 1680 } 1681 1682 static int wl1271_op_suspend(struct ieee80211_hw *hw, 1683 struct cfg80211_wowlan *wow) 1684 { 1685 struct wl1271 *wl = hw->priv; 1686 struct wl12xx_vif *wlvif; 1687 int ret; 1688 1689 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); 1690 WARN_ON(!wow); 1691 1692 /* we want to perform the recovery before suspending */ 1693 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { 1694 wl1271_warning("postponing suspend to perform recovery"); 1695 return -EBUSY; 1696 } 1697 1698 wl1271_tx_flush(wl); 1699 1700 mutex_lock(&wl->mutex); 1701 wl->wow_enabled = true; 1702 wl12xx_for_each_wlvif(wl, wlvif) { 1703 ret = wl1271_configure_suspend(wl, wlvif, wow); 1704 if (ret < 0) { 1705 mutex_unlock(&wl->mutex); 1706 wl1271_warning("couldn't prepare device to suspend"); 1707 return ret; 1708 } 1709 } 1710 mutex_unlock(&wl->mutex); 1711 /* flush any remaining work */ 1712 wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); 1713 1714 /* 1715 * disable and re-enable interrupts in order to flush 1716 * the threaded_irq 1717 */ 1718 wlcore_disable_interrupts(wl); 1719 1720 /* 1721 * set suspended flag to avoid triggering a new threaded_irq 1722 * work. no need for spinlock as interrupts are disabled. 
1723 */ 1724 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); 1725 1726 wlcore_enable_interrupts(wl); 1727 flush_work(&wl->tx_work); 1728 flush_delayed_work(&wl->elp_work); 1729 1730 return 0; 1731 } 1732 1733 static int wl1271_op_resume(struct ieee80211_hw *hw) 1734 { 1735 struct wl1271 *wl = hw->priv; 1736 struct wl12xx_vif *wlvif; 1737 unsigned long flags; 1738 bool run_irq_work = false, pending_recovery; 1739 int ret; 1740 1741 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d", 1742 wl->wow_enabled); 1743 WARN_ON(!wl->wow_enabled); 1744 1745 /* 1746 * re-enable irq_work enqueuing, and call irq_work directly if 1747 * there is a pending work. 1748 */ 1749 spin_lock_irqsave(&wl->wl_lock, flags); 1750 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags); 1751 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags)) 1752 run_irq_work = true; 1753 spin_unlock_irqrestore(&wl->wl_lock, flags); 1754 1755 mutex_lock(&wl->mutex); 1756 1757 /* test the recovery flag before calling any SDIO functions */ 1758 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, 1759 &wl->flags); 1760 1761 if (run_irq_work) { 1762 wl1271_debug(DEBUG_MAC80211, 1763 "run postponed irq_work directly"); 1764 1765 /* don't talk to the HW if recovery is pending */ 1766 if (!pending_recovery) { 1767 ret = wlcore_irq_locked(wl); 1768 if (ret) 1769 wl12xx_queue_recovery_work(wl); 1770 } 1771 1772 wlcore_enable_interrupts(wl); 1773 } 1774 1775 if (pending_recovery) { 1776 wl1271_warning("queuing forgotten recovery on resume"); 1777 ieee80211_queue_work(wl->hw, &wl->recovery_work); 1778 goto out; 1779 } 1780 1781 wl12xx_for_each_wlvif(wl, wlvif) { 1782 wl1271_configure_resume(wl, wlvif); 1783 } 1784 1785 out: 1786 wl->wow_enabled = false; 1787 mutex_unlock(&wl->mutex); 1788 1789 return 0; 1790 } 1791 #endif 1792 1793 static int wl1271_op_start(struct ieee80211_hw *hw) 1794 { 1795 wl1271_debug(DEBUG_MAC80211, "mac80211 start"); 1796 1797 /* 1798 * We have to delay the booting of the hardware because 1799 * we need to know the local MAC address before downloading and 1800 * initializing the firmware. The MAC address cannot be changed 1801 * after boot, and without the proper MAC address, the firmware 1802 * will not function properly. 1803 * 1804 * The MAC address is first known when the corresponding interface 1805 * is added. That is where we will initialize the hardware. 1806 */ 1807 1808 return 0; 1809 } 1810 1811 static void wlcore_op_stop_locked(struct wl1271 *wl) 1812 { 1813 int i; 1814 1815 if (wl->state == WLCORE_STATE_OFF) { 1816 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, 1817 &wl->flags)) 1818 wlcore_enable_interrupts(wl); 1819 1820 return; 1821 } 1822 1823 /* 1824 * this must be before the cancel_work calls below, so that the work 1825 * functions don't perform further work. 1826 */ 1827 wl->state = WLCORE_STATE_OFF; 1828 1829 /* 1830 * Use the nosync variant to disable interrupts, so the mutex could be 1831 * held while doing so without deadlocking. 
1832 */ 1833 wlcore_disable_interrupts_nosync(wl); 1834 1835 mutex_unlock(&wl->mutex); 1836 1837 wlcore_synchronize_interrupts(wl); 1838 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) 1839 cancel_work_sync(&wl->recovery_work); 1840 wl1271_flush_deferred_work(wl); 1841 cancel_delayed_work_sync(&wl->scan_complete_work); 1842 cancel_work_sync(&wl->netstack_work); 1843 cancel_work_sync(&wl->tx_work); 1844 cancel_delayed_work_sync(&wl->elp_work); 1845 cancel_delayed_work_sync(&wl->tx_watchdog_work); 1846 cancel_delayed_work_sync(&wl->connection_loss_work); 1847 1848 /* let's notify MAC80211 about the remaining pending TX frames */ 1849 wl12xx_tx_reset(wl); 1850 mutex_lock(&wl->mutex); 1851 1852 wl1271_power_off(wl); 1853 /* 1854 * In case a recovery was scheduled, interrupts were disabled to avoid 1855 * an interrupt storm. Now that the power is down, it is safe to 1856 * re-enable interrupts to balance the disable depth 1857 */ 1858 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) 1859 wlcore_enable_interrupts(wl); 1860 1861 wl->band = IEEE80211_BAND_2GHZ; 1862 1863 wl->rx_counter = 0; 1864 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1865 wl->channel_type = NL80211_CHAN_NO_HT; 1866 wl->tx_blocks_available = 0; 1867 wl->tx_allocated_blocks = 0; 1868 wl->tx_results_count = 0; 1869 wl->tx_packets_count = 0; 1870 wl->time_offset = 0; 1871 wl->ap_fw_ps_map = 0; 1872 wl->ap_ps_map = 0; 1873 wl->sched_scanning = false; 1874 wl->sleep_auth = WL1271_PSM_ILLEGAL; 1875 memset(wl->roles_map, 0, sizeof(wl->roles_map)); 1876 memset(wl->links_map, 0, sizeof(wl->links_map)); 1877 memset(wl->roc_map, 0, sizeof(wl->roc_map)); 1878 wl->active_sta_count = 0; 1879 1880 /* The system link is always allocated */ 1881 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); 1882 1883 /* 1884 * this is performed after the cancel_work calls and the associated 1885 * mutex_lock, so that wl1271_op_add_interface does not accidentally 1886 * get executed before all these vars have been reset. 
1887 */ 1888 wl->flags = 0; 1889 1890 wl->tx_blocks_freed = 0; 1891 1892 for (i = 0; i < NUM_TX_QUEUES; i++) { 1893 wl->tx_pkts_freed[i] = 0; 1894 wl->tx_allocated_pkts[i] = 0; 1895 } 1896 1897 wl1271_debugfs_reset(wl); 1898 1899 kfree(wl->fw_status_1); 1900 wl->fw_status_1 = NULL; 1901 wl->fw_status_2 = NULL; 1902 kfree(wl->tx_res_if); 1903 wl->tx_res_if = NULL; 1904 kfree(wl->target_mem_map); 1905 wl->target_mem_map = NULL; 1906 } 1907 1908 static void wlcore_op_stop(struct ieee80211_hw *hw) 1909 { 1910 struct wl1271 *wl = hw->priv; 1911 1912 wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); 1913 1914 mutex_lock(&wl->mutex); 1915 1916 wlcore_op_stop_locked(wl); 1917 1918 mutex_unlock(&wl->mutex); 1919 } 1920 1921 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx) 1922 { 1923 u8 policy = find_first_zero_bit(wl->rate_policies_map, 1924 WL12XX_MAX_RATE_POLICIES); 1925 if (policy >= WL12XX_MAX_RATE_POLICIES) 1926 return -EBUSY; 1927 1928 __set_bit(policy, wl->rate_policies_map); 1929 *idx = policy; 1930 return 0; 1931 } 1932 1933 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx) 1934 { 1935 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES)) 1936 return; 1937 1938 __clear_bit(*idx, wl->rate_policies_map); 1939 *idx = WL12XX_MAX_RATE_POLICIES; 1940 } 1941 1942 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx) 1943 { 1944 u8 policy = find_first_zero_bit(wl->klv_templates_map, 1945 WLCORE_MAX_KLV_TEMPLATES); 1946 if (policy >= WLCORE_MAX_KLV_TEMPLATES) 1947 return -EBUSY; 1948 1949 __set_bit(policy, wl->klv_templates_map); 1950 *idx = policy; 1951 return 0; 1952 } 1953 1954 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx) 1955 { 1956 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES)) 1957 return; 1958 1959 __clear_bit(*idx, wl->klv_templates_map); 1960 *idx = WLCORE_MAX_KLV_TEMPLATES; 1961 } 1962 1963 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif) 1964 { 1965 switch (wlvif->bss_type) { 1966 case BSS_TYPE_AP_BSS: 1967 if (wlvif->p2p) 1968 return WL1271_ROLE_P2P_GO; 1969 else 1970 return WL1271_ROLE_AP; 1971 1972 case BSS_TYPE_STA_BSS: 1973 if (wlvif->p2p) 1974 return WL1271_ROLE_P2P_CL; 1975 else 1976 return WL1271_ROLE_STA; 1977 1978 case BSS_TYPE_IBSS: 1979 return WL1271_ROLE_IBSS; 1980 1981 default: 1982 wl1271_error("invalid bss_type: %d", wlvif->bss_type); 1983 } 1984 return WL12XX_INVALID_ROLE_TYPE; 1985 } 1986 1987 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) 1988 { 1989 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 1990 int i; 1991 1992 /* clear everything but the persistent data */ 1993 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent)); 1994 1995 switch (ieee80211_vif_type_p2p(vif)) { 1996 case NL80211_IFTYPE_P2P_CLIENT: 1997 wlvif->p2p = 1; 1998 /* fall-through */ 1999 case NL80211_IFTYPE_STATION: 2000 wlvif->bss_type = BSS_TYPE_STA_BSS; 2001 break; 2002 case NL80211_IFTYPE_ADHOC: 2003 wlvif->bss_type = BSS_TYPE_IBSS; 2004 break; 2005 case NL80211_IFTYPE_P2P_GO: 2006 wlvif->p2p = 1; 2007 /* fall-through */ 2008 case NL80211_IFTYPE_AP: 2009 wlvif->bss_type = BSS_TYPE_AP_BSS; 2010 break; 2011 default: 2012 wlvif->bss_type = MAX_BSS_TYPE; 2013 return -EOPNOTSUPP; 2014 } 2015 2016 wlvif->role_id = WL12XX_INVALID_ROLE_ID; 2017 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; 2018 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; 2019 2020 if (wlvif->bss_type == BSS_TYPE_STA_BSS || 2021 wlvif->bss_type == BSS_TYPE_IBSS) { 2022 /* init sta/ibss data */ 2023 wlvif->sta.hlid = 
WL12XX_INVALID_LINK_ID; 2024 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx); 2025 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx); 2026 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx); 2027 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id); 2028 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC; 2029 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC; 2030 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC; 2031 } else { 2032 /* init ap data */ 2033 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; 2034 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; 2035 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); 2036 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx); 2037 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) 2038 wl12xx_allocate_rate_policy(wl, 2039 &wlvif->ap.ucast_rate_idx[i]); 2040 wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES; 2041 /* 2042 * TODO: check if basic_rate shouldn't be 2043 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 2044 * instead (the same thing for STA above). 2045 */ 2046 wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES; 2047 /* TODO: this seems to be used only for STA, check it */ 2048 wlvif->rate_set = CONF_TX_AP_ENABLED_RATES; 2049 } 2050 2051 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; 2052 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; 2053 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT; 2054 2055 /* 2056 * mac80211 configures some values globally, while we treat them 2057 * per-interface. thus, on init, we have to copy them from wl 2058 */ 2059 wlvif->band = wl->band; 2060 wlvif->channel = wl->channel; 2061 wlvif->power_level = wl->power_level; 2062 wlvif->channel_type = wl->channel_type; 2063 2064 INIT_WORK(&wlvif->rx_streaming_enable_work, 2065 wl1271_rx_streaming_enable_work); 2066 INIT_WORK(&wlvif->rx_streaming_disable_work, 2067 wl1271_rx_streaming_disable_work); 2068 INIT_LIST_HEAD(&wlvif->list); 2069 2070 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 2071 (unsigned long) wlvif); 2072 return 0; 2073 } 2074 2075 static bool wl12xx_init_fw(struct wl1271 *wl) 2076 { 2077 int retries = WL1271_BOOT_RETRIES; 2078 bool booted = false; 2079 struct wiphy *wiphy = wl->hw->wiphy; 2080 int ret; 2081 2082 while (retries) { 2083 retries--; 2084 ret = wl12xx_chip_wakeup(wl, false); 2085 if (ret < 0) 2086 goto power_off; 2087 2088 ret = wl->ops->boot(wl); 2089 if (ret < 0) 2090 goto power_off; 2091 2092 ret = wl1271_hw_init(wl); 2093 if (ret < 0) 2094 goto irq_disable; 2095 2096 booted = true; 2097 break; 2098 2099 irq_disable: 2100 mutex_unlock(&wl->mutex); 2101 /* Unlocking the mutex in the middle of handling is 2102 inherently unsafe. In this case we deem it safe to do, 2103 because we need to let any possibly pending IRQ out of 2104 the system (and while we are WLCORE_STATE_OFF the IRQ 2105 work function will not do anything.) Also, any other 2106 possible concurrent operations will fail due to the 2107 current state, hence the wl1271 struct should be safe. 
*/ 2108 wlcore_disable_interrupts(wl); 2109 wl1271_flush_deferred_work(wl); 2110 cancel_work_sync(&wl->netstack_work); 2111 mutex_lock(&wl->mutex); 2112 power_off: 2113 wl1271_power_off(wl); 2114 } 2115 2116 if (!booted) { 2117 wl1271_error("firmware boot failed despite %d retries", 2118 WL1271_BOOT_RETRIES); 2119 goto out; 2120 } 2121 2122 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str); 2123 2124 /* update hw/fw version info in wiphy struct */ 2125 wiphy->hw_version = wl->chip.id; 2126 strncpy(wiphy->fw_version, wl->chip.fw_ver_str, 2127 sizeof(wiphy->fw_version)); 2128 2129 /* 2130 * Now we know if 11a is supported (info from the NVS), so disable 2131 * 11a channels if not supported 2132 */ 2133 if (!wl->enable_11a) 2134 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0; 2135 2136 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported", 2137 wl->enable_11a ? "" : "not "); 2138 2139 wl->state = WLCORE_STATE_ON; 2140 out: 2141 return booted; 2142 } 2143 2144 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif) 2145 { 2146 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID; 2147 } 2148 2149 /* 2150 * Check whether a fw switch (i.e. moving from one loaded 2151 * fw to another) is needed. This function is also responsible 2152 * for updating wl->last_vif_count, so it must be called before 2153 * loading a non-plt fw (so the correct fw (single-role/multi-role) 2154 * will be used). 2155 */ 2156 static bool wl12xx_need_fw_change(struct wl1271 *wl, 2157 struct vif_counter_data vif_counter_data, 2158 bool add) 2159 { 2160 enum wl12xx_fw_type current_fw = wl->fw_type; 2161 u8 vif_count = vif_counter_data.counter; 2162 2163 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags)) 2164 return false; 2165 2166 /* increase the vif count if this is a new vif */ 2167 if (add && !vif_counter_data.cur_vif_running) 2168 vif_count++; 2169 2170 wl->last_vif_count = vif_count; 2171 2172 /* no need for fw change if the device is OFF */ 2173 if (wl->state == WLCORE_STATE_OFF) 2174 return false; 2175 2176 /* no need for fw change if a single fw is used */ 2177 if (!wl->mr_fw_name) 2178 return false; 2179 2180 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL) 2181 return true; 2182 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI) 2183 return true; 2184 2185 return false; 2186 } 2187 2188 /* 2189 * Enter "forced psm". Make sure the sta is in psm against the ap, 2190 * to make the fw switch a bit more disconnection-persistent. 
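 *
 * (For orientation: wl12xx_force_active_psm() below simply walks every
 * STA vif via wl12xx_for_each_wlvif_sta() and requests
 * STATION_POWER_SAVE_MODE, so the AP keeps buffering frames for us
 * while the firmware image is being swapped.)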
2191 */ 2192 static void wl12xx_force_active_psm(struct wl1271 *wl) 2193 { 2194 struct wl12xx_vif *wlvif; 2195 2196 wl12xx_for_each_wlvif_sta(wl, wlvif) { 2197 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE); 2198 } 2199 } 2200 2201 static int wl1271_op_add_interface(struct ieee80211_hw *hw, 2202 struct ieee80211_vif *vif) 2203 { 2204 struct wl1271 *wl = hw->priv; 2205 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 2206 struct vif_counter_data vif_count; 2207 int ret = 0; 2208 u8 role_type; 2209 bool booted = false; 2210 2211 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 2212 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 2213 2214 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 2215 ieee80211_vif_type_p2p(vif), vif->addr); 2216 2217 wl12xx_get_vif_count(hw, vif, &vif_count); 2218 2219 mutex_lock(&wl->mutex); 2220 ret = wl1271_ps_elp_wakeup(wl); 2221 if (ret < 0) 2222 goto out_unlock; 2223 2224 /* 2225 * in some very corner case HW recovery scenarios its possible to 2226 * get here before __wl1271_op_remove_interface is complete, so 2227 * opt out if that is the case. 2228 */ 2229 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) || 2230 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) { 2231 ret = -EBUSY; 2232 goto out; 2233 } 2234 2235 2236 ret = wl12xx_init_vif_data(wl, vif); 2237 if (ret < 0) 2238 goto out; 2239 2240 wlvif->wl = wl; 2241 role_type = wl12xx_get_role_type(wl, wlvif); 2242 if (role_type == WL12XX_INVALID_ROLE_TYPE) { 2243 ret = -EINVAL; 2244 goto out; 2245 } 2246 2247 if (wl12xx_need_fw_change(wl, vif_count, true)) { 2248 wl12xx_force_active_psm(wl); 2249 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); 2250 mutex_unlock(&wl->mutex); 2251 wl1271_recovery_work(&wl->recovery_work); 2252 return 0; 2253 } 2254 2255 /* 2256 * TODO: after the nvs issue will be solved, move this block 2257 * to start(), and make sure here the driver is ON. 2258 */ 2259 if (wl->state == WLCORE_STATE_OFF) { 2260 /* 2261 * we still need this in order to configure the fw 2262 * while uploading the nvs 2263 */ 2264 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN); 2265 2266 booted = wl12xx_init_fw(wl); 2267 if (!booted) { 2268 ret = -EINVAL; 2269 goto out; 2270 } 2271 } 2272 2273 ret = wl12xx_cmd_role_enable(wl, vif->addr, 2274 role_type, &wlvif->role_id); 2275 if (ret < 0) 2276 goto out; 2277 2278 ret = wl1271_init_vif_specific(wl, vif); 2279 if (ret < 0) 2280 goto out; 2281 2282 list_add(&wlvif->list, &wl->wlvif_list); 2283 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags); 2284 2285 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 2286 wl->ap_count++; 2287 else 2288 wl->sta_count++; 2289 out: 2290 wl1271_ps_elp_sleep(wl); 2291 out_unlock: 2292 mutex_unlock(&wl->mutex); 2293 2294 return ret; 2295 } 2296 2297 static void __wl1271_op_remove_interface(struct wl1271 *wl, 2298 struct ieee80211_vif *vif, 2299 bool reset_tx_queues) 2300 { 2301 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 2302 int i, ret; 2303 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 2304 2305 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 2306 2307 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) 2308 return; 2309 2310 /* because of hardware recovery, we may get here twice */ 2311 if (wl->state == WLCORE_STATE_OFF) 2312 return; 2313 2314 wl1271_info("down"); 2315 2316 if (wl->scan.state != WL1271_SCAN_STATE_IDLE && 2317 wl->scan_vif == vif) { 2318 /* 2319 * Rearm the tx watchdog just before idling scan. 
This 2320 * prevents just-finished scans from triggering the watchdog 2321 */ 2322 wl12xx_rearm_tx_watchdog_locked(wl); 2323 2324 wl->scan.state = WL1271_SCAN_STATE_IDLE; 2325 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); 2326 wl->scan_vif = NULL; 2327 wl->scan.req = NULL; 2328 ieee80211_scan_completed(wl->hw, true); 2329 } 2330 2331 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { 2332 /* disable active roles */ 2333 ret = wl1271_ps_elp_wakeup(wl); 2334 if (ret < 0) 2335 goto deinit; 2336 2337 if (wlvif->bss_type == BSS_TYPE_STA_BSS || 2338 wlvif->bss_type == BSS_TYPE_IBSS) { 2339 if (wl12xx_dev_role_started(wlvif)) 2340 wl12xx_stop_dev(wl, wlvif); 2341 } 2342 2343 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id); 2344 if (ret < 0) 2345 goto deinit; 2346 2347 wl1271_ps_elp_sleep(wl); 2348 } 2349 deinit: 2350 /* clear all hlids (except system_hlid) */ 2351 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; 2352 2353 if (wlvif->bss_type == BSS_TYPE_STA_BSS || 2354 wlvif->bss_type == BSS_TYPE_IBSS) { 2355 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID; 2356 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx); 2357 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx); 2358 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx); 2359 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id); 2360 } else { 2361 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; 2362 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; 2363 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); 2364 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx); 2365 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) 2366 wl12xx_free_rate_policy(wl, 2367 &wlvif->ap.ucast_rate_idx[i]); 2368 wl1271_free_ap_keys(wl, wlvif); 2369 } 2370 2371 dev_kfree_skb(wlvif->probereq); 2372 wlvif->probereq = NULL; 2373 wl12xx_tx_reset_wlvif(wl, wlvif); 2374 if (wl->last_wlvif == wlvif) 2375 wl->last_wlvif = NULL; 2376 list_del(&wlvif->list); 2377 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map)); 2378 wlvif->role_id = WL12XX_INVALID_ROLE_ID; 2379 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; 2380 2381 if (is_ap) 2382 wl->ap_count--; 2383 else 2384 wl->sta_count--; 2385 2386 /* 2387 * Last AP is gone, but we still have stations. Configure sleep auth according to STA. 2388 * Don't do this on unintended recovery.
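 *
 * The fallback order used below is: an explicit sta_sleep_auth set
 * through debugfs wins, otherwise CAM (always on) is used on parts
 * with the NO_ELP quirk, and ELP power saving is used everywhere else.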
2389 */ 2390 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) && 2391 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) 2392 goto unlock; 2393 2394 if (wl->ap_count == 0 && is_ap && wl->sta_count) { 2395 u8 sta_auth = wl->conf.conn.sta_sleep_auth; 2396 /* Configure for power according to debugfs */ 2397 if (sta_auth != WL1271_PSM_ILLEGAL) 2398 wl1271_acx_sleep_auth(wl, sta_auth); 2399 /* Configure for power always on */ 2400 else if (wl->quirks & WLCORE_QUIRK_NO_ELP) 2401 wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); 2402 /* Configure for ELP power saving */ 2403 else 2404 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); 2405 } 2406 2407 unlock: 2408 mutex_unlock(&wl->mutex); 2409 2410 del_timer_sync(&wlvif->rx_streaming_timer); 2411 cancel_work_sync(&wlvif->rx_streaming_enable_work); 2412 cancel_work_sync(&wlvif->rx_streaming_disable_work); 2413 2414 mutex_lock(&wl->mutex); 2415 } 2416 2417 static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 2418 struct ieee80211_vif *vif) 2419 { 2420 struct wl1271 *wl = hw->priv; 2421 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 2422 struct wl12xx_vif *iter; 2423 struct vif_counter_data vif_count; 2424 2425 wl12xx_get_vif_count(hw, vif, &vif_count); 2426 mutex_lock(&wl->mutex); 2427 2428 if (wl->state == WLCORE_STATE_OFF || 2429 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) 2430 goto out; 2431 2432 /* 2433 * wl->vif can be null here if someone shuts down the interface 2434 * just when hardware recovery has been started. 2435 */ 2436 wl12xx_for_each_wlvif(wl, iter) { 2437 if (iter != wlvif) 2438 continue; 2439 2440 __wl1271_op_remove_interface(wl, vif, true); 2441 break; 2442 } 2443 WARN_ON(iter != wlvif); 2444 if (wl12xx_need_fw_change(wl, vif_count, false)) { 2445 wl12xx_force_active_psm(wl); 2446 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); 2447 wl12xx_queue_recovery_work(wl); 2448 } 2449 out: 2450 mutex_unlock(&wl->mutex); 2451 } 2452 2453 static int wl12xx_op_change_interface(struct ieee80211_hw *hw, 2454 struct ieee80211_vif *vif, 2455 enum nl80211_iftype new_type, bool p2p) 2456 { 2457 struct wl1271 *wl = hw->priv; 2458 int ret; 2459 2460 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags); 2461 wl1271_op_remove_interface(hw, vif); 2462 2463 vif->type = new_type; 2464 vif->p2p = p2p; 2465 ret = wl1271_op_add_interface(hw, vif); 2466 2467 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags); 2468 return ret; 2469 } 2470 2471 static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif, 2472 bool set_assoc) 2473 { 2474 int ret; 2475 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); 2476 2477 /* 2478 * One of the side effects of the JOIN command is that it clears 2479 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated 2480 * to a WPA/WPA2 access point will therefore kill the data-path. 2481 * Currently the only valid scenario for JOIN during association 2482 * is on roaming, in which case we will also be given new keys.
* Keep the below message for now, unless it starts bothering 2484 * users who really like to roam a lot :) 2485 */ 2486 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 2487 wl1271_info("JOIN while associated."); 2488 2489 /* clear encryption type */ 2490 wlvif->encryption_type = KEY_NONE; 2491 2492 if (set_assoc) 2493 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags); 2494 2495 if (is_ibss) 2496 ret = wl12xx_cmd_role_start_ibss(wl, wlvif); 2497 else 2498 ret = wl12xx_cmd_role_start_sta(wl, wlvif); 2499 if (ret < 0) 2500 goto out; 2501 2502 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 2503 goto out; 2504 2505 /* 2506 * The join command disables the keep-alive mode, shuts down its process, 2507 * and also clears the template config, so we need to reset it all after 2508 * the join. The acx_aid command starts the keep-alive process, and the order 2509 * of the commands below is relevant. 2510 */ 2511 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true); 2512 if (ret < 0) 2513 goto out; 2514 2515 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid); 2516 if (ret < 0) 2517 goto out; 2518 2519 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif); 2520 if (ret < 0) 2521 goto out; 2522 2523 ret = wl1271_acx_keep_alive_config(wl, wlvif, 2524 wlvif->sta.klv_template_id, 2525 ACX_KEEP_ALIVE_TPL_VALID); 2526 if (ret < 0) 2527 goto out; 2528 2529 out: 2530 return ret; 2531 } 2532 2533 static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif) 2534 { 2535 int ret; 2536 2537 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) { 2538 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 2539 2540 wl12xx_cmd_stop_channel_switch(wl); 2541 ieee80211_chswitch_done(vif, false); 2542 } 2543 2544 /* invalidate keep-alive template */ 2545 wl1271_acx_keep_alive_config(wl, wlvif, 2546 wlvif->sta.klv_template_id, 2547 ACX_KEEP_ALIVE_TPL_INVALID); 2548 2549 /* to stop listening to a channel, we disconnect */ 2550 ret = wl12xx_cmd_role_stop_sta(wl, wlvif); 2551 if (ret < 0) 2552 goto out; 2553 2554 /* reset TX security counters on a clean disconnect */ 2555 wlvif->tx_security_last_seq_lsb = 0; 2556 wlvif->tx_security_seq = 0; 2557 2558 out: 2559 return ret; 2560 } 2561 2562 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif) 2563 { 2564 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band]; 2565 wlvif->rate_set = wlvif->basic_rate_set; 2566 } 2567 2568 static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif, 2569 bool idle) 2570 { 2571 int ret; 2572 bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); 2573 2574 if (idle == cur_idle) 2575 return 0; 2576 2577 if (idle) { 2578 /* no need to croc if we weren't busy (e.g.
during boot) */ 2579 if (wl12xx_dev_role_started(wlvif)) { 2580 ret = wl12xx_stop_dev(wl, wlvif); 2581 if (ret < 0) 2582 goto out; 2583 } 2584 wlvif->rate_set = 2585 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 2586 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 2587 if (ret < 0) 2588 goto out; 2589 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); 2590 } else { 2591 /* The current firmware only supports sched_scan in idle */ 2592 if (wl->sched_scanning) { 2593 wl1271_scan_sched_scan_stop(wl, wlvif); 2594 ieee80211_sched_scan_stopped(wl->hw); 2595 } 2596 2597 ret = wl12xx_start_dev(wl, wlvif); 2598 if (ret < 0) 2599 goto out; 2600 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); 2601 } 2602 2603 out: 2604 return ret; 2605 } 2606 2607 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif, 2608 struct ieee80211_conf *conf, u32 changed) 2609 { 2610 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 2611 int channel, ret; 2612 2613 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 2614 2615 /* if the channel changes while joined, join again */ 2616 if (changed & IEEE80211_CONF_CHANGE_CHANNEL && 2617 ((wlvif->band != conf->channel->band) || 2618 (wlvif->channel != channel) || 2619 (wlvif->channel_type != conf->channel_type))) { 2620 /* send all pending packets */ 2621 ret = wlcore_tx_work_locked(wl); 2622 if (ret < 0) 2623 return ret; 2624 2625 wlvif->band = conf->channel->band; 2626 wlvif->channel = channel; 2627 wlvif->channel_type = conf->channel_type; 2628 2629 if (is_ap) { 2630 wl1271_set_band_rate(wl, wlvif); 2631 ret = wl1271_init_ap_rates(wl, wlvif); 2632 if (ret < 0) 2633 wl1271_error("AP rate policy change failed %d", 2634 ret); 2635 } else { 2636 /* 2637 * FIXME: the mac80211 should really provide a fixed 2638 * rate to use here. for now, just use the smallest 2639 * possible rate for the band as a fixed rate for 2640 * association frames and other control messages. 2641 */ 2642 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 2643 wl1271_set_band_rate(wl, wlvif); 2644 2645 wlvif->basic_rate = 2646 wl1271_tx_min_rate_get(wl, 2647 wlvif->basic_rate_set); 2648 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 2649 if (ret < 0) 2650 wl1271_warning("rate policy for channel " 2651 "failed %d", ret); 2652 2653 /* 2654 * change the ROC channel. do it only if we are 2655 * not idle. otherwise, CROC will be called 2656 * anyway. 
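 *
 * There is no dedicated "move ROC" command here; the device role is
 * just stopped and started again (wl12xx_stop_dev() followed by
 * wl12xx_start_dev() below), which brings the ROC up on the new
 * channel.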
2657 */ 2658 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, 2659 &wlvif->flags) && 2660 wl12xx_dev_role_started(wlvif) && 2661 !(conf->flags & IEEE80211_CONF_IDLE)) { 2662 ret = wl12xx_stop_dev(wl, wlvif); 2663 if (ret < 0) 2664 return ret; 2665 2666 ret = wl12xx_start_dev(wl, wlvif); 2667 if (ret < 0) 2668 return ret; 2669 } 2670 } 2671 } 2672 2673 if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) { 2674 2675 if ((conf->flags & IEEE80211_CONF_PS) && 2676 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && 2677 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { 2678 2679 int ps_mode; 2680 char *ps_mode_str; 2681 2682 if (wl->conf.conn.forced_ps) { 2683 ps_mode = STATION_POWER_SAVE_MODE; 2684 ps_mode_str = "forced"; 2685 } else { 2686 ps_mode = STATION_AUTO_PS_MODE; 2687 ps_mode_str = "auto"; 2688 } 2689 2690 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str); 2691 2692 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode); 2693 2694 if (ret < 0) 2695 wl1271_warning("enter %s ps failed %d", 2696 ps_mode_str, ret); 2697 2698 } else if (!(conf->flags & IEEE80211_CONF_PS) && 2699 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { 2700 2701 wl1271_debug(DEBUG_PSM, "auto ps disabled"); 2702 2703 ret = wl1271_ps_set_mode(wl, wlvif, 2704 STATION_ACTIVE_MODE); 2705 if (ret < 0) 2706 wl1271_warning("exit auto ps failed %d", ret); 2707 } 2708 } 2709 2710 if (conf->power_level != wlvif->power_level) { 2711 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level); 2712 if (ret < 0) 2713 return ret; 2714 2715 wlvif->power_level = conf->power_level; 2716 } 2717 2718 return 0; 2719 } 2720 2721 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) 2722 { 2723 struct wl1271 *wl = hw->priv; 2724 struct wl12xx_vif *wlvif; 2725 struct ieee80211_conf *conf = &hw->conf; 2726 int channel, ret = 0; 2727 2728 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 2729 2730 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s" 2731 " changed 0x%x", 2732 channel, 2733 conf->flags & IEEE80211_CONF_PS ? "on" : "off", 2734 conf->power_level, 2735 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use", 2736 changed); 2737 2738 /* 2739 * mac80211 will go to idle nearly immediately after transmitting some 2740 * frames, such as the deauth. To make sure those frames reach the air, 2741 * wait here until the TX queue is fully flushed. 
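 *
 * The flush below is limited to the cases where frames could actually
 * be lost: a channel change, or a transition into IDLE. Other config
 * changes (power level, PS) fall through without flushing.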
2742 */ 2743 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || 2744 ((changed & IEEE80211_CONF_CHANGE_IDLE) && 2745 (conf->flags & IEEE80211_CONF_IDLE))) 2746 wl1271_tx_flush(wl); 2747 2748 mutex_lock(&wl->mutex); 2749 2750 /* we support configuring the channel and band even while off */ 2751 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 2752 wl->band = conf->channel->band; 2753 wl->channel = channel; 2754 wl->channel_type = conf->channel_type; 2755 } 2756 2757 if (changed & IEEE80211_CONF_CHANGE_POWER) 2758 wl->power_level = conf->power_level; 2759 2760 if (unlikely(wl->state != WLCORE_STATE_ON)) 2761 goto out; 2762 2763 ret = wl1271_ps_elp_wakeup(wl); 2764 if (ret < 0) 2765 goto out; 2766 2767 /* configure each interface */ 2768 wl12xx_for_each_wlvif(wl, wlvif) { 2769 ret = wl12xx_config_vif(wl, wlvif, conf, changed); 2770 if (ret < 0) 2771 goto out_sleep; 2772 } 2773 2774 out_sleep: 2775 wl1271_ps_elp_sleep(wl); 2776 2777 out: 2778 mutex_unlock(&wl->mutex); 2779 2780 return ret; 2781 } 2782 2783 struct wl1271_filter_params { 2784 bool enabled; 2785 int mc_list_length; 2786 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN]; 2787 }; 2788 2789 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, 2790 struct netdev_hw_addr_list *mc_list) 2791 { 2792 struct wl1271_filter_params *fp; 2793 struct netdev_hw_addr *ha; 2794 2795 fp = kzalloc(sizeof(*fp), GFP_ATOMIC); 2796 if (!fp) { 2797 wl1271_error("Out of memory setting filters."); 2798 return 0; 2799 } 2800 2801 /* update multicast filtering parameters */ 2802 fp->mc_list_length = 0; 2803 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) { 2804 fp->enabled = false; 2805 } else { 2806 fp->enabled = true; 2807 netdev_hw_addr_list_for_each(ha, mc_list) { 2808 memcpy(fp->mc_list[fp->mc_list_length], 2809 ha->addr, ETH_ALEN); 2810 fp->mc_list_length++; 2811 } 2812 } 2813 2814 return (u64)(unsigned long)fp; 2815 } 2816 2817 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \ 2818 FIF_ALLMULTI | \ 2819 FIF_FCSFAIL | \ 2820 FIF_BCN_PRBRESP_PROMISC | \ 2821 FIF_CONTROL | \ 2822 FIF_OTHER_BSS) 2823 2824 static void wl1271_op_configure_filter(struct ieee80211_hw *hw, 2825 unsigned int changed, 2826 unsigned int *total, u64 multicast) 2827 { 2828 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast; 2829 struct wl1271 *wl = hw->priv; 2830 struct wl12xx_vif *wlvif; 2831 2832 int ret; 2833 2834 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x" 2835 " total %x", changed, *total); 2836 2837 mutex_lock(&wl->mutex); 2838 2839 *total &= WL1271_SUPPORTED_FILTERS; 2840 changed &= WL1271_SUPPORTED_FILTERS; 2841 2842 if (unlikely(wl->state != WLCORE_STATE_ON)) 2843 goto out; 2844 2845 ret = wl1271_ps_elp_wakeup(wl); 2846 if (ret < 0) 2847 goto out; 2848 2849 wl12xx_for_each_wlvif(wl, wlvif) { 2850 if (wlvif->bss_type != BSS_TYPE_AP_BSS) { 2851 if (*total & FIF_ALLMULTI) 2852 ret = wl1271_acx_group_address_tbl(wl, wlvif, 2853 false, 2854 NULL, 0); 2855 else if (fp) 2856 ret = wl1271_acx_group_address_tbl(wl, wlvif, 2857 fp->enabled, 2858 fp->mc_list, 2859 fp->mc_list_length); 2860 if (ret < 0) 2861 goto out_sleep; 2862 } 2863 } 2864 2865 /* 2866 * the fw doesn't provide an api to configure the filters. instead, 2867 * the filters configuration is based on the active roles / ROC 2868 * state. 
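 *
 * The only thing explicitly pushed to the firmware in this callback is
 * the per-vif multicast (group address) table: FIF_ALLMULTI clears it
 * so all multicast frames pass, otherwise the list collected in
 * wl1271_op_prepare_multicast() is applied on non-AP vifs.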
2869 */ 2870 2871 out_sleep: 2872 wl1271_ps_elp_sleep(wl); 2873 2874 out: 2875 mutex_unlock(&wl->mutex); 2876 kfree(fp); 2877 } 2878 2879 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, 2880 u8 id, u8 key_type, u8 key_size, 2881 const u8 *key, u8 hlid, u32 tx_seq_32, 2882 u16 tx_seq_16) 2883 { 2884 struct wl1271_ap_key *ap_key; 2885 int i; 2886 2887 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id); 2888 2889 if (key_size > MAX_KEY_SIZE) 2890 return -EINVAL; 2891 2892 /* 2893 * Find next free entry in ap_keys. Also check we are not replacing 2894 * an existing key. 2895 */ 2896 for (i = 0; i < MAX_NUM_KEYS; i++) { 2897 if (wlvif->ap.recorded_keys[i] == NULL) 2898 break; 2899 2900 if (wlvif->ap.recorded_keys[i]->id == id) { 2901 wl1271_warning("trying to record key replacement"); 2902 return -EINVAL; 2903 } 2904 } 2905 2906 if (i == MAX_NUM_KEYS) 2907 return -EBUSY; 2908 2909 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL); 2910 if (!ap_key) 2911 return -ENOMEM; 2912 2913 ap_key->id = id; 2914 ap_key->key_type = key_type; 2915 ap_key->key_size = key_size; 2916 memcpy(ap_key->key, key, key_size); 2917 ap_key->hlid = hlid; 2918 ap_key->tx_seq_32 = tx_seq_32; 2919 ap_key->tx_seq_16 = tx_seq_16; 2920 2921 wlvif->ap.recorded_keys[i] = ap_key; 2922 return 0; 2923 } 2924 2925 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif) 2926 { 2927 int i; 2928 2929 for (i = 0; i < MAX_NUM_KEYS; i++) { 2930 kfree(wlvif->ap.recorded_keys[i]); 2931 wlvif->ap.recorded_keys[i] = NULL; 2932 } 2933 } 2934 2935 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif) 2936 { 2937 int i, ret = 0; 2938 struct wl1271_ap_key *key; 2939 bool wep_key_added = false; 2940 2941 for (i = 0; i < MAX_NUM_KEYS; i++) { 2942 u8 hlid; 2943 if (wlvif->ap.recorded_keys[i] == NULL) 2944 break; 2945 2946 key = wlvif->ap.recorded_keys[i]; 2947 hlid = key->hlid; 2948 if (hlid == WL12XX_INVALID_LINK_ID) 2949 hlid = wlvif->ap.bcast_hlid; 2950 2951 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE, 2952 key->id, key->key_type, 2953 key->key_size, key->key, 2954 hlid, key->tx_seq_32, 2955 key->tx_seq_16); 2956 if (ret < 0) 2957 goto out; 2958 2959 if (key->key_type == KEY_WEP) 2960 wep_key_added = true; 2961 } 2962 2963 if (wep_key_added) { 2964 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key, 2965 wlvif->ap.bcast_hlid); 2966 if (ret < 0) 2967 goto out; 2968 } 2969 2970 out: 2971 wl1271_free_ap_keys(wl, wlvif); 2972 return ret; 2973 } 2974 2975 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, 2976 u16 action, u8 id, u8 key_type, 2977 u8 key_size, const u8 *key, u32 tx_seq_32, 2978 u16 tx_seq_16, struct ieee80211_sta *sta) 2979 { 2980 int ret; 2981 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 2982 2983 if (is_ap) { 2984 struct wl1271_station *wl_sta; 2985 u8 hlid; 2986 2987 if (sta) { 2988 wl_sta = (struct wl1271_station *)sta->drv_priv; 2989 hlid = wl_sta->hlid; 2990 } else { 2991 hlid = wlvif->ap.bcast_hlid; 2992 } 2993 2994 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { 2995 /* 2996 * We do not support removing keys after AP shutdown. 2997 * Pretend we do to make mac80211 happy. 
2998 */ 2999 if (action != KEY_ADD_OR_REPLACE) 3000 return 0; 3001 3002 ret = wl1271_record_ap_key(wl, wlvif, id, 3003 key_type, key_size, 3004 key, hlid, tx_seq_32, 3005 tx_seq_16); 3006 } else { 3007 ret = wl1271_cmd_set_ap_key(wl, wlvif, action, 3008 id, key_type, key_size, 3009 key, hlid, tx_seq_32, 3010 tx_seq_16); 3011 } 3012 3013 if (ret < 0) 3014 return ret; 3015 } else { 3016 const u8 *addr; 3017 static const u8 bcast_addr[ETH_ALEN] = { 3018 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 3019 }; 3020 3021 addr = sta ? sta->addr : bcast_addr; 3022 3023 if (is_zero_ether_addr(addr)) { 3024 /* We dont support TX only encryption */ 3025 return -EOPNOTSUPP; 3026 } 3027 3028 /* The wl1271 does not allow to remove unicast keys - they 3029 will be cleared automatically on next CMD_JOIN. Ignore the 3030 request silently, as we dont want the mac80211 to emit 3031 an error message. */ 3032 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr)) 3033 return 0; 3034 3035 /* don't remove key if hlid was already deleted */ 3036 if (action == KEY_REMOVE && 3037 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) 3038 return 0; 3039 3040 ret = wl1271_cmd_set_sta_key(wl, wlvif, action, 3041 id, key_type, key_size, 3042 key, addr, tx_seq_32, 3043 tx_seq_16); 3044 if (ret < 0) 3045 return ret; 3046 3047 /* the default WEP key needs to be configured at least once */ 3048 if (key_type == KEY_WEP) { 3049 ret = wl12xx_cmd_set_default_wep_key(wl, 3050 wlvif->default_key, 3051 wlvif->sta.hlid); 3052 if (ret < 0) 3053 return ret; 3054 } 3055 } 3056 3057 return 0; 3058 } 3059 3060 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3061 struct ieee80211_vif *vif, 3062 struct ieee80211_sta *sta, 3063 struct ieee80211_key_conf *key_conf) 3064 { 3065 struct wl1271 *wl = hw->priv; 3066 int ret; 3067 bool might_change_spare = 3068 key_conf->cipher == WL1271_CIPHER_SUITE_GEM || 3069 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP; 3070 3071 if (might_change_spare) { 3072 /* 3073 * stop the queues and flush to ensure the next packets are 3074 * in sync with FW spare block accounting 3075 */ 3076 mutex_lock(&wl->mutex); 3077 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK); 3078 mutex_unlock(&wl->mutex); 3079 3080 wl1271_tx_flush(wl); 3081 } 3082 3083 mutex_lock(&wl->mutex); 3084 3085 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3086 ret = -EAGAIN; 3087 goto out_wake_queues; 3088 } 3089 3090 ret = wl1271_ps_elp_wakeup(wl); 3091 if (ret < 0) 3092 goto out_wake_queues; 3093 3094 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf); 3095 3096 wl1271_ps_elp_sleep(wl); 3097 3098 out_wake_queues: 3099 if (might_change_spare) 3100 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK); 3101 3102 mutex_unlock(&wl->mutex); 3103 3104 return ret; 3105 } 3106 3107 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, 3108 struct ieee80211_vif *vif, 3109 struct ieee80211_sta *sta, 3110 struct ieee80211_key_conf *key_conf) 3111 { 3112 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3113 int ret; 3114 u32 tx_seq_32 = 0; 3115 u16 tx_seq_16 = 0; 3116 u8 key_type; 3117 3118 wl1271_debug(DEBUG_MAC80211, "mac80211 set key"); 3119 3120 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta); 3121 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", 3122 key_conf->cipher, key_conf->keyidx, 3123 key_conf->keylen, key_conf->flags); 3124 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); 3125 3126 switch (key_conf->cipher) { 3127 case WLAN_CIPHER_SUITE_WEP40: 3128 case 
WLAN_CIPHER_SUITE_WEP104: 3129 key_type = KEY_WEP; 3130 3131 key_conf->hw_key_idx = key_conf->keyidx; 3132 break; 3133 case WLAN_CIPHER_SUITE_TKIP: 3134 key_type = KEY_TKIP; 3135 3136 key_conf->hw_key_idx = key_conf->keyidx; 3137 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); 3138 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); 3139 break; 3140 case WLAN_CIPHER_SUITE_CCMP: 3141 key_type = KEY_AES; 3142 3143 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3144 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); 3145 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); 3146 break; 3147 case WL1271_CIPHER_SUITE_GEM: 3148 key_type = KEY_GEM; 3149 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); 3150 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); 3151 break; 3152 default: 3153 wl1271_error("Unknown key algo 0x%x", key_conf->cipher); 3154 3155 return -EOPNOTSUPP; 3156 } 3157 3158 switch (cmd) { 3159 case SET_KEY: 3160 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE, 3161 key_conf->keyidx, key_type, 3162 key_conf->keylen, key_conf->key, 3163 tx_seq_32, tx_seq_16, sta); 3164 if (ret < 0) { 3165 wl1271_error("Could not add or replace key"); 3166 return ret; 3167 } 3168 3169 /* 3170 * reconfiguring arp response if the unicast (or common) 3171 * encryption key type was changed 3172 */ 3173 if (wlvif->bss_type == BSS_TYPE_STA_BSS && 3174 (sta || key_type == KEY_WEP) && 3175 wlvif->encryption_type != key_type) { 3176 wlvif->encryption_type = key_type; 3177 ret = wl1271_cmd_build_arp_rsp(wl, wlvif); 3178 if (ret < 0) { 3179 wl1271_warning("build arp rsp failed: %d", ret); 3180 return ret; 3181 } 3182 } 3183 break; 3184 3185 case DISABLE_KEY: 3186 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE, 3187 key_conf->keyidx, key_type, 3188 key_conf->keylen, key_conf->key, 3189 0, 0, sta); 3190 if (ret < 0) { 3191 wl1271_error("Could not remove key"); 3192 return ret; 3193 } 3194 break; 3195 3196 default: 3197 wl1271_error("Unsupported key cmd 0x%x", cmd); 3198 return -EOPNOTSUPP; 3199 } 3200 3201 return ret; 3202 } 3203 EXPORT_SYMBOL_GPL(wlcore_set_key); 3204 3205 static int wl1271_op_hw_scan(struct ieee80211_hw *hw, 3206 struct ieee80211_vif *vif, 3207 struct cfg80211_scan_request *req) 3208 { 3209 struct wl1271 *wl = hw->priv; 3210 int ret; 3211 u8 *ssid = NULL; 3212 size_t len = 0; 3213 3214 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan"); 3215 3216 if (req->n_ssids) { 3217 ssid = req->ssids[0].ssid; 3218 len = req->ssids[0].ssid_len; 3219 } 3220 3221 mutex_lock(&wl->mutex); 3222 3223 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3224 /* 3225 * We cannot return -EBUSY here because cfg80211 will expect 3226 * a call to ieee80211_scan_completed if we do - in this case 3227 * there won't be any call. 
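 *
 * Hence the -EAGAIN below: following the reasoning above, it is an
 * error code that cfg80211 copes with without waiting for a
 * scan-completed notification from us.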
3228 */ 3229 ret = -EAGAIN; 3230 goto out; 3231 } 3232 3233 ret = wl1271_ps_elp_wakeup(wl); 3234 if (ret < 0) 3235 goto out; 3236 3237 /* fail if there is any role in ROC */ 3238 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) { 3239 /* don't allow scanning right now */ 3240 ret = -EBUSY; 3241 goto out_sleep; 3242 } 3243 3244 ret = wl1271_scan(hw->priv, vif, ssid, len, req); 3245 out_sleep: 3246 wl1271_ps_elp_sleep(wl); 3247 out: 3248 mutex_unlock(&wl->mutex); 3249 3250 return ret; 3251 } 3252 3253 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, 3254 struct ieee80211_vif *vif) 3255 { 3256 struct wl1271 *wl = hw->priv; 3257 int ret; 3258 3259 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan"); 3260 3261 mutex_lock(&wl->mutex); 3262 3263 if (unlikely(wl->state != WLCORE_STATE_ON)) 3264 goto out; 3265 3266 if (wl->scan.state == WL1271_SCAN_STATE_IDLE) 3267 goto out; 3268 3269 ret = wl1271_ps_elp_wakeup(wl); 3270 if (ret < 0) 3271 goto out; 3272 3273 if (wl->scan.state != WL1271_SCAN_STATE_DONE) { 3274 ret = wl1271_scan_stop(wl); 3275 if (ret < 0) 3276 goto out_sleep; 3277 } 3278 3279 /* 3280 * Rearm the tx watchdog just before idling scan. This 3281 * prevents just-finished scans from triggering the watchdog 3282 */ 3283 wl12xx_rearm_tx_watchdog_locked(wl); 3284 3285 wl->scan.state = WL1271_SCAN_STATE_IDLE; 3286 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); 3287 wl->scan_vif = NULL; 3288 wl->scan.req = NULL; 3289 ieee80211_scan_completed(wl->hw, true); 3290 3291 out_sleep: 3292 wl1271_ps_elp_sleep(wl); 3293 out: 3294 mutex_unlock(&wl->mutex); 3295 3296 cancel_delayed_work_sync(&wl->scan_complete_work); 3297 } 3298 3299 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, 3300 struct ieee80211_vif *vif, 3301 struct cfg80211_sched_scan_request *req, 3302 struct ieee80211_sched_scan_ies *ies) 3303 { 3304 struct wl1271 *wl = hw->priv; 3305 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3306 int ret; 3307 3308 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start"); 3309 3310 mutex_lock(&wl->mutex); 3311 3312 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3313 ret = -EAGAIN; 3314 goto out; 3315 } 3316 3317 ret = wl1271_ps_elp_wakeup(wl); 3318 if (ret < 0) 3319 goto out; 3320 3321 ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies); 3322 if (ret < 0) 3323 goto out_sleep; 3324 3325 ret = wl1271_scan_sched_scan_start(wl, wlvif); 3326 if (ret < 0) 3327 goto out_sleep; 3328 3329 wl->sched_scanning = true; 3330 3331 out_sleep: 3332 wl1271_ps_elp_sleep(wl); 3333 out: 3334 mutex_unlock(&wl->mutex); 3335 return ret; 3336 } 3337 3338 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, 3339 struct ieee80211_vif *vif) 3340 { 3341 struct wl1271 *wl = hw->priv; 3342 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3343 int ret; 3344 3345 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop"); 3346 3347 mutex_lock(&wl->mutex); 3348 3349 if (unlikely(wl->state != WLCORE_STATE_ON)) 3350 goto out; 3351 3352 ret = wl1271_ps_elp_wakeup(wl); 3353 if (ret < 0) 3354 goto out; 3355 3356 wl1271_scan_sched_scan_stop(wl, wlvif); 3357 3358 wl1271_ps_elp_sleep(wl); 3359 out: 3360 mutex_unlock(&wl->mutex); 3361 } 3362 3363 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) 3364 { 3365 struct wl1271 *wl = hw->priv; 3366 int ret = 0; 3367 3368 mutex_lock(&wl->mutex); 3369 3370 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3371 ret = -EAGAIN; 3372 goto out; 3373 } 3374 3375 ret = wl1271_ps_elp_wakeup(wl); 3376 
if (ret < 0) 3377 goto out; 3378 3379 ret = wl1271_acx_frag_threshold(wl, value); 3380 if (ret < 0) 3381 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret); 3382 3383 wl1271_ps_elp_sleep(wl); 3384 3385 out: 3386 mutex_unlock(&wl->mutex); 3387 3388 return ret; 3389 } 3390 3391 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 3392 { 3393 struct wl1271 *wl = hw->priv; 3394 struct wl12xx_vif *wlvif; 3395 int ret = 0; 3396 3397 mutex_lock(&wl->mutex); 3398 3399 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3400 ret = -EAGAIN; 3401 goto out; 3402 } 3403 3404 ret = wl1271_ps_elp_wakeup(wl); 3405 if (ret < 0) 3406 goto out; 3407 3408 wl12xx_for_each_wlvif(wl, wlvif) { 3409 ret = wl1271_acx_rts_threshold(wl, wlvif, value); 3410 if (ret < 0) 3411 wl1271_warning("set rts threshold failed: %d", ret); 3412 } 3413 wl1271_ps_elp_sleep(wl); 3414 3415 out: 3416 mutex_unlock(&wl->mutex); 3417 3418 return ret; 3419 } 3420 3421 static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb, 3422 int offset) 3423 { 3424 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3425 u8 ssid_len; 3426 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset, 3427 skb->len - offset); 3428 3429 if (!ptr) { 3430 wl1271_error("No SSID in IEs!"); 3431 return -ENOENT; 3432 } 3433 3434 ssid_len = ptr[1]; 3435 if (ssid_len > IEEE80211_MAX_SSID_LEN) { 3436 wl1271_error("SSID is too long!"); 3437 return -EINVAL; 3438 } 3439 3440 wlvif->ssid_len = ssid_len; 3441 memcpy(wlvif->ssid, ptr+2, ssid_len); 3442 return 0; 3443 } 3444 3445 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset) 3446 { 3447 int len; 3448 const u8 *next, *end = skb->data + skb->len; 3449 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset, 3450 skb->len - ieoffset); 3451 if (!ie) 3452 return; 3453 len = ie[1] + 2; 3454 next = ie + len; 3455 memmove(ie, next, end - next); 3456 skb_trim(skb, skb->len - len); 3457 } 3458 3459 static void wl12xx_remove_vendor_ie(struct sk_buff *skb, 3460 unsigned int oui, u8 oui_type, 3461 int ieoffset) 3462 { 3463 int len; 3464 const u8 *next, *end = skb->data + skb->len; 3465 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, 3466 skb->data + ieoffset, 3467 skb->len - ieoffset); 3468 if (!ie) 3469 return; 3470 len = ie[1] + 2; 3471 next = ie + len; 3472 memmove(ie, next, end - next); 3473 skb_trim(skb, skb->len - len); 3474 } 3475 3476 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates, 3477 struct ieee80211_vif *vif) 3478 { 3479 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3480 struct sk_buff *skb; 3481 int ret; 3482 3483 skb = ieee80211_proberesp_get(wl->hw, vif); 3484 if (!skb) 3485 return -EOPNOTSUPP; 3486 3487 ret = wl1271_cmd_template_set(wl, wlvif->role_id, 3488 CMD_TEMPL_AP_PROBE_RESPONSE, 3489 skb->data, 3490 skb->len, 0, 3491 rates); 3492 dev_kfree_skb(skb); 3493 3494 if (ret < 0) 3495 goto out; 3496 3497 wl1271_debug(DEBUG_AP, "probe response updated"); 3498 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags); 3499 3500 out: 3501 return ret; 3502 } 3503 3504 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl, 3505 struct ieee80211_vif *vif, 3506 u8 *probe_rsp_data, 3507 size_t probe_rsp_len, 3508 u32 rates) 3509 { 3510 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3511 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 3512 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE]; 3513 int ssid_ie_offset, ie_offset, templ_len; 3514 const u8 *ptr; 3515 3516 /* no need to change probe response 
if the SSID is set correctly */ 3517 if (wlvif->ssid_len > 0) 3518 return wl1271_cmd_template_set(wl, wlvif->role_id, 3519 CMD_TEMPL_AP_PROBE_RESPONSE, 3520 probe_rsp_data, 3521 probe_rsp_len, 0, 3522 rates); 3523 3524 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) { 3525 wl1271_error("probe_rsp template too big"); 3526 return -EINVAL; 3527 } 3528 3529 /* start searching from IE offset */ 3530 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); 3531 3532 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset, 3533 probe_rsp_len - ie_offset); 3534 if (!ptr) { 3535 wl1271_error("No SSID in beacon!"); 3536 return -EINVAL; 3537 } 3538 3539 ssid_ie_offset = ptr - probe_rsp_data; 3540 ptr += (ptr[1] + 2); 3541 3542 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset); 3543 3544 /* insert SSID from bss_conf */ 3545 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID; 3546 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len; 3547 memcpy(probe_rsp_templ + ssid_ie_offset + 2, 3548 bss_conf->ssid, bss_conf->ssid_len); 3549 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len; 3550 3551 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len, 3552 ptr, probe_rsp_len - (ptr - probe_rsp_data)); 3553 templ_len += probe_rsp_len - (ptr - probe_rsp_data); 3554 3555 return wl1271_cmd_template_set(wl, wlvif->role_id, 3556 CMD_TEMPL_AP_PROBE_RESPONSE, 3557 probe_rsp_templ, 3558 templ_len, 0, 3559 rates); 3560 } 3561 3562 static int wl1271_bss_erp_info_changed(struct wl1271 *wl, 3563 struct ieee80211_vif *vif, 3564 struct ieee80211_bss_conf *bss_conf, 3565 u32 changed) 3566 { 3567 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3568 int ret = 0; 3569 3570 if (changed & BSS_CHANGED_ERP_SLOT) { 3571 if (bss_conf->use_short_slot) 3572 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT); 3573 else 3574 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG); 3575 if (ret < 0) { 3576 wl1271_warning("Set slot time failed %d", ret); 3577 goto out; 3578 } 3579 } 3580 3581 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 3582 if (bss_conf->use_short_preamble) 3583 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT); 3584 else 3585 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG); 3586 } 3587 3588 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 3589 if (bss_conf->use_cts_prot) 3590 ret = wl1271_acx_cts_protect(wl, wlvif, 3591 CTSPROTECT_ENABLE); 3592 else 3593 ret = wl1271_acx_cts_protect(wl, wlvif, 3594 CTSPROTECT_DISABLE); 3595 if (ret < 0) { 3596 wl1271_warning("Set ctsprotect failed %d", ret); 3597 goto out; 3598 } 3599 } 3600 3601 out: 3602 return ret; 3603 } 3604 3605 static int wlcore_set_beacon_template(struct wl1271 *wl, 3606 struct ieee80211_vif *vif, 3607 bool is_ap) 3608 { 3609 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3610 struct ieee80211_hdr *hdr; 3611 u32 min_rate; 3612 int ret; 3613 int ieoffset = offsetof(struct ieee80211_mgmt, 3614 u.beacon.variable); 3615 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif); 3616 u16 tmpl_id; 3617 3618 if (!beacon) { 3619 ret = -EINVAL; 3620 goto out; 3621 } 3622 3623 wl1271_debug(DEBUG_MASTER, "beacon updated"); 3624 3625 ret = wl1271_ssid_set(vif, beacon, ieoffset); 3626 if (ret < 0) { 3627 dev_kfree_skb(beacon); 3628 goto out; 3629 } 3630 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 3631 tmpl_id = is_ap ? 
CMD_TEMPL_AP_BEACON : 3632 CMD_TEMPL_BEACON; 3633 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id, 3634 beacon->data, 3635 beacon->len, 0, 3636 min_rate); 3637 if (ret < 0) { 3638 dev_kfree_skb(beacon); 3639 goto out; 3640 } 3641 3642 /* 3643 * In case we already have a probe-resp beacon set explicitly 3644 * by usermode, don't use the beacon data. 3645 */ 3646 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags)) 3647 goto end_bcn; 3648 3649 /* remove TIM ie from probe response */ 3650 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset); 3651 3652 /* 3653 * remove p2p ie from probe response. 3654 * the fw reponds to probe requests that don't include 3655 * the p2p ie. probe requests with p2p ie will be passed, 3656 * and will be responded by the supplicant (the spec 3657 * forbids including the p2p ie when responding to probe 3658 * requests that didn't include it). 3659 */ 3660 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA, 3661 WLAN_OUI_TYPE_WFA_P2P, ieoffset); 3662 3663 hdr = (struct ieee80211_hdr *) beacon->data; 3664 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 3665 IEEE80211_STYPE_PROBE_RESP); 3666 if (is_ap) 3667 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif, 3668 beacon->data, 3669 beacon->len, 3670 min_rate); 3671 else 3672 ret = wl1271_cmd_template_set(wl, wlvif->role_id, 3673 CMD_TEMPL_PROBE_RESPONSE, 3674 beacon->data, 3675 beacon->len, 0, 3676 min_rate); 3677 end_bcn: 3678 dev_kfree_skb(beacon); 3679 if (ret < 0) 3680 goto out; 3681 3682 out: 3683 return ret; 3684 } 3685 3686 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, 3687 struct ieee80211_vif *vif, 3688 struct ieee80211_bss_conf *bss_conf, 3689 u32 changed) 3690 { 3691 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3692 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 3693 int ret = 0; 3694 3695 if ((changed & BSS_CHANGED_BEACON_INT)) { 3696 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d", 3697 bss_conf->beacon_int); 3698 3699 wlvif->beacon_int = bss_conf->beacon_int; 3700 } 3701 3702 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) { 3703 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 3704 3705 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif); 3706 } 3707 3708 if ((changed & BSS_CHANGED_BEACON)) { 3709 ret = wlcore_set_beacon_template(wl, vif, is_ap); 3710 if (ret < 0) 3711 goto out; 3712 } 3713 3714 out: 3715 if (ret != 0) 3716 wl1271_error("beacon info change failed: %d", ret); 3717 return ret; 3718 } 3719 3720 /* AP mode changes */ 3721 static void wl1271_bss_info_changed_ap(struct wl1271 *wl, 3722 struct ieee80211_vif *vif, 3723 struct ieee80211_bss_conf *bss_conf, 3724 u32 changed) 3725 { 3726 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3727 int ret = 0; 3728 3729 if ((changed & BSS_CHANGED_BASIC_RATES)) { 3730 u32 rates = bss_conf->basic_rates; 3731 3732 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, 3733 wlvif->band); 3734 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, 3735 wlvif->basic_rate_set); 3736 3737 ret = wl1271_init_ap_rates(wl, wlvif); 3738 if (ret < 0) { 3739 wl1271_error("AP rate policy change failed %d", ret); 3740 goto out; 3741 } 3742 3743 ret = wl1271_ap_init_templates(wl, vif); 3744 if (ret < 0) 3745 goto out; 3746 3747 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif); 3748 if (ret < 0) 3749 goto out; 3750 3751 ret = wlcore_set_beacon_template(wl, vif, true); 3752 if (ret < 0) 3753 goto out; 3754 } 3755 3756 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed); 3757 if (ret < 
0) 3758 goto out; 3759 3760 if ((changed & BSS_CHANGED_BEACON_ENABLED)) { 3761 if (bss_conf->enable_beacon) { 3762 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { 3763 ret = wl12xx_cmd_role_start_ap(wl, wlvif); 3764 if (ret < 0) 3765 goto out; 3766 3767 ret = wl1271_ap_init_hwenc(wl, wlvif); 3768 if (ret < 0) 3769 goto out; 3770 3771 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags); 3772 wl1271_debug(DEBUG_AP, "started AP"); 3773 } 3774 } else { 3775 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { 3776 ret = wl12xx_cmd_role_stop_ap(wl, wlvif); 3777 if (ret < 0) 3778 goto out; 3779 3780 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags); 3781 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, 3782 &wlvif->flags); 3783 wl1271_debug(DEBUG_AP, "stopped AP"); 3784 } 3785 } 3786 } 3787 3788 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed); 3789 if (ret < 0) 3790 goto out; 3791 3792 /* Handle HT information change */ 3793 if ((changed & BSS_CHANGED_HT) && 3794 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) { 3795 ret = wl1271_acx_set_ht_information(wl, wlvif, 3796 bss_conf->ht_operation_mode); 3797 if (ret < 0) { 3798 wl1271_warning("Set ht information failed %d", ret); 3799 goto out; 3800 } 3801 } 3802 3803 out: 3804 return; 3805 } 3806 3807 /* STA/IBSS mode changes */ 3808 static void wl1271_bss_info_changed_sta(struct wl1271 *wl, 3809 struct ieee80211_vif *vif, 3810 struct ieee80211_bss_conf *bss_conf, 3811 u32 changed) 3812 { 3813 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3814 bool do_join = false, set_assoc = false; 3815 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); 3816 bool ibss_joined = false; 3817 u32 sta_rate_set = 0; 3818 int ret; 3819 struct ieee80211_sta *sta; 3820 bool sta_exists = false; 3821 struct ieee80211_sta_ht_cap sta_ht_cap; 3822 3823 if (is_ibss) { 3824 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, 3825 changed); 3826 if (ret < 0) 3827 goto out; 3828 } 3829 3830 if (changed & BSS_CHANGED_IBSS) { 3831 if (bss_conf->ibss_joined) { 3832 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags); 3833 ibss_joined = true; 3834 } else { 3835 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, 3836 &wlvif->flags)) 3837 wl1271_unjoin(wl, wlvif); 3838 } 3839 } 3840 3841 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined) 3842 do_join = true; 3843 3844 /* Need to update the SSID (for filtering etc) */ 3845 if ((changed & BSS_CHANGED_BEACON) && ibss_joined) 3846 do_join = true; 3847 3848 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) { 3849 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s", 3850 bss_conf->enable_beacon ? 
"enabled" : "disabled"); 3851 3852 do_join = true; 3853 } 3854 3855 if (changed & BSS_CHANGED_IDLE && !is_ibss) { 3856 ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle); 3857 if (ret < 0) 3858 wl1271_warning("idle mode change failed %d", ret); 3859 } 3860 3861 if ((changed & BSS_CHANGED_CQM)) { 3862 bool enable = false; 3863 if (bss_conf->cqm_rssi_thold) 3864 enable = true; 3865 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable, 3866 bss_conf->cqm_rssi_thold, 3867 bss_conf->cqm_rssi_hyst); 3868 if (ret < 0) 3869 goto out; 3870 wlvif->rssi_thold = bss_conf->cqm_rssi_thold; 3871 } 3872 3873 if (changed & BSS_CHANGED_BSSID) 3874 if (!is_zero_ether_addr(bss_conf->bssid)) { 3875 ret = wl12xx_cmd_build_null_data(wl, wlvif); 3876 if (ret < 0) 3877 goto out; 3878 3879 ret = wl1271_build_qos_null_data(wl, vif); 3880 if (ret < 0) 3881 goto out; 3882 } 3883 3884 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) { 3885 rcu_read_lock(); 3886 sta = ieee80211_find_sta(vif, bss_conf->bssid); 3887 if (!sta) 3888 goto sta_not_found; 3889 3890 /* save the supp_rates of the ap */ 3891 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band]; 3892 if (sta->ht_cap.ht_supported) 3893 sta_rate_set |= 3894 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) | 3895 (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET); 3896 sta_ht_cap = sta->ht_cap; 3897 sta_exists = true; 3898 3899 sta_not_found: 3900 rcu_read_unlock(); 3901 } 3902 3903 if ((changed & BSS_CHANGED_ASSOC)) { 3904 if (bss_conf->assoc) { 3905 u32 rates; 3906 int ieoffset; 3907 wlvif->aid = bss_conf->aid; 3908 wlvif->channel_type = 3909 cfg80211_get_chandef_type(&bss_conf->chandef); 3910 wlvif->beacon_int = bss_conf->beacon_int; 3911 do_join = true; 3912 set_assoc = true; 3913 3914 /* 3915 * use basic rates from AP, and determine lowest rate 3916 * to use with control frames. 3917 */ 3918 rates = bss_conf->basic_rates; 3919 wlvif->basic_rate_set = 3920 wl1271_tx_enabled_rates_get(wl, rates, 3921 wlvif->band); 3922 wlvif->basic_rate = 3923 wl1271_tx_min_rate_get(wl, 3924 wlvif->basic_rate_set); 3925 if (sta_rate_set) 3926 wlvif->rate_set = 3927 wl1271_tx_enabled_rates_get(wl, 3928 sta_rate_set, 3929 wlvif->band); 3930 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 3931 if (ret < 0) 3932 goto out; 3933 3934 /* 3935 * with wl1271, we don't need to update the 3936 * beacon_int and dtim_period, because the firmware 3937 * updates it by itself when the first beacon is 3938 * received after a join. 
3939 */ 3940 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid); 3941 if (ret < 0) 3942 goto out; 3943 3944 /* 3945 * Get a template for hardware connection maintenance 3946 */ 3947 dev_kfree_skb(wlvif->probereq); 3948 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl, 3949 wlvif, 3950 NULL); 3951 ieoffset = offsetof(struct ieee80211_mgmt, 3952 u.probe_req.variable); 3953 wl1271_ssid_set(vif, wlvif->probereq, ieoffset); 3954 3955 /* enable the connection monitoring feature */ 3956 ret = wl1271_acx_conn_monit_params(wl, wlvif, true); 3957 if (ret < 0) 3958 goto out; 3959 } else { 3960 /* use defaults when not associated */ 3961 bool was_assoc = 3962 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, 3963 &wlvif->flags); 3964 bool was_ifup = 3965 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT, 3966 &wlvif->flags); 3967 wlvif->aid = 0; 3968 3969 /* free probe-request template */ 3970 dev_kfree_skb(wlvif->probereq); 3971 wlvif->probereq = NULL; 3972 3973 /* revert back to minimum rates for the current band */ 3974 wl1271_set_band_rate(wl, wlvif); 3975 wlvif->basic_rate = 3976 wl1271_tx_min_rate_get(wl, 3977 wlvif->basic_rate_set); 3978 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 3979 if (ret < 0) 3980 goto out; 3981 3982 /* disable connection monitor features */ 3983 ret = wl1271_acx_conn_monit_params(wl, wlvif, false); 3984 3985 /* Disable the keep-alive feature */ 3986 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); 3987 if (ret < 0) 3988 goto out; 3989 3990 /* restore the bssid filter and go to dummy bssid */ 3991 if (was_assoc) { 3992 /* 3993 * we might have to disable roc, if there was 3994 * no IF_OPER_UP notification. 3995 */ 3996 if (!was_ifup) { 3997 ret = wl12xx_croc(wl, wlvif->role_id); 3998 if (ret < 0) 3999 goto out; 4000 } 4001 /* 4002 * (we also need to disable roc in case of 4003 * roaming on the same channel. until we will 4004 * have a better flow...) 4005 */ 4006 if (test_bit(wlvif->dev_role_id, wl->roc_map)) { 4007 ret = wl12xx_croc(wl, 4008 wlvif->dev_role_id); 4009 if (ret < 0) 4010 goto out; 4011 } 4012 4013 wl1271_unjoin(wl, wlvif); 4014 if (!bss_conf->idle) 4015 wl12xx_start_dev(wl, wlvif); 4016 } 4017 } 4018 } 4019 4020 if (changed & BSS_CHANGED_IBSS) { 4021 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d", 4022 bss_conf->ibss_joined); 4023 4024 if (bss_conf->ibss_joined) { 4025 u32 rates = bss_conf->basic_rates; 4026 wlvif->basic_rate_set = 4027 wl1271_tx_enabled_rates_get(wl, rates, 4028 wlvif->band); 4029 wlvif->basic_rate = 4030 wl1271_tx_min_rate_get(wl, 4031 wlvif->basic_rate_set); 4032 4033 /* by default, use 11b + OFDM rates */ 4034 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES; 4035 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 4036 if (ret < 0) 4037 goto out; 4038 } 4039 } 4040 4041 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed); 4042 if (ret < 0) 4043 goto out; 4044 4045 if (do_join) { 4046 ret = wl1271_join(wl, wlvif, set_assoc); 4047 if (ret < 0) { 4048 wl1271_warning("cmd join failed %d", ret); 4049 goto out; 4050 } 4051 4052 /* ROC until connected (after EAPOL exchange) */ 4053 if (!is_ibss) { 4054 ret = wl12xx_roc(wl, wlvif, wlvif->role_id); 4055 if (ret < 0) 4056 goto out; 4057 4058 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) 4059 wl12xx_set_authorized(wl, wlvif); 4060 } 4061 /* 4062 * stop device role if started (we might already be in 4063 * STA/IBSS role). 
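 *
 * (The device role was presumably only carrying the pre-connection
 * listening/ROC state; once the STA/IBSS role is up it is redundant,
 * hence the wl12xx_stop_dev() below.)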
4064 */ 4065 if (wl12xx_dev_role_started(wlvif)) { 4066 ret = wl12xx_stop_dev(wl, wlvif); 4067 if (ret < 0) 4068 goto out; 4069 } 4070 } 4071 4072 /* Handle new association with HT. Do this after join. */ 4073 if (sta_exists) { 4074 if ((changed & BSS_CHANGED_HT) && 4075 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) { 4076 ret = wl1271_acx_set_ht_capabilities(wl, 4077 &sta_ht_cap, 4078 true, 4079 wlvif->sta.hlid); 4080 if (ret < 0) { 4081 wl1271_warning("Set ht cap true failed %d", 4082 ret); 4083 goto out; 4084 } 4085 } 4086 /* handle new association without HT and disassociation */ 4087 else if (changed & BSS_CHANGED_ASSOC) { 4088 ret = wl1271_acx_set_ht_capabilities(wl, 4089 &sta_ht_cap, 4090 false, 4091 wlvif->sta.hlid); 4092 if (ret < 0) { 4093 wl1271_warning("Set ht cap false failed %d", 4094 ret); 4095 goto out; 4096 } 4097 } 4098 } 4099 4100 /* Handle HT information change. Done after join. */ 4101 if ((changed & BSS_CHANGED_HT) && 4102 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) { 4103 ret = wl1271_acx_set_ht_information(wl, wlvif, 4104 bss_conf->ht_operation_mode); 4105 if (ret < 0) { 4106 wl1271_warning("Set ht information failed %d", ret); 4107 goto out; 4108 } 4109 } 4110 4111 /* Handle arp filtering. Done after join. */ 4112 if ((changed & BSS_CHANGED_ARP_FILTER) || 4113 (!is_ibss && (changed & BSS_CHANGED_QOS))) { 4114 __be32 addr = bss_conf->arp_addr_list[0]; 4115 wlvif->sta.qos = bss_conf->qos; 4116 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS); 4117 4118 if (bss_conf->arp_addr_cnt == 1 && 4119 bss_conf->arp_filter_enabled) { 4120 wlvif->ip_addr = addr; 4121 /* 4122 * The template should have been configured only upon 4123 * association. however, it seems that the correct ip 4124 * isn't being set (when sending), so we have to 4125 * reconfigure the template upon every ip change. 
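 *
 * The resulting flow on every address change is (these are the
 * existing calls below, listed only for orientation):
 *	wl1271_cmd_build_arp_rsp()    refresh the ARP reply template
 *	wl1271_acx_arp_ip_filter()    enable ARP_FILTERING | AUTO_ARP
 * and when the single-address condition no longer holds the filter is
 * simply switched off again with a zero enable mask.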
4126 */ 4127 ret = wl1271_cmd_build_arp_rsp(wl, wlvif); 4128 if (ret < 0) { 4129 wl1271_warning("build arp rsp failed: %d", ret); 4130 goto out; 4131 } 4132 4133 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 4134 (ACX_ARP_FILTER_ARP_FILTERING | 4135 ACX_ARP_FILTER_AUTO_ARP), 4136 addr); 4137 } else { 4138 wlvif->ip_addr = 0; 4139 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr); 4140 } 4141 4142 if (ret < 0) 4143 goto out; 4144 } 4145 4146 out: 4147 return; 4148 } 4149 4150 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, 4151 struct ieee80211_vif *vif, 4152 struct ieee80211_bss_conf *bss_conf, 4153 u32 changed) 4154 { 4155 struct wl1271 *wl = hw->priv; 4156 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4157 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 4158 int ret; 4159 4160 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x", 4161 (int)changed); 4162 4163 /* 4164 * make sure to cancel pending disconnections if our association 4165 * state changed 4166 */ 4167 if (!is_ap && (changed & BSS_CHANGED_ASSOC)) 4168 cancel_delayed_work_sync(&wl->connection_loss_work); 4169 4170 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) && 4171 !bss_conf->enable_beacon) 4172 wl1271_tx_flush(wl); 4173 4174 mutex_lock(&wl->mutex); 4175 4176 if (unlikely(wl->state != WLCORE_STATE_ON)) 4177 goto out; 4178 4179 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) 4180 goto out; 4181 4182 ret = wl1271_ps_elp_wakeup(wl); 4183 if (ret < 0) 4184 goto out; 4185 4186 if (is_ap) 4187 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed); 4188 else 4189 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed); 4190 4191 wl1271_ps_elp_sleep(wl); 4192 4193 out: 4194 mutex_unlock(&wl->mutex); 4195 } 4196 4197 static int wl1271_op_conf_tx(struct ieee80211_hw *hw, 4198 struct ieee80211_vif *vif, u16 queue, 4199 const struct ieee80211_tx_queue_params *params) 4200 { 4201 struct wl1271 *wl = hw->priv; 4202 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4203 u8 ps_scheme; 4204 int ret = 0; 4205 4206 mutex_lock(&wl->mutex); 4207 4208 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue); 4209 4210 if (params->uapsd) 4211 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER; 4212 else 4213 ps_scheme = CONF_PS_SCHEME_LEGACY; 4214 4215 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) 4216 goto out; 4217 4218 ret = wl1271_ps_elp_wakeup(wl); 4219 if (ret < 0) 4220 goto out; 4221 4222 /* 4223 * the txop is confed in units of 32us by the mac80211, 4224 * we need us 4225 */ 4226 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue), 4227 params->cw_min, params->cw_max, 4228 params->aifs, params->txop << 5); 4229 if (ret < 0) 4230 goto out_sleep; 4231 4232 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue), 4233 CONF_CHANNEL_TYPE_EDCF, 4234 wl1271_tx_get_queue(queue), 4235 ps_scheme, CONF_ACK_POLICY_LEGACY, 4236 0, 0); 4237 4238 out_sleep: 4239 wl1271_ps_elp_sleep(wl); 4240 4241 out: 4242 mutex_unlock(&wl->mutex); 4243 4244 return ret; 4245 } 4246 4247 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw, 4248 struct ieee80211_vif *vif) 4249 { 4250 4251 struct wl1271 *wl = hw->priv; 4252 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4253 u64 mactime = ULLONG_MAX; 4254 int ret; 4255 4256 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf"); 4257 4258 mutex_lock(&wl->mutex); 4259 4260 if (unlikely(wl->state != WLCORE_STATE_ON)) 4261 goto out; 4262 4263 ret = wl1271_ps_elp_wakeup(wl); 4264 if (ret < 0) 4265 goto out; 4266 4267 ret = wl12xx_acx_tsf_info(wl, wlvif, 
				  &mactime);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	return mactime;
}

static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
				struct survey_info *survey)
{
	struct ieee80211_conf *conf = &hw->conf;

	if (idx != 0)
		return -ENOENT;

	survey->channel = conf->channel;
	survey->filled = 0;
	return 0;
}

static int wl1271_allocate_sta(struct wl1271 *wl,
			       struct wl12xx_vif *wlvif,
			       struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	int ret;

	if (wl->active_sta_count >= AP_MAX_STATIONS) {
		wl1271_warning("could not allocate HLID - too many stations");
		return -EBUSY;
	}

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
	if (ret < 0) {
		wl1271_warning("could not allocate HLID - too many links");
		return -EBUSY;
	}

	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
	wl->active_sta_count++;
	return 0;
}

void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	memset(wl->links[hlid].addr, 0, ETH_ALEN);
	wl->links[hlid].ba_bitmap = 0;
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	wl12xx_free_link(wl, wlvif, &hlid);
	wl->active_sta_count--;

	/*
	 * rearm the tx watchdog when the last STA is freed - give the FW a
	 * chance to return STA-buffered packets before complaining.
4334 */ 4335 if (wl->active_sta_count == 0) 4336 wl12xx_rearm_tx_watchdog_locked(wl); 4337 } 4338 4339 static int wl12xx_sta_add(struct wl1271 *wl, 4340 struct wl12xx_vif *wlvif, 4341 struct ieee80211_sta *sta) 4342 { 4343 struct wl1271_station *wl_sta; 4344 int ret = 0; 4345 u8 hlid; 4346 4347 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid); 4348 4349 ret = wl1271_allocate_sta(wl, wlvif, sta); 4350 if (ret < 0) 4351 return ret; 4352 4353 wl_sta = (struct wl1271_station *)sta->drv_priv; 4354 hlid = wl_sta->hlid; 4355 4356 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid); 4357 if (ret < 0) 4358 wl1271_free_sta(wl, wlvif, hlid); 4359 4360 return ret; 4361 } 4362 4363 static int wl12xx_sta_remove(struct wl1271 *wl, 4364 struct wl12xx_vif *wlvif, 4365 struct ieee80211_sta *sta) 4366 { 4367 struct wl1271_station *wl_sta; 4368 int ret = 0, id; 4369 4370 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid); 4371 4372 wl_sta = (struct wl1271_station *)sta->drv_priv; 4373 id = wl_sta->hlid; 4374 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map))) 4375 return -EINVAL; 4376 4377 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid); 4378 if (ret < 0) 4379 return ret; 4380 4381 wl1271_free_sta(wl, wlvif, wl_sta->hlid); 4382 return ret; 4383 } 4384 4385 static int wl12xx_update_sta_state(struct wl1271 *wl, 4386 struct wl12xx_vif *wlvif, 4387 struct ieee80211_sta *sta, 4388 enum ieee80211_sta_state old_state, 4389 enum ieee80211_sta_state new_state) 4390 { 4391 struct wl1271_station *wl_sta; 4392 u8 hlid; 4393 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; 4394 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS; 4395 int ret; 4396 4397 wl_sta = (struct wl1271_station *)sta->drv_priv; 4398 hlid = wl_sta->hlid; 4399 4400 /* Add station (AP mode) */ 4401 if (is_ap && 4402 old_state == IEEE80211_STA_NOTEXIST && 4403 new_state == IEEE80211_STA_NONE) 4404 return wl12xx_sta_add(wl, wlvif, sta); 4405 4406 /* Remove station (AP mode) */ 4407 if (is_ap && 4408 old_state == IEEE80211_STA_NONE && 4409 new_state == IEEE80211_STA_NOTEXIST) { 4410 /* must not fail */ 4411 wl12xx_sta_remove(wl, wlvif, sta); 4412 return 0; 4413 } 4414 4415 /* Authorize station (AP mode) */ 4416 if (is_ap && 4417 new_state == IEEE80211_STA_AUTHORIZED) { 4418 ret = wl12xx_cmd_set_peer_state(wl, hlid); 4419 if (ret < 0) 4420 return ret; 4421 4422 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true, 4423 hlid); 4424 return ret; 4425 } 4426 4427 /* Authorize station */ 4428 if (is_sta && 4429 new_state == IEEE80211_STA_AUTHORIZED) { 4430 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags); 4431 return wl12xx_set_authorized(wl, wlvif); 4432 } 4433 4434 if (is_sta && 4435 old_state == IEEE80211_STA_AUTHORIZED && 4436 new_state == IEEE80211_STA_ASSOC) { 4437 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags); 4438 return 0; 4439 } 4440 4441 return 0; 4442 } 4443 4444 static int wl12xx_op_sta_state(struct ieee80211_hw *hw, 4445 struct ieee80211_vif *vif, 4446 struct ieee80211_sta *sta, 4447 enum ieee80211_sta_state old_state, 4448 enum ieee80211_sta_state new_state) 4449 { 4450 struct wl1271 *wl = hw->priv; 4451 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4452 int ret; 4453 4454 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d", 4455 sta->aid, old_state, new_state); 4456 4457 mutex_lock(&wl->mutex); 4458 4459 if (unlikely(wl->state != WLCORE_STATE_ON)) { 4460 ret = -EBUSY; 4461 goto out; 4462 } 4463 4464 ret = wl1271_ps_elp_wakeup(wl); 4465 if (ret < 0) 4466 goto out; 4467 4468 ret = 
	      wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	if (new_state < old_state)
		return 0;
	return ret;
}

static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum ieee80211_ampdu_mlme_action action,
				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				  u8 buf_size)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u8 hlid, *ba_bitmap;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
		     tid);

	/* sanity check - the fields in FW are only 8 bits wide */
	if (WARN_ON(tid > 0xFF))
		return -ENOTSUPP;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
		hlid = wlvif->sta.hlid;
		ba_bitmap = &wlvif->sta.ba_rx_bitmap;
	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		hlid = wl_sta->hlid;
		ba_bitmap = &wl->links[hlid].ba_bitmap;
	} else {
		ret = -EINVAL;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
		     tid, action);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!wlvif->ba_support || !wlvif->ba_allowed) {
			ret = -ENOTSUPP;
			break;
		}

		if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
			ret = -EBUSY;
			wl1271_error("exceeded max RX BA sessions");
			break;
		}

		if (*ba_bitmap & BIT(tid)) {
			ret = -EINVAL;
			wl1271_error("cannot enable RX BA session on active "
				     "tid: %d", tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
							 hlid);
		if (!ret) {
			*ba_bitmap |= BIT(tid);
			wl->ba_rx_session_count++;
		}
		break;

	case IEEE80211_AMPDU_RX_STOP:
		if (!(*ba_bitmap & BIT(tid))) {
			/*
			 * this happens on reconfig - so only output a debug
			 * message for now, and don't fail the function.
			 */
			wl1271_debug(DEBUG_MAC80211,
				     "no active RX BA session on tid: %d",
				     tid);
			ret = 0;
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
							 hlid);
		if (!ret) {
			*ba_bitmap &= ~BIT(tid);
			wl->ba_rx_session_count--;
		}
		break;

	/*
	 * BA initiator (TX) sessions are managed internally by the FW, so
	 * all TX AMPDU actions coming from mac80211 are rejected on purpose.
4576 */ 4577 case IEEE80211_AMPDU_TX_START: 4578 case IEEE80211_AMPDU_TX_STOP: 4579 case IEEE80211_AMPDU_TX_OPERATIONAL: 4580 ret = -EINVAL; 4581 break; 4582 4583 default: 4584 wl1271_error("Incorrect ampdu action id=%x\n", action); 4585 ret = -EINVAL; 4586 } 4587 4588 wl1271_ps_elp_sleep(wl); 4589 4590 out: 4591 mutex_unlock(&wl->mutex); 4592 4593 return ret; 4594 } 4595 4596 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, 4597 struct ieee80211_vif *vif, 4598 const struct cfg80211_bitrate_mask *mask) 4599 { 4600 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4601 struct wl1271 *wl = hw->priv; 4602 int i, ret = 0; 4603 4604 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x", 4605 mask->control[NL80211_BAND_2GHZ].legacy, 4606 mask->control[NL80211_BAND_5GHZ].legacy); 4607 4608 mutex_lock(&wl->mutex); 4609 4610 for (i = 0; i < WLCORE_NUM_BANDS; i++) 4611 wlvif->bitrate_masks[i] = 4612 wl1271_tx_enabled_rates_get(wl, 4613 mask->control[i].legacy, 4614 i); 4615 4616 if (unlikely(wl->state != WLCORE_STATE_ON)) 4617 goto out; 4618 4619 if (wlvif->bss_type == BSS_TYPE_STA_BSS && 4620 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { 4621 4622 ret = wl1271_ps_elp_wakeup(wl); 4623 if (ret < 0) 4624 goto out; 4625 4626 wl1271_set_band_rate(wl, wlvif); 4627 wlvif->basic_rate = 4628 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 4629 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 4630 4631 wl1271_ps_elp_sleep(wl); 4632 } 4633 out: 4634 mutex_unlock(&wl->mutex); 4635 4636 return ret; 4637 } 4638 4639 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, 4640 struct ieee80211_channel_switch *ch_switch) 4641 { 4642 struct wl1271 *wl = hw->priv; 4643 struct wl12xx_vif *wlvif; 4644 int ret; 4645 4646 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch"); 4647 4648 wl1271_tx_flush(wl); 4649 4650 mutex_lock(&wl->mutex); 4651 4652 if (unlikely(wl->state == WLCORE_STATE_OFF)) { 4653 wl12xx_for_each_wlvif_sta(wl, wlvif) { 4654 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 4655 ieee80211_chswitch_done(vif, false); 4656 } 4657 goto out; 4658 } else if (unlikely(wl->state != WLCORE_STATE_ON)) { 4659 goto out; 4660 } 4661 4662 ret = wl1271_ps_elp_wakeup(wl); 4663 if (ret < 0) 4664 goto out; 4665 4666 /* TODO: change mac80211 to pass vif as param */ 4667 wl12xx_for_each_wlvif_sta(wl, wlvif) { 4668 ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch); 4669 4670 if (!ret) 4671 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); 4672 } 4673 4674 wl1271_ps_elp_sleep(wl); 4675 4676 out: 4677 mutex_unlock(&wl->mutex); 4678 } 4679 4680 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop) 4681 { 4682 struct wl1271 *wl = hw->priv; 4683 4684 wl1271_tx_flush(wl); 4685 } 4686 4687 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw) 4688 { 4689 struct wl1271 *wl = hw->priv; 4690 bool ret = false; 4691 4692 mutex_lock(&wl->mutex); 4693 4694 if (unlikely(wl->state != WLCORE_STATE_ON)) 4695 goto out; 4696 4697 /* packets are considered pending if in the TX queue or the FW */ 4698 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0); 4699 out: 4700 mutex_unlock(&wl->mutex); 4701 4702 return ret; 4703 } 4704 4705 /* can't be const, mac80211 writes to this */ 4706 static struct ieee80211_rate wl1271_rates[] = { 4707 { .bitrate = 10, 4708 .hw_value = CONF_HW_BIT_RATE_1MBPS, 4709 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, }, 4710 { .bitrate = 20, 4711 .hw_value = CONF_HW_BIT_RATE_2MBPS, 4712 .hw_value_short = CONF_HW_BIT_RATE_2MBPS, 4713 
.flags = IEEE80211_RATE_SHORT_PREAMBLE }, 4714 { .bitrate = 55, 4715 .hw_value = CONF_HW_BIT_RATE_5_5MBPS, 4716 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS, 4717 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 4718 { .bitrate = 110, 4719 .hw_value = CONF_HW_BIT_RATE_11MBPS, 4720 .hw_value_short = CONF_HW_BIT_RATE_11MBPS, 4721 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 4722 { .bitrate = 60, 4723 .hw_value = CONF_HW_BIT_RATE_6MBPS, 4724 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, }, 4725 { .bitrate = 90, 4726 .hw_value = CONF_HW_BIT_RATE_9MBPS, 4727 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, }, 4728 { .bitrate = 120, 4729 .hw_value = CONF_HW_BIT_RATE_12MBPS, 4730 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, }, 4731 { .bitrate = 180, 4732 .hw_value = CONF_HW_BIT_RATE_18MBPS, 4733 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, }, 4734 { .bitrate = 240, 4735 .hw_value = CONF_HW_BIT_RATE_24MBPS, 4736 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, }, 4737 { .bitrate = 360, 4738 .hw_value = CONF_HW_BIT_RATE_36MBPS, 4739 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, }, 4740 { .bitrate = 480, 4741 .hw_value = CONF_HW_BIT_RATE_48MBPS, 4742 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, }, 4743 { .bitrate = 540, 4744 .hw_value = CONF_HW_BIT_RATE_54MBPS, 4745 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, 4746 }; 4747 4748 /* can't be const, mac80211 writes to this */ 4749 static struct ieee80211_channel wl1271_channels[] = { 4750 { .hw_value = 1, .center_freq = 2412, .max_power = 25 }, 4751 { .hw_value = 2, .center_freq = 2417, .max_power = 25 }, 4752 { .hw_value = 3, .center_freq = 2422, .max_power = 25 }, 4753 { .hw_value = 4, .center_freq = 2427, .max_power = 25 }, 4754 { .hw_value = 5, .center_freq = 2432, .max_power = 25 }, 4755 { .hw_value = 6, .center_freq = 2437, .max_power = 25 }, 4756 { .hw_value = 7, .center_freq = 2442, .max_power = 25 }, 4757 { .hw_value = 8, .center_freq = 2447, .max_power = 25 }, 4758 { .hw_value = 9, .center_freq = 2452, .max_power = 25 }, 4759 { .hw_value = 10, .center_freq = 2457, .max_power = 25 }, 4760 { .hw_value = 11, .center_freq = 2462, .max_power = 25 }, 4761 { .hw_value = 12, .center_freq = 2467, .max_power = 25 }, 4762 { .hw_value = 13, .center_freq = 2472, .max_power = 25 }, 4763 { .hw_value = 14, .center_freq = 2484, .max_power = 25 }, 4764 }; 4765 4766 /* can't be const, mac80211 writes to this */ 4767 static struct ieee80211_supported_band wl1271_band_2ghz = { 4768 .channels = wl1271_channels, 4769 .n_channels = ARRAY_SIZE(wl1271_channels), 4770 .bitrates = wl1271_rates, 4771 .n_bitrates = ARRAY_SIZE(wl1271_rates), 4772 }; 4773 4774 /* 5 GHz data rates for WL1273 */ 4775 static struct ieee80211_rate wl1271_rates_5ghz[] = { 4776 { .bitrate = 60, 4777 .hw_value = CONF_HW_BIT_RATE_6MBPS, 4778 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, }, 4779 { .bitrate = 90, 4780 .hw_value = CONF_HW_BIT_RATE_9MBPS, 4781 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, }, 4782 { .bitrate = 120, 4783 .hw_value = CONF_HW_BIT_RATE_12MBPS, 4784 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, }, 4785 { .bitrate = 180, 4786 .hw_value = CONF_HW_BIT_RATE_18MBPS, 4787 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, }, 4788 { .bitrate = 240, 4789 .hw_value = CONF_HW_BIT_RATE_24MBPS, 4790 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, }, 4791 { .bitrate = 360, 4792 .hw_value = CONF_HW_BIT_RATE_36MBPS, 4793 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, }, 4794 { .bitrate = 480, 4795 .hw_value = CONF_HW_BIT_RATE_48MBPS, 4796 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, }, 4797 { .bitrate = 540, 4798 .hw_value = CONF_HW_BIT_RATE_54MBPS, 4799 
.hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, 4800 }; 4801 4802 /* 5 GHz band channels for WL1273 */ 4803 static struct ieee80211_channel wl1271_channels_5ghz[] = { 4804 { .hw_value = 7, .center_freq = 5035, .max_power = 25 }, 4805 { .hw_value = 8, .center_freq = 5040, .max_power = 25 }, 4806 { .hw_value = 9, .center_freq = 5045, .max_power = 25 }, 4807 { .hw_value = 11, .center_freq = 5055, .max_power = 25 }, 4808 { .hw_value = 12, .center_freq = 5060, .max_power = 25 }, 4809 { .hw_value = 16, .center_freq = 5080, .max_power = 25 }, 4810 { .hw_value = 34, .center_freq = 5170, .max_power = 25 }, 4811 { .hw_value = 36, .center_freq = 5180, .max_power = 25 }, 4812 { .hw_value = 38, .center_freq = 5190, .max_power = 25 }, 4813 { .hw_value = 40, .center_freq = 5200, .max_power = 25 }, 4814 { .hw_value = 42, .center_freq = 5210, .max_power = 25 }, 4815 { .hw_value = 44, .center_freq = 5220, .max_power = 25 }, 4816 { .hw_value = 46, .center_freq = 5230, .max_power = 25 }, 4817 { .hw_value = 48, .center_freq = 5240, .max_power = 25 }, 4818 { .hw_value = 52, .center_freq = 5260, .max_power = 25 }, 4819 { .hw_value = 56, .center_freq = 5280, .max_power = 25 }, 4820 { .hw_value = 60, .center_freq = 5300, .max_power = 25 }, 4821 { .hw_value = 64, .center_freq = 5320, .max_power = 25 }, 4822 { .hw_value = 100, .center_freq = 5500, .max_power = 25 }, 4823 { .hw_value = 104, .center_freq = 5520, .max_power = 25 }, 4824 { .hw_value = 108, .center_freq = 5540, .max_power = 25 }, 4825 { .hw_value = 112, .center_freq = 5560, .max_power = 25 }, 4826 { .hw_value = 116, .center_freq = 5580, .max_power = 25 }, 4827 { .hw_value = 120, .center_freq = 5600, .max_power = 25 }, 4828 { .hw_value = 124, .center_freq = 5620, .max_power = 25 }, 4829 { .hw_value = 128, .center_freq = 5640, .max_power = 25 }, 4830 { .hw_value = 132, .center_freq = 5660, .max_power = 25 }, 4831 { .hw_value = 136, .center_freq = 5680, .max_power = 25 }, 4832 { .hw_value = 140, .center_freq = 5700, .max_power = 25 }, 4833 { .hw_value = 149, .center_freq = 5745, .max_power = 25 }, 4834 { .hw_value = 153, .center_freq = 5765, .max_power = 25 }, 4835 { .hw_value = 157, .center_freq = 5785, .max_power = 25 }, 4836 { .hw_value = 161, .center_freq = 5805, .max_power = 25 }, 4837 { .hw_value = 165, .center_freq = 5825, .max_power = 25 }, 4838 }; 4839 4840 static struct ieee80211_supported_band wl1271_band_5ghz = { 4841 .channels = wl1271_channels_5ghz, 4842 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz), 4843 .bitrates = wl1271_rates_5ghz, 4844 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), 4845 }; 4846 4847 static const struct ieee80211_ops wl1271_ops = { 4848 .start = wl1271_op_start, 4849 .stop = wlcore_op_stop, 4850 .add_interface = wl1271_op_add_interface, 4851 .remove_interface = wl1271_op_remove_interface, 4852 .change_interface = wl12xx_op_change_interface, 4853 #ifdef CONFIG_PM 4854 .suspend = wl1271_op_suspend, 4855 .resume = wl1271_op_resume, 4856 #endif 4857 .config = wl1271_op_config, 4858 .prepare_multicast = wl1271_op_prepare_multicast, 4859 .configure_filter = wl1271_op_configure_filter, 4860 .tx = wl1271_op_tx, 4861 .set_key = wlcore_op_set_key, 4862 .hw_scan = wl1271_op_hw_scan, 4863 .cancel_hw_scan = wl1271_op_cancel_hw_scan, 4864 .sched_scan_start = wl1271_op_sched_scan_start, 4865 .sched_scan_stop = wl1271_op_sched_scan_stop, 4866 .bss_info_changed = wl1271_op_bss_info_changed, 4867 .set_frag_threshold = wl1271_op_set_frag_threshold, 4868 .set_rts_threshold = wl1271_op_set_rts_threshold, 4869 .conf_tx = wl1271_op_conf_tx, 4870 
.get_tsf = wl1271_op_get_tsf, 4871 .get_survey = wl1271_op_get_survey, 4872 .sta_state = wl12xx_op_sta_state, 4873 .ampdu_action = wl1271_op_ampdu_action, 4874 .tx_frames_pending = wl1271_tx_frames_pending, 4875 .set_bitrate_mask = wl12xx_set_bitrate_mask, 4876 .channel_switch = wl12xx_op_channel_switch, 4877 .flush = wlcore_op_flush, 4878 CFG80211_TESTMODE_CMD(wl1271_tm_cmd) 4879 }; 4880 4881 4882 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band) 4883 { 4884 u8 idx; 4885 4886 BUG_ON(band >= 2); 4887 4888 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) { 4889 wl1271_error("Illegal RX rate from HW: %d", rate); 4890 return 0; 4891 } 4892 4893 idx = wl->band_rate_to_idx[band][rate]; 4894 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) { 4895 wl1271_error("Unsupported RX rate from HW: %d", rate); 4896 return 0; 4897 } 4898 4899 return idx; 4900 } 4901 4902 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev, 4903 struct device_attribute *attr, 4904 char *buf) 4905 { 4906 struct wl1271 *wl = dev_get_drvdata(dev); 4907 ssize_t len; 4908 4909 len = PAGE_SIZE; 4910 4911 mutex_lock(&wl->mutex); 4912 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n", 4913 wl->sg_enabled); 4914 mutex_unlock(&wl->mutex); 4915 4916 return len; 4917 4918 } 4919 4920 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev, 4921 struct device_attribute *attr, 4922 const char *buf, size_t count) 4923 { 4924 struct wl1271 *wl = dev_get_drvdata(dev); 4925 unsigned long res; 4926 int ret; 4927 4928 ret = kstrtoul(buf, 10, &res); 4929 if (ret < 0) { 4930 wl1271_warning("incorrect value written to bt_coex_mode"); 4931 return count; 4932 } 4933 4934 mutex_lock(&wl->mutex); 4935 4936 res = !!res; 4937 4938 if (res == wl->sg_enabled) 4939 goto out; 4940 4941 wl->sg_enabled = res; 4942 4943 if (unlikely(wl->state != WLCORE_STATE_ON)) 4944 goto out; 4945 4946 ret = wl1271_ps_elp_wakeup(wl); 4947 if (ret < 0) 4948 goto out; 4949 4950 wl1271_acx_sg_enable(wl, wl->sg_enabled); 4951 wl1271_ps_elp_sleep(wl); 4952 4953 out: 4954 mutex_unlock(&wl->mutex); 4955 return count; 4956 } 4957 4958 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR, 4959 wl1271_sysfs_show_bt_coex_state, 4960 wl1271_sysfs_store_bt_coex_state); 4961 4962 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev, 4963 struct device_attribute *attr, 4964 char *buf) 4965 { 4966 struct wl1271 *wl = dev_get_drvdata(dev); 4967 ssize_t len; 4968 4969 len = PAGE_SIZE; 4970 4971 mutex_lock(&wl->mutex); 4972 if (wl->hw_pg_ver >= 0) 4973 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver); 4974 else 4975 len = snprintf(buf, len, "n/a\n"); 4976 mutex_unlock(&wl->mutex); 4977 4978 return len; 4979 } 4980 4981 static DEVICE_ATTR(hw_pg_ver, S_IRUGO, 4982 wl1271_sysfs_show_hw_pg_ver, NULL); 4983 4984 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, 4985 struct bin_attribute *bin_attr, 4986 char *buffer, loff_t pos, size_t count) 4987 { 4988 struct device *dev = container_of(kobj, struct device, kobj); 4989 struct wl1271 *wl = dev_get_drvdata(dev); 4990 ssize_t len; 4991 int ret; 4992 4993 ret = mutex_lock_interruptible(&wl->mutex); 4994 if (ret < 0) 4995 return -ERESTARTSYS; 4996 4997 /* Let only one thread read the log at a time, blocking others */ 4998 while (wl->fwlog_size == 0) { 4999 DEFINE_WAIT(wait); 5000 5001 prepare_to_wait_exclusive(&wl->fwlog_waitq, 5002 &wait, 5003 TASK_INTERRUPTIBLE); 5004 5005 if (wl->fwlog_size != 0) { 5006 finish_wait(&wl->fwlog_waitq, &wait); 5007 break; 5008 } 
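		/*
		 * Note: the recheck above closes the race with a wakeup that
		 * fires between the loop condition and prepare_to_wait();
		 * without it we could sleep past the event. wl->mutex is
		 * dropped across schedule() below because wlcore_free_hw()
		 * takes it to invalidate the log and wake all readers, so
		 * sleeping with it held would stall teardown.
		 */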
5009 5010 mutex_unlock(&wl->mutex); 5011 5012 schedule(); 5013 finish_wait(&wl->fwlog_waitq, &wait); 5014 5015 if (signal_pending(current)) 5016 return -ERESTARTSYS; 5017 5018 ret = mutex_lock_interruptible(&wl->mutex); 5019 if (ret < 0) 5020 return -ERESTARTSYS; 5021 } 5022 5023 /* Check if the fwlog is still valid */ 5024 if (wl->fwlog_size < 0) { 5025 mutex_unlock(&wl->mutex); 5026 return 0; 5027 } 5028 5029 /* Seeking is not supported - old logs are not kept. Disregard pos. */ 5030 len = min(count, (size_t)wl->fwlog_size); 5031 wl->fwlog_size -= len; 5032 memcpy(buffer, wl->fwlog, len); 5033 5034 /* Make room for new messages */ 5035 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size); 5036 5037 mutex_unlock(&wl->mutex); 5038 5039 return len; 5040 } 5041 5042 static struct bin_attribute fwlog_attr = { 5043 .attr = {.name = "fwlog", .mode = S_IRUSR}, 5044 .read = wl1271_sysfs_read_fwlog, 5045 }; 5046 5047 static void wl1271_connection_loss_work(struct work_struct *work) 5048 { 5049 struct delayed_work *dwork; 5050 struct wl1271 *wl; 5051 struct ieee80211_vif *vif; 5052 struct wl12xx_vif *wlvif; 5053 5054 dwork = container_of(work, struct delayed_work, work); 5055 wl = container_of(dwork, struct wl1271, connection_loss_work); 5056 5057 wl1271_info("Connection loss work."); 5058 5059 mutex_lock(&wl->mutex); 5060 5061 if (unlikely(wl->state != WLCORE_STATE_ON)) 5062 goto out; 5063 5064 /* Call mac80211 connection loss */ 5065 wl12xx_for_each_wlvif_sta(wl, wlvif) { 5066 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 5067 goto out; 5068 vif = wl12xx_wlvif_to_vif(wlvif); 5069 ieee80211_connection_loss(vif); 5070 } 5071 out: 5072 mutex_unlock(&wl->mutex); 5073 } 5074 5075 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic) 5076 { 5077 int i; 5078 5079 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x", 5080 oui, nic); 5081 5082 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff) 5083 wl1271_warning("NIC part of the MAC address wraps around!"); 5084 5085 for (i = 0; i < wl->num_mac_addr; i++) { 5086 wl->addresses[i].addr[0] = (u8)(oui >> 16); 5087 wl->addresses[i].addr[1] = (u8)(oui >> 8); 5088 wl->addresses[i].addr[2] = (u8) oui; 5089 wl->addresses[i].addr[3] = (u8)(nic >> 16); 5090 wl->addresses[i].addr[4] = (u8)(nic >> 8); 5091 wl->addresses[i].addr[5] = (u8) nic; 5092 nic++; 5093 } 5094 5095 /* we may be one address short at the most */ 5096 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES); 5097 5098 /* 5099 * turn on the LAA bit in the first address and use it as 5100 * the last address. 
5101 */ 5102 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) { 5103 int idx = WLCORE_NUM_MAC_ADDRESSES - 1; 5104 memcpy(&wl->addresses[idx], &wl->addresses[0], 5105 sizeof(wl->addresses[0])); 5106 /* LAA bit */ 5107 wl->addresses[idx].addr[2] |= BIT(1); 5108 } 5109 5110 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES; 5111 wl->hw->wiphy->addresses = wl->addresses; 5112 } 5113 5114 static int wl12xx_get_hw_info(struct wl1271 *wl) 5115 { 5116 int ret; 5117 5118 ret = wl12xx_set_power_on(wl); 5119 if (ret < 0) 5120 goto out; 5121 5122 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id); 5123 if (ret < 0) 5124 goto out; 5125 5126 wl->fuse_oui_addr = 0; 5127 wl->fuse_nic_addr = 0; 5128 5129 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver); 5130 if (ret < 0) 5131 goto out; 5132 5133 if (wl->ops->get_mac) 5134 ret = wl->ops->get_mac(wl); 5135 5136 out: 5137 wl1271_power_off(wl); 5138 return ret; 5139 } 5140 5141 static int wl1271_register_hw(struct wl1271 *wl) 5142 { 5143 int ret; 5144 u32 oui_addr = 0, nic_addr = 0; 5145 5146 if (wl->mac80211_registered) 5147 return 0; 5148 5149 if (wl->nvs_len >= 12) { 5150 /* NOTE: The wl->nvs->nvs element must be first, in 5151 * order to simplify the casting, we assume it is at 5152 * the beginning of the wl->nvs structure. 5153 */ 5154 u8 *nvs_ptr = (u8 *)wl->nvs; 5155 5156 oui_addr = 5157 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6]; 5158 nic_addr = 5159 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3]; 5160 } 5161 5162 /* if the MAC address is zeroed in the NVS derive from fuse */ 5163 if (oui_addr == 0 && nic_addr == 0) { 5164 oui_addr = wl->fuse_oui_addr; 5165 /* fuse has the BD_ADDR, the WLAN addresses are the next two */ 5166 nic_addr = wl->fuse_nic_addr + 1; 5167 } 5168 5169 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr); 5170 5171 ret = ieee80211_register_hw(wl->hw); 5172 if (ret < 0) { 5173 wl1271_error("unable to register mac80211 hw: %d", ret); 5174 goto out; 5175 } 5176 5177 wl->mac80211_registered = true; 5178 5179 wl1271_debugfs_init(wl); 5180 5181 wl1271_notice("loaded"); 5182 5183 out: 5184 return ret; 5185 } 5186 5187 static void wl1271_unregister_hw(struct wl1271 *wl) 5188 { 5189 if (wl->plt) 5190 wl1271_plt_stop(wl); 5191 5192 ieee80211_unregister_hw(wl->hw); 5193 wl->mac80211_registered = false; 5194 5195 } 5196 5197 static const struct ieee80211_iface_limit wlcore_iface_limits[] = { 5198 { 5199 .max = 3, 5200 .types = BIT(NL80211_IFTYPE_STATION), 5201 }, 5202 { 5203 .max = 1, 5204 .types = BIT(NL80211_IFTYPE_AP) | 5205 BIT(NL80211_IFTYPE_P2P_GO) | 5206 BIT(NL80211_IFTYPE_P2P_CLIENT), 5207 }, 5208 }; 5209 5210 static const struct ieee80211_iface_combination 5211 wlcore_iface_combinations[] = { 5212 { 5213 .num_different_channels = 1, 5214 .max_interfaces = 3, 5215 .limits = wlcore_iface_limits, 5216 .n_limits = ARRAY_SIZE(wlcore_iface_limits), 5217 }, 5218 }; 5219 5220 static int wl1271_init_ieee80211(struct wl1271 *wl) 5221 { 5222 static const u32 cipher_suites[] = { 5223 WLAN_CIPHER_SUITE_WEP40, 5224 WLAN_CIPHER_SUITE_WEP104, 5225 WLAN_CIPHER_SUITE_TKIP, 5226 WLAN_CIPHER_SUITE_CCMP, 5227 WL1271_CIPHER_SUITE_GEM, 5228 }; 5229 5230 /* The tx descriptor buffer */ 5231 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr); 5232 5233 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) 5234 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP; 5235 5236 /* unit us */ 5237 /* FIXME: find a proper value */ 5238 wl->hw->channel_change_time = 10000; 5239 wl->hw->max_listen_interval = 
wl->conf.conn.max_listen_interval; 5240 5241 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 5242 IEEE80211_HW_SUPPORTS_PS | 5243 IEEE80211_HW_SUPPORTS_DYNAMIC_PS | 5244 IEEE80211_HW_SUPPORTS_UAPSD | 5245 IEEE80211_HW_HAS_RATE_CONTROL | 5246 IEEE80211_HW_CONNECTION_MONITOR | 5247 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 5248 IEEE80211_HW_SPECTRUM_MGMT | 5249 IEEE80211_HW_AP_LINK_PS | 5250 IEEE80211_HW_AMPDU_AGGREGATION | 5251 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW | 5252 IEEE80211_HW_SCAN_WHILE_IDLE; 5253 5254 wl->hw->wiphy->cipher_suites = cipher_suites; 5255 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 5256 5257 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 5258 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | 5259 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); 5260 wl->hw->wiphy->max_scan_ssids = 1; 5261 wl->hw->wiphy->max_sched_scan_ssids = 16; 5262 wl->hw->wiphy->max_match_sets = 16; 5263 /* 5264 * Maximum length of elements in scanning probe request templates 5265 * should be the maximum length possible for a template, without 5266 * the IEEE80211 header of the template 5267 */ 5268 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - 5269 sizeof(struct ieee80211_header); 5270 5271 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - 5272 sizeof(struct ieee80211_header); 5273 5274 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD | 5275 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 5276 5277 /* make sure all our channels fit in the scanned_ch bitmask */ 5278 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) + 5279 ARRAY_SIZE(wl1271_channels_5ghz) > 5280 WL1271_MAX_CHANNELS); 5281 /* 5282 * We keep local copies of the band structs because we need to 5283 * modify them on a per-device basis. 5284 */ 5285 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz, 5286 sizeof(wl1271_band_2ghz)); 5287 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, 5288 &wl->ht_cap[IEEE80211_BAND_2GHZ], 5289 sizeof(*wl->ht_cap)); 5290 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz, 5291 sizeof(wl1271_band_5ghz)); 5292 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, 5293 &wl->ht_cap[IEEE80211_BAND_5GHZ], 5294 sizeof(*wl->ht_cap)); 5295 5296 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 5297 &wl->bands[IEEE80211_BAND_2GHZ]; 5298 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 5299 &wl->bands[IEEE80211_BAND_5GHZ]; 5300 5301 wl->hw->queues = 4; 5302 wl->hw->max_rates = 1; 5303 5304 wl->hw->wiphy->reg_notifier = wl1271_reg_notify; 5305 5306 /* the FW answers probe-requests in AP-mode */ 5307 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; 5308 wl->hw->wiphy->probe_resp_offload = 5309 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 5310 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 5311 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; 5312 5313 /* allowed interface combinations */ 5314 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations; 5315 wl->hw->wiphy->n_iface_combinations = 5316 ARRAY_SIZE(wlcore_iface_combinations); 5317 5318 SET_IEEE80211_DEV(wl->hw, wl->dev); 5319 5320 wl->hw->sta_data_size = sizeof(struct wl1271_station); 5321 wl->hw->vif_data_size = sizeof(struct wl12xx_vif); 5322 5323 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size; 5324 5325 return 0; 5326 } 5327 5328 #define WL1271_DEFAULT_CHANNEL 0 5329 5330 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size) 5331 { 5332 struct ieee80211_hw *hw; 5333 struct wl1271 *wl; 5334 int i, j, ret; 5335 unsigned int order; 5336 5337 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS); 
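	/*
	 * Sanity: AP station HLIDs are carved out of the shared wl->links[]
	 * table (see wl1271_allocate_sta()), so the per-AP station limit can
	 * never exceed the total number of links.
	 */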
5338 5339 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); 5340 if (!hw) { 5341 wl1271_error("could not alloc ieee80211_hw"); 5342 ret = -ENOMEM; 5343 goto err_hw_alloc; 5344 } 5345 5346 wl = hw->priv; 5347 memset(wl, 0, sizeof(*wl)); 5348 5349 wl->priv = kzalloc(priv_size, GFP_KERNEL); 5350 if (!wl->priv) { 5351 wl1271_error("could not alloc wl priv"); 5352 ret = -ENOMEM; 5353 goto err_priv_alloc; 5354 } 5355 5356 INIT_LIST_HEAD(&wl->wlvif_list); 5357 5358 wl->hw = hw; 5359 5360 for (i = 0; i < NUM_TX_QUEUES; i++) 5361 for (j = 0; j < WL12XX_MAX_LINKS; j++) 5362 skb_queue_head_init(&wl->links[j].tx_queue[i]); 5363 5364 skb_queue_head_init(&wl->deferred_rx_queue); 5365 skb_queue_head_init(&wl->deferred_tx_queue); 5366 5367 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 5368 INIT_WORK(&wl->netstack_work, wl1271_netstack_work); 5369 INIT_WORK(&wl->tx_work, wl1271_tx_work); 5370 INIT_WORK(&wl->recovery_work, wl1271_recovery_work); 5371 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); 5372 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work); 5373 INIT_DELAYED_WORK(&wl->connection_loss_work, 5374 wl1271_connection_loss_work); 5375 5376 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq"); 5377 if (!wl->freezable_wq) { 5378 ret = -ENOMEM; 5379 goto err_hw; 5380 } 5381 5382 wl->channel = WL1271_DEFAULT_CHANNEL; 5383 wl->rx_counter = 0; 5384 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 5385 wl->band = IEEE80211_BAND_2GHZ; 5386 wl->channel_type = NL80211_CHAN_NO_HT; 5387 wl->flags = 0; 5388 wl->sg_enabled = true; 5389 wl->sleep_auth = WL1271_PSM_ILLEGAL; 5390 wl->hw_pg_ver = -1; 5391 wl->ap_ps_map = 0; 5392 wl->ap_fw_ps_map = 0; 5393 wl->quirks = 0; 5394 wl->platform_quirks = 0; 5395 wl->sched_scanning = false; 5396 wl->system_hlid = WL12XX_SYSTEM_HLID; 5397 wl->active_sta_count = 0; 5398 wl->fwlog_size = 0; 5399 init_waitqueue_head(&wl->fwlog_waitq); 5400 5401 /* The system link is always allocated */ 5402 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); 5403 5404 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); 5405 for (i = 0; i < wl->num_tx_desc; i++) 5406 wl->tx_frames[i] = NULL; 5407 5408 spin_lock_init(&wl->wl_lock); 5409 5410 wl->state = WLCORE_STATE_OFF; 5411 wl->fw_type = WL12XX_FW_TYPE_NONE; 5412 mutex_init(&wl->mutex); 5413 mutex_init(&wl->flush_mutex); 5414 init_completion(&wl->nvs_loading_complete); 5415 5416 order = get_order(aggr_buf_size); 5417 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); 5418 if (!wl->aggr_buf) { 5419 ret = -ENOMEM; 5420 goto err_wq; 5421 } 5422 wl->aggr_buf_size = aggr_buf_size; 5423 5424 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl); 5425 if (!wl->dummy_packet) { 5426 ret = -ENOMEM; 5427 goto err_aggr; 5428 } 5429 5430 /* Allocate one page for the FW log */ 5431 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL); 5432 if (!wl->fwlog) { 5433 ret = -ENOMEM; 5434 goto err_dummy_packet; 5435 } 5436 5437 wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA); 5438 if (!wl->mbox) { 5439 ret = -ENOMEM; 5440 goto err_fwlog; 5441 } 5442 5443 return hw; 5444 5445 err_fwlog: 5446 free_page((unsigned long)wl->fwlog); 5447 5448 err_dummy_packet: 5449 dev_kfree_skb(wl->dummy_packet); 5450 5451 err_aggr: 5452 free_pages((unsigned long)wl->aggr_buf, order); 5453 5454 err_wq: 5455 destroy_workqueue(wl->freezable_wq); 5456 5457 err_hw: 5458 wl1271_debugfs_exit(wl); 5459 kfree(wl->priv); 5460 5461 err_priv_alloc: 5462 ieee80211_free_hw(hw); 5463 5464 err_hw_alloc: 5465 5466 return ERR_PTR(ret); 5467 } 5468 
EXPORT_SYMBOL_GPL(wlcore_alloc_hw); 5469 5470 int wlcore_free_hw(struct wl1271 *wl) 5471 { 5472 /* Unblock any fwlog readers */ 5473 mutex_lock(&wl->mutex); 5474 wl->fwlog_size = -1; 5475 wake_up_interruptible_all(&wl->fwlog_waitq); 5476 mutex_unlock(&wl->mutex); 5477 5478 device_remove_bin_file(wl->dev, &fwlog_attr); 5479 5480 device_remove_file(wl->dev, &dev_attr_hw_pg_ver); 5481 5482 device_remove_file(wl->dev, &dev_attr_bt_coex_state); 5483 free_page((unsigned long)wl->fwlog); 5484 dev_kfree_skb(wl->dummy_packet); 5485 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size)); 5486 5487 wl1271_debugfs_exit(wl); 5488 5489 vfree(wl->fw); 5490 wl->fw = NULL; 5491 wl->fw_type = WL12XX_FW_TYPE_NONE; 5492 kfree(wl->nvs); 5493 wl->nvs = NULL; 5494 5495 kfree(wl->fw_status_1); 5496 kfree(wl->tx_res_if); 5497 destroy_workqueue(wl->freezable_wq); 5498 5499 kfree(wl->priv); 5500 ieee80211_free_hw(wl->hw); 5501 5502 return 0; 5503 } 5504 EXPORT_SYMBOL_GPL(wlcore_free_hw); 5505 5506 static irqreturn_t wl12xx_hardirq(int irq, void *cookie) 5507 { 5508 struct wl1271 *wl = cookie; 5509 unsigned long flags; 5510 5511 wl1271_debug(DEBUG_IRQ, "IRQ"); 5512 5513 /* complete the ELP completion */ 5514 spin_lock_irqsave(&wl->wl_lock, flags); 5515 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 5516 if (wl->elp_compl) { 5517 complete(wl->elp_compl); 5518 wl->elp_compl = NULL; 5519 } 5520 5521 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) { 5522 /* don't enqueue a work right now. mark it as pending */ 5523 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags); 5524 wl1271_debug(DEBUG_IRQ, "should not enqueue work"); 5525 disable_irq_nosync(wl->irq); 5526 pm_wakeup_event(wl->dev, 0); 5527 spin_unlock_irqrestore(&wl->wl_lock, flags); 5528 return IRQ_HANDLED; 5529 } 5530 spin_unlock_irqrestore(&wl->wl_lock, flags); 5531 5532 return IRQ_WAKE_THREAD; 5533 } 5534 5535 static void wlcore_nvs_cb(const struct firmware *fw, void *context) 5536 { 5537 struct wl1271 *wl = context; 5538 struct platform_device *pdev = wl->pdev; 5539 struct wl12xx_platform_data *pdata = pdev->dev.platform_data; 5540 unsigned long irqflags; 5541 int ret; 5542 5543 if (fw) { 5544 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL); 5545 if (!wl->nvs) { 5546 wl1271_error("Could not allocate nvs data"); 5547 goto out; 5548 } 5549 wl->nvs_len = fw->size; 5550 } else { 5551 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s", 5552 WL12XX_NVS_NAME); 5553 wl->nvs = NULL; 5554 wl->nvs_len = 0; 5555 } 5556 5557 ret = wl->ops->setup(wl); 5558 if (ret < 0) 5559 goto out_free_nvs; 5560 5561 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS); 5562 5563 /* adjust some runtime configuration parameters */ 5564 wlcore_adjust_conf(wl); 5565 5566 wl->irq = platform_get_irq(pdev, 0); 5567 wl->platform_quirks = pdata->platform_quirks; 5568 wl->set_power = pdata->set_power; 5569 wl->if_ops = pdata->ops; 5570 5571 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) 5572 irqflags = IRQF_TRIGGER_RISING; 5573 else 5574 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; 5575 5576 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq, 5577 irqflags, 5578 pdev->name, wl); 5579 if (ret < 0) { 5580 wl1271_error("request_irq() failed: %d", ret); 5581 goto out_free_nvs; 5582 } 5583 5584 #ifdef CONFIG_PM 5585 ret = enable_irq_wake(wl->irq); 5586 if (!ret) { 5587 wl->irq_wake_enabled = true; 5588 device_init_wakeup(wl->dev, 1); 5589 if (pdata->pwr_in_suspend) { 5590 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; 5591 wl->hw->wiphy->wowlan.n_patterns = 5592 
WL1271_MAX_RX_FILTERS; 5593 wl->hw->wiphy->wowlan.pattern_min_len = 1; 5594 wl->hw->wiphy->wowlan.pattern_max_len = 5595 WL1271_RX_FILTER_MAX_PATTERN_SIZE; 5596 } 5597 } 5598 #endif 5599 disable_irq(wl->irq); 5600 5601 ret = wl12xx_get_hw_info(wl); 5602 if (ret < 0) { 5603 wl1271_error("couldn't get hw info"); 5604 goto out_irq; 5605 } 5606 5607 ret = wl->ops->identify_chip(wl); 5608 if (ret < 0) 5609 goto out_irq; 5610 5611 ret = wl1271_init_ieee80211(wl); 5612 if (ret) 5613 goto out_irq; 5614 5615 ret = wl1271_register_hw(wl); 5616 if (ret) 5617 goto out_irq; 5618 5619 /* Create sysfs file to control bt coex state */ 5620 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state); 5621 if (ret < 0) { 5622 wl1271_error("failed to create sysfs file bt_coex_state"); 5623 goto out_unreg; 5624 } 5625 5626 /* Create sysfs file to get HW PG version */ 5627 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver); 5628 if (ret < 0) { 5629 wl1271_error("failed to create sysfs file hw_pg_ver"); 5630 goto out_bt_coex_state; 5631 } 5632 5633 /* Create sysfs file for the FW log */ 5634 ret = device_create_bin_file(wl->dev, &fwlog_attr); 5635 if (ret < 0) { 5636 wl1271_error("failed to create sysfs file fwlog"); 5637 goto out_hw_pg_ver; 5638 } 5639 5640 wl->initialized = true; 5641 goto out; 5642 5643 out_hw_pg_ver: 5644 device_remove_file(wl->dev, &dev_attr_hw_pg_ver); 5645 5646 out_bt_coex_state: 5647 device_remove_file(wl->dev, &dev_attr_bt_coex_state); 5648 5649 out_unreg: 5650 wl1271_unregister_hw(wl); 5651 5652 out_irq: 5653 free_irq(wl->irq, wl); 5654 5655 out_free_nvs: 5656 kfree(wl->nvs); 5657 5658 out: 5659 release_firmware(fw); 5660 complete_all(&wl->nvs_loading_complete); 5661 } 5662 5663 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) 5664 { 5665 int ret; 5666 5667 if (!wl->ops || !wl->ptable) 5668 return -EINVAL; 5669 5670 wl->dev = &pdev->dev; 5671 wl->pdev = pdev; 5672 platform_set_drvdata(pdev, wl); 5673 5674 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 5675 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL, 5676 wl, wlcore_nvs_cb); 5677 if (ret < 0) { 5678 wl1271_error("request_firmware_nowait failed: %d", ret); 5679 complete_all(&wl->nvs_loading_complete); 5680 } 5681 5682 return ret; 5683 } 5684 EXPORT_SYMBOL_GPL(wlcore_probe); 5685 5686 int wlcore_remove(struct platform_device *pdev) 5687 { 5688 struct wl1271 *wl = platform_get_drvdata(pdev); 5689 5690 wait_for_completion(&wl->nvs_loading_complete); 5691 if (!wl->initialized) 5692 return 0; 5693 5694 if (wl->irq_wake_enabled) { 5695 device_init_wakeup(wl->dev, 0); 5696 disable_irq_wake(wl->irq); 5697 } 5698 wl1271_unregister_hw(wl); 5699 free_irq(wl->irq, wl); 5700 wlcore_free_hw(wl); 5701 5702 return 0; 5703 } 5704 EXPORT_SYMBOL_GPL(wlcore_remove); 5705 5706 u32 wl12xx_debug_level = DEBUG_NONE; 5707 EXPORT_SYMBOL_GPL(wl12xx_debug_level); 5708 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR); 5709 MODULE_PARM_DESC(debug_level, "wl12xx debugging level"); 5710 5711 module_param_named(fwlog, fwlog_param, charp, 0); 5712 MODULE_PARM_DESC(fwlog, 5713 "FW logger options: continuous, ondemand, dbgpins or disable"); 5714 5715 module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR); 5716 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery"); 5717 5718 module_param(no_recovery, bool, S_IRUSR | S_IWUSR); 5719 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. 
FW will remain stuck."); 5720 5721 MODULE_LICENSE("GPL"); 5722 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 5723 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 5724 MODULE_FIRMWARE(WL12XX_NVS_NAME); 5725