xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/main.c (revision bbde9fc1824aab58bc78c084163007dd6c03fe5b)
1 
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * version 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20  * 02110-1301 USA
21  *
22  */
23 
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/interrupt.h>
29 #include <linux/irq.h>
30 
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "vendor_cmd.h"
41 #include "scan.h"
42 #include "hw_ops.h"
43 #include "sysfs.h"
44 
45 #define WL1271_BOOT_RETRIES 3
46 
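/* Optional module-parameter overrides; -1 (or NULL) means keep the default */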
47 static char *fwlog_param;
48 static int fwlog_mem_blocks = -1;
49 static int bug_on_recovery = -1;
50 static int no_recovery     = -1;
51 
52 static void __wl1271_op_remove_interface(struct wl1271 *wl,
53 					 struct ieee80211_vif *vif,
54 					 bool reset_tx_queues);
55 static void wlcore_op_stop_locked(struct wl1271 *wl);
56 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
57 
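/*
 * Move an associated STA link to the authorized (connected) peer state in
 * the FW. Only valid on station vifs; the STA_STATE_SENT flag ensures the
 * command is sent at most once per association.
 */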
58 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
59 {
60 	int ret;
61 
62 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
63 		return -EINVAL;
64 
65 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
66 		return 0;
67 
68 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
69 		return 0;
70 
71 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
72 	if (ret < 0)
73 		return ret;
74 
75 	wl1271_info("Association completed.");
76 	return 0;
77 }
78 
79 static void wl1271_reg_notify(struct wiphy *wiphy,
80 			      struct regulatory_request *request)
81 {
82 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
83 	struct wl1271 *wl = hw->priv;
84 
85 	/* copy the current dfs region */
86 	if (request)
87 		wl->dfs_region = request->dfs_region;
88 
89 	wlcore_regdomain_config(wl);
90 }
91 
92 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
93 				   bool enable)
94 {
95 	int ret = 0;
96 
97 	/* we should hold wl->mutex */
98 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
99 	if (ret < 0)
100 		goto out;
101 
102 	if (enable)
103 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
104 	else
105 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
106 out:
107 	return ret;
108 }
109 
110 /*
111  * This function is called when the rx_streaming interval
112  * has been changed or rx_streaming should be disabled.
113  */
114 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
115 {
116 	int ret = 0;
117 	int period = wl->conf.rx_streaming.interval;
118 
119 	/* don't reconfigure if rx_streaming is disabled */
120 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
121 		goto out;
122 
123 	/* reconfigure/disable according to new streaming_period */
124 	if (period &&
125 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
126 	    (wl->conf.rx_streaming.always ||
127 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
128 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
129 	else {
130 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
131 		/* don't cancel_work_sync since we might deadlock */
132 		del_timer_sync(&wlvif->rx_streaming_timer);
133 	}
134 out:
135 	return ret;
136 }
137 
138 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
139 {
140 	int ret;
141 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
142 						rx_streaming_enable_work);
143 	struct wl1271 *wl = wlvif->wl;
144 
145 	mutex_lock(&wl->mutex);
146 
147 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
148 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
149 	    (!wl->conf.rx_streaming.always &&
150 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
151 		goto out;
152 
153 	if (!wl->conf.rx_streaming.interval)
154 		goto out;
155 
156 	ret = wl1271_ps_elp_wakeup(wl);
157 	if (ret < 0)
158 		goto out;
159 
160 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
161 	if (ret < 0)
162 		goto out_sleep;
163 
164 	/* stop it after some time of inactivity */
165 	mod_timer(&wlvif->rx_streaming_timer,
166 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
167 
168 out_sleep:
169 	wl1271_ps_elp_sleep(wl);
170 out:
171 	mutex_unlock(&wl->mutex);
172 }
173 
174 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
175 {
176 	int ret;
177 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
178 						rx_streaming_disable_work);
179 	struct wl1271 *wl = wlvif->wl;
180 
181 	mutex_lock(&wl->mutex);
182 
183 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
184 		goto out;
185 
186 	ret = wl1271_ps_elp_wakeup(wl);
187 	if (ret < 0)
188 		goto out;
189 
190 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
191 	if (ret)
192 		goto out_sleep;
193 
194 out_sleep:
195 	wl1271_ps_elp_sleep(wl);
196 out:
197 	mutex_unlock(&wl->mutex);
198 }
199 
200 static void wl1271_rx_streaming_timer(unsigned long data)
201 {
202 	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
203 	struct wl1271 *wl = wlvif->wl;
204 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
205 }
206 
207 /* wl->mutex must be taken */
208 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
209 {
210 	/* if the watchdog is not armed, don't do anything */
211 	if (wl->tx_allocated_blocks == 0)
212 		return;
213 
214 	cancel_delayed_work(&wl->tx_watchdog_work);
215 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
216 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
217 }
218 
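/*
 * Work item scheduled on a rate-control update for the associated station:
 * wake the chip and let the chip-specific code update its rate policy.
 */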
219 static void wlcore_rc_update_work(struct work_struct *work)
220 {
221 	int ret;
222 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
223 						rc_update_work);
224 	struct wl1271 *wl = wlvif->wl;
225 
226 	mutex_lock(&wl->mutex);
227 
228 	if (unlikely(wl->state != WLCORE_STATE_ON))
229 		goto out;
230 
231 	ret = wl1271_ps_elp_wakeup(wl);
232 	if (ret < 0)
233 		goto out;
234 
235 	wlcore_hw_sta_rc_update(wl, wlvif);
236 
237 	wl1271_ps_elp_sleep(wl);
238 out:
239 	mutex_unlock(&wl->mutex);
240 }
241 
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
243 {
244 	struct delayed_work *dwork;
245 	struct wl1271 *wl;
246 
247 	dwork = container_of(work, struct delayed_work, work);
248 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249 
250 	mutex_lock(&wl->mutex);
251 
252 	if (unlikely(wl->state != WLCORE_STATE_ON))
253 		goto out;
254 
255 	/* Tx went out in the meantime - everything is ok */
256 	if (unlikely(wl->tx_allocated_blocks == 0))
257 		goto out;
258 
259 	/*
260 	 * if a ROC is in progress, we might not have any Tx for a long
261 	 * time (e.g. pending Tx on the non-ROC channels)
262 	 */
263 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 			     wl->conf.tx.tx_watchdog_timeout);
266 		wl12xx_rearm_tx_watchdog_locked(wl);
267 		goto out;
268 	}
269 
270 	/*
271 	 * if a scan is in progress, we might not have any Tx for a long
272 	 * time
273 	 */
274 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 			     wl->conf.tx.tx_watchdog_timeout);
277 		wl12xx_rearm_tx_watchdog_locked(wl);
278 		goto out;
279 	}
280 
281 	/*
282 	 * The AP might cache a frame for a long time for a sleeping station,
283 	 * so rearm the timer if there's an AP interface with stations. If
284 	 * Tx is genuinely stuck, we will hopefully discover it when all
285 	 * stations are removed due to inactivity.
286 	 */
287 	if (wl->active_sta_count) {
288 		wl1271_debug(DEBUG_TX,
289 			     "No Tx (in FW) for %d ms. AP has %d stations",
290 			     wl->conf.tx.tx_watchdog_timeout,
291 			     wl->active_sta_count);
292 		wl12xx_rearm_tx_watchdog_locked(wl);
293 		goto out;
294 	}
295 
296 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 		     wl->conf.tx.tx_watchdog_timeout);
298 	wl12xx_queue_recovery_work(wl);
299 
300 out:
301 	mutex_unlock(&wl->mutex);
302 }
303 
304 static void wlcore_adjust_conf(struct wl1271 *wl)
305 {
306 	/* Adjust settings according to optional module parameters */
307 
308 	/* Firmware Logger params */
309 	if (fwlog_mem_blocks != -1) {
310 		if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
311 		    fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
312 			wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
313 		} else {
314 			wl1271_error(
315 				"Illegal fwlog_mem_blocks=%d using default %d",
316 				fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
317 		}
318 	}
319 
320 	if (fwlog_param) {
321 		if (!strcmp(fwlog_param, "continuous")) {
322 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
323 		} else if (!strcmp(fwlog_param, "ondemand")) {
324 			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
325 		} else if (!strcmp(fwlog_param, "dbgpins")) {
326 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
327 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
328 		} else if (!strcmp(fwlog_param, "disable")) {
329 			wl->conf.fwlog.mem_blocks = 0;
330 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
331 		} else {
332 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
333 		}
334 	}
335 
336 	if (bug_on_recovery != -1)
337 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
338 
339 	if (no_recovery != -1)
340 		wl->conf.recovery.no_recovery = (u8) no_recovery;
341 }
342 
343 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
344 					struct wl12xx_vif *wlvif,
345 					u8 hlid, u8 tx_pkts)
346 {
347 	bool fw_ps;
348 
349 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
350 
351 	/*
352 	 * Wake up from high-level PS if the STA is asleep with too few
353 	 * packets in FW or if the STA is awake.
354 	 */
355 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
356 		wl12xx_ps_link_end(wl, wlvif, hlid);
357 
358 	/*
359 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
360 	 * Make an exception if this is the only connected link. In this
361 	 * case FW-memory congestion is less of a problem.
362 	 * Note that a single connected STA means 2*ap_count + 1 active links,
363 	 * since we must account for the global and broadcast AP links
364 	 * for each AP. The "fw_ps" check assures us the other link is a STA
365 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
366 	 */
367 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
368 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
369 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
370 }
371 
372 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
373 					   struct wl12xx_vif *wlvif,
374 					   struct wl_fw_status *status)
375 {
376 	unsigned long cur_fw_ps_map;
377 	u8 hlid;
378 
379 	cur_fw_ps_map = status->link_ps_bitmap;
380 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
381 		wl1271_debug(DEBUG_PSM,
382 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
383 			     wl->ap_fw_ps_map, cur_fw_ps_map,
384 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
385 
386 		wl->ap_fw_ps_map = cur_fw_ps_map;
387 	}
388 
389 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
390 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
391 					    wl->links[hlid].allocated_pkts);
392 }
393 
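/*
 * Read the raw FW status over the bus, convert it to the host layout and
 * update the driver's TX accounting: per-queue and per-link freed-packet
 * counters, total freed blocks, the TX watchdog, AP link PS state and the
 * host-chipset time offset.
 */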
394 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
395 {
396 	struct wl12xx_vif *wlvif;
397 	struct timespec ts;
398 	u32 old_tx_blk_count = wl->tx_blocks_available;
399 	int avail, freed_blocks;
400 	int i;
401 	int ret;
402 	struct wl1271_link *lnk;
403 
404 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
405 				   wl->raw_fw_status,
406 				   wl->fw_status_len, false);
407 	if (ret < 0)
408 		return ret;
409 
410 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
411 
412 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
413 		     "drv_rx_counter = %d, tx_results_counter = %d)",
414 		     status->intr,
415 		     status->fw_rx_counter,
416 		     status->drv_rx_counter,
417 		     status->tx_results_counter);
418 
419 	for (i = 0; i < NUM_TX_QUEUES; i++) {
420 		/* prevent wrap-around in freed-packets counter */
421 		wl->tx_allocated_pkts[i] -=
422 				(status->counters.tx_released_pkts[i] -
423 				wl->tx_pkts_freed[i]) & 0xff;
424 
425 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
426 	}
427 
428 
429 	for_each_set_bit(i, wl->links_map, wl->num_links) {
430 		u8 diff;
431 		lnk = &wl->links[i];
432 
433 		/* prevent wrap-around in freed-packets counter */
434 		diff = (status->counters.tx_lnk_free_pkts[i] -
435 		       lnk->prev_freed_pkts) & 0xff;
436 
437 		if (diff == 0)
438 			continue;
439 
440 		lnk->allocated_pkts -= diff;
441 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
442 
443 		/* accumulate the prev_freed_pkts counter */
444 		lnk->total_freed_pkts += diff;
445 	}
446 
447 	/* prevent wrap-around in total blocks counter */
448 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
449 		freed_blocks = status->total_released_blks -
450 			       wl->tx_blocks_freed;
451 	else
452 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
453 			       status->total_released_blks;
454 
455 	wl->tx_blocks_freed = status->total_released_blks;
456 
457 	wl->tx_allocated_blocks -= freed_blocks;
458 
459 	/*
460 	 * If the FW freed some blocks and we still have allocated blocks,
461 	 * re-arm the timer - Tx is not stuck. Otherwise, cancel the timer
462 	 * (no Tx is currently pending).
463 	 */
464 	if (freed_blocks) {
465 		if (wl->tx_allocated_blocks)
466 			wl12xx_rearm_tx_watchdog_locked(wl);
467 		else
468 			cancel_delayed_work(&wl->tx_watchdog_work);
469 	}
470 
471 	avail = status->tx_total - wl->tx_allocated_blocks;
472 
473 	/*
474 	 * The FW might change the total number of TX memblocks before
475 	 * we get a notification about blocks being released. Thus, the
476 	 * available blocks calculation might yield a temporary result
477 	 * which is lower than the actual available blocks. Keeping in
478 	 * mind that only blocks that were allocated can be moved from
479 	 * TX to RX, tx_blocks_available should never decrease here.
480 	 */
481 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
482 				      avail);
483 
484 	/* if more blocks are available now, tx work can be scheduled */
485 	if (wl->tx_blocks_available > old_tx_blk_count)
486 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
487 
488 	/* for AP vifs, update per-link allocated TX blocks and PS status */
489 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
490 		wl12xx_irq_update_links_status(wl, wlvif, status);
491 	}
492 
493 	/* update the host-chipset time offset */
494 	getnstimeofday(&ts);
495 	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
496 		(s64)(status->fw_localtime);
497 
498 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
499 
500 	return 0;
501 }
502 
503 static void wl1271_flush_deferred_work(struct wl1271 *wl)
504 {
505 	struct sk_buff *skb;
506 
507 	/* Pass all received frames to the network stack */
508 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
509 		ieee80211_rx_ni(wl->hw, skb);
510 
511 	/* Return sent skbs to the network stack */
512 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
513 		ieee80211_tx_status_ni(wl->hw, skb);
514 }
515 
516 static void wl1271_netstack_work(struct work_struct *work)
517 {
518 	struct wl1271 *wl =
519 		container_of(work, struct wl1271, netstack_work);
520 
521 	do {
522 		wl1271_flush_deferred_work(wl);
523 	} while (skb_queue_len(&wl->deferred_rx_queue));
524 }
525 
526 #define WL1271_IRQ_MAX_LOOPS 256
527 
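/*
 * Main interrupt processing, called with wl->mutex held. Loops over the FW
 * status (at most WL1271_IRQ_MAX_LOOPS times, or once for edge-triggered
 * interrupts) and dispatches RX, TX completions and event mailbox handling.
 * A negative return value makes the caller queue a recovery.
 */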
528 static int wlcore_irq_locked(struct wl1271 *wl)
529 {
530 	int ret = 0;
531 	u32 intr;
532 	int loopcount = WL1271_IRQ_MAX_LOOPS;
533 	bool done = false;
534 	unsigned int defer_count;
535 	unsigned long flags;
536 
537 	/*
538 	 * If an edge-triggered interrupt must be used, we cannot iterate
539 	 * more than once without introducing race conditions with the hardirq.
540 	 */
541 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
542 		loopcount = 1;
543 
544 	wl1271_debug(DEBUG_IRQ, "IRQ work");
545 
546 	if (unlikely(wl->state != WLCORE_STATE_ON))
547 		goto out;
548 
549 	ret = wl1271_ps_elp_wakeup(wl);
550 	if (ret < 0)
551 		goto out;
552 
553 	while (!done && loopcount--) {
554 		/*
555 		 * In order to avoid a race with the hardirq, clear the flag
556 		 * before acknowledging the chip. Since the mutex is held,
557 		 * wl1271_ps_elp_wakeup cannot be called concurrently.
558 		 */
559 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
560 		smp_mb__after_atomic();
561 
562 		ret = wlcore_fw_status(wl, wl->fw_status);
563 		if (ret < 0)
564 			goto out;
565 
566 		wlcore_hw_tx_immediate_compl(wl);
567 
568 		intr = wl->fw_status->intr;
569 		intr &= WLCORE_ALL_INTR_MASK;
570 		if (!intr) {
571 			done = true;
572 			continue;
573 		}
574 
575 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
576 			wl1271_error("HW watchdog interrupt received! starting recovery.");
577 			wl->watchdog_recovery = true;
578 			ret = -EIO;
579 
580 			/* restarting the chip. ignore any other interrupt. */
581 			goto out;
582 		}
583 
584 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
585 			wl1271_error("SW watchdog interrupt received! "
586 				     "starting recovery.");
587 			wl->watchdog_recovery = true;
588 			ret = -EIO;
589 
590 			/* restarting the chip. ignore any other interrupt. */
591 			goto out;
592 		}
593 
594 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
595 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
596 
597 			ret = wlcore_rx(wl, wl->fw_status);
598 			if (ret < 0)
599 				goto out;
600 
601 			/* Check if any tx blocks were freed */
602 			spin_lock_irqsave(&wl->wl_lock, flags);
603 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
604 			    wl1271_tx_total_queue_count(wl) > 0) {
605 				spin_unlock_irqrestore(&wl->wl_lock, flags);
606 				/*
607 				 * In order to avoid starvation of the TX path,
608 				 * call the work function directly.
609 				 */
610 				ret = wlcore_tx_work_locked(wl);
611 				if (ret < 0)
612 					goto out;
613 			} else {
614 				spin_unlock_irqrestore(&wl->wl_lock, flags);
615 			}
616 
617 			/* check for tx results */
618 			ret = wlcore_hw_tx_delayed_compl(wl);
619 			if (ret < 0)
620 				goto out;
621 
622 			/* Make sure the deferred queues don't get too long */
623 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
624 				      skb_queue_len(&wl->deferred_rx_queue);
625 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
626 				wl1271_flush_deferred_work(wl);
627 		}
628 
629 		if (intr & WL1271_ACX_INTR_EVENT_A) {
630 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
631 			ret = wl1271_event_handle(wl, 0);
632 			if (ret < 0)
633 				goto out;
634 		}
635 
636 		if (intr & WL1271_ACX_INTR_EVENT_B) {
637 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
638 			ret = wl1271_event_handle(wl, 1);
639 			if (ret < 0)
640 				goto out;
641 		}
642 
643 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
644 			wl1271_debug(DEBUG_IRQ,
645 				     "WL1271_ACX_INTR_INIT_COMPLETE");
646 
647 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
648 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
649 	}
650 
651 	wl1271_ps_elp_sleep(wl);
652 
653 out:
654 	return ret;
655 }
656 
657 static irqreturn_t wlcore_irq(int irq, void *cookie)
658 {
659 	int ret;
660 	unsigned long flags;
661 	struct wl1271 *wl = cookie;
662 
663 	/* signal the ELP wakeup completion, if one is pending */
664 	spin_lock_irqsave(&wl->wl_lock, flags);
665 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
666 	if (wl->elp_compl) {
667 		complete(wl->elp_compl);
668 		wl->elp_compl = NULL;
669 	}
670 
671 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
672 		/* don't enqueue work right now; mark it as pending */
673 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
674 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
675 		disable_irq_nosync(wl->irq);
676 		pm_wakeup_event(wl->dev, 0);
677 		spin_unlock_irqrestore(&wl->wl_lock, flags);
678 		return IRQ_HANDLED;
679 	}
680 	spin_unlock_irqrestore(&wl->wl_lock, flags);
681 
682 	/* TX might be handled here, avoid redundant work */
683 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
684 	cancel_work_sync(&wl->tx_work);
685 
686 	mutex_lock(&wl->mutex);
687 
688 	ret = wlcore_irq_locked(wl);
689 	if (ret)
690 		wl12xx_queue_recovery_work(wl);
691 
692 	spin_lock_irqsave(&wl->wl_lock, flags);
693 	/* In case TX was not handled here, queue TX work */
694 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
695 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
696 	    wl1271_tx_total_queue_count(wl) > 0)
697 		ieee80211_queue_work(wl->hw, &wl->tx_work);
698 	spin_unlock_irqrestore(&wl->wl_lock, flags);
699 
700 	mutex_unlock(&wl->mutex);
701 
702 	return IRQ_HANDLED;
703 }
704 
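/*
 * Helper state for wl12xx_vif_count_iter(): counts the active interfaces
 * and records whether a given vif is among them.
 */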
705 struct vif_counter_data {
706 	u8 counter;
707 
708 	struct ieee80211_vif *cur_vif;
709 	bool cur_vif_running;
710 };
711 
712 static void wl12xx_vif_count_iter(void *data, u8 *mac,
713 				  struct ieee80211_vif *vif)
714 {
715 	struct vif_counter_data *counter = data;
716 
717 	counter->counter++;
718 	if (counter->cur_vif == vif)
719 		counter->cur_vif_running = true;
720 }
721 
722 /* caller must not hold wl->mutex, as it might deadlock */
723 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
724 			       struct ieee80211_vif *cur_vif,
725 			       struct vif_counter_data *data)
726 {
727 	memset(data, 0, sizeof(*data));
728 	data->cur_vif = cur_vif;
729 
730 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
731 					    wl12xx_vif_count_iter, data);
732 }
733 
734 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
735 {
736 	const struct firmware *fw;
737 	const char *fw_name;
738 	enum wl12xx_fw_type fw_type;
739 	int ret;
740 
741 	if (plt) {
742 		fw_type = WL12XX_FW_TYPE_PLT;
743 		fw_name = wl->plt_fw_name;
744 	} else {
745 		/*
746 		 * we can't call wl12xx_get_vif_count() here because
747 		 * wl->mutex is taken, so use the cached last_vif_count value
748 		 */
749 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
750 			fw_type = WL12XX_FW_TYPE_MULTI;
751 			fw_name = wl->mr_fw_name;
752 		} else {
753 			fw_type = WL12XX_FW_TYPE_NORMAL;
754 			fw_name = wl->sr_fw_name;
755 		}
756 	}
757 
758 	if (wl->fw_type == fw_type)
759 		return 0;
760 
761 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
762 
763 	ret = request_firmware(&fw, fw_name, wl->dev);
764 
765 	if (ret < 0) {
766 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
767 		return ret;
768 	}
769 
770 	if (fw->size % 4) {
771 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
772 			     fw->size);
773 		ret = -EILSEQ;
774 		goto out;
775 	}
776 
777 	vfree(wl->fw);
778 	wl->fw_type = WL12XX_FW_TYPE_NONE;
779 	wl->fw_len = fw->size;
780 	wl->fw = vmalloc(wl->fw_len);
781 
782 	if (!wl->fw) {
783 		wl1271_error("could not allocate memory for the firmware");
784 		ret = -ENOMEM;
785 		goto out;
786 	}
787 
788 	memcpy(wl->fw, fw->data, wl->fw_len);
789 	ret = 0;
790 	wl->fw_type = fw_type;
791 out:
792 	release_firmware(fw);
793 
794 	return ret;
795 }
796 
797 void wl12xx_queue_recovery_work(struct wl1271 *wl)
798 {
799 	/* Avoid a recursive recovery */
800 	if (wl->state == WLCORE_STATE_ON) {
801 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
802 				  &wl->flags));
803 
804 		wl->state = WLCORE_STATE_RESTARTING;
805 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
806 		wl1271_ps_elp_wakeup(wl);
807 		wlcore_disable_interrupts_nosync(wl);
808 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
809 	}
810 }
811 
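/*
 * Append a chunk of FW log data to the host log buffer (capped at
 * PAGE_SIZE) and return the number of bytes actually copied.
 */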
812 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
813 {
814 	size_t len;
815 
816 	/* Make sure we have enough room */
817 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
818 
819 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
820 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 	wl->fwlog_size += len;
822 
823 	return len;
824 }
825 
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
827 {
828 	struct wlcore_partition_set part, old_part;
829 	u32 addr;
830 	u32 offset;
831 	u32 end_of_log;
832 	u8 *block;
833 	int ret;
834 
835 	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
836 	    (wl->conf.fwlog.mem_blocks == 0))
837 		return;
838 
839 	wl1271_info("Reading FW panic log");
840 
841 	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
842 	if (!block)
843 		return;
844 
845 	/*
846 	 * Make sure the chip is awake and the logger isn't active.
847 	 * Do not send a stop fwlog command if the fw is hung or if
848 	 * dbgpins are used (due to some fw bug).
849 	 */
850 	if (wl1271_ps_elp_wakeup(wl))
851 		goto out;
852 	if (!wl->watchdog_recovery &&
853 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
854 		wl12xx_cmd_stop_fwlog(wl);
855 
856 	/* Read the first memory block address */
857 	ret = wlcore_fw_status(wl, wl->fw_status);
858 	if (ret < 0)
859 		goto out;
860 
861 	addr = wl->fw_status->log_start_addr;
862 	if (!addr)
863 		goto out;
864 
865 	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
866 		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
867 		end_of_log = wl->fwlog_end;
868 	} else {
869 		offset = sizeof(addr);
870 		end_of_log = addr;
871 	}
872 
873 	old_part = wl->curr_part;
874 	memset(&part, 0, sizeof(part));
875 
876 	/* Traverse the memory blocks linked list */
877 	do {
878 		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
879 		part.mem.size  = PAGE_SIZE;
880 
881 		ret = wlcore_set_partition(wl, &part);
882 		if (ret < 0) {
883 			wl1271_error("%s: set_partition start=0x%X size=%d",
884 				__func__, part.mem.start, part.mem.size);
885 			goto out;
886 		}
887 
888 		memset(block, 0, wl->fw_mem_block_size);
889 		ret = wlcore_read_hwaddr(wl, addr, block,
890 					wl->fw_mem_block_size, false);
891 
892 		if (ret < 0)
893 			goto out;
894 
895 		/*
896 		 * Memory blocks are linked to one another. The first 4 bytes
897 		 * of each memory block hold the hardware address of the next
898 		 * one. In on-demand mode the last memory block points back to
899 		 * the first one; in continuous mode it holds 0x2000000.
900 		 */
901 		addr = le32_to_cpup((__le32 *)block);
902 
903 		if (!wl12xx_copy_fwlog(wl, block + offset,
904 					wl->fw_mem_block_size - offset))
905 			break;
906 	} while (addr && (addr != end_of_log));
907 
908 	wake_up_interruptible(&wl->fwlog_waitq);
909 
910 out:
911 	kfree(block);
912 	wlcore_set_partition(wl, &old_part);
913 }
914 
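/*
 * Preserve the per-link freed-packet counter in the station's private data
 * so the initial TX sequence number stays monotonic across recovery; while
 * a recovery is in progress, pad the counter to cover packets whose
 * completions were lost.
 */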
915 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
916 				   u8 hlid, struct ieee80211_sta *sta)
917 {
918 	struct wl1271_station *wl_sta;
919 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
920 
921 	wl_sta = (void *)sta->drv_priv;
922 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
923 
924 	/*
925 	 * increment the initial seq number on recovery to account for
926 	 * transmitted packets that we haven't yet seen in the FW status
927 	 */
928 	if (wlvif->encryption_type == KEY_GEM)
929 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
930 
931 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
932 		wl_sta->total_freed_pkts += sqn_recovery_padding;
933 }
934 
935 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
936 					struct wl12xx_vif *wlvif,
937 					u8 hlid, const u8 *addr)
938 {
939 	struct ieee80211_sta *sta;
940 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
941 
942 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
943 		    is_zero_ether_addr(addr)))
944 		return;
945 
946 	rcu_read_lock();
947 	sta = ieee80211_find_sta(vif, addr);
948 	if (sta)
949 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
950 	rcu_read_unlock();
951 }
952 
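/*
 * Print basic recovery diagnostics: FW version, the FW program counter and
 * the raw interrupt status, temporarily switching to the BOOT partition
 * for the register reads.
 */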
953 static void wlcore_print_recovery(struct wl1271 *wl)
954 {
955 	u32 pc = 0;
956 	u32 hint_sts = 0;
957 	int ret;
958 
959 	wl1271_info("Hardware recovery in progress. FW ver: %s",
960 		    wl->chip.fw_ver_str);
961 
962 	/* change partitions momentarily so we can read the FW pc */
963 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
964 	if (ret < 0)
965 		return;
966 
967 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
968 	if (ret < 0)
969 		return;
970 
971 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
972 	if (ret < 0)
973 		return;
974 
975 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
976 				pc, hint_sts, ++wl->recovery_count);
977 
978 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
979 }
980 
981 
982 static void wl1271_recovery_work(struct work_struct *work)
983 {
984 	struct wl1271 *wl =
985 		container_of(work, struct wl1271, recovery_work);
986 	struct wl12xx_vif *wlvif;
987 	struct ieee80211_vif *vif;
988 
989 	mutex_lock(&wl->mutex);
990 
991 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
992 		goto out_unlock;
993 
994 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
995 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
996 			wl12xx_read_fwlog_panic(wl);
997 		wlcore_print_recovery(wl);
998 	}
999 
1000 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
1001 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1002 
1003 	if (wl->conf.recovery.no_recovery) {
1004 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1005 		goto out_unlock;
1006 	}
1007 
1008 	/* Prevent spurious TX during FW restart */
1009 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1010 
1011 	/* reboot the chipset */
1012 	while (!list_empty(&wl->wlvif_list)) {
1013 		wlvif = list_first_entry(&wl->wlvif_list,
1014 				       struct wl12xx_vif, list);
1015 		vif = wl12xx_wlvif_to_vif(wlvif);
1016 
1017 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1018 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1019 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1020 						    vif->bss_conf.bssid);
1021 		}
1022 
1023 		__wl1271_op_remove_interface(wl, vif, false);
1024 	}
1025 
1026 	wlcore_op_stop_locked(wl);
1027 
1028 	ieee80211_restart_hw(wl->hw);
1029 
1030 	/*
1031 	 * It's safe to enable TX now - the queues are stopped after a request
1032 	 * to restart the HW.
1033 	 */
1034 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1035 
1036 out_unlock:
1037 	wl->watchdog_recovery = false;
1038 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1039 	mutex_unlock(&wl->mutex);
1040 }
1041 
1042 static int wlcore_fw_wakeup(struct wl1271 *wl)
1043 {
1044 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1045 }
1046 
1047 static int wl1271_setup(struct wl1271 *wl)
1048 {
1049 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1050 	if (!wl->raw_fw_status)
1051 		goto err;
1052 
1053 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1054 	if (!wl->fw_status)
1055 		goto err;
1056 
1057 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1058 	if (!wl->tx_res_if)
1059 		goto err;
1060 
1061 	return 0;
1062 err:
1063 	kfree(wl->fw_status);
1064 	kfree(wl->raw_fw_status);
1065 	return -ENOMEM;
1066 }
1067 
1068 static int wl12xx_set_power_on(struct wl1271 *wl)
1069 {
1070 	int ret;
1071 
1072 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1073 	ret = wl1271_power_on(wl);
1074 	if (ret < 0)
1075 		goto out;
1076 	msleep(WL1271_POWER_ON_SLEEP);
1077 	wl1271_io_reset(wl);
1078 	wl1271_io_init(wl);
1079 
1080 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1081 	if (ret < 0)
1082 		goto fail;
1083 
1084 	/* ELP module wake up */
1085 	ret = wlcore_fw_wakeup(wl);
1086 	if (ret < 0)
1087 		goto fail;
1088 
1089 out:
1090 	return ret;
1091 
1092 fail:
1093 	wl1271_power_off(wl);
1094 	return ret;
1095 }
1096 
1097 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1098 {
1099 	int ret = 0;
1100 
1101 	ret = wl12xx_set_power_on(wl);
1102 	if (ret < 0)
1103 		goto out;
1104 
1105 	/*
1106 	 * For wl127x based devices we could use the default block
1107 	 * size (512 bytes), but due to a bug in the sdio driver, we
1108 	 * need to set it explicitly after the chip is powered on.  To
1109 	 * simplify the code and since the performance impact is
1110 	 * negligible, we use the same block size for all different
1111 	 * chip types.
1112 	 *
1113 	 * Check if the bus supports blocksize alignment and, if it
1114 	 * doesn't, make sure we don't have the quirk.
1115 	 */
1116 	if (!wl1271_set_block_size(wl))
1117 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1118 
1119 	/* TODO: make sure the lower driver has set things up correctly */
1120 
1121 	ret = wl1271_setup(wl);
1122 	if (ret < 0)
1123 		goto out;
1124 
1125 	ret = wl12xx_fetch_firmware(wl, plt);
1126 	if (ret < 0)
1127 		goto out;
1128 
1129 out:
1130 	return ret;
1131 }
1132 
1133 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1134 {
1135 	int retries = WL1271_BOOT_RETRIES;
1136 	struct wiphy *wiphy = wl->hw->wiphy;
1137 
1138 	static const char * const PLT_MODE[] = {
1139 		"PLT_OFF",
1140 		"PLT_ON",
1141 		"PLT_FEM_DETECT",
1142 		"PLT_CHIP_AWAKE"
1143 	};
1144 
1145 	int ret;
1146 
1147 	mutex_lock(&wl->mutex);
1148 
1149 	wl1271_notice("power up");
1150 
1151 	if (wl->state != WLCORE_STATE_OFF) {
1152 		wl1271_error("cannot go into PLT state because not "
1153 			     "in off state: %d", wl->state);
1154 		ret = -EBUSY;
1155 		goto out;
1156 	}
1157 
1158 	/* Indicate to lower levels that we are now in PLT mode */
1159 	wl->plt = true;
1160 	wl->plt_mode = plt_mode;
1161 
1162 	while (retries) {
1163 		retries--;
1164 		ret = wl12xx_chip_wakeup(wl, true);
1165 		if (ret < 0)
1166 			goto power_off;
1167 
1168 		if (plt_mode != PLT_CHIP_AWAKE) {
1169 			ret = wl->ops->plt_init(wl);
1170 			if (ret < 0)
1171 				goto power_off;
1172 		}
1173 
1174 		wl->state = WLCORE_STATE_ON;
1175 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1176 			      PLT_MODE[plt_mode],
1177 			      wl->chip.fw_ver_str);
1178 
1179 		/* update hw/fw version info in wiphy struct */
1180 		wiphy->hw_version = wl->chip.id;
1181 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1182 			sizeof(wiphy->fw_version));
1183 
1184 		goto out;
1185 
1186 power_off:
1187 		wl1271_power_off(wl);
1188 	}
1189 
1190 	wl->plt = false;
1191 	wl->plt_mode = PLT_OFF;
1192 
1193 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1194 		     WL1271_BOOT_RETRIES);
1195 out:
1196 	mutex_unlock(&wl->mutex);
1197 
1198 	return ret;
1199 }
1200 
1201 int wl1271_plt_stop(struct wl1271 *wl)
1202 {
1203 	int ret = 0;
1204 
1205 	wl1271_notice("power down");
1206 
1207 	/*
1208 	 * Interrupts must be disabled before setting the state to OFF.
1209 	 * Otherwise, the interrupt handler might be called and exit without
1210 	 * reading the interrupt status.
1211 	 */
1212 	wlcore_disable_interrupts(wl);
1213 	mutex_lock(&wl->mutex);
1214 	if (!wl->plt) {
1215 		mutex_unlock(&wl->mutex);
1216 
1217 		/*
1218 		 * This will not necessarily enable interrupts as interrupts
1219 		 * may have been disabled when op_stop was called. It will,
1220 		 * however, balance the above call to disable_interrupts().
1221 		 */
1222 		wlcore_enable_interrupts(wl);
1223 
1224 		wl1271_error("cannot power down because not in PLT "
1225 			     "state: %d", wl->state);
1226 		ret = -EBUSY;
1227 		goto out;
1228 	}
1229 
1230 	mutex_unlock(&wl->mutex);
1231 
1232 	wl1271_flush_deferred_work(wl);
1233 	cancel_work_sync(&wl->netstack_work);
1234 	cancel_work_sync(&wl->recovery_work);
1235 	cancel_delayed_work_sync(&wl->elp_work);
1236 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1237 
1238 	mutex_lock(&wl->mutex);
1239 	wl1271_power_off(wl);
1240 	wl->flags = 0;
1241 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1242 	wl->state = WLCORE_STATE_OFF;
1243 	wl->plt = false;
1244 	wl->plt_mode = PLT_OFF;
1245 	wl->rx_counter = 0;
1246 	mutex_unlock(&wl->mutex);
1247 
1248 out:
1249 	return ret;
1250 }
1251 
1252 static void wl1271_op_tx(struct ieee80211_hw *hw,
1253 			 struct ieee80211_tx_control *control,
1254 			 struct sk_buff *skb)
1255 {
1256 	struct wl1271 *wl = hw->priv;
1257 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1258 	struct ieee80211_vif *vif = info->control.vif;
1259 	struct wl12xx_vif *wlvif = NULL;
1260 	unsigned long flags;
1261 	int q, mapping;
1262 	u8 hlid;
1263 
1264 	if (!vif) {
1265 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1266 		ieee80211_free_txskb(hw, skb);
1267 		return;
1268 	}
1269 
1270 	wlvif = wl12xx_vif_to_data(vif);
1271 	mapping = skb_get_queue_mapping(skb);
1272 	q = wl1271_tx_get_queue(mapping);
1273 
1274 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1275 
1276 	spin_lock_irqsave(&wl->wl_lock, flags);
1277 
1278 	/*
1279 	 * drop the packet if the link is invalid or the queue is stopped
1280 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1281 	 * allow these packets through.
1282 	 */
1283 	if (hlid == WL12XX_INVALID_LINK_ID ||
1284 	    (!test_bit(hlid, wlvif->links_map)) ||
1285 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1286 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1287 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1288 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1289 		ieee80211_free_txskb(hw, skb);
1290 		goto out;
1291 	}
1292 
1293 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1294 		     hlid, q, skb->len);
1295 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1296 
1297 	wl->tx_queue_count[q]++;
1298 	wlvif->tx_queue_count[q]++;
1299 
1300 	/*
1301 	 * The workqueue is slow to process the tx_queue, so we need to stop
1302 	 * the queue here, otherwise the queue will get too long.
1303 	 */
1304 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1305 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1306 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1307 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1308 		wlcore_stop_queue_locked(wl, wlvif, q,
1309 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1310 	}
1311 
1312 	/*
1313 	 * The chip specific setup must run before the first TX packet -
1314 	 * before that, the tx_work will not be initialized!
1315 	 */
1316 
1317 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1318 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1319 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1320 
1321 out:
1322 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1323 }
1324 
1325 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1326 {
1327 	unsigned long flags;
1328 	int q;
1329 
1330 	/* no need to queue a new dummy packet if one is already pending */
1331 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1332 		return 0;
1333 
1334 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1335 
1336 	spin_lock_irqsave(&wl->wl_lock, flags);
1337 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1338 	wl->tx_queue_count[q]++;
1339 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1340 
1341 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1342 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1343 		return wlcore_tx_work_locked(wl);
1344 
1345 	/*
1346 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1347 	 * interrupt handler function
1348 	 */
1349 	return 0;
1350 }
1351 
1352 /*
1353  * The size of the dummy packet should be at least 1400 bytes. However, in
1354  * order to minimize the number of bus transactions, aligning it to 512-byte
1355  * boundaries could be beneficial, performance-wise.
1356  */
1357 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1358 
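/*
 * Build the dummy packet: a zero-padded NULL-data frame with the To-DS bit
 * set and a management TID, sent to the FW when it runs low on RX memory
 * blocks.
 */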
1359 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1360 {
1361 	struct sk_buff *skb;
1362 	struct ieee80211_hdr_3addr *hdr;
1363 	unsigned int dummy_packet_size;
1364 
1365 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1366 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1367 
1368 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1369 	if (!skb) {
1370 		wl1271_warning("Failed to allocate a dummy packet skb");
1371 		return NULL;
1372 	}
1373 
1374 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1375 
1376 	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1377 	memset(hdr, 0, sizeof(*hdr));
1378 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1379 					 IEEE80211_STYPE_NULLFUNC |
1380 					 IEEE80211_FCTL_TODS);
1381 
1382 	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1383 
1384 	/* Dummy packets require the TID to be management */
1385 	skb->priority = WL1271_TID_MGMT;
1386 
1387 	/* Initialize all fields that might be used */
1388 	skb_set_queue_mapping(skb, 0);
1389 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1390 
1391 	return skb;
1392 }
1393 
1394 
1395 #ifdef CONFIG_PM
1396 static int
1397 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1398 {
1399 	int num_fields = 0, in_field = 0, fields_size = 0;
1400 	int i, pattern_len = 0;
1401 
1402 	if (!p->mask) {
1403 		wl1271_warning("No mask in WoWLAN pattern");
1404 		return -EINVAL;
1405 	}
1406 
1407 	/*
1408 	 * The pattern is broken up into segments of bytes at different offsets
1409 	 * that need to be checked by the FW filter. Each segment is called
1410 	 * a field in the FW API. We verify that the total number of fields
1411 	 * required for this pattern won't exceed the FW limit (8) and that
1412 	 * the total fields buffer won't exceed the FW size limit either.
1413 	 * Note that a pattern which crosses the Ethernet/IP header boundary
1414 	 * requires an additional field.
1415 	 */
1416 	for (i = 0; i < p->pattern_len; i++) {
1417 		if (test_bit(i, (unsigned long *)p->mask)) {
1418 			if (!in_field) {
1419 				in_field = 1;
1420 				pattern_len = 1;
1421 			} else {
1422 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1423 					num_fields++;
1424 					fields_size += pattern_len +
1425 						RX_FILTER_FIELD_OVERHEAD;
1426 					pattern_len = 1;
1427 				} else
1428 					pattern_len++;
1429 			}
1430 		} else {
1431 			if (in_field) {
1432 				in_field = 0;
1433 				fields_size += pattern_len +
1434 					RX_FILTER_FIELD_OVERHEAD;
1435 				num_fields++;
1436 			}
1437 		}
1438 	}
1439 
1440 	if (in_field) {
1441 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1442 		num_fields++;
1443 	}
1444 
1445 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1446 		wl1271_warning("RX Filter too complex. Too many segments");
1447 		return -EINVAL;
1448 	}
1449 
1450 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1451 		wl1271_warning("RX filter pattern is too big");
1452 		return -E2BIG;
1453 	}
1454 
1455 	return 0;
1456 }
1457 
1458 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1459 {
1460 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1461 }
1462 
1463 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1464 {
1465 	int i;
1466 
1467 	if (filter == NULL)
1468 		return;
1469 
1470 	for (i = 0; i < filter->num_fields; i++)
1471 		kfree(filter->fields[i].pattern);
1472 
1473 	kfree(filter);
1474 }
1475 
1476 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1477 				 u16 offset, u8 flags,
1478 				 const u8 *pattern, u8 len)
1479 {
1480 	struct wl12xx_rx_filter_field *field;
1481 
1482 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1483 		wl1271_warning("Max fields per RX filter. can't alloc another");
1484 		return -EINVAL;
1485 	}
1486 
1487 	field = &filter->fields[filter->num_fields];
1488 
1489 	field->pattern = kzalloc(len, GFP_KERNEL);
1490 	if (!field->pattern) {
1491 		wl1271_warning("Failed to allocate RX filter pattern");
1492 		return -ENOMEM;
1493 	}
1494 
1495 	filter->num_fields++;
1496 
1497 	field->offset = cpu_to_le16(offset);
1498 	field->flags = flags;
1499 	field->len = len;
1500 	memcpy(field->pattern, pattern, len);
1501 
1502 	return 0;
1503 }
1504 
1505 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1506 {
1507 	int i, fields_size = 0;
1508 
1509 	for (i = 0; i < filter->num_fields; i++)
1510 		fields_size += filter->fields[i].len +
1511 			sizeof(struct wl12xx_rx_filter_field) -
1512 			sizeof(u8 *);
1513 
1514 	return fields_size;
1515 }
1516 
1517 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1518 				    u8 *buf)
1519 {
1520 	int i;
1521 	struct wl12xx_rx_filter_field *field;
1522 
1523 	for (i = 0; i < filter->num_fields; i++) {
1524 		field = (struct wl12xx_rx_filter_field *)buf;
1525 
1526 		field->offset = filter->fields[i].offset;
1527 		field->flags = filter->fields[i].flags;
1528 		field->len = filter->fields[i].len;
1529 
1530 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1531 		buf += sizeof(struct wl12xx_rx_filter_field) -
1532 			sizeof(u8 *) + field->len;
1533 	}
1534 }
1535 
1536 /*
1537  * Allocates an RX filter, returned through *f,
1538  * which must be freed with wl1271_rx_filter_free()
1539  */
1540 static int
1541 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1542 					   struct wl12xx_rx_filter **f)
1543 {
1544 	int i, j, ret = 0;
1545 	struct wl12xx_rx_filter *filter;
1546 	u16 offset;
1547 	u8 flags, len;
1548 
1549 	filter = wl1271_rx_filter_alloc();
1550 	if (!filter) {
1551 		wl1271_warning("Failed to alloc rx filter");
1552 		ret = -ENOMEM;
1553 		goto err;
1554 	}
1555 
1556 	i = 0;
1557 	while (i < p->pattern_len) {
1558 		if (!test_bit(i, (unsigned long *)p->mask)) {
1559 			i++;
1560 			continue;
1561 		}
1562 
1563 		for (j = i; j < p->pattern_len; j++) {
1564 			if (!test_bit(j, (unsigned long *)p->mask))
1565 				break;
1566 
1567 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1568 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1569 				break;
1570 		}
1571 
1572 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1573 			offset = i;
1574 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1575 		} else {
1576 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1577 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1578 		}
1579 
1580 		len = j - i;
1581 
1582 		ret = wl1271_rx_filter_alloc_field(filter,
1583 						   offset,
1584 						   flags,
1585 						   &p->pattern[i], len);
1586 		if (ret)
1587 			goto err;
1588 
1589 		i = j;
1590 	}
1591 
1592 	filter->action = FILTER_SIGNAL;
1593 
1594 	*f = filter;
1595 	return 0;
1596 
1597 err:
1598 	wl1271_rx_filter_free(filter);
1599 	*f = NULL;
1600 
1601 	return ret;
1602 }
1603 
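/*
 * Program the FW RX filters from the WoWLAN configuration. With no
 * patterns, clear all filters and keep forwarding everything; otherwise
 * install one filter per pattern and set the default action to drop.
 */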
1604 static int wl1271_configure_wowlan(struct wl1271 *wl,
1605 				   struct cfg80211_wowlan *wow)
1606 {
1607 	int i, ret;
1608 
1609 	if (!wow || wow->any || !wow->n_patterns) {
1610 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1611 							  FILTER_SIGNAL);
1612 		if (ret)
1613 			goto out;
1614 
1615 		ret = wl1271_rx_filter_clear_all(wl);
1616 		if (ret)
1617 			goto out;
1618 
1619 		return 0;
1620 	}
1621 
1622 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1623 		return -EINVAL;
1624 
1625 	/* Validate all incoming patterns before clearing current FW state */
1626 	for (i = 0; i < wow->n_patterns; i++) {
1627 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1628 		if (ret) {
1629 			wl1271_warning("Bad wowlan pattern %d", i);
1630 			return ret;
1631 		}
1632 	}
1633 
1634 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1635 	if (ret)
1636 		goto out;
1637 
1638 	ret = wl1271_rx_filter_clear_all(wl);
1639 	if (ret)
1640 		goto out;
1641 
1642 	/* Translate WoWLAN patterns into filters */
1643 	for (i = 0; i < wow->n_patterns; i++) {
1644 		struct cfg80211_pkt_pattern *p;
1645 		struct wl12xx_rx_filter *filter = NULL;
1646 
1647 		p = &wow->patterns[i];
1648 
1649 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1650 		if (ret) {
1651 			wl1271_warning("Failed to create an RX filter from "
1652 				       "wowlan pattern %d", i);
1653 			goto out;
1654 		}
1655 
1656 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1657 
1658 		wl1271_rx_filter_free(filter);
1659 		if (ret)
1660 			goto out;
1661 	}
1662 
1663 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1664 
1665 out:
1666 	return ret;
1667 }
1668 
1669 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1670 					struct wl12xx_vif *wlvif,
1671 					struct cfg80211_wowlan *wow)
1672 {
1673 	int ret = 0;
1674 
1675 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1676 		goto out;
1677 
1678 	ret = wl1271_configure_wowlan(wl, wow);
1679 	if (ret < 0)
1680 		goto out;
1681 
1682 	if ((wl->conf.conn.suspend_wake_up_event ==
1683 	     wl->conf.conn.wake_up_event) &&
1684 	    (wl->conf.conn.suspend_listen_interval ==
1685 	     wl->conf.conn.listen_interval))
1686 		goto out;
1687 
1688 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1689 				    wl->conf.conn.suspend_wake_up_event,
1690 				    wl->conf.conn.suspend_listen_interval);
1691 
1692 	if (ret < 0)
1693 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1694 out:
1695 	return ret;
1696 
1697 }
1698 
1699 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1700 					struct wl12xx_vif *wlvif,
1701 					struct cfg80211_wowlan *wow)
1702 {
1703 	int ret = 0;
1704 
1705 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1706 		goto out;
1707 
1708 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1709 	if (ret < 0)
1710 		goto out;
1711 
1712 	ret = wl1271_configure_wowlan(wl, wow);
1713 	if (ret < 0)
1714 		goto out;
1715 
1716 out:
1717 	return ret;
1718 
1719 }
1720 
1721 static int wl1271_configure_suspend(struct wl1271 *wl,
1722 				    struct wl12xx_vif *wlvif,
1723 				    struct cfg80211_wowlan *wow)
1724 {
1725 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1726 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1727 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1728 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1729 	return 0;
1730 }
1731 
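/*
 * Undo the suspend-time configuration: clear the WoWLAN filters and
 * restore the normal wake-up conditions (STA) or beacon filtering (AP).
 */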
1732 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1733 {
1734 	int ret = 0;
1735 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1736 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1737 
1738 	if ((!is_ap) && (!is_sta))
1739 		return;
1740 
1741 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1742 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1743 		return;
1744 
1745 	wl1271_configure_wowlan(wl, NULL);
1746 
1747 	if (is_sta) {
1748 		if ((wl->conf.conn.suspend_wake_up_event ==
1749 		     wl->conf.conn.wake_up_event) &&
1750 		    (wl->conf.conn.suspend_listen_interval ==
1751 		     wl->conf.conn.listen_interval))
1752 			return;
1753 
1754 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1755 				    wl->conf.conn.wake_up_event,
1756 				    wl->conf.conn.listen_interval);
1757 
1758 		if (ret < 0)
1759 			wl1271_error("resume: wake up conditions failed: %d",
1760 				     ret);
1761 
1762 	} else if (is_ap) {
1763 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1764 	}
1765 }
1766 
1767 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1768 			    struct cfg80211_wowlan *wow)
1769 {
1770 	struct wl1271 *wl = hw->priv;
1771 	struct wl12xx_vif *wlvif;
1772 	int ret;
1773 
1774 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1775 	WARN_ON(!wow);
1776 
1777 	/* we want to perform the recovery before suspending */
1778 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1779 		wl1271_warning("postponing suspend to perform recovery");
1780 		return -EBUSY;
1781 	}
1782 
1783 	wl1271_tx_flush(wl);
1784 
1785 	mutex_lock(&wl->mutex);
1786 
1787 	ret = wl1271_ps_elp_wakeup(wl);
1788 	if (ret < 0) {
1789 		mutex_unlock(&wl->mutex);
1790 		return ret;
1791 	}
1792 
1793 	wl->wow_enabled = true;
1794 	wl12xx_for_each_wlvif(wl, wlvif) {
1795 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1796 		if (ret < 0) {
1797 			mutex_unlock(&wl->mutex);
1798 			wl1271_warning("couldn't prepare device to suspend");
1799 			return ret;
1800 		}
1801 	}
1802 
1803 	/* disable fast link flow control notifications from FW */
1804 	ret = wlcore_hw_interrupt_notify(wl, false);
1805 	if (ret < 0)
1806 		goto out_sleep;
1807 
1808 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1809 	ret = wlcore_hw_rx_ba_filter(wl,
1810 				     !!wl->conf.conn.suspend_rx_ba_activity);
1811 	if (ret < 0)
1812 		goto out_sleep;
1813 
1814 out_sleep:
1815 	wl1271_ps_elp_sleep(wl);
1816 	mutex_unlock(&wl->mutex);
1817 
1818 	if (ret < 0) {
1819 		wl1271_warning("couldn't prepare device to suspend");
1820 		return ret;
1821 	}
1822 
1823 	/* flush any remaining work */
1824 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1825 
1826 	/*
1827 	 * disable and re-enable interrupts in order to flush
1828 	 * the threaded_irq
1829 	 */
1830 	wlcore_disable_interrupts(wl);
1831 
1832 	/*
1833 	 * set the suspended flag to avoid triggering a new threaded_irq
1834 	 * work item. No need for the spinlock as interrupts are disabled.
1835 	 */
1836 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1837 
1838 	wlcore_enable_interrupts(wl);
1839 	flush_work(&wl->tx_work);
1840 	flush_delayed_work(&wl->elp_work);
1841 
1842 	/*
1843 	 * Cancel the watchdog even if the tx_flush above failed. We will detect
1844 	 * it on resume anyway.
1845 	 */
1846 	cancel_delayed_work(&wl->tx_watchdog_work);
1847 
1848 	return 0;
1849 }
1850 
1851 static int wl1271_op_resume(struct ieee80211_hw *hw)
1852 {
1853 	struct wl1271 *wl = hw->priv;
1854 	struct wl12xx_vif *wlvif;
1855 	unsigned long flags;
1856 	bool run_irq_work = false, pending_recovery;
1857 	int ret;
1858 
1859 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1860 		     wl->wow_enabled);
1861 	WARN_ON(!wl->wow_enabled);
1862 
1863 	/*
1864 	 * re-enable irq_work enqueuing, and call irq_work directly if
1865 	 * there is pending work.
1866 	 */
1867 	spin_lock_irqsave(&wl->wl_lock, flags);
1868 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1869 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1870 		run_irq_work = true;
1871 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1872 
1873 	mutex_lock(&wl->mutex);
1874 
1875 	/* test the recovery flag before calling any SDIO functions */
1876 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1877 				    &wl->flags);
1878 
1879 	if (run_irq_work) {
1880 		wl1271_debug(DEBUG_MAC80211,
1881 			     "run postponed irq_work directly");
1882 
1883 		/* don't talk to the HW if recovery is pending */
1884 		if (!pending_recovery) {
1885 			ret = wlcore_irq_locked(wl);
1886 			if (ret)
1887 				wl12xx_queue_recovery_work(wl);
1888 		}
1889 
1890 		wlcore_enable_interrupts(wl);
1891 	}
1892 
1893 	if (pending_recovery) {
1894 		wl1271_warning("queuing forgotten recovery on resume");
1895 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1896 		goto out_sleep;
1897 	}
1898 
1899 	ret = wl1271_ps_elp_wakeup(wl);
1900 	if (ret < 0)
1901 		goto out;
1902 
1903 	wl12xx_for_each_wlvif(wl, wlvif) {
1904 		wl1271_configure_resume(wl, wlvif);
1905 	}
1906 
1907 	ret = wlcore_hw_interrupt_notify(wl, true);
1908 	if (ret < 0)
1909 		goto out_sleep;
1910 
1911 	/* if RX BA filtering was enabled on suspend, disable it now */
1912 	ret = wlcore_hw_rx_ba_filter(wl, false);
1913 	if (ret < 0)
1914 		goto out_sleep;
1915 
1916 out_sleep:
1917 	wl1271_ps_elp_sleep(wl);
1918 
1919 out:
1920 	wl->wow_enabled = false;
1921 
1922 	/*
1923 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1924 	 * That way we avoid possible conditions where Tx-complete interrupts
1925 	 * fail to arrive and we perform a spurious recovery.
1926 	 */
1927 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1928 	mutex_unlock(&wl->mutex);
1929 
1930 	return 0;
1931 }
1932 #endif
1933 
1934 static int wl1271_op_start(struct ieee80211_hw *hw)
1935 {
1936 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1937 
1938 	/*
1939 	 * We have to delay the booting of the hardware because
1940 	 * we need to know the local MAC address before downloading and
1941 	 * initializing the firmware. The MAC address cannot be changed
1942 	 * after boot, and without the proper MAC address, the firmware
1943 	 * will not function properly.
1944 	 *
1945 	 * The MAC address is first known when the corresponding interface
1946 	 * is added. That is where we will initialize the hardware.
1947 	 */
1948 
1949 	return 0;
1950 }
1951 
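/*
 * Stop the device with wl->mutex held. The mutex is dropped temporarily
 * while interrupts are synchronized and the pending works are cancelled,
 * then re-taken to reset the Tx path, power off and bring the driver
 * state back to its defaults.
 */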
1952 static void wlcore_op_stop_locked(struct wl1271 *wl)
1953 {
1954 	int i;
1955 
1956 	if (wl->state == WLCORE_STATE_OFF) {
1957 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1958 					&wl->flags))
1959 			wlcore_enable_interrupts(wl);
1960 
1961 		return;
1962 	}
1963 
1964 	/*
1965 	 * this must be before the cancel_work calls below, so that the work
1966 	 * functions don't perform further work.
1967 	 */
1968 	wl->state = WLCORE_STATE_OFF;
1969 
1970 	/*
1971 	 * Use the nosync variant to disable interrupts, so the mutex could be
1972 	 * held while doing so without deadlocking.
1973 	 */
1974 	wlcore_disable_interrupts_nosync(wl);
1975 
1976 	mutex_unlock(&wl->mutex);
1977 
1978 	wlcore_synchronize_interrupts(wl);
1979 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1980 		cancel_work_sync(&wl->recovery_work);
1981 	wl1271_flush_deferred_work(wl);
1982 	cancel_delayed_work_sync(&wl->scan_complete_work);
1983 	cancel_work_sync(&wl->netstack_work);
1984 	cancel_work_sync(&wl->tx_work);
1985 	cancel_delayed_work_sync(&wl->elp_work);
1986 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1987 
1988 	/* let's notify MAC80211 about the remaining pending TX frames */
1989 	mutex_lock(&wl->mutex);
1990 	wl12xx_tx_reset(wl);
1991 
1992 	wl1271_power_off(wl);
1993 	/*
1994 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1995 	 * an interrupt storm. Now that the power is down, it is safe to
1996 	 * re-enable interrupts to balance the disable depth
1997 	 */
1998 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1999 		wlcore_enable_interrupts(wl);
2000 
2001 	wl->band = IEEE80211_BAND_2GHZ;
2002 
2003 	wl->rx_counter = 0;
2004 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2005 	wl->channel_type = NL80211_CHAN_NO_HT;
2006 	wl->tx_blocks_available = 0;
2007 	wl->tx_allocated_blocks = 0;
2008 	wl->tx_results_count = 0;
2009 	wl->tx_packets_count = 0;
2010 	wl->time_offset = 0;
2011 	wl->ap_fw_ps_map = 0;
2012 	wl->ap_ps_map = 0;
2013 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
2014 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
2015 	memset(wl->links_map, 0, sizeof(wl->links_map));
2016 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
2017 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
2018 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2019 	wl->active_sta_count = 0;
2020 	wl->active_link_count = 0;
2021 
2022 	/* The system link is always allocated */
2023 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2024 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2025 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2026 
2027 	/*
2028 	 * this is performed after the cancel_work calls and the associated
2029 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2030 	 * get executed before all these vars have been reset.
2031 	 */
2032 	wl->flags = 0;
2033 
2034 	wl->tx_blocks_freed = 0;
2035 
2036 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2037 		wl->tx_pkts_freed[i] = 0;
2038 		wl->tx_allocated_pkts[i] = 0;
2039 	}
2040 
2041 	wl1271_debugfs_reset(wl);
2042 
2043 	kfree(wl->raw_fw_status);
2044 	wl->raw_fw_status = NULL;
2045 	kfree(wl->fw_status);
2046 	wl->fw_status = NULL;
2047 	kfree(wl->tx_res_if);
2048 	wl->tx_res_if = NULL;
2049 	kfree(wl->target_mem_map);
2050 	wl->target_mem_map = NULL;
2051 
2052 	/*
2053 	 * FW channels must be re-calibrated after recovery, so
2054 	 * save the current Reg-Domain channel configuration and clear it.
2055 	 */
2056 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2057 	       sizeof(wl->reg_ch_conf_pending));
2058 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2059 }
2060 
2061 static void wlcore_op_stop(struct ieee80211_hw *hw)
2062 {
2063 	struct wl1271 *wl = hw->priv;
2064 
2065 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2066 
2067 	mutex_lock(&wl->mutex);
2068 
2069 	wlcore_op_stop_locked(wl);
2070 
2071 	mutex_unlock(&wl->mutex);
2072 }
2073 
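/*
 * Delayed work that fires when a started channel switch has not
 * completed: report the failure to mac80211 and tell the fw to stop the
 * channel switch.
 */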
2074 static void wlcore_channel_switch_work(struct work_struct *work)
2075 {
2076 	struct delayed_work *dwork;
2077 	struct wl1271 *wl;
2078 	struct ieee80211_vif *vif;
2079 	struct wl12xx_vif *wlvif;
2080 	int ret;
2081 
2082 	dwork = container_of(work, struct delayed_work, work);
2083 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2084 	wl = wlvif->wl;
2085 
2086 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2087 
2088 	mutex_lock(&wl->mutex);
2089 
2090 	if (unlikely(wl->state != WLCORE_STATE_ON))
2091 		goto out;
2092 
2093 	/* check the channel switch is still ongoing */
2094 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2095 		goto out;
2096 
2097 	vif = wl12xx_wlvif_to_vif(wlvif);
2098 	ieee80211_chswitch_done(vif, false);
2099 
2100 	ret = wl1271_ps_elp_wakeup(wl);
2101 	if (ret < 0)
2102 		goto out;
2103 
2104 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2105 
2106 	wl1271_ps_elp_sleep(wl);
2107 out:
2108 	mutex_unlock(&wl->mutex);
2109 }
2110 
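/*
 * Delayed work that reports a connection loss to mac80211 if the station
 * is still marked as associated when it fires.
 */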
2111 static void wlcore_connection_loss_work(struct work_struct *work)
2112 {
2113 	struct delayed_work *dwork;
2114 	struct wl1271 *wl;
2115 	struct ieee80211_vif *vif;
2116 	struct wl12xx_vif *wlvif;
2117 
2118 	dwork = container_of(work, struct delayed_work, work);
2119 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2120 	wl = wlvif->wl;
2121 
2122 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2123 
2124 	mutex_lock(&wl->mutex);
2125 
2126 	if (unlikely(wl->state != WLCORE_STATE_ON))
2127 		goto out;
2128 
2129 	/* Call mac80211 connection loss */
2130 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2131 		goto out;
2132 
2133 	vif = wl12xx_wlvif_to_vif(wlvif);
2134 	ieee80211_connection_loss(vif);
2135 out:
2136 	mutex_unlock(&wl->mutex);
2137 }
2138 
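/*
 * Delayed work for a pending AP-mode authentication: once the
 * WLCORE_PEND_AUTH_ROC_TIMEOUT has really expired, cancel the ROC for the
 * connecting station.
 */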
2139 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2140 {
2141 	struct delayed_work *dwork;
2142 	struct wl1271 *wl;
2143 	struct wl12xx_vif *wlvif;
2144 	unsigned long time_spare;
2145 	int ret;
2146 
2147 	dwork = container_of(work, struct delayed_work, work);
2148 	wlvif = container_of(dwork, struct wl12xx_vif,
2149 			     pending_auth_complete_work);
2150 	wl = wlvif->wl;
2151 
2152 	mutex_lock(&wl->mutex);
2153 
2154 	if (unlikely(wl->state != WLCORE_STATE_ON))
2155 		goto out;
2156 
2157 	/*
2158 	 * Make sure a second really passed since the last auth reply. Maybe
2159 	 * a second auth reply arrived while we were stuck on the mutex.
2160 	 * Check for a little less than the timeout to protect from scheduler
2161 	 * irregularities.
2162 	 */
2163 	time_spare = jiffies +
2164 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2165 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2166 		goto out;
2167 
2168 	ret = wl1271_ps_elp_wakeup(wl);
2169 	if (ret < 0)
2170 		goto out;
2171 
2172 	/* cancel the ROC if active */
2173 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2174 
2175 	wl1271_ps_elp_sleep(wl);
2176 out:
2177 	mutex_unlock(&wl->mutex);
2178 }
2179 
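/*
 * Rate policy and KLV template indices are handed out from small bitmaps
 * on the wl1271 struct: the allocators below return the first free index
 * (or -EBUSY when the map is full) and the matching free helpers release
 * it again, resetting *idx to the out-of-range "invalid" value.
 */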
2180 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2181 {
2182 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2183 					WL12XX_MAX_RATE_POLICIES);
2184 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2185 		return -EBUSY;
2186 
2187 	__set_bit(policy, wl->rate_policies_map);
2188 	*idx = policy;
2189 	return 0;
2190 }
2191 
2192 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2193 {
2194 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2195 		return;
2196 
2197 	__clear_bit(*idx, wl->rate_policies_map);
2198 	*idx = WL12XX_MAX_RATE_POLICIES;
2199 }
2200 
2201 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2202 {
2203 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2204 					WLCORE_MAX_KLV_TEMPLATES);
2205 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2206 		return -EBUSY;
2207 
2208 	__set_bit(policy, wl->klv_templates_map);
2209 	*idx = policy;
2210 	return 0;
2211 }
2212 
2213 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2214 {
2215 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2216 		return;
2217 
2218 	__clear_bit(*idx, wl->klv_templates_map);
2219 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2220 }
2221 
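/*
 * Map the interface's BSS type (and p2p flag) to the firmware role that
 * should be enabled for it; returns WL12XX_INVALID_ROLE_TYPE for an
 * unknown bss_type.
 */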
2222 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2223 {
2224 	switch (wlvif->bss_type) {
2225 	case BSS_TYPE_AP_BSS:
2226 		if (wlvif->p2p)
2227 			return WL1271_ROLE_P2P_GO;
2228 		else
2229 			return WL1271_ROLE_AP;
2230 
2231 	case BSS_TYPE_STA_BSS:
2232 		if (wlvif->p2p)
2233 			return WL1271_ROLE_P2P_CL;
2234 		else
2235 			return WL1271_ROLE_STA;
2236 
2237 	case BSS_TYPE_IBSS:
2238 		return WL1271_ROLE_IBSS;
2239 
2240 	default:
2241 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2242 	}
2243 	return WL12XX_INVALID_ROLE_TYPE;
2244 }
2245 
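/*
 * Initialize the per-interface driver data: everything but the persistent
 * part is zeroed, the bss_type is derived from the mac80211 interface
 * type, rate policies and a KLV template are allocated, the global band,
 * channel and power settings are copied from wl, and the per-vif works
 * and rx_streaming timer are set up.
 */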
2246 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2247 {
2248 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2249 	int i;
2250 
2251 	/* clear everything but the persistent data */
2252 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2253 
2254 	switch (ieee80211_vif_type_p2p(vif)) {
2255 	case NL80211_IFTYPE_P2P_CLIENT:
2256 		wlvif->p2p = 1;
2257 		/* fall-through */
2258 	case NL80211_IFTYPE_STATION:
2259 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2260 		break;
2261 	case NL80211_IFTYPE_ADHOC:
2262 		wlvif->bss_type = BSS_TYPE_IBSS;
2263 		break;
2264 	case NL80211_IFTYPE_P2P_GO:
2265 		wlvif->p2p = 1;
2266 		/* fall-through */
2267 	case NL80211_IFTYPE_AP:
2268 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2269 		break;
2270 	default:
2271 		wlvif->bss_type = MAX_BSS_TYPE;
2272 		return -EOPNOTSUPP;
2273 	}
2274 
2275 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2276 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2277 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2278 
2279 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2280 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2281 		/* init sta/ibss data */
2282 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2283 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2284 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2285 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2286 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2287 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2288 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2289 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2290 	} else {
2291 		/* init ap data */
2292 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2293 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2294 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2295 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2296 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2297 			wl12xx_allocate_rate_policy(wl,
2298 						&wlvif->ap.ucast_rate_idx[i]);
2299 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2300 		/*
2301 		 * TODO: check if basic_rate shouldn't be
2302 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2303 		 * instead (the same thing for STA above).
2304 		 */
2305 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2306 		/* TODO: this seems to be used only for STA, check it */
2307 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2308 	}
2309 
2310 	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2311 	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2312 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2313 
2314 	/*
2315 	 * mac80211 configures some values globally, while we treat them
2316 	 * per-interface. thus, on init, we have to copy them from wl
2317 	 */
2318 	wlvif->band = wl->band;
2319 	wlvif->channel = wl->channel;
2320 	wlvif->power_level = wl->power_level;
2321 	wlvif->channel_type = wl->channel_type;
2322 
2323 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2324 		  wl1271_rx_streaming_enable_work);
2325 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2326 		  wl1271_rx_streaming_disable_work);
2327 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2328 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2329 			  wlcore_channel_switch_work);
2330 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2331 			  wlcore_connection_loss_work);
2332 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2333 			  wlcore_pending_auth_complete_work);
2334 	INIT_LIST_HEAD(&wlvif->list);
2335 
2336 	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2337 		    (unsigned long) wlvif);
2338 	return 0;
2339 }
2340 
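/*
 * Power the chip up and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times. On success the hw/fw version info is
 * published in the wiphy, 5GHz channels are disabled when 11a is not
 * supported by the NVS, and the driver state moves to WLCORE_STATE_ON.
 */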
2341 static int wl12xx_init_fw(struct wl1271 *wl)
2342 {
2343 	int retries = WL1271_BOOT_RETRIES;
2344 	bool booted = false;
2345 	struct wiphy *wiphy = wl->hw->wiphy;
2346 	int ret;
2347 
2348 	while (retries) {
2349 		retries--;
2350 		ret = wl12xx_chip_wakeup(wl, false);
2351 		if (ret < 0)
2352 			goto power_off;
2353 
2354 		ret = wl->ops->boot(wl);
2355 		if (ret < 0)
2356 			goto power_off;
2357 
2358 		ret = wl1271_hw_init(wl);
2359 		if (ret < 0)
2360 			goto irq_disable;
2361 
2362 		booted = true;
2363 		break;
2364 
2365 irq_disable:
2366 		mutex_unlock(&wl->mutex);
2367 		/* Unlocking the mutex in the middle of handling is
2368 		   inherently unsafe. In this case we deem it safe to do,
2369 		   because we need to let any possibly pending IRQ out of
2370 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2371 		   work function will not do anything.) Also, any other
2372 		   possible concurrent operations will fail due to the
2373 		   current state, hence the wl1271 struct should be safe. */
2374 		wlcore_disable_interrupts(wl);
2375 		wl1271_flush_deferred_work(wl);
2376 		cancel_work_sync(&wl->netstack_work);
2377 		mutex_lock(&wl->mutex);
2378 power_off:
2379 		wl1271_power_off(wl);
2380 	}
2381 
2382 	if (!booted) {
2383 		wl1271_error("firmware boot failed despite %d retries",
2384 			     WL1271_BOOT_RETRIES);
2385 		goto out;
2386 	}
2387 
2388 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2389 
2390 	/* update hw/fw version info in wiphy struct */
2391 	wiphy->hw_version = wl->chip.id;
2392 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2393 		sizeof(wiphy->fw_version));
2394 
2395 	/*
2396 	 * Now we know if 11a is supported (info from the NVS), so disable
2397 	 * 11a channels if not supported
2398 	 */
2399 	if (!wl->enable_11a)
2400 		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2401 
2402 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2403 		     wl->enable_11a ? "" : "not ");
2404 
2405 	wl->state = WLCORE_STATE_ON;
2406 out:
2407 	return ret;
2408 }
2409 
2410 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2411 {
2412 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2413 }
2414 
2415 /*
2416  * Check whether a fw switch (i.e. moving from one loaded
2417  * fw to another) is needed. This function is also responsible
2418  * for updating wl->last_vif_count, so it must be called before
2419  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2420  * will be used).
2421  */
2422 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2423 				  struct vif_counter_data vif_counter_data,
2424 				  bool add)
2425 {
2426 	enum wl12xx_fw_type current_fw = wl->fw_type;
2427 	u8 vif_count = vif_counter_data.counter;
2428 
2429 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2430 		return false;
2431 
2432 	/* increase the vif count if this is a new vif */
2433 	if (add && !vif_counter_data.cur_vif_running)
2434 		vif_count++;
2435 
2436 	wl->last_vif_count = vif_count;
2437 
2438 	/* no need for fw change if the device is OFF */
2439 	if (wl->state == WLCORE_STATE_OFF)
2440 		return false;
2441 
2442 	/* no need for fw change if a single fw is used */
2443 	if (!wl->mr_fw_name)
2444 		return false;
2445 
2446 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2447 		return true;
2448 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2449 		return true;
2450 
2451 	return false;
2452 }
2453 
2454 /*
2455  * Enter "forced psm". Make sure the sta is in psm against the ap,
2456  * so the connection is more likely to survive the fw switch.
2457  */
2458 static void wl12xx_force_active_psm(struct wl1271 *wl)
2459 {
2460 	struct wl12xx_vif *wlvif;
2461 
2462 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2463 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2464 	}
2465 }
2466 
2467 struct wlcore_hw_queue_iter_data {
2468 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2469 	/* current vif */
2470 	struct ieee80211_vif *vif;
2471 	/* is the current vif among those iterated */
2472 	bool cur_running;
2473 };
2474 
2475 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2476 				 struct ieee80211_vif *vif)
2477 {
2478 	struct wlcore_hw_queue_iter_data *iter_data = data;
2479 
2480 	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2481 		return;
2482 
2483 	if (iter_data->cur_running || vif == iter_data->vif) {
2484 		iter_data->cur_running = true;
2485 		return;
2486 	}
2487 
2488 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2489 }
2490 
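/*
 * Allocate a block of NUM_TX_QUEUES mac80211 hw queues for the vif. If
 * the vif is already running (resume/recovery) its previous queue base is
 * reused; otherwise the first free block is taken. AP vifs additionally
 * get one of the reserved per-interface CAB queues.
 */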
2491 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2492 					 struct wl12xx_vif *wlvif)
2493 {
2494 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2495 	struct wlcore_hw_queue_iter_data iter_data = {};
2496 	int i, q_base;
2497 
2498 	iter_data.vif = vif;
2499 
2500 	/* mark all bits taken by active interfaces */
2501 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2502 					IEEE80211_IFACE_ITER_RESUME_ALL,
2503 					wlcore_hw_queue_iter, &iter_data);
2504 
2505 	/* the current vif is already running in mac80211 (resume/recovery) */
2506 	if (iter_data.cur_running) {
2507 		wlvif->hw_queue_base = vif->hw_queue[0];
2508 		wl1271_debug(DEBUG_MAC80211,
2509 			     "using pre-allocated hw queue base %d",
2510 			     wlvif->hw_queue_base);
2511 
2512 		/* the interface might have changed type */
2513 		goto adjust_cab_queue;
2514 	}
2515 
2516 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2517 				     WLCORE_NUM_MAC_ADDRESSES);
2518 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2519 		return -EBUSY;
2520 
2521 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2522 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2523 		     wlvif->hw_queue_base);
2524 
2525 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2526 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2527 		/* register hw queues in mac80211 */
2528 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2529 	}
2530 
2531 adjust_cab_queue:
2532 	/* the last places are reserved for cab queues per interface */
2533 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2534 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2535 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2536 	else
2537 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2538 
2539 	return 0;
2540 }
2541 
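/*
 * mac80211 add_interface callback: set up the per-vif data and hw queues,
 * switch firmware (via an intended recovery) if the single-role/multi-role
 * fw choice changes, boot the fw on the first interface, and enable the
 * corresponding firmware role.
 */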
2542 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2543 				   struct ieee80211_vif *vif)
2544 {
2545 	struct wl1271 *wl = hw->priv;
2546 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2547 	struct vif_counter_data vif_count;
2548 	int ret = 0;
2549 	u8 role_type;
2550 
2551 	if (wl->plt) {
2552 		wl1271_error("Adding Interface not allowed while in PLT mode");
2553 		return -EBUSY;
2554 	}
2555 
2556 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2557 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2558 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2559 
2560 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2561 		     ieee80211_vif_type_p2p(vif), vif->addr);
2562 
2563 	wl12xx_get_vif_count(hw, vif, &vif_count);
2564 
2565 	mutex_lock(&wl->mutex);
2566 	ret = wl1271_ps_elp_wakeup(wl);
2567 	if (ret < 0)
2568 		goto out_unlock;
2569 
2570 	/*
2571 	 * In some rare corner-case HW recovery scenarios it's possible to
2572 	 * get here before __wl1271_op_remove_interface is complete, so
2573 	 * bail out if that is the case.
2574 	 */
2575 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2576 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2577 		ret = -EBUSY;
2578 		goto out;
2579 	}
2580 
2581 
2582 	ret = wl12xx_init_vif_data(wl, vif);
2583 	if (ret < 0)
2584 		goto out;
2585 
2586 	wlvif->wl = wl;
2587 	role_type = wl12xx_get_role_type(wl, wlvif);
2588 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2589 		ret = -EINVAL;
2590 		goto out;
2591 	}
2592 
2593 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2594 	if (ret < 0)
2595 		goto out;
2596 
2597 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2598 		wl12xx_force_active_psm(wl);
2599 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2600 		mutex_unlock(&wl->mutex);
2601 		wl1271_recovery_work(&wl->recovery_work);
2602 		return 0;
2603 	}
2604 
2605 	/*
2606 	 * TODO: once the nvs issue is solved, move this block to start()
2607 	 * and make sure the driver is already ON here.
2608 	 */
2609 	if (wl->state == WLCORE_STATE_OFF) {
2610 		/*
2611 		 * we still need this in order to configure the fw
2612 		 * while uploading the nvs
2613 		 */
2614 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2615 
2616 		ret = wl12xx_init_fw(wl);
2617 		if (ret < 0)
2618 			goto out;
2619 	}
2620 
2621 	ret = wl12xx_cmd_role_enable(wl, vif->addr,
2622 				     role_type, &wlvif->role_id);
2623 	if (ret < 0)
2624 		goto out;
2625 
2626 	ret = wl1271_init_vif_specific(wl, vif);
2627 	if (ret < 0)
2628 		goto out;
2629 
2630 	list_add(&wlvif->list, &wl->wlvif_list);
2631 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2632 
2633 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2634 		wl->ap_count++;
2635 	else
2636 		wl->sta_count++;
2637 out:
2638 	wl1271_ps_elp_sleep(wl);
2639 out_unlock:
2640 	mutex_unlock(&wl->mutex);
2641 
2642 	return ret;
2643 }
2644 
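/*
 * Tear an interface down: abort any scan or ROC owned by it, disable its
 * firmware role (unless a recovery is in progress), reset its Tx queues,
 * free its rate policies and recorded AP keys, and cancel its per-vif
 * works. wl->mutex is dropped while the works are cancelled and re-taken
 * before returning.
 */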
2645 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2646 					 struct ieee80211_vif *vif,
2647 					 bool reset_tx_queues)
2648 {
2649 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2650 	int i, ret;
2651 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2652 
2653 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2654 
2655 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2656 		return;
2657 
2658 	/* because of hardware recovery, we may get here twice */
2659 	if (wl->state == WLCORE_STATE_OFF)
2660 		return;
2661 
2662 	wl1271_info("down");
2663 
2664 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2665 	    wl->scan_wlvif == wlvif) {
2666 		/*
2667 		 * Rearm the tx watchdog just before idling scan. This
2668 		 * prevents just-finished scans from triggering the watchdog
2669 		 */
2670 		wl12xx_rearm_tx_watchdog_locked(wl);
2671 
2672 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2673 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2674 		wl->scan_wlvif = NULL;
2675 		wl->scan.req = NULL;
2676 		ieee80211_scan_completed(wl->hw, true);
2677 	}
2678 
2679 	if (wl->sched_vif == wlvif)
2680 		wl->sched_vif = NULL;
2681 
2682 	if (wl->roc_vif == vif) {
2683 		wl->roc_vif = NULL;
2684 		ieee80211_remain_on_channel_expired(wl->hw);
2685 	}
2686 
2687 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2688 		/* disable active roles */
2689 		ret = wl1271_ps_elp_wakeup(wl);
2690 		if (ret < 0)
2691 			goto deinit;
2692 
2693 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2694 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2695 			if (wl12xx_dev_role_started(wlvif))
2696 				wl12xx_stop_dev(wl, wlvif);
2697 		}
2698 
2699 		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2700 		if (ret < 0)
2701 			goto deinit;
2702 
2703 		wl1271_ps_elp_sleep(wl);
2704 	}
2705 deinit:
2706 	wl12xx_tx_reset_wlvif(wl, wlvif);
2707 
2708 	/* clear all hlids (except system_hlid) */
2709 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2710 
2711 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2712 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2713 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2714 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2715 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2716 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2717 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2718 	} else {
2719 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2720 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2721 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2722 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2723 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2724 			wl12xx_free_rate_policy(wl,
2725 						&wlvif->ap.ucast_rate_idx[i]);
2726 		wl1271_free_ap_keys(wl, wlvif);
2727 	}
2728 
2729 	dev_kfree_skb(wlvif->probereq);
2730 	wlvif->probereq = NULL;
2731 	if (wl->last_wlvif == wlvif)
2732 		wl->last_wlvif = NULL;
2733 	list_del(&wlvif->list);
2734 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2735 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2736 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2737 
2738 	if (is_ap)
2739 		wl->ap_count--;
2740 	else
2741 		wl->sta_count--;
2742 
2743 	/*
2744 	 * Last AP is gone but stations remain: configure sleep auth according
2745 	 * to the STA setting. Don't do this on unintended recovery.
2746 	 */
2747 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2748 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2749 		goto unlock;
2750 
2751 	if (wl->ap_count == 0 && is_ap) {
2752 		/* mask ap events */
2753 		wl->event_mask &= ~wl->ap_event_mask;
2754 		wl1271_event_unmask(wl);
2755 	}
2756 
2757 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2758 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2759 		/* Configure for power according to debugfs */
2760 		if (sta_auth != WL1271_PSM_ILLEGAL)
2761 			wl1271_acx_sleep_auth(wl, sta_auth);
2762 		/* Configure for ELP power saving */
2763 		else
2764 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2765 	}
2766 
2767 unlock:
2768 	mutex_unlock(&wl->mutex);
2769 
2770 	del_timer_sync(&wlvif->rx_streaming_timer);
2771 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2772 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2773 	cancel_work_sync(&wlvif->rc_update_work);
2774 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2775 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2776 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2777 
2778 	mutex_lock(&wl->mutex);
2779 }
2780 
2781 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2782 				       struct ieee80211_vif *vif)
2783 {
2784 	struct wl1271 *wl = hw->priv;
2785 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2786 	struct wl12xx_vif *iter;
2787 	struct vif_counter_data vif_count;
2788 
2789 	wl12xx_get_vif_count(hw, vif, &vif_count);
2790 	mutex_lock(&wl->mutex);
2791 
2792 	if (wl->state == WLCORE_STATE_OFF ||
2793 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2794 		goto out;
2795 
2796 	/*
2797 	 * wl->vif can be null here if someone shuts down the interface
2798 	 * just when hardware recovery has been started.
2799 	 */
2800 	wl12xx_for_each_wlvif(wl, iter) {
2801 		if (iter != wlvif)
2802 			continue;
2803 
2804 		__wl1271_op_remove_interface(wl, vif, true);
2805 		break;
2806 	}
2807 	WARN_ON(iter != wlvif);
2808 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2809 		wl12xx_force_active_psm(wl);
2810 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2811 		wl12xx_queue_recovery_work(wl);
2812 	}
2813 out:
2814 	mutex_unlock(&wl->mutex);
2815 }
2816 
2817 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2818 				      struct ieee80211_vif *vif,
2819 				      enum nl80211_iftype new_type, bool p2p)
2820 {
2821 	struct wl1271 *wl = hw->priv;
2822 	int ret;
2823 
2824 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2825 	wl1271_op_remove_interface(hw, vif);
2826 
2827 	vif->type = new_type;
2828 	vif->p2p = p2p;
2829 	ret = wl1271_op_add_interface(hw, vif);
2830 
2831 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2832 	return ret;
2833 }
2834 
2835 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2836 {
2837 	int ret;
2838 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2839 
2840 	/*
2841 	 * One of the side effects of the JOIN command is that it clears
2842 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2843 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2844 	 * Currently the only valid scenario for JOIN during association
2845 	 * is on roaming, in which case we will also be given new keys.
2846 	 * Keep the below message for now, unless it starts bothering
2847 	 * users who really like to roam a lot :)
2848 	 */
2849 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2850 		wl1271_info("JOIN while associated.");
2851 
2852 	/* clear encryption type */
2853 	wlvif->encryption_type = KEY_NONE;
2854 
2855 	if (is_ibss)
2856 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2857 	else {
2858 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2859 			/*
2860 			 * TODO: this is an ugly workaround for a wl12xx fw
2861 			 * bug - we are not able to tx/rx after the first
2862 			 * start_sta, so make dummy start+stop calls,
2863 			 * and then call start_sta again.
2864 			 * This should be fixed in the fw.
2865 			 */
2866 			wl12xx_cmd_role_start_sta(wl, wlvif);
2867 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2868 		}
2869 
2870 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2871 	}
2872 
2873 	return ret;
2874 }
2875 
2876 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2877 			    int offset)
2878 {
2879 	u8 ssid_len;
2880 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2881 					 skb->len - offset);
2882 
2883 	if (!ptr) {
2884 		wl1271_error("No SSID in IEs!");
2885 		return -ENOENT;
2886 	}
2887 
2888 	ssid_len = ptr[1];
2889 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2890 		wl1271_error("SSID is too long!");
2891 		return -EINVAL;
2892 	}
2893 
2894 	wlvif->ssid_len = ssid_len;
2895 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2896 	return 0;
2897 }
2898 
2899 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2900 {
2901 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2902 	struct sk_buff *skb;
2903 	int ieoffset;
2904 
2905 	/* we currently only support setting the ssid from the ap probe req */
2906 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2907 		return -EINVAL;
2908 
2909 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2910 	if (!skb)
2911 		return -EINVAL;
2912 
2913 	ieoffset = offsetof(struct ieee80211_mgmt,
2914 			    u.probe_req.variable);
2915 	wl1271_ssid_set(wlvif, skb, ieoffset);
2916 	dev_kfree_skb(skb);
2917 
2918 	return 0;
2919 }
2920 
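/*
 * Apply association state for a station interface: record the AID, beacon
 * interval and WMM setting, build the ps-poll and AP probe-request
 * templates, enable connection monitoring and keep-alive, sync the fw
 * power-save mode with mac80211 (ACTIVE) and update the rate policy from
 * the AP's supported rates.
 */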
2921 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2922 			    struct ieee80211_bss_conf *bss_conf,
2923 			    u32 sta_rate_set)
2924 {
2925 	int ieoffset;
2926 	int ret;
2927 
2928 	wlvif->aid = bss_conf->aid;
2929 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2930 	wlvif->beacon_int = bss_conf->beacon_int;
2931 	wlvif->wmm_enabled = bss_conf->qos;
2932 
2933 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2934 
2935 	/*
2936 	 * With wl1271, we don't need to update the
2937 	 * beacon_int and dtim_period, because the firmware
2938 	 * updates them by itself when the first beacon is
2939 	 * received after a join.
2940 	 */
2941 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2942 	if (ret < 0)
2943 		return ret;
2944 
2945 	/*
2946 	 * Get a template for hardware connection maintenance
2947 	 */
2948 	dev_kfree_skb(wlvif->probereq);
2949 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2950 							wlvif,
2951 							NULL);
2952 	ieoffset = offsetof(struct ieee80211_mgmt,
2953 			    u.probe_req.variable);
2954 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2955 
2956 	/* enable the connection monitoring feature */
2957 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2958 	if (ret < 0)
2959 		return ret;
2960 
2961 	/*
2962 	 * The join command disables the keep-alive mode, shuts down its process,
2963 	 * and also clears the template config, so we need to reset it all after
2964 	 * the join. The acx_aid starts the keep-alive process, and the order
2965 	 * of the commands below is relevant.
2966 	 */
2967 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2968 	if (ret < 0)
2969 		return ret;
2970 
2971 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2972 	if (ret < 0)
2973 		return ret;
2974 
2975 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2976 	if (ret < 0)
2977 		return ret;
2978 
2979 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2980 					   wlvif->sta.klv_template_id,
2981 					   ACX_KEEP_ALIVE_TPL_VALID);
2982 	if (ret < 0)
2983 		return ret;
2984 
2985 	/*
2986 	 * The default fw psm configuration is AUTO, while mac80211 default
2987 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2988 	 */
2989 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2990 	if (ret < 0)
2991 		return ret;
2992 
2993 	if (sta_rate_set) {
2994 		wlvif->rate_set =
2995 			wl1271_tx_enabled_rates_get(wl,
2996 						    sta_rate_set,
2997 						    wlvif->band);
2998 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2999 		if (ret < 0)
3000 			return ret;
3001 	}
3002 
3003 	return ret;
3004 }
3005 
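/*
 * Undo the association state: drop the AID and probe-request template,
 * disable connection monitoring, keep-alive and beacon filtering, abort a
 * channel switch that may still be in progress and invalidate the
 * keep-alive template.
 */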
3006 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3007 {
3008 	int ret;
3009 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3010 
3011 	/* make sure we were associated (sta) */
3012 	if (sta &&
3013 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3014 		return false;
3015 
3016 	/* make sure we are joined (ibss) */
3017 	if (!sta &&
3018 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3019 		return false;
3020 
3021 	if (sta) {
3022 		/* use defaults when not associated */
3023 		wlvif->aid = 0;
3024 
3025 		/* free probe-request template */
3026 		dev_kfree_skb(wlvif->probereq);
3027 		wlvif->probereq = NULL;
3028 
3029 		/* disable connection monitor features */
3030 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3031 		if (ret < 0)
3032 			return ret;
3033 
3034 		/* Disable the keep-alive feature */
3035 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3036 		if (ret < 0)
3037 			return ret;
3038 
3039 		/* disable beacon filtering */
3040 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3041 		if (ret < 0)
3042 			return ret;
3043 	}
3044 
3045 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3046 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3047 
3048 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3049 		ieee80211_chswitch_done(vif, false);
3050 		cancel_delayed_work(&wlvif->channel_switch_work);
3051 	}
3052 
3053 	/* invalidate keep-alive template */
3054 	wl1271_acx_keep_alive_config(wl, wlvif,
3055 				     wlvif->sta.klv_template_id,
3056 				     ACX_KEEP_ALIVE_TPL_INVALID);
3057 
3058 	return 0;
3059 }
3060 
3061 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3062 {
3063 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3064 	wlvif->rate_set = wlvif->basic_rate_set;
3065 }
3066 
3067 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3068 				   bool idle)
3069 {
3070 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3071 
3072 	if (idle == cur_idle)
3073 		return;
3074 
3075 	if (idle) {
3076 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3077 	} else {
3078 		/* The current firmware only supports sched_scan in idle */
3079 		if (wl->sched_vif == wlvif)
3080 			wl->ops->sched_scan_stop(wl, wlvif);
3081 
3082 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3083 	}
3084 }
3085 
3086 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3087 			     struct ieee80211_conf *conf, u32 changed)
3088 {
3089 	int ret;
3090 
3091 	if (conf->power_level != wlvif->power_level) {
3092 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3093 		if (ret < 0)
3094 			return ret;
3095 
3096 		wlvif->power_level = conf->power_level;
3097 	}
3098 
3099 	return 0;
3100 }
3101 
3102 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3103 {
3104 	struct wl1271 *wl = hw->priv;
3105 	struct wl12xx_vif *wlvif;
3106 	struct ieee80211_conf *conf = &hw->conf;
3107 	int ret = 0;
3108 
3109 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3110 		     " changed 0x%x",
3111 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3112 		     conf->power_level,
3113 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3114 		     changed);
3115 
3116 	mutex_lock(&wl->mutex);
3117 
3118 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3119 		wl->power_level = conf->power_level;
3120 
3121 	if (unlikely(wl->state != WLCORE_STATE_ON))
3122 		goto out;
3123 
3124 	ret = wl1271_ps_elp_wakeup(wl);
3125 	if (ret < 0)
3126 		goto out;
3127 
3128 	/* configure each interface */
3129 	wl12xx_for_each_wlvif(wl, wlvif) {
3130 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3131 		if (ret < 0)
3132 			goto out_sleep;
3133 	}
3134 
3135 out_sleep:
3136 	wl1271_ps_elp_sleep(wl);
3137 
3138 out:
3139 	mutex_unlock(&wl->mutex);
3140 
3141 	return ret;
3142 }
3143 
3144 struct wl1271_filter_params {
3145 	bool enabled;
3146 	int mc_list_length;
3147 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3148 };
3149 
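/*
 * mac80211 prepare_multicast callback: copy the multicast list into a
 * wl1271_filter_params structure and return it as the u64 cookie that is
 * later consumed and freed by wl1271_op_configure_filter().
 */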
3150 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3151 				       struct netdev_hw_addr_list *mc_list)
3152 {
3153 	struct wl1271_filter_params *fp;
3154 	struct netdev_hw_addr *ha;
3155 
3156 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3157 	if (!fp) {
3158 		wl1271_error("Out of memory setting filters.");
3159 		return 0;
3160 	}
3161 
3162 	/* update multicast filtering parameters */
3163 	fp->mc_list_length = 0;
3164 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3165 		fp->enabled = false;
3166 	} else {
3167 		fp->enabled = true;
3168 		netdev_hw_addr_list_for_each(ha, mc_list) {
3169 			memcpy(fp->mc_list[fp->mc_list_length],
3170 					ha->addr, ETH_ALEN);
3171 			fp->mc_list_length++;
3172 		}
3173 	}
3174 
3175 	return (u64)(unsigned long)fp;
3176 }
3177 
3178 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3179 				  FIF_FCSFAIL | \
3180 				  FIF_BCN_PRBRESP_PROMISC | \
3181 				  FIF_CONTROL | \
3182 				  FIF_OTHER_BSS)
3183 
3184 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3185 				       unsigned int changed,
3186 				       unsigned int *total, u64 multicast)
3187 {
3188 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3189 	struct wl1271 *wl = hw->priv;
3190 	struct wl12xx_vif *wlvif;
3191 
3192 	int ret;
3193 
3194 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3195 		     " total %x", changed, *total);
3196 
3197 	mutex_lock(&wl->mutex);
3198 
3199 	*total &= WL1271_SUPPORTED_FILTERS;
3200 	changed &= WL1271_SUPPORTED_FILTERS;
3201 
3202 	if (unlikely(wl->state != WLCORE_STATE_ON))
3203 		goto out;
3204 
3205 	ret = wl1271_ps_elp_wakeup(wl);
3206 	if (ret < 0)
3207 		goto out;
3208 
3209 	wl12xx_for_each_wlvif(wl, wlvif) {
3210 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3211 			if (*total & FIF_ALLMULTI)
3212 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3213 								   false,
3214 								   NULL, 0);
3215 			else if (fp)
3216 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3217 							fp->enabled,
3218 							fp->mc_list,
3219 							fp->mc_list_length);
3220 			if (ret < 0)
3221 				goto out_sleep;
3222 		}
3223 	}
3224 
3225 	/*
3226 	 * The fw doesn't provide an API to configure the filters. Instead,
3227 	 * the filter configuration is based on the active roles / ROC
3228 	 * state.
3229 	 */
3230 
3231 out_sleep:
3232 	wl1271_ps_elp_sleep(wl);
3233 
3234 out:
3235 	mutex_unlock(&wl->mutex);
3236 	kfree(fp);
3237 }
3238 
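/*
 * AP keys that are set before the AP role has been started cannot be
 * programmed into the fw yet, so record them here; wl1271_ap_init_hwenc()
 * replays them once the AP is up. Duplicate key ids and oversized keys
 * are rejected.
 */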
3239 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3240 				u8 id, u8 key_type, u8 key_size,
3241 				const u8 *key, u8 hlid, u32 tx_seq_32,
3242 				u16 tx_seq_16)
3243 {
3244 	struct wl1271_ap_key *ap_key;
3245 	int i;
3246 
3247 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3248 
3249 	if (key_size > MAX_KEY_SIZE)
3250 		return -EINVAL;
3251 
3252 	/*
3253 	 * Find next free entry in ap_keys. Also check we are not replacing
3254 	 * an existing key.
3255 	 */
3256 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3257 		if (wlvif->ap.recorded_keys[i] == NULL)
3258 			break;
3259 
3260 		if (wlvif->ap.recorded_keys[i]->id == id) {
3261 			wl1271_warning("trying to record key replacement");
3262 			return -EINVAL;
3263 		}
3264 	}
3265 
3266 	if (i == MAX_NUM_KEYS)
3267 		return -EBUSY;
3268 
3269 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3270 	if (!ap_key)
3271 		return -ENOMEM;
3272 
3273 	ap_key->id = id;
3274 	ap_key->key_type = key_type;
3275 	ap_key->key_size = key_size;
3276 	memcpy(ap_key->key, key, key_size);
3277 	ap_key->hlid = hlid;
3278 	ap_key->tx_seq_32 = tx_seq_32;
3279 	ap_key->tx_seq_16 = tx_seq_16;
3280 
3281 	wlvif->ap.recorded_keys[i] = ap_key;
3282 	return 0;
3283 }
3284 
3285 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3286 {
3287 	int i;
3288 
3289 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3290 		kfree(wlvif->ap.recorded_keys[i]);
3291 		wlvif->ap.recorded_keys[i] = NULL;
3292 	}
3293 }
3294 
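/*
 * Push the previously recorded AP keys into the fw, select the default
 * WEP key if any WEP key was among them, and free the recorded entries.
 */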
3295 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3296 {
3297 	int i, ret = 0;
3298 	struct wl1271_ap_key *key;
3299 	bool wep_key_added = false;
3300 
3301 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3302 		u8 hlid;
3303 		if (wlvif->ap.recorded_keys[i] == NULL)
3304 			break;
3305 
3306 		key = wlvif->ap.recorded_keys[i];
3307 		hlid = key->hlid;
3308 		if (hlid == WL12XX_INVALID_LINK_ID)
3309 			hlid = wlvif->ap.bcast_hlid;
3310 
3311 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3312 					    key->id, key->key_type,
3313 					    key->key_size, key->key,
3314 					    hlid, key->tx_seq_32,
3315 					    key->tx_seq_16);
3316 		if (ret < 0)
3317 			goto out;
3318 
3319 		if (key->key_type == KEY_WEP)
3320 			wep_key_added = true;
3321 	}
3322 
3323 	if (wep_key_added) {
3324 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3325 						     wlvif->ap.bcast_hlid);
3326 		if (ret < 0)
3327 			goto out;
3328 	}
3329 
3330 out:
3331 	wl1271_free_ap_keys(wl, wlvif);
3332 	return ret;
3333 }
3334 
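/*
 * Program a key into the fw. For AP interfaces the key goes to the
 * station's HLID (or the broadcast HLID) and is merely recorded if the AP
 * has not been started yet; for station interfaces unicast key removal is
 * silently ignored since the fw clears those keys on the next join.
 */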
3335 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3336 		       u16 action, u8 id, u8 key_type,
3337 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3338 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3339 {
3340 	int ret;
3341 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3342 
3343 	if (is_ap) {
3344 		struct wl1271_station *wl_sta;
3345 		u8 hlid;
3346 
3347 		if (sta) {
3348 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3349 			hlid = wl_sta->hlid;
3350 		} else {
3351 			hlid = wlvif->ap.bcast_hlid;
3352 		}
3353 
3354 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3355 			/*
3356 			 * We do not support removing keys after AP shutdown.
3357 			 * Pretend we do to make mac80211 happy.
3358 			 */
3359 			if (action != KEY_ADD_OR_REPLACE)
3360 				return 0;
3361 
3362 			ret = wl1271_record_ap_key(wl, wlvif, id,
3363 					     key_type, key_size,
3364 					     key, hlid, tx_seq_32,
3365 					     tx_seq_16);
3366 		} else {
3367 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3368 					     id, key_type, key_size,
3369 					     key, hlid, tx_seq_32,
3370 					     tx_seq_16);
3371 		}
3372 
3373 		if (ret < 0)
3374 			return ret;
3375 	} else {
3376 		const u8 *addr;
3377 		static const u8 bcast_addr[ETH_ALEN] = {
3378 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3379 		};
3380 
3381 		addr = sta ? sta->addr : bcast_addr;
3382 
3383 		if (is_zero_ether_addr(addr)) {
3384 			/* We don't support TX-only encryption */
3385 			return -EOPNOTSUPP;
3386 		}
3387 
3388 		/* The wl1271 does not allow removing unicast keys - they
3389 		   will be cleared automatically on the next CMD_JOIN. Ignore
3390 		   the request silently, as we don't want mac80211 to emit
3391 		   an error message. */
3392 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3393 			return 0;
3394 
3395 		/* don't remove key if hlid was already deleted */
3396 		if (action == KEY_REMOVE &&
3397 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3398 			return 0;
3399 
3400 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3401 					     id, key_type, key_size,
3402 					     key, addr, tx_seq_32,
3403 					     tx_seq_16);
3404 		if (ret < 0)
3405 			return ret;
3406 
3407 	}
3408 
3409 	return 0;
3410 }
3411 
3412 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3413 			     struct ieee80211_vif *vif,
3414 			     struct ieee80211_sta *sta,
3415 			     struct ieee80211_key_conf *key_conf)
3416 {
3417 	struct wl1271 *wl = hw->priv;
3418 	int ret;
3419 	bool might_change_spare =
3420 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3421 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3422 
3423 	if (might_change_spare) {
3424 		/*
3425 		 * stop the queues and flush to ensure the next packets are
3426 		 * in sync with FW spare block accounting
3427 		 */
3428 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3429 		wl1271_tx_flush(wl);
3430 	}
3431 
3432 	mutex_lock(&wl->mutex);
3433 
3434 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3435 		ret = -EAGAIN;
3436 		goto out_wake_queues;
3437 	}
3438 
3439 	ret = wl1271_ps_elp_wakeup(wl);
3440 	if (ret < 0)
3441 		goto out_wake_queues;
3442 
3443 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3444 
3445 	wl1271_ps_elp_sleep(wl);
3446 
3447 out_wake_queues:
3448 	if (might_change_spare)
3449 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3450 
3451 	mutex_unlock(&wl->mutex);
3452 
3453 	return ret;
3454 }
3455 
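/*
 * Exported set_key helper: translate the mac80211 cipher to a fw key
 * type, derive the Tx security sequence number from the link's
 * freed-packet counter, add or remove the key, and rebuild the ARP
 * response template when a station's encryption type changes.
 */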
3456 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3457 		   struct ieee80211_vif *vif,
3458 		   struct ieee80211_sta *sta,
3459 		   struct ieee80211_key_conf *key_conf)
3460 {
3461 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3462 	int ret;
3463 	u32 tx_seq_32 = 0;
3464 	u16 tx_seq_16 = 0;
3465 	u8 key_type;
3466 	u8 hlid;
3467 
3468 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3469 
3470 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3471 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3472 		     key_conf->cipher, key_conf->keyidx,
3473 		     key_conf->keylen, key_conf->flags);
3474 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3475 
3476 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3477 		if (sta) {
3478 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3479 			hlid = wl_sta->hlid;
3480 		} else {
3481 			hlid = wlvif->ap.bcast_hlid;
3482 		}
3483 	else
3484 		hlid = wlvif->sta.hlid;
3485 
3486 	if (hlid != WL12XX_INVALID_LINK_ID) {
3487 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3488 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3489 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3490 	}
3491 
3492 	switch (key_conf->cipher) {
3493 	case WLAN_CIPHER_SUITE_WEP40:
3494 	case WLAN_CIPHER_SUITE_WEP104:
3495 		key_type = KEY_WEP;
3496 
3497 		key_conf->hw_key_idx = key_conf->keyidx;
3498 		break;
3499 	case WLAN_CIPHER_SUITE_TKIP:
3500 		key_type = KEY_TKIP;
3501 		key_conf->hw_key_idx = key_conf->keyidx;
3502 		break;
3503 	case WLAN_CIPHER_SUITE_CCMP:
3504 		key_type = KEY_AES;
3505 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3506 		break;
3507 	case WL1271_CIPHER_SUITE_GEM:
3508 		key_type = KEY_GEM;
3509 		break;
3510 	default:
3511 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3512 
3513 		return -EOPNOTSUPP;
3514 	}
3515 
3516 	switch (cmd) {
3517 	case SET_KEY:
3518 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3519 				 key_conf->keyidx, key_type,
3520 				 key_conf->keylen, key_conf->key,
3521 				 tx_seq_32, tx_seq_16, sta);
3522 		if (ret < 0) {
3523 			wl1271_error("Could not add or replace key");
3524 			return ret;
3525 		}
3526 
3527 		/*
3528 		 * Reconfigure the ARP response if the unicast (or common)
3529 		 * encryption key type was changed.
3530 		 */
3531 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3532 		    (sta || key_type == KEY_WEP) &&
3533 		    wlvif->encryption_type != key_type) {
3534 			wlvif->encryption_type = key_type;
3535 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3536 			if (ret < 0) {
3537 				wl1271_warning("build arp rsp failed: %d", ret);
3538 				return ret;
3539 			}
3540 		}
3541 		break;
3542 
3543 	case DISABLE_KEY:
3544 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3545 				     key_conf->keyidx, key_type,
3546 				     key_conf->keylen, key_conf->key,
3547 				     0, 0, sta);
3548 		if (ret < 0) {
3549 			wl1271_error("Could not remove key");
3550 			return ret;
3551 		}
3552 		break;
3553 
3554 	default:
3555 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3556 		return -EOPNOTSUPP;
3557 	}
3558 
3559 	return ret;
3560 }
3561 EXPORT_SYMBOL_GPL(wlcore_set_key);
3562 
3563 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3564 					  struct ieee80211_vif *vif,
3565 					  int key_idx)
3566 {
3567 	struct wl1271 *wl = hw->priv;
3568 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3569 	int ret;
3570 
3571 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3572 		     key_idx);
3573 
3574 	/* we don't handle unsetting of default key */
3575 	if (key_idx == -1)
3576 		return;
3577 
3578 	mutex_lock(&wl->mutex);
3579 
3580 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3581 		ret = -EAGAIN;
3582 		goto out_unlock;
3583 	}
3584 
3585 	ret = wl1271_ps_elp_wakeup(wl);
3586 	if (ret < 0)
3587 		goto out_unlock;
3588 
3589 	wlvif->default_key = key_idx;
3590 
3591 	/* the default WEP key needs to be configured at least once */
3592 	if (wlvif->encryption_type == KEY_WEP) {
3593 		ret = wl12xx_cmd_set_default_wep_key(wl,
3594 				key_idx,
3595 				wlvif->sta.hlid);
3596 		if (ret < 0)
3597 			goto out_sleep;
3598 	}
3599 
3600 out_sleep:
3601 	wl1271_ps_elp_sleep(wl);
3602 
3603 out_unlock:
3604 	mutex_unlock(&wl->mutex);
3605 }
3606 
3607 void wlcore_regdomain_config(struct wl1271 *wl)
3608 {
3609 	int ret;
3610 
3611 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3612 		return;
3613 
3614 	mutex_lock(&wl->mutex);
3615 
3616 	if (unlikely(wl->state != WLCORE_STATE_ON))
3617 		goto out;
3618 
3619 	ret = wl1271_ps_elp_wakeup(wl);
3620 	if (ret < 0)
3621 		goto out;
3622 
3623 	ret = wlcore_cmd_regdomain_config_locked(wl);
3624 	if (ret < 0) {
3625 		wl12xx_queue_recovery_work(wl);
3626 		goto out;
3627 	}
3628 
3629 	wl1271_ps_elp_sleep(wl);
3630 out:
3631 	mutex_unlock(&wl->mutex);
3632 }
3633 
3634 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3635 			     struct ieee80211_vif *vif,
3636 			     struct ieee80211_scan_request *hw_req)
3637 {
3638 	struct cfg80211_scan_request *req = &hw_req->req;
3639 	struct wl1271 *wl = hw->priv;
3640 	int ret;
3641 	u8 *ssid = NULL;
3642 	size_t len = 0;
3643 
3644 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3645 
3646 	if (req->n_ssids) {
3647 		ssid = req->ssids[0].ssid;
3648 		len = req->ssids[0].ssid_len;
3649 	}
3650 
3651 	mutex_lock(&wl->mutex);
3652 
3653 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3654 		/*
3655 		 * We cannot return -EBUSY here because cfg80211 will expect
3656 		 * a call to ieee80211_scan_completed if we do - in this case
3657 		 * there won't be any call.
3658 		 */
3659 		ret = -EAGAIN;
3660 		goto out;
3661 	}
3662 
3663 	ret = wl1271_ps_elp_wakeup(wl);
3664 	if (ret < 0)
3665 		goto out;
3666 
3667 	/* fail if there is any role in ROC */
3668 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3669 		/* don't allow scanning right now */
3670 		ret = -EBUSY;
3671 		goto out_sleep;
3672 	}
3673 
3674 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3675 out_sleep:
3676 	wl1271_ps_elp_sleep(wl);
3677 out:
3678 	mutex_unlock(&wl->mutex);
3679 
3680 	return ret;
3681 }
3682 
3683 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3684 				     struct ieee80211_vif *vif)
3685 {
3686 	struct wl1271 *wl = hw->priv;
3687 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3688 	int ret;
3689 
3690 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3691 
3692 	mutex_lock(&wl->mutex);
3693 
3694 	if (unlikely(wl->state != WLCORE_STATE_ON))
3695 		goto out;
3696 
3697 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3698 		goto out;
3699 
3700 	ret = wl1271_ps_elp_wakeup(wl);
3701 	if (ret < 0)
3702 		goto out;
3703 
3704 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3705 		ret = wl->ops->scan_stop(wl, wlvif);
3706 		if (ret < 0)
3707 			goto out_sleep;
3708 	}
3709 
3710 	/*
3711 	 * Rearm the tx watchdog just before idling scan. This
3712 	 * prevents just-finished scans from triggering the watchdog
3713 	 */
3714 	wl12xx_rearm_tx_watchdog_locked(wl);
3715 
3716 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3717 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3718 	wl->scan_wlvif = NULL;
3719 	wl->scan.req = NULL;
3720 	ieee80211_scan_completed(wl->hw, true);
3721 
3722 out_sleep:
3723 	wl1271_ps_elp_sleep(wl);
3724 out:
3725 	mutex_unlock(&wl->mutex);
3726 
3727 	cancel_delayed_work_sync(&wl->scan_complete_work);
3728 }
3729 
3730 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3731 				      struct ieee80211_vif *vif,
3732 				      struct cfg80211_sched_scan_request *req,
3733 				      struct ieee80211_scan_ies *ies)
3734 {
3735 	struct wl1271 *wl = hw->priv;
3736 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3737 	int ret;
3738 
3739 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3740 
3741 	mutex_lock(&wl->mutex);
3742 
3743 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3744 		ret = -EAGAIN;
3745 		goto out;
3746 	}
3747 
3748 	ret = wl1271_ps_elp_wakeup(wl);
3749 	if (ret < 0)
3750 		goto out;
3751 
3752 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3753 	if (ret < 0)
3754 		goto out_sleep;
3755 
3756 	wl->sched_vif = wlvif;
3757 
3758 out_sleep:
3759 	wl1271_ps_elp_sleep(wl);
3760 out:
3761 	mutex_unlock(&wl->mutex);
3762 	return ret;
3763 }
3764 
3765 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3766 				     struct ieee80211_vif *vif)
3767 {
3768 	struct wl1271 *wl = hw->priv;
3769 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3770 	int ret;
3771 
3772 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3773 
3774 	mutex_lock(&wl->mutex);
3775 
3776 	if (unlikely(wl->state != WLCORE_STATE_ON))
3777 		goto out;
3778 
3779 	ret = wl1271_ps_elp_wakeup(wl);
3780 	if (ret < 0)
3781 		goto out;
3782 
3783 	wl->ops->sched_scan_stop(wl, wlvif);
3784 
3785 	wl1271_ps_elp_sleep(wl);
3786 out:
3787 	mutex_unlock(&wl->mutex);
3788 
3789 	return 0;
3790 }
3791 
3792 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3793 {
3794 	struct wl1271 *wl = hw->priv;
3795 	int ret = 0;
3796 
3797 	mutex_lock(&wl->mutex);
3798 
3799 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3800 		ret = -EAGAIN;
3801 		goto out;
3802 	}
3803 
3804 	ret = wl1271_ps_elp_wakeup(wl);
3805 	if (ret < 0)
3806 		goto out;
3807 
3808 	ret = wl1271_acx_frag_threshold(wl, value);
3809 	if (ret < 0)
3810 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3811 
3812 	wl1271_ps_elp_sleep(wl);
3813 
3814 out:
3815 	mutex_unlock(&wl->mutex);
3816 
3817 	return ret;
3818 }
3819 
3820 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3821 {
3822 	struct wl1271 *wl = hw->priv;
3823 	struct wl12xx_vif *wlvif;
3824 	int ret = 0;
3825 
3826 	mutex_lock(&wl->mutex);
3827 
3828 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3829 		ret = -EAGAIN;
3830 		goto out;
3831 	}
3832 
3833 	ret = wl1271_ps_elp_wakeup(wl);
3834 	if (ret < 0)
3835 		goto out;
3836 
3837 	wl12xx_for_each_wlvif(wl, wlvif) {
3838 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3839 		if (ret < 0)
3840 			wl1271_warning("set rts threshold failed: %d", ret);
3841 	}
3842 	wl1271_ps_elp_sleep(wl);
3843 
3844 out:
3845 	mutex_unlock(&wl->mutex);
3846 
3847 	return ret;
3848 }
3849 
3850 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3851 {
3852 	int len;
3853 	const u8 *next, *end = skb->data + skb->len;
3854 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3855 					skb->len - ieoffset);
3856 	if (!ie)
3857 		return;
3858 	len = ie[1] + 2;
3859 	next = ie + len;
3860 	memmove(ie, next, end - next);
3861 	skb_trim(skb, skb->len - len);
3862 }
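
/*
 * Usage sketch (illustrative, not driver code): wl12xx_remove_ie() strips a
 * single information element in place by moving the frame tail over it and
 * trimming the skb, e.g.
 *	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
 * as done below when a beacon is reused as a probe-response template.
 */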
3863 
3864 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3865 					    unsigned int oui, u8 oui_type,
3866 					    int ieoffset)
3867 {
3868 	int len;
3869 	const u8 *next, *end = skb->data + skb->len;
3870 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3871 					       skb->data + ieoffset,
3872 					       skb->len - ieoffset);
3873 	if (!ie)
3874 		return;
3875 	len = ie[1] + 2;
3876 	next = ie + len;
3877 	memmove(ie, next, end - next);
3878 	skb_trim(skb, skb->len - len);
3879 }
3880 
3881 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3882 					 struct ieee80211_vif *vif)
3883 {
3884 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3885 	struct sk_buff *skb;
3886 	int ret;
3887 
3888 	skb = ieee80211_proberesp_get(wl->hw, vif);
3889 	if (!skb)
3890 		return -EOPNOTSUPP;
3891 
3892 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3893 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3894 				      skb->data,
3895 				      skb->len, 0,
3896 				      rates);
3897 	dev_kfree_skb(skb);
3898 
3899 	if (ret < 0)
3900 		goto out;
3901 
3902 	wl1271_debug(DEBUG_AP, "probe response updated");
3903 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3904 
3905 out:
3906 	return ret;
3907 }
3908 
3909 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3910 					     struct ieee80211_vif *vif,
3911 					     u8 *probe_rsp_data,
3912 					     size_t probe_rsp_len,
3913 					     u32 rates)
3914 {
3915 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3916 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3917 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3918 	int ssid_ie_offset, ie_offset, templ_len;
3919 	const u8 *ptr;
3920 
3921 	/* no need to change probe response if the SSID is set correctly */
3922 	if (wlvif->ssid_len > 0)
3923 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3924 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3925 					       probe_rsp_data,
3926 					       probe_rsp_len, 0,
3927 					       rates);
3928 
3929 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3930 		wl1271_error("probe_rsp template too big");
3931 		return -EINVAL;
3932 	}
3933 
3934 	/* start searching from IE offset */
3935 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3936 
3937 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3938 			       probe_rsp_len - ie_offset);
3939 	if (!ptr) {
3940 		wl1271_error("No SSID in beacon!");
3941 		return -EINVAL;
3942 	}
3943 
3944 	ssid_ie_offset = ptr - probe_rsp_data;
3945 	ptr += (ptr[1] + 2);
3946 
3947 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3948 
3949 	/* insert SSID from bss_conf */
3950 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3951 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3952 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3953 	       bss_conf->ssid, bss_conf->ssid_len);
3954 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3955 
3956 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3957 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3958 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
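	/*
	 * Illustrative layout of the rebuilt template:
	 *   [ frame up to the old SSID IE ][ SSID IE from bss_conf ][ IEs after the old SSID ]
	 */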
3959 
3960 	return wl1271_cmd_template_set(wl, wlvif->role_id,
3961 				       CMD_TEMPL_AP_PROBE_RESPONSE,
3962 				       probe_rsp_templ,
3963 				       templ_len, 0,
3964 				       rates);
3965 }
3966 
3967 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3968 				       struct ieee80211_vif *vif,
3969 				       struct ieee80211_bss_conf *bss_conf,
3970 				       u32 changed)
3971 {
3972 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3973 	int ret = 0;
3974 
3975 	if (changed & BSS_CHANGED_ERP_SLOT) {
3976 		if (bss_conf->use_short_slot)
3977 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3978 		else
3979 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3980 		if (ret < 0) {
3981 			wl1271_warning("Set slot time failed %d", ret);
3982 			goto out;
3983 		}
3984 	}
3985 
3986 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3987 		if (bss_conf->use_short_preamble)
3988 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3989 		else
3990 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3991 	}
3992 
3993 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3994 		if (bss_conf->use_cts_prot)
3995 			ret = wl1271_acx_cts_protect(wl, wlvif,
3996 						     CTSPROTECT_ENABLE);
3997 		else
3998 			ret = wl1271_acx_cts_protect(wl, wlvif,
3999 						     CTSPROTECT_DISABLE);
4000 		if (ret < 0) {
4001 			wl1271_warning("Set ctsprotect failed %d", ret);
4002 			goto out;
4003 		}
4004 	}
4005 
4006 out:
4007 	return ret;
4008 }
4009 
4010 static int wlcore_set_beacon_template(struct wl1271 *wl,
4011 				      struct ieee80211_vif *vif,
4012 				      bool is_ap)
4013 {
4014 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4015 	struct ieee80211_hdr *hdr;
4016 	u32 min_rate;
4017 	int ret;
4018 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4019 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4020 	u16 tmpl_id;
4021 
4022 	if (!beacon) {
4023 		ret = -EINVAL;
4024 		goto out;
4025 	}
4026 
4027 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4028 
4029 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4030 	if (ret < 0) {
4031 		dev_kfree_skb(beacon);
4032 		goto out;
4033 	}
4034 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4035 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4036 		CMD_TEMPL_BEACON;
4037 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4038 				      beacon->data,
4039 				      beacon->len, 0,
4040 				      min_rate);
4041 	if (ret < 0) {
4042 		dev_kfree_skb(beacon);
4043 		goto out;
4044 	}
4045 
4046 	wlvif->wmm_enabled =
4047 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4048 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4049 					beacon->data + ieoffset,
4050 					beacon->len - ieoffset);
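	/* WMM is enabled iff the beacon carries the Microsoft WMM vendor IE */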
4051 
4052 	/*
4053 	 * If user space has already set a probe-response template
4054 	 * explicitly, don't derive one from the beacon data.
4055 	 */
4056 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4057 		goto end_bcn;
4058 
4059 	/* remove TIM ie from probe response */
4060 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4061 
4062 	/*
4063 	 * Remove the P2P IE from the probe response.
4064 	 * The FW responds to probe requests that don't include the
4065 	 * P2P IE; probe requests with a P2P IE are passed up and
4066 	 * answered by the supplicant (the spec forbids including the
4067 	 * P2P IE when responding to probe requests that didn't
4068 	 * include it).
4069 	 */
4070 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4071 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4072 
4073 	hdr = (struct ieee80211_hdr *) beacon->data;
4074 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4075 					 IEEE80211_STYPE_PROBE_RESP);
4076 	if (is_ap)
4077 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4078 							   beacon->data,
4079 							   beacon->len,
4080 							   min_rate);
4081 	else
4082 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4083 					      CMD_TEMPL_PROBE_RESPONSE,
4084 					      beacon->data,
4085 					      beacon->len, 0,
4086 					      min_rate);
4087 end_bcn:
4088 	dev_kfree_skb(beacon);
4089 	if (ret < 0)
4090 		goto out;
4091 
4092 out:
4093 	return ret;
4094 }
4095 
4096 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4097 					  struct ieee80211_vif *vif,
4098 					  struct ieee80211_bss_conf *bss_conf,
4099 					  u32 changed)
4100 {
4101 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4102 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4103 	int ret = 0;
4104 
4105 	if (changed & BSS_CHANGED_BEACON_INT) {
4106 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4107 			bss_conf->beacon_int);
4108 
4109 		wlvif->beacon_int = bss_conf->beacon_int;
4110 	}
4111 
4112 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4113 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4114 
4115 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4116 	}
4117 
4118 	if (changed & BSS_CHANGED_BEACON) {
4119 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4120 		if (ret < 0)
4121 			goto out;
4122 
4123 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4124 				       &wlvif->flags)) {
4125 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4126 			if (ret < 0)
4127 				goto out;
4128 		}
4129 	}
4130 out:
4131 	if (ret != 0)
4132 		wl1271_error("beacon info change failed: %d", ret);
4133 	return ret;
4134 }
4135 
4136 /* AP mode changes */
4137 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4138 				       struct ieee80211_vif *vif,
4139 				       struct ieee80211_bss_conf *bss_conf,
4140 				       u32 changed)
4141 {
4142 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4143 	int ret = 0;
4144 
4145 	if (changed & BSS_CHANGED_BASIC_RATES) {
4146 		u32 rates = bss_conf->basic_rates;
4147 
4148 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4149 								 wlvif->band);
4150 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4151 							wlvif->basic_rate_set);
4152 
4153 		ret = wl1271_init_ap_rates(wl, wlvif);
4154 		if (ret < 0) {
4155 			wl1271_error("AP rate policy change failed %d", ret);
4156 			goto out;
4157 		}
4158 
4159 		ret = wl1271_ap_init_templates(wl, vif);
4160 		if (ret < 0)
4161 			goto out;
4162 
4163 		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4164 		if (ret < 0)
4165 			goto out;
4166 
4167 		ret = wlcore_set_beacon_template(wl, vif, true);
4168 		if (ret < 0)
4169 			goto out;
4170 	}
4171 
4172 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4173 	if (ret < 0)
4174 		goto out;
4175 
4176 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4177 		if (bss_conf->enable_beacon) {
4178 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4179 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4180 				if (ret < 0)
4181 					goto out;
4182 
4183 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4184 				if (ret < 0)
4185 					goto out;
4186 
4187 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4188 				wl1271_debug(DEBUG_AP, "started AP");
4189 			}
4190 		} else {
4191 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4192 				/*
4193 				 * The AP might be in ROC if we have just
4194 				 * sent an auth reply; handle it.
4195 				 */
4196 				if (test_bit(wlvif->role_id, wl->roc_map))
4197 					wl12xx_croc(wl, wlvif->role_id);
4198 
4199 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4200 				if (ret < 0)
4201 					goto out;
4202 
4203 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4204 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4205 					  &wlvif->flags);
4206 				wl1271_debug(DEBUG_AP, "stopped AP");
4207 			}
4208 		}
4209 	}
4210 
4211 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4212 	if (ret < 0)
4213 		goto out;
4214 
4215 	/* Handle HT information change */
4216 	if ((changed & BSS_CHANGED_HT) &&
4217 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4218 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4219 					bss_conf->ht_operation_mode);
4220 		if (ret < 0) {
4221 			wl1271_warning("Set ht information failed %d", ret);
4222 			goto out;
4223 		}
4224 	}
4225 
4226 out:
4227 	return;
4228 }
4229 
4230 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4231 			    struct ieee80211_bss_conf *bss_conf,
4232 			    u32 sta_rate_set)
4233 {
4234 	u32 rates;
4235 	int ret;
4236 
4237 	wl1271_debug(DEBUG_MAC80211,
4238 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4239 	     bss_conf->bssid, bss_conf->aid,
4240 	     bss_conf->beacon_int,
4241 	     bss_conf->basic_rates, sta_rate_set);
4242 
4243 	wlvif->beacon_int = bss_conf->beacon_int;
4244 	rates = bss_conf->basic_rates;
4245 	wlvif->basic_rate_set =
4246 		wl1271_tx_enabled_rates_get(wl, rates,
4247 					    wlvif->band);
4248 	wlvif->basic_rate =
4249 		wl1271_tx_min_rate_get(wl,
4250 				       wlvif->basic_rate_set);
4251 
4252 	if (sta_rate_set)
4253 		wlvif->rate_set =
4254 			wl1271_tx_enabled_rates_get(wl,
4255 						sta_rate_set,
4256 						wlvif->band);
4257 
4258 	/* we only support sched_scan while not connected */
4259 	if (wl->sched_vif == wlvif)
4260 		wl->ops->sched_scan_stop(wl, wlvif);
4261 
4262 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4263 	if (ret < 0)
4264 		return ret;
4265 
4266 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4267 	if (ret < 0)
4268 		return ret;
4269 
4270 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4271 	if (ret < 0)
4272 		return ret;
4273 
4274 	wlcore_set_ssid(wl, wlvif);
4275 
4276 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4277 
4278 	return 0;
4279 }
4280 
4281 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4282 {
4283 	int ret;
4284 
4285 	/* revert back to minimum rates for the current band */
4286 	wl1271_set_band_rate(wl, wlvif);
4287 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4288 
4289 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4290 	if (ret < 0)
4291 		return ret;
4292 
4293 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4294 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4295 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4296 		if (ret < 0)
4297 			return ret;
4298 	}
4299 
4300 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4301 	return 0;
4302 }
4303 /* STA/IBSS mode changes */
4304 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4305 					struct ieee80211_vif *vif,
4306 					struct ieee80211_bss_conf *bss_conf,
4307 					u32 changed)
4308 {
4309 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4310 	bool do_join = false;
4311 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4312 	bool ibss_joined = false;
4313 	u32 sta_rate_set = 0;
4314 	int ret;
4315 	struct ieee80211_sta *sta;
4316 	bool sta_exists = false;
4317 	struct ieee80211_sta_ht_cap sta_ht_cap;
4318 
4319 	if (is_ibss) {
4320 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4321 						     changed);
4322 		if (ret < 0)
4323 			goto out;
4324 	}
4325 
4326 	if (changed & BSS_CHANGED_IBSS) {
4327 		if (bss_conf->ibss_joined) {
4328 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4329 			ibss_joined = true;
4330 		} else {
4331 			wlcore_unset_assoc(wl, wlvif);
4332 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4333 		}
4334 	}
4335 
4336 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4337 		do_join = true;
4338 
4339 	/* Need to update the SSID (for filtering etc) */
4340 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4341 		do_join = true;
4342 
4343 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4344 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4345 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4346 
4347 		do_join = true;
4348 	}
4349 
4350 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4351 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4352 
4353 	if (changed & BSS_CHANGED_CQM) {
4354 		bool enable = false;
4355 		if (bss_conf->cqm_rssi_thold)
4356 			enable = true;
4357 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4358 						  bss_conf->cqm_rssi_thold,
4359 						  bss_conf->cqm_rssi_hyst);
4360 		if (ret < 0)
4361 			goto out;
4362 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4363 	}
4364 
4365 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4366 		       BSS_CHANGED_ASSOC)) {
4367 		rcu_read_lock();
4368 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4369 		if (sta) {
4370 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4371 
4372 			/* save the supp_rates of the ap */
4373 			sta_rate_set = sta->supp_rates[wlvif->band];
4374 			if (sta->ht_cap.ht_supported)
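			/* fold in HT rates: rx_mask[0] covers MCS 0-7, rx_mask[1] MCS 8-15 */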
4375 				sta_rate_set |=
4376 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4377 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4378 			sta_ht_cap = sta->ht_cap;
4379 			sta_exists = true;
4380 		}
4381 
4382 		rcu_read_unlock();
4383 	}
4384 
4385 	if (changed & BSS_CHANGED_BSSID) {
4386 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4387 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4388 					       sta_rate_set);
4389 			if (ret < 0)
4390 				goto out;
4391 
4392 			/* Need to update the BSSID (for filtering etc) */
4393 			do_join = true;
4394 		} else {
4395 			ret = wlcore_clear_bssid(wl, wlvif);
4396 			if (ret < 0)
4397 				goto out;
4398 		}
4399 	}
4400 
4401 	if (changed & BSS_CHANGED_IBSS) {
4402 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4403 			     bss_conf->ibss_joined);
4404 
4405 		if (bss_conf->ibss_joined) {
4406 			u32 rates = bss_conf->basic_rates;
4407 			wlvif->basic_rate_set =
4408 				wl1271_tx_enabled_rates_get(wl, rates,
4409 							    wlvif->band);
4410 			wlvif->basic_rate =
4411 				wl1271_tx_min_rate_get(wl,
4412 						       wlvif->basic_rate_set);
4413 
4414 			/* by default, use 11b + OFDM rates */
4415 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4416 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4417 			if (ret < 0)
4418 				goto out;
4419 		}
4420 	}
4421 
4422 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4423 		/* enable beacon filtering */
4424 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4425 		if (ret < 0)
4426 			goto out;
4427 	}
4428 
4429 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4430 	if (ret < 0)
4431 		goto out;
4432 
4433 	if (do_join) {
4434 		ret = wlcore_join(wl, wlvif);
4435 		if (ret < 0) {
4436 			wl1271_warning("cmd join failed %d", ret);
4437 			goto out;
4438 		}
4439 	}
4440 
4441 	if (changed & BSS_CHANGED_ASSOC) {
4442 		if (bss_conf->assoc) {
4443 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4444 					       sta_rate_set);
4445 			if (ret < 0)
4446 				goto out;
4447 
4448 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4449 				wl12xx_set_authorized(wl, wlvif);
4450 		} else {
4451 			wlcore_unset_assoc(wl, wlvif);
4452 		}
4453 	}
4454 
4455 	if (changed & BSS_CHANGED_PS) {
4456 		if ((bss_conf->ps) &&
4457 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4458 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4459 			int ps_mode;
4460 			char *ps_mode_str;
4461 
4462 			if (wl->conf.conn.forced_ps) {
4463 				ps_mode = STATION_POWER_SAVE_MODE;
4464 				ps_mode_str = "forced";
4465 			} else {
4466 				ps_mode = STATION_AUTO_PS_MODE;
4467 				ps_mode_str = "auto";
4468 			}
4469 
4470 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4471 
4472 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4473 			if (ret < 0)
4474 				wl1271_warning("enter %s ps failed %d",
4475 					       ps_mode_str, ret);
4476 		} else if (!bss_conf->ps &&
4477 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4478 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4479 
4480 			ret = wl1271_ps_set_mode(wl, wlvif,
4481 						 STATION_ACTIVE_MODE);
4482 			if (ret < 0)
4483 				wl1271_warning("exit auto ps failed %d", ret);
4484 		}
4485 	}
4486 
4487 	/* Handle new association with HT. Do this after join. */
4488 	if (sta_exists) {
4489 		bool enabled =
4490 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4491 
4492 		ret = wlcore_hw_set_peer_cap(wl,
4493 					     &sta_ht_cap,
4494 					     enabled,
4495 					     wlvif->rate_set,
4496 					     wlvif->sta.hlid);
4497 		if (ret < 0) {
4498 			wl1271_warning("Set ht cap failed %d", ret);
4499 			goto out;
4501 		}
4502 
4503 		if (enabled) {
4504 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4505 						bss_conf->ht_operation_mode);
4506 			if (ret < 0) {
4507 				wl1271_warning("Set ht information failed %d",
4508 					       ret);
4509 				goto out;
4510 			}
4511 		}
4512 	}
4513 
4514 	/* Handle arp filtering. Done after join. */
4515 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4516 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4517 		__be32 addr = bss_conf->arp_addr_list[0];
4518 		wlvif->sta.qos = bss_conf->qos;
4519 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4520 
4521 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4522 			wlvif->ip_addr = addr;
4523 			/*
4524 			 * The template should only need to be configured upon
4525 			 * association. However, the correct IP isn't always
4526 			 * used when sending, so we have to reconfigure the
4527 			 * template on every IP change.
4528 			 */
4529 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4530 			if (ret < 0) {
4531 				wl1271_warning("build arp rsp failed: %d", ret);
4532 				goto out;
4533 			}
4534 
4535 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4536 				(ACX_ARP_FILTER_ARP_FILTERING |
4537 				 ACX_ARP_FILTER_AUTO_ARP),
4538 				addr);
4539 		} else {
4540 			wlvif->ip_addr = 0;
4541 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4542 		}
4543 
4544 		if (ret < 0)
4545 			goto out;
4546 	}
4547 
4548 out:
4549 	return;
4550 }
4551 
4552 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4553 				       struct ieee80211_vif *vif,
4554 				       struct ieee80211_bss_conf *bss_conf,
4555 				       u32 changed)
4556 {
4557 	struct wl1271 *wl = hw->priv;
4558 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4559 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4560 	int ret;
4561 
4562 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4563 		     wlvif->role_id, (int)changed);
4564 
4565 	/*
4566 	 * make sure to cancel pending disconnections if our association
4567 	 * state changed
4568 	 */
4569 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4570 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4571 
4572 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4573 	    !bss_conf->enable_beacon)
4574 		wl1271_tx_flush(wl);
4575 
4576 	mutex_lock(&wl->mutex);
4577 
4578 	if (unlikely(wl->state != WLCORE_STATE_ON))
4579 		goto out;
4580 
4581 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4582 		goto out;
4583 
4584 	ret = wl1271_ps_elp_wakeup(wl);
4585 	if (ret < 0)
4586 		goto out;
4587 
4588 	if ((changed & BSS_CHANGED_TXPOWER) &&
4589 	    bss_conf->txpower != wlvif->power_level) {
4590 
4591 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4592 		if (ret < 0)
4593 			goto out;
4594 
4595 		wlvif->power_level = bss_conf->txpower;
4596 	}
4597 
4598 	if (is_ap)
4599 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4600 	else
4601 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4602 
4603 	wl1271_ps_elp_sleep(wl);
4604 
4605 out:
4606 	mutex_unlock(&wl->mutex);
4607 }
4608 
4609 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4610 				 struct ieee80211_chanctx_conf *ctx)
4611 {
4612 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4613 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4614 		     cfg80211_get_chandef_type(&ctx->def));
4615 	return 0;
4616 }
4617 
4618 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4619 				     struct ieee80211_chanctx_conf *ctx)
4620 {
4621 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4622 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4623 		     cfg80211_get_chandef_type(&ctx->def));
4624 }
4625 
4626 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4627 				     struct ieee80211_chanctx_conf *ctx,
4628 				     u32 changed)
4629 {
4630 	struct wl1271 *wl = hw->priv;
4631 	struct wl12xx_vif *wlvif;
4632 	int ret;
4633 	int channel = ieee80211_frequency_to_channel(
4634 		ctx->def.chan->center_freq);
4635 
4636 	wl1271_debug(DEBUG_MAC80211,
4637 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4638 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4639 
4640 	mutex_lock(&wl->mutex);
4641 
4642 	ret = wl1271_ps_elp_wakeup(wl);
4643 	if (ret < 0)
4644 		goto out;
4645 
4646 	wl12xx_for_each_wlvif(wl, wlvif) {
4647 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4648 
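		/* skip vifs that are not bound to this channel context */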
4649 		rcu_read_lock();
4650 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4651 			rcu_read_unlock();
4652 			continue;
4653 		}
4654 		rcu_read_unlock();
4655 
4656 		/* start radar if needed */
4657 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4658 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4659 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4660 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4661 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4662 			wlcore_hw_set_cac(wl, wlvif, true);
4663 			wlvif->radar_enabled = true;
4664 		}
4665 	}
4666 
4667 	wl1271_ps_elp_sleep(wl);
4668 out:
4669 	mutex_unlock(&wl->mutex);
4670 }
4671 
4672 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4673 					struct ieee80211_vif *vif,
4674 					struct ieee80211_chanctx_conf *ctx)
4675 {
4676 	struct wl1271 *wl = hw->priv;
4677 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4678 	int channel = ieee80211_frequency_to_channel(
4679 		ctx->def.chan->center_freq);
4680 	int ret = -EINVAL;
4681 
4682 	wl1271_debug(DEBUG_MAC80211,
4683 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4684 		     wlvif->role_id, channel,
4685 		     cfg80211_get_chandef_type(&ctx->def),
4686 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4687 
4688 	mutex_lock(&wl->mutex);
4689 
4690 	if (unlikely(wl->state != WLCORE_STATE_ON))
4691 		goto out;
4692 
4693 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4694 		goto out;
4695 
4696 	ret = wl1271_ps_elp_wakeup(wl);
4697 	if (ret < 0)
4698 		goto out;
4699 
4700 	wlvif->band = ctx->def.chan->band;
4701 	wlvif->channel = channel;
4702 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4703 
4704 	/* update default rates according to the band */
4705 	wl1271_set_band_rate(wl, wlvif);
4706 
4707 	if (ctx->radar_enabled &&
4708 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4709 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4710 		wlcore_hw_set_cac(wl, wlvif, true);
4711 		wlvif->radar_enabled = true;
4712 	}
4713 
4714 	wl1271_ps_elp_sleep(wl);
4715 out:
4716 	mutex_unlock(&wl->mutex);
4717 
4718 	return 0;
4719 }
4720 
4721 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4722 					   struct ieee80211_vif *vif,
4723 					   struct ieee80211_chanctx_conf *ctx)
4724 {
4725 	struct wl1271 *wl = hw->priv;
4726 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4727 	int ret;
4728 
4729 	wl1271_debug(DEBUG_MAC80211,
4730 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4731 		     wlvif->role_id,
4732 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4733 		     cfg80211_get_chandef_type(&ctx->def));
4734 
4735 	wl1271_tx_flush(wl);
4736 
4737 	mutex_lock(&wl->mutex);
4738 
4739 	if (unlikely(wl->state != WLCORE_STATE_ON))
4740 		goto out;
4741 
4742 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4743 		goto out;
4744 
4745 	ret = wl1271_ps_elp_wakeup(wl);
4746 	if (ret < 0)
4747 		goto out;
4748 
4749 	if (wlvif->radar_enabled) {
4750 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4751 		wlcore_hw_set_cac(wl, wlvif, false);
4752 		wlvif->radar_enabled = false;
4753 	}
4754 
4755 	wl1271_ps_elp_sleep(wl);
4756 out:
4757 	mutex_unlock(&wl->mutex);
4758 }
4759 
4760 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4761 				    struct wl12xx_vif *wlvif,
4762 				    struct ieee80211_chanctx_conf *new_ctx)
4763 {
4764 	int channel = ieee80211_frequency_to_channel(
4765 		new_ctx->def.chan->center_freq);
4766 
4767 	wl1271_debug(DEBUG_MAC80211,
4768 		     "switch vif (role %d) %d -> %d chan_type: %d",
4769 		     wlvif->role_id, wlvif->channel, channel,
4770 		     cfg80211_get_chandef_type(&new_ctx->def));
4771 
4772 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4773 		return 0;
4774 
4775 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4776 
4777 	if (wlvif->radar_enabled) {
4778 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4779 		wlcore_hw_set_cac(wl, wlvif, false);
4780 		wlvif->radar_enabled = false;
4781 	}
4782 
4783 	wlvif->band = new_ctx->def.chan->band;
4784 	wlvif->channel = channel;
4785 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4786 
4787 	/* start radar if needed */
4788 	if (new_ctx->radar_enabled) {
4789 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4790 		wlcore_hw_set_cac(wl, wlvif, true);
4791 		wlvif->radar_enabled = true;
4792 	}
4793 
4794 	return 0;
4795 }
4796 
4797 static int
4798 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4799 			     struct ieee80211_vif_chanctx_switch *vifs,
4800 			     int n_vifs,
4801 			     enum ieee80211_chanctx_switch_mode mode)
4802 {
4803 	struct wl1271 *wl = hw->priv;
4804 	int i, ret;
4805 
4806 	wl1271_debug(DEBUG_MAC80211,
4807 		     "mac80211 switch chanctx n_vifs %d mode %d",
4808 		     n_vifs, mode);
4809 
4810 	mutex_lock(&wl->mutex);
4811 
4812 	ret = wl1271_ps_elp_wakeup(wl);
4813 	if (ret < 0)
4814 		goto out;
4815 
4816 	for (i = 0; i < n_vifs; i++) {
4817 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4818 
4819 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4820 		if (ret)
4821 			goto out_sleep;
4822 	}
4823 out_sleep:
4824 	wl1271_ps_elp_sleep(wl);
4825 out:
4826 	mutex_unlock(&wl->mutex);
4827 
4828 	return 0;
4829 }
4830 
4831 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4832 			     struct ieee80211_vif *vif, u16 queue,
4833 			     const struct ieee80211_tx_queue_params *params)
4834 {
4835 	struct wl1271 *wl = hw->priv;
4836 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4837 	u8 ps_scheme;
4838 	int ret = 0;
4839 
4840 	mutex_lock(&wl->mutex);
4841 
4842 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4843 
4844 	if (params->uapsd)
4845 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4846 	else
4847 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4848 
4849 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4850 		goto out;
4851 
4852 	ret = wl1271_ps_elp_wakeup(wl);
4853 	if (ret < 0)
4854 		goto out;
4855 
4856 	/*
4857 	 * mac80211 configures the txop in units of 32us;
4858 	 * the firmware expects it in microseconds.
4859 	 */
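	/* e.g. a txop of 47 units becomes 47 << 5 == 1504us */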
4860 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4861 				params->cw_min, params->cw_max,
4862 				params->aifs, params->txop << 5);
4863 	if (ret < 0)
4864 		goto out_sleep;
4865 
4866 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4867 				 CONF_CHANNEL_TYPE_EDCF,
4868 				 wl1271_tx_get_queue(queue),
4869 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4870 				 0, 0);
4871 
4872 out_sleep:
4873 	wl1271_ps_elp_sleep(wl);
4874 
4875 out:
4876 	mutex_unlock(&wl->mutex);
4877 
4878 	return ret;
4879 }
4880 
4881 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4882 			     struct ieee80211_vif *vif)
4883 {
4885 	struct wl1271 *wl = hw->priv;
4886 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4887 	u64 mactime = ULLONG_MAX;
4888 	int ret;
4889 
4890 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4891 
4892 	mutex_lock(&wl->mutex);
4893 
4894 	if (unlikely(wl->state != WLCORE_STATE_ON))
4895 		goto out;
4896 
4897 	ret = wl1271_ps_elp_wakeup(wl);
4898 	if (ret < 0)
4899 		goto out;
4900 
4901 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4902 	if (ret < 0)
4903 		goto out_sleep;
4904 
4905 out_sleep:
4906 	wl1271_ps_elp_sleep(wl);
4907 
4908 out:
4909 	mutex_unlock(&wl->mutex);
4910 	return mactime;
4911 }
4912 
4913 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4914 				struct survey_info *survey)
4915 {
4916 	struct ieee80211_conf *conf = &hw->conf;
4917 
4918 	if (idx != 0)
4919 		return -ENOENT;
4920 
4921 	survey->channel = conf->chandef.chan;
4922 	survey->filled = 0;
4923 	return 0;
4924 }
4925 
4926 static int wl1271_allocate_sta(struct wl1271 *wl,
4927 			     struct wl12xx_vif *wlvif,
4928 			     struct ieee80211_sta *sta)
4929 {
4930 	struct wl1271_station *wl_sta;
4931 	int ret;
4932 
4934 	if (wl->active_sta_count >= wl->max_ap_stations) {
4935 		wl1271_warning("could not allocate HLID - too many stations");
4936 		return -EBUSY;
4937 	}
4938 
4939 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4940 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4941 	if (ret < 0) {
4942 		wl1271_warning("could not allocate HLID - too many links");
4943 		return -EBUSY;
4944 	}
4945 
4946 	/* use the previous security seq, if this is a recovery/resume */
4947 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4948 
4949 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4950 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4951 	wl->active_sta_count++;
4952 	return 0;
4953 }
4954 
4955 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4956 {
4957 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4958 		return;
4959 
4960 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4961 	__clear_bit(hlid, &wl->ap_ps_map);
4962 	__clear_bit(hlid, &wl->ap_fw_ps_map);
4963 
4964 	/*
4965 	 * save the last used PN in the private part of ieee80211_sta,
4966 	 * in case of recovery/suspend
4967 	 */
4968 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4969 
4970 	wl12xx_free_link(wl, wlvif, &hlid);
4971 	wl->active_sta_count--;
4972 
4973 	/*
4974 	 * rearm the tx watchdog when the last STA is freed - give the FW a
4975 	 * chance to return STA-buffered packets before complaining.
4976 	 */
4977 	if (wl->active_sta_count == 0)
4978 		wl12xx_rearm_tx_watchdog_locked(wl);
4979 }
4980 
4981 static int wl12xx_sta_add(struct wl1271 *wl,
4982 			  struct wl12xx_vif *wlvif,
4983 			  struct ieee80211_sta *sta)
4984 {
4985 	struct wl1271_station *wl_sta;
4986 	int ret = 0;
4987 	u8 hlid;
4988 
4989 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4990 
4991 	ret = wl1271_allocate_sta(wl, wlvif, sta);
4992 	if (ret < 0)
4993 		return ret;
4994 
4995 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4996 	hlid = wl_sta->hlid;
4997 
4998 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4999 	if (ret < 0)
5000 		wl1271_free_sta(wl, wlvif, hlid);
5001 
5002 	return ret;
5003 }
5004 
5005 static int wl12xx_sta_remove(struct wl1271 *wl,
5006 			     struct wl12xx_vif *wlvif,
5007 			     struct ieee80211_sta *sta)
5008 {
5009 	struct wl1271_station *wl_sta;
5010 	int ret = 0, id;
5011 
5012 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5013 
5014 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5015 	id = wl_sta->hlid;
5016 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5017 		return -EINVAL;
5018 
5019 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5020 	if (ret < 0)
5021 		return ret;
5022 
5023 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5024 	return ret;
5025 }
5026 
5027 static void wlcore_roc_if_possible(struct wl1271 *wl,
5028 				   struct wl12xx_vif *wlvif)
5029 {
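	/* only one ROC may be active at a time; bail out if any role already has one */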
5030 	if (find_first_bit(wl->roc_map,
5031 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5032 		return;
5033 
5034 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5035 		return;
5036 
5037 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5038 }
5039 
5040 /*
5041  * When wl_sta is NULL, we treat this call as if it comes from a
5042  * pending auth reply.
5043  * wl->mutex must be held and the FW must be awake when the call
5044  * takes place.
5045  */
5046 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5047 			      struct wl1271_station *wl_sta, bool in_conn)
5048 {
5049 	if (in_conn) {
5050 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5051 			return;
5052 
5053 		if (!wlvif->ap_pending_auth_reply &&
5054 		    !wlvif->inconn_count)
5055 			wlcore_roc_if_possible(wl, wlvif);
5056 
5057 		if (wl_sta) {
5058 			wl_sta->in_connection = true;
5059 			wlvif->inconn_count++;
5060 		} else {
5061 			wlvif->ap_pending_auth_reply = true;
5062 		}
5063 	} else {
5064 		if (wl_sta && !wl_sta->in_connection)
5065 			return;
5066 
5067 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5068 			return;
5069 
5070 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5071 			return;
5072 
5073 		if (wl_sta) {
5074 			wl_sta->in_connection = false;
5075 			wlvif->inconn_count--;
5076 		} else {
5077 			wlvif->ap_pending_auth_reply = false;
5078 		}
5079 
5080 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5081 		    test_bit(wlvif->role_id, wl->roc_map))
5082 			wl12xx_croc(wl, wlvif->role_id);
5083 	}
5084 }
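
/*
 * Usage sketch (illustrative, not driver code): in AP mode the usual
 * sequence, mirroring wl12xx_update_sta_state() below, is
 *
 *	wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);   station added
 *	...authentication and association complete...
 *	wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);  ROC released
 */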
5085 
5086 static int wl12xx_update_sta_state(struct wl1271 *wl,
5087 				   struct wl12xx_vif *wlvif,
5088 				   struct ieee80211_sta *sta,
5089 				   enum ieee80211_sta_state old_state,
5090 				   enum ieee80211_sta_state new_state)
5091 {
5092 	struct wl1271_station *wl_sta;
5093 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5094 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5095 	int ret;
5096 
5097 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5098 
5099 	/* Add station (AP mode) */
5100 	if (is_ap &&
5101 	    old_state == IEEE80211_STA_NOTEXIST &&
5102 	    new_state == IEEE80211_STA_NONE) {
5103 		ret = wl12xx_sta_add(wl, wlvif, sta);
5104 		if (ret)
5105 			return ret;
5106 
5107 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5108 	}
5109 
5110 	/* Remove station (AP mode) */
5111 	if (is_ap &&
5112 	    old_state == IEEE80211_STA_NONE &&
5113 	    new_state == IEEE80211_STA_NOTEXIST) {
5114 		/* must not fail */
5115 		wl12xx_sta_remove(wl, wlvif, sta);
5116 
5117 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5118 	}
5119 
5120 	/* Authorize station (AP mode) */
5121 	if (is_ap &&
5122 	    new_state == IEEE80211_STA_AUTHORIZED) {
5123 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5124 		if (ret < 0)
5125 			return ret;
5126 
5127 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5128 						     wl_sta->hlid);
5129 		if (ret)
5130 			return ret;
5131 
5132 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5133 	}
5134 
5135 	/* Authorize station */
5136 	if (is_sta &&
5137 	    new_state == IEEE80211_STA_AUTHORIZED) {
5138 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5139 		ret = wl12xx_set_authorized(wl, wlvif);
5140 		if (ret)
5141 			return ret;
5142 	}
5143 
5144 	if (is_sta &&
5145 	    old_state == IEEE80211_STA_AUTHORIZED &&
5146 	    new_state == IEEE80211_STA_ASSOC) {
5147 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5148 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5149 	}
5150 
5151 	/* save seq number on disassoc (suspend) */
5152 	if (is_sta &&
5153 	    old_state == IEEE80211_STA_ASSOC &&
5154 	    new_state == IEEE80211_STA_AUTH) {
5155 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5156 		wlvif->total_freed_pkts = 0;
5157 	}
5158 
5159 	/* restore seq number on assoc (resume) */
5160 	if (is_sta &&
5161 	    old_state == IEEE80211_STA_AUTH &&
5162 	    new_state == IEEE80211_STA_ASSOC) {
5163 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5164 	}
5165 
5166 	/* clear ROCs on failure or authorization */
5167 	if (is_sta &&
5168 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5169 	     new_state == IEEE80211_STA_NOTEXIST)) {
5170 		if (test_bit(wlvif->role_id, wl->roc_map))
5171 			wl12xx_croc(wl, wlvif->role_id);
5172 	}
5173 
5174 	if (is_sta &&
5175 	    old_state == IEEE80211_STA_NOTEXIST &&
5176 	    new_state == IEEE80211_STA_NONE) {
5177 		if (find_first_bit(wl->roc_map,
5178 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5179 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5180 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5181 				   wlvif->band, wlvif->channel);
5182 		}
5183 	}
5184 	return 0;
5185 }
5186 
5187 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5188 			       struct ieee80211_vif *vif,
5189 			       struct ieee80211_sta *sta,
5190 			       enum ieee80211_sta_state old_state,
5191 			       enum ieee80211_sta_state new_state)
5192 {
5193 	struct wl1271 *wl = hw->priv;
5194 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5195 	int ret;
5196 
5197 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5198 		     sta->aid, old_state, new_state);
5199 
5200 	mutex_lock(&wl->mutex);
5201 
5202 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5203 		ret = -EBUSY;
5204 		goto out;
5205 	}
5206 
5207 	ret = wl1271_ps_elp_wakeup(wl);
5208 	if (ret < 0)
5209 		goto out;
5210 
5211 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5212 
5213 	wl1271_ps_elp_sleep(wl);
5214 out:
5215 	mutex_unlock(&wl->mutex);
5216 	if (new_state < old_state)
5217 		return 0;
5218 	return ret;
5219 }
5220 
5221 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5222 				  struct ieee80211_vif *vif,
5223 				  enum ieee80211_ampdu_mlme_action action,
5224 				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5225 				  u8 buf_size)
5226 {
5227 	struct wl1271 *wl = hw->priv;
5228 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5229 	int ret;
5230 	u8 hlid, *ba_bitmap;
5231 
5232 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5233 		     tid);
5234 
5235 	/* sanity check - the fields in FW are only 8 bits wide */
5236 	if (WARN_ON(tid > 0xFF))
5237 		return -ENOTSUPP;
5238 
5239 	mutex_lock(&wl->mutex);
5240 
5241 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5242 		ret = -EAGAIN;
5243 		goto out;
5244 	}
5245 
5246 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5247 		hlid = wlvif->sta.hlid;
5248 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5249 		struct wl1271_station *wl_sta;
5250 
5251 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5252 		hlid = wl_sta->hlid;
5253 	} else {
5254 		ret = -EINVAL;
5255 		goto out;
5256 	}
5257 
5258 	ba_bitmap = &wl->links[hlid].ba_bitmap;
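	/* one bit per TID: BIT(tid) is set while an RX BA session is active */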
5259 
5260 	ret = wl1271_ps_elp_wakeup(wl);
5261 	if (ret < 0)
5262 		goto out;
5263 
5264 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5265 		     tid, action);
5266 
5267 	switch (action) {
5268 	case IEEE80211_AMPDU_RX_START:
5269 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5270 			ret = -ENOTSUPP;
5271 			break;
5272 		}
5273 
5274 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5275 			ret = -EBUSY;
5276 			wl1271_error("exceeded max RX BA sessions");
5277 			break;
5278 		}
5279 
5280 		if (*ba_bitmap & BIT(tid)) {
5281 			ret = -EINVAL;
5282 			wl1271_error("cannot enable RX BA session on active "
5283 				     "tid: %d", tid);
5284 			break;
5285 		}
5286 
5287 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5288 							 hlid);
5289 		if (!ret) {
5290 			*ba_bitmap |= BIT(tid);
5291 			wl->ba_rx_session_count++;
5292 		}
5293 		break;
5294 
5295 	case IEEE80211_AMPDU_RX_STOP:
5296 		if (!(*ba_bitmap & BIT(tid))) {
5297 			/*
5298 			 * this happens on reconfig - so only output a debug
5299 			 * message for now, and don't fail the function.
5300 			 */
5301 			wl1271_debug(DEBUG_MAC80211,
5302 				     "no active RX BA session on tid: %d",
5303 				     tid);
5304 			ret = 0;
5305 			break;
5306 		}
5307 
5308 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5309 							 hlid);
5310 		if (!ret) {
5311 			*ba_bitmap &= ~BIT(tid);
5312 			wl->ba_rx_session_count--;
5313 		}
5314 		break;
5315 
5316 	/*
5317 	 * BA initiator sessions are managed by the FW independently.
5318 	 * Falling through here on purpose for all TX AMPDU actions.
5319 	 */
5320 	case IEEE80211_AMPDU_TX_START:
5321 	case IEEE80211_AMPDU_TX_STOP_CONT:
5322 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5323 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5324 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5325 		ret = -EINVAL;
5326 		break;
5327 
5328 	default:
5329 		wl1271_error("Incorrect ampdu action id=%x", action);
5330 		ret = -EINVAL;
5331 	}
5332 
5333 	wl1271_ps_elp_sleep(wl);
5334 
5335 out:
5336 	mutex_unlock(&wl->mutex);
5337 
5338 	return ret;
5339 }
5340 
5341 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5342 				   struct ieee80211_vif *vif,
5343 				   const struct cfg80211_bitrate_mask *mask)
5344 {
5345 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5346 	struct wl1271 *wl = hw->priv;
5347 	int i, ret = 0;
5348 
5349 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5350 		mask->control[NL80211_BAND_2GHZ].legacy,
5351 		mask->control[NL80211_BAND_5GHZ].legacy);
5352 
5353 	mutex_lock(&wl->mutex);
5354 
5355 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5356 		wlvif->bitrate_masks[i] =
5357 			wl1271_tx_enabled_rates_get(wl,
5358 						    mask->control[i].legacy,
5359 						    i);
5360 
5361 	if (unlikely(wl->state != WLCORE_STATE_ON))
5362 		goto out;
5363 
5364 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5365 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5366 
5367 		ret = wl1271_ps_elp_wakeup(wl);
5368 		if (ret < 0)
5369 			goto out;
5370 
5371 		wl1271_set_band_rate(wl, wlvif);
5372 		wlvif->basic_rate =
5373 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5374 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5375 
5376 		wl1271_ps_elp_sleep(wl);
5377 	}
5378 out:
5379 	mutex_unlock(&wl->mutex);
5380 
5381 	return ret;
5382 }
5383 
5384 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5385 				     struct ieee80211_vif *vif,
5386 				     struct ieee80211_channel_switch *ch_switch)
5387 {
5388 	struct wl1271 *wl = hw->priv;
5389 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5390 	int ret;
5391 
5392 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5393 
5394 	wl1271_tx_flush(wl);
5395 
5396 	mutex_lock(&wl->mutex);
5397 
5398 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5399 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5400 			ieee80211_chswitch_done(vif, false);
5401 		goto out;
5402 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5403 		goto out;
5404 	}
5405 
5406 	ret = wl1271_ps_elp_wakeup(wl);
5407 	if (ret < 0)
5408 		goto out;
5409 
5410 	/* TODO: change mac80211 to pass vif as param */
5411 
5412 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5413 		unsigned long delay_usec;
5414 
5415 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5416 		if (ret)
5417 			goto out_sleep;
5418 
5419 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5420 
5421 		/* indicate failure 5 seconds after channel switch time */
5422 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5423 			ch_switch->count;
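		/* e.g. beacon_int == 100 TU and count == 10: 100 * 1024us * 10 = 1024000us (~1s) */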
5424 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5425 					     usecs_to_jiffies(delay_usec) +
5426 					     msecs_to_jiffies(5000));
5427 	}
5428 
5429 out_sleep:
5430 	wl1271_ps_elp_sleep(wl);
5431 
5432 out:
5433 	mutex_unlock(&wl->mutex);
5434 }
5435 
5436 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5437 					struct wl12xx_vif *wlvif,
5438 					u8 eid)
5439 {
5440 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5441 	struct sk_buff *beacon =
5442 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5443 
5444 	if (!beacon)
5445 		return NULL;
5446 
5447 	return cfg80211_find_ie(eid,
5448 				beacon->data + ieoffset,
5449 				beacon->len - ieoffset);
5450 }
5451 
5452 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5453 				u8 *csa_count)
5454 {
5455 	const u8 *ie;
5456 	const struct ieee80211_channel_sw_ie *ie_csa;
5457 
5458 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5459 	if (!ie)
5460 		return -EINVAL;
5461 
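	/* ie[0] is the element ID and ie[1] its length; the CSA body starts at ie[2] */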
5462 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5463 	*csa_count = ie_csa->count;
5464 
5465 	return 0;
5466 }
5467 
5468 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5469 					    struct ieee80211_vif *vif,
5470 					    struct cfg80211_chan_def *chandef)
5471 {
5472 	struct wl1271 *wl = hw->priv;
5473 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5474 	struct ieee80211_channel_switch ch_switch = {
5475 		.block_tx = true,
5476 		.chandef = *chandef,
5477 	};
5478 	int ret;
5479 
5480 	wl1271_debug(DEBUG_MAC80211,
5481 		     "mac80211 channel switch beacon (role %d)",
5482 		     wlvif->role_id);
5483 
5484 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5485 	if (ret < 0) {
5486 		wl1271_error("error getting beacon (for CSA counter)");
5487 		return;
5488 	}
5489 
5490 	mutex_lock(&wl->mutex);
5491 
5492 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5493 		ret = -EBUSY;
5494 		goto out;
5495 	}
5496 
5497 	ret = wl1271_ps_elp_wakeup(wl);
5498 	if (ret < 0)
5499 		goto out;
5500 
5501 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5502 	if (ret)
5503 		goto out_sleep;
5504 
5505 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5506 
5507 out_sleep:
5508 	wl1271_ps_elp_sleep(wl);
5509 out:
5510 	mutex_unlock(&wl->mutex);
5511 }
5512 
5513 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5514 			    u32 queues, bool drop)
5515 {
5516 	struct wl1271 *wl = hw->priv;
5517 
5518 	wl1271_tx_flush(wl);
5519 }
5520 
5521 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5522 				       struct ieee80211_vif *vif,
5523 				       struct ieee80211_channel *chan,
5524 				       int duration,
5525 				       enum ieee80211_roc_type type)
5526 {
5527 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5528 	struct wl1271 *wl = hw->priv;
5529 	int channel, ret = 0;
5530 
5531 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5532 
5533 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5534 		     channel, wlvif->role_id);
5535 
5536 	mutex_lock(&wl->mutex);
5537 
5538 	if (unlikely(wl->state != WLCORE_STATE_ON))
5539 		goto out;
5540 
5541 	/* return EBUSY if we can't ROC right now */
5542 	if (WARN_ON(wl->roc_vif ||
5543 		    find_first_bit(wl->roc_map,
5544 				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5545 		ret = -EBUSY;
5546 		goto out;
5547 	}
5548 
5549 	ret = wl1271_ps_elp_wakeup(wl);
5550 	if (ret < 0)
5551 		goto out;
5552 
5553 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5554 	if (ret < 0)
5555 		goto out_sleep;
5556 
5557 	wl->roc_vif = vif;
5558 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5559 				     msecs_to_jiffies(duration));
5560 out_sleep:
5561 	wl1271_ps_elp_sleep(wl);
5562 out:
5563 	mutex_unlock(&wl->mutex);
5564 	return ret;
5565 }
5566 
5567 static int __wlcore_roc_completed(struct wl1271 *wl)
5568 {
5569 	struct wl12xx_vif *wlvif;
5570 	int ret;
5571 
5572 	/* already completed */
5573 	if (unlikely(!wl->roc_vif))
5574 		return 0;
5575 
5576 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5577 
5578 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5579 		return -EBUSY;
5580 
5581 	ret = wl12xx_stop_dev(wl, wlvif);
5582 	if (ret < 0)
5583 		return ret;
5584 
5585 	wl->roc_vif = NULL;
5586 
5587 	return 0;
5588 }
5589 
5590 static int wlcore_roc_completed(struct wl1271 *wl)
5591 {
5592 	int ret;
5593 
5594 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5595 
5596 	mutex_lock(&wl->mutex);
5597 
5598 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5599 		ret = -EBUSY;
5600 		goto out;
5601 	}
5602 
5603 	ret = wl1271_ps_elp_wakeup(wl);
5604 	if (ret < 0)
5605 		goto out;
5606 
5607 	ret = __wlcore_roc_completed(wl);
5608 
5609 	wl1271_ps_elp_sleep(wl);
5610 out:
5611 	mutex_unlock(&wl->mutex);
5612 
5613 	return ret;
5614 }
5615 
5616 static void wlcore_roc_complete_work(struct work_struct *work)
5617 {
5618 	struct delayed_work *dwork;
5619 	struct wl1271 *wl;
5620 	int ret;
5621 
5622 	dwork = container_of(work, struct delayed_work, work);
5623 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5624 
5625 	ret = wlcore_roc_completed(wl);
5626 	if (!ret)
5627 		ieee80211_remain_on_channel_expired(wl->hw);
5628 }
5629 
5630 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5631 {
5632 	struct wl1271 *wl = hw->priv;
5633 
5634 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5635 
5636 	/* TODO: per-vif */
5637 	wl1271_tx_flush(wl);
5638 
5639 	/*
5640 	 * we can't just flush_work here, because it might deadlock
5641 	 * (as we might get called from the same workqueue)
5642 	 */
5643 	cancel_delayed_work_sync(&wl->roc_complete_work);
5644 	wlcore_roc_completed(wl);
5645 
5646 	return 0;
5647 }
5648 
5649 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5650 				    struct ieee80211_vif *vif,
5651 				    struct ieee80211_sta *sta,
5652 				    u32 changed)
5653 {
5654 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5655 
5656 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5657 
5658 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5659 		return;
5660 
5661 	/* this callback is atomic, so schedule a new work */
5662 	wlvif->rc_update_bw = sta->bandwidth;
5663 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5664 }
5665 
5666 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5667 				     struct ieee80211_vif *vif,
5668 				     struct ieee80211_sta *sta,
5669 				     struct station_info *sinfo)
5670 {
5671 	struct wl1271 *wl = hw->priv;
5672 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5673 	s8 rssi_dbm;
5674 	int ret;
5675 
5676 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5677 
5678 	mutex_lock(&wl->mutex);
5679 
5680 	if (unlikely(wl->state != WLCORE_STATE_ON))
5681 		goto out;
5682 
5683 	ret = wl1271_ps_elp_wakeup(wl);
5684 	if (ret < 0)
5685 		goto out_sleep;
5686 
5687 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5688 	if (ret < 0)
5689 		goto out_sleep;
5690 
5691 	sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5692 	sinfo->signal = rssi_dbm;
5693 
5694 out_sleep:
5695 	wl1271_ps_elp_sleep(wl);
5696 
5697 out:
5698 	mutex_unlock(&wl->mutex);
5699 }
5700 
5701 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5702 {
5703 	struct wl1271 *wl = hw->priv;
5704 	bool ret = false;
5705 
5706 	mutex_lock(&wl->mutex);
5707 
5708 	if (unlikely(wl->state != WLCORE_STATE_ON))
5709 		goto out;
5710 
5711 	/* packets are considered pending if in the TX queue or the FW */
5712 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5713 out:
5714 	mutex_unlock(&wl->mutex);
5715 
5716 	return ret;
5717 }
5718 
5719 /* can't be const, mac80211 writes to this */
5720 static struct ieee80211_rate wl1271_rates[] = {
5721 	{ .bitrate = 10,
5722 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5723 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5724 	{ .bitrate = 20,
5725 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5726 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5727 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5728 	{ .bitrate = 55,
5729 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5730 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5731 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5732 	{ .bitrate = 110,
5733 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5734 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5735 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5736 	{ .bitrate = 60,
5737 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5738 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5739 	{ .bitrate = 90,
5740 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5741 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5742 	{ .bitrate = 120,
5743 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5744 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5745 	{ .bitrate = 180,
5746 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5747 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5748 	{ .bitrate = 240,
5749 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5750 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5751 	{ .bitrate = 360,
5752 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5753 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5754 	{ .bitrate = 480,
5755 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5756 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5757 	{ .bitrate = 540,
5758 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5759 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5760 };
5761 
5762 /* can't be const, mac80211 writes to this */
5763 static struct ieee80211_channel wl1271_channels[] = {
5764 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5765 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5766 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5767 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5768 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5769 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5770 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5771 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5772 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5773 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5774 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5775 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5776 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5777 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5778 };
5779 
5780 /* can't be const, mac80211 writes to this */
5781 static struct ieee80211_supported_band wl1271_band_2ghz = {
5782 	.channels = wl1271_channels,
5783 	.n_channels = ARRAY_SIZE(wl1271_channels),
5784 	.bitrates = wl1271_rates,
5785 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5786 };
5787 
5788 /* 5 GHz data rates for WL1273 */
5789 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5790 	{ .bitrate = 60,
5791 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5792 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5793 	{ .bitrate = 90,
5794 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5795 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5796 	{ .bitrate = 120,
5797 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5798 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5799 	{ .bitrate = 180,
5800 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5801 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5802 	{ .bitrate = 240,
5803 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5804 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5805 	{ .bitrate = 360,
5806 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5807 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5808 	{ .bitrate = 480,
5809 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5810 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5811 	{ .bitrate = 540,
5812 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5813 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5814 };
5815 
5816 /* 5 GHz band channels for WL1273 */
5817 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5818 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5819 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5820 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5821 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5822 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5823 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5824 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5825 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5826 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5827 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5828 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5829 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5830 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5831 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5832 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5833 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5834 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5835 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5836 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5837 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5838 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5839 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5840 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5841 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5842 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5843 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5844 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5845 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5846 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5847 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5848 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5849 };
5850 
5851 static struct ieee80211_supported_band wl1271_band_5ghz = {
5852 	.channels = wl1271_channels_5ghz,
5853 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5854 	.bitrates = wl1271_rates_5ghz,
5855 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5856 };
5857 
5858 static const struct ieee80211_ops wl1271_ops = {
5859 	.start = wl1271_op_start,
5860 	.stop = wlcore_op_stop,
5861 	.add_interface = wl1271_op_add_interface,
5862 	.remove_interface = wl1271_op_remove_interface,
5863 	.change_interface = wl12xx_op_change_interface,
5864 #ifdef CONFIG_PM
5865 	.suspend = wl1271_op_suspend,
5866 	.resume = wl1271_op_resume,
5867 #endif
5868 	.config = wl1271_op_config,
5869 	.prepare_multicast = wl1271_op_prepare_multicast,
5870 	.configure_filter = wl1271_op_configure_filter,
5871 	.tx = wl1271_op_tx,
5872 	.set_key = wlcore_op_set_key,
5873 	.hw_scan = wl1271_op_hw_scan,
5874 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5875 	.sched_scan_start = wl1271_op_sched_scan_start,
5876 	.sched_scan_stop = wl1271_op_sched_scan_stop,
5877 	.bss_info_changed = wl1271_op_bss_info_changed,
5878 	.set_frag_threshold = wl1271_op_set_frag_threshold,
5879 	.set_rts_threshold = wl1271_op_set_rts_threshold,
5880 	.conf_tx = wl1271_op_conf_tx,
5881 	.get_tsf = wl1271_op_get_tsf,
5882 	.get_survey = wl1271_op_get_survey,
5883 	.sta_state = wl12xx_op_sta_state,
5884 	.ampdu_action = wl1271_op_ampdu_action,
5885 	.tx_frames_pending = wl1271_tx_frames_pending,
5886 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5887 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
5888 	.channel_switch = wl12xx_op_channel_switch,
5889 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
5890 	.flush = wlcore_op_flush,
5891 	.remain_on_channel = wlcore_op_remain_on_channel,
5892 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5893 	.add_chanctx = wlcore_op_add_chanctx,
5894 	.remove_chanctx = wlcore_op_remove_chanctx,
5895 	.change_chanctx = wlcore_op_change_chanctx,
5896 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5897 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5898 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5899 	.sta_rc_update = wlcore_op_sta_rc_update,
5900 	.sta_statistics = wlcore_op_sta_statistics,
5901 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5902 };
5903 
5904 
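/*
 * Translate a HW rate index reported by the firmware into the mac80211
 * rate table index for the given band.  Illegal or unsupported values
 * are logged and mapped to index 0 (the lowest rate).
 */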
5905 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5906 {
5907 	u8 idx;
5908 
5909 	BUG_ON(band >= 2);
5910 
5911 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5912 		wl1271_error("Illegal RX rate from HW: %d", rate);
5913 		return 0;
5914 	}
5915 
5916 	idx = wl->band_rate_to_idx[band][rate];
5917 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5918 		wl1271_error("Unsupported RX rate from HW: %d", rate);
5919 		return 0;
5920 	}
5921 
5922 	return idx;
5923 }
5924 
5925 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5926 {
5927 	int i;
5928 
5929 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5930 		     oui, nic);
5931 
5932 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5933 		wl1271_warning("NIC part of the MAC address wraps around!");
5934 
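	/*
	 * Build each address as a 3-byte OUI followed by a 3-byte NIC part,
	 * incrementing the NIC part for every additional interface.
	 */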
5935 	for (i = 0; i < wl->num_mac_addr; i++) {
5936 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
5937 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
5938 		wl->addresses[i].addr[2] = (u8) oui;
5939 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
5940 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
5941 		wl->addresses[i].addr[5] = (u8) nic;
5942 		nic++;
5943 	}
5944 
5945 	/* we may be at most one address short */
5946 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5947 
5948 	/*
5949 	 * If we are one address short, reuse the first address as the
5950 	 * last one, with the locally administered (LAA) bit turned on.
5951 	 */
5952 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5953 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5954 		memcpy(&wl->addresses[idx], &wl->addresses[0],
5955 		       sizeof(wl->addresses[0]));
5956 		/* LAA bit */
5957 		wl->addresses[idx].addr[0] |= BIT(1);
5958 	}
5959 
5960 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5961 	wl->hw->wiphy->addresses = wl->addresses;
5962 }
5963 
5964 static int wl12xx_get_hw_info(struct wl1271 *wl)
5965 {
5966 	int ret;
5967 
5968 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5969 	if (ret < 0)
5970 		goto out;
5971 
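	/*
	 * Clear the fuse addresses first; get_mac() (when the chip provides
	 * it) fills them in, and wl1271_register_hw() falls back to them if
	 * the NVS MAC address is all zeros.
	 */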
5972 	wl->fuse_oui_addr = 0;
5973 	wl->fuse_nic_addr = 0;
5974 
5975 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5976 	if (ret < 0)
5977 		goto out;
5978 
5979 	if (wl->ops->get_mac)
5980 		ret = wl->ops->get_mac(wl);
5981 
5982 out:
5983 	return ret;
5984 }
5985 
5986 static int wl1271_register_hw(struct wl1271 *wl)
5987 {
5988 	int ret;
5989 	u32 oui_addr = 0, nic_addr = 0;
5990 
5991 	if (wl->mac80211_registered)
5992 		return 0;
5993 
5994 	if (wl->nvs_len >= 12) {
5995 		/* NOTE: The wl->nvs->nvs element must come first; to
5996 		 * simplify the casting we assume it sits at the
5997 		 * beginning of the wl->nvs structure.
5998 		 */
5999 		u8 *nvs_ptr = (u8 *)wl->nvs;
6000 
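		/*
		 * Reassemble the MAC address scattered across the NVS
		 * image: the OUI from bytes 11, 10 and 6, the NIC part
		 * from bytes 5, 4 and 3.
		 */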
6001 		oui_addr =
6002 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6003 		nic_addr =
6004 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6005 	}
6006 
6007 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
6008 	if (oui_addr == 0 && nic_addr == 0) {
6009 		oui_addr = wl->fuse_oui_addr;
6010 		/* the fuse holds the BD_ADDR; the WLAN addresses are the next two */
6011 		nic_addr = wl->fuse_nic_addr + 1;
6012 	}
6013 
6014 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6015 
6016 	ret = ieee80211_register_hw(wl->hw);
6017 	if (ret < 0) {
6018 		wl1271_error("unable to register mac80211 hw: %d", ret);
6019 		goto out;
6020 	}
6021 
6022 	wl->mac80211_registered = true;
6023 
6024 	wl1271_debugfs_init(wl);
6025 
6026 	wl1271_notice("loaded");
6027 
6028 out:
6029 	return ret;
6030 }
6031 
6032 static void wl1271_unregister_hw(struct wl1271 *wl)
6033 {
6034 	if (wl->plt)
6035 		wl1271_plt_stop(wl);
6036 
6037 	ieee80211_unregister_hw(wl->hw);
6038 	wl->mac80211_registered = false;
6040 }
6041 
6042 static int wl1271_init_ieee80211(struct wl1271 *wl)
6043 {
6044 	int i;
6045 	static const u32 cipher_suites[] = {
6046 		WLAN_CIPHER_SUITE_WEP40,
6047 		WLAN_CIPHER_SUITE_WEP104,
6048 		WLAN_CIPHER_SUITE_TKIP,
6049 		WLAN_CIPHER_SUITE_CCMP,
6050 		WL1271_CIPHER_SUITE_GEM,
6051 	};
6052 
6053 	/* The tx descriptor buffer */
6054 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6055 
6056 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6057 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6058 
6059 	/* unit us */
6060 	/* FIXME: find a proper value */
6061 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6062 
6063 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6064 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6065 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6066 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6067 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6068 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6069 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6070 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6071 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6072 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6073 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6074 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6075 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6076 
6077 	wl->hw->wiphy->cipher_suites = cipher_suites;
6078 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6079 
6080 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6081 		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
6082 		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
6083 	wl->hw->wiphy->max_scan_ssids = 1;
6084 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6085 	wl->hw->wiphy->max_match_sets = 16;
6086 	/*
6087 	 * The maximum length of the IEs in a scan probe request template
6088 	 * is the maximum possible template size minus the IEEE 802.11
6089 	 * header of the template.
6090 	 */
6091 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6092 			sizeof(struct ieee80211_header);
6093 
6094 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6095 		sizeof(struct ieee80211_header);
6096 
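	/* cap remain-on-channel requests at 30 seconds (value is in ms) */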
6097 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6098 
6099 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6100 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6101 				WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6102 				WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6103 
6104 	/* make sure all our channels fit in the scanned_ch bitmask */
6105 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6106 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6107 		     WL1271_MAX_CHANNELS);
6108 	/*
6109 	 * clear channel flags from previous usage
6110 	 * and restore max_power & max_antenna_gain values.
6111 	 */
6112 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6113 		wl1271_band_2ghz.channels[i].flags = 0;
6114 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6115 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6116 	}
6117 
6118 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6119 		wl1271_band_5ghz.channels[i].flags = 0;
6120 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6121 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6122 	}
6123 
6124 	/*
6125 	 * We keep local copies of the band structs because we need to
6126 	 * modify them on a per-device basis.
6127 	 */
6128 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
6129 	       sizeof(wl1271_band_2ghz));
6130 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
6131 	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
6132 	       sizeof(*wl->ht_cap));
6133 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
6134 	       sizeof(wl1271_band_5ghz));
6135 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
6136 	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
6137 	       sizeof(*wl->ht_cap));
6138 
6139 	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
6140 		&wl->bands[IEEE80211_BAND_2GHZ];
6141 	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
6142 		&wl->bands[IEEE80211_BAND_5GHZ];
6143 
6144 	/*
6145 	 * allow 4 queues per mac address we support +
6146 	 * 1 cab queue per mac + one global offchannel Tx queue
6147 	 */
6148 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
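	/*
	 * For example, with the driver's usual NUM_TX_QUEUES = 4 and
	 * WLCORE_NUM_MAC_ADDRESSES = 3 (values defined elsewhere, assumed
	 * here) this works out to (4 + 1) * 3 + 1 = 16 hw queues.
	 */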
6149 
6150 	/* the last queue is the offchannel queue */
6151 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6152 	wl->hw->max_rates = 1;
6153 
6154 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6155 
6156 	/* the FW answers probe-requests in AP-mode */
6157 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6158 	wl->hw->wiphy->probe_resp_offload =
6159 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6160 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6161 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6162 
6163 	/* allowed interface combinations */
6164 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6165 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6166 
6167 	/* register vendor commands */
6168 	wlcore_set_vendor_commands(wl->hw->wiphy);
6169 
6170 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6171 
6172 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6173 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6174 
6175 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6176 
6177 	return 0;
6178 }
6179 
6180 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6181 				     u32 mbox_size)
6182 {
6183 	struct ieee80211_hw *hw;
6184 	struct wl1271 *wl;
6185 	int i, j, ret;
6186 	unsigned int order;
6187 
6188 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6189 	if (!hw) {
6190 		wl1271_error("could not alloc ieee80211_hw");
6191 		ret = -ENOMEM;
6192 		goto err_hw_alloc;
6193 	}
6194 
6195 	wl = hw->priv;
6196 	memset(wl, 0, sizeof(*wl));
6197 
6198 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6199 	if (!wl->priv) {
6200 		wl1271_error("could not alloc wl priv");
6201 		ret = -ENOMEM;
6202 		goto err_priv_alloc;
6203 	}
6204 
6205 	INIT_LIST_HEAD(&wl->wlvif_list);
6206 
6207 	wl->hw = hw;
6208 
6209 	/*
6210 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6211 	 * We don't allocate any additional resources here, so that's fine.
6212 	 */
6213 	for (i = 0; i < NUM_TX_QUEUES; i++)
6214 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6215 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6216 
6217 	skb_queue_head_init(&wl->deferred_rx_queue);
6218 	skb_queue_head_init(&wl->deferred_tx_queue);
6219 
6220 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6221 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6222 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6223 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6224 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6225 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6226 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6227 
6228 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6229 	if (!wl->freezable_wq) {
6230 		ret = -ENOMEM;
6231 		goto err_hw;
6232 	}
6233 
6234 	wl->channel = 0;
6235 	wl->rx_counter = 0;
6236 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6237 	wl->band = IEEE80211_BAND_2GHZ;
6238 	wl->channel_type = NL80211_CHAN_NO_HT;
6239 	wl->flags = 0;
6240 	wl->sg_enabled = true;
6241 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6242 	wl->recovery_count = 0;
6243 	wl->hw_pg_ver = -1;
6244 	wl->ap_ps_map = 0;
6245 	wl->ap_fw_ps_map = 0;
6246 	wl->quirks = 0;
6247 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6248 	wl->active_sta_count = 0;
6249 	wl->active_link_count = 0;
6250 	wl->fwlog_size = 0;
6251 	init_waitqueue_head(&wl->fwlog_waitq);
6252 
6253 	/* The system link is always allocated */
6254 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6255 
6256 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6257 	for (i = 0; i < wl->num_tx_desc; i++)
6258 		wl->tx_frames[i] = NULL;
6259 
6260 	spin_lock_init(&wl->wl_lock);
6261 
6262 	wl->state = WLCORE_STATE_OFF;
6263 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6264 	mutex_init(&wl->mutex);
6265 	mutex_init(&wl->flush_mutex);
6266 	init_completion(&wl->nvs_loading_complete);
6267 
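	/*
	 * The aggregation buffer is allocated as whole pages; get_order()
	 * rounds the requested size up to the next power-of-two number of
	 * pages.
	 */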
6268 	order = get_order(aggr_buf_size);
6269 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6270 	if (!wl->aggr_buf) {
6271 		ret = -ENOMEM;
6272 		goto err_wq;
6273 	}
6274 	wl->aggr_buf_size = aggr_buf_size;
6275 
6276 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6277 	if (!wl->dummy_packet) {
6278 		ret = -ENOMEM;
6279 		goto err_aggr;
6280 	}
6281 
6282 	/* Allocate one page for the FW log */
6283 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6284 	if (!wl->fwlog) {
6285 		ret = -ENOMEM;
6286 		goto err_dummy_packet;
6287 	}
6288 
6289 	wl->mbox_size = mbox_size;
6290 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6291 	if (!wl->mbox) {
6292 		ret = -ENOMEM;
6293 		goto err_fwlog;
6294 	}
6295 
6296 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6297 	if (!wl->buffer_32) {
6298 		ret = -ENOMEM;
6299 		goto err_mbox;
6300 	}
6301 
6302 	return hw;
6303 
6304 err_mbox:
6305 	kfree(wl->mbox);
6306 
6307 err_fwlog:
6308 	free_page((unsigned long)wl->fwlog);
6309 
6310 err_dummy_packet:
6311 	dev_kfree_skb(wl->dummy_packet);
6312 
6313 err_aggr:
6314 	free_pages((unsigned long)wl->aggr_buf, order);
6315 
6316 err_wq:
6317 	destroy_workqueue(wl->freezable_wq);
6318 
6319 err_hw:
6320 	wl1271_debugfs_exit(wl);
6321 	kfree(wl->priv);
6322 
6323 err_priv_alloc:
6324 	ieee80211_free_hw(hw);
6325 
6326 err_hw_alloc:
6327 
6328 	return ERR_PTR(ret);
6329 }
6330 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6331 
6332 int wlcore_free_hw(struct wl1271 *wl)
6333 {
6334 	/* Unblock any fwlog readers */
6335 	mutex_lock(&wl->mutex);
6336 	wl->fwlog_size = -1;
6337 	wake_up_interruptible_all(&wl->fwlog_waitq);
6338 	mutex_unlock(&wl->mutex);
6339 
6340 	wlcore_sysfs_free(wl);
6341 
6342 	kfree(wl->buffer_32);
6343 	kfree(wl->mbox);
6344 	free_page((unsigned long)wl->fwlog);
6345 	dev_kfree_skb(wl->dummy_packet);
6346 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6347 
6348 	wl1271_debugfs_exit(wl);
6349 
6350 	vfree(wl->fw);
6351 	wl->fw = NULL;
6352 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6353 	kfree(wl->nvs);
6354 	wl->nvs = NULL;
6355 
6356 	kfree(wl->raw_fw_status);
6357 	kfree(wl->fw_status);
6358 	kfree(wl->tx_res_if);
6359 	destroy_workqueue(wl->freezable_wq);
6360 
6361 	kfree(wl->priv);
6362 	ieee80211_free_hw(wl->hw);
6363 
6364 	return 0;
6365 }
6366 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6367 
6368 #ifdef CONFIG_PM
6369 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6370 	.flags = WIPHY_WOWLAN_ANY,
6371 	.n_patterns = WL1271_MAX_RX_FILTERS,
6372 	.pattern_min_len = 1,
6373 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6374 };
6375 #endif
6376 
6377 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6378 {
6379 	return IRQ_WAKE_THREAD;
6380 }
6381 
6382 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6383 {
6384 	struct wl1271 *wl = context;
6385 	struct platform_device *pdev = wl->pdev;
6386 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6387 	struct resource *res;
6388 
6389 	int ret;
6390 	irq_handler_t hardirq_fn = NULL;
6391 
6392 	if (fw) {
6393 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6394 		if (!wl->nvs) {
6395 			wl1271_error("Could not allocate nvs data");
6396 			goto out;
6397 		}
6398 		wl->nvs_len = fw->size;
6399 	} else {
6400 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6401 			     WL12XX_NVS_NAME);
6402 		wl->nvs = NULL;
6403 		wl->nvs_len = 0;
6404 	}
6405 
6406 	ret = wl->ops->setup(wl);
6407 	if (ret < 0)
6408 		goto out_free_nvs;
6409 
6410 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6411 
6412 	/* adjust some runtime configuration parameters */
6413 	wlcore_adjust_conf(wl);
6414 
6415 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6416 	if (!res) {
6417 		wl1271_error("Could not get IRQ resource");
6418 		goto out_free_nvs;
6419 	}
6420 
6421 	wl->irq = res->start;
6422 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6423 	wl->if_ops = pdev_data->if_ops;
6424 
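	/*
	 * Edge-triggered interrupts need a primary handler that just wakes
	 * the IRQ thread; for a level-triggered line we leave the primary
	 * handler NULL, so IRQF_ONESHOT is required to keep the line masked
	 * until the threaded handler completes.
	 */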
6425 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6426 		hardirq_fn = wlcore_hardirq;
6427 	else
6428 		wl->irq_flags |= IRQF_ONESHOT;
6429 
6430 	ret = wl12xx_set_power_on(wl);
6431 	if (ret < 0)
6432 		goto out_free_nvs;
6433 
6434 	ret = wl12xx_get_hw_info(wl);
6435 	if (ret < 0) {
6436 		wl1271_error("couldn't get hw info");
6437 		wl1271_power_off(wl);
6438 		goto out_free_nvs;
6439 	}
6440 
6441 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6442 				   wl->irq_flags, pdev->name, wl);
6443 	if (ret < 0) {
6444 		wl1271_error("interrupt configuration failed");
6445 		wl1271_power_off(wl);
6446 		goto out_free_nvs;
6447 	}
6448 
6449 #ifdef CONFIG_PM
6450 	ret = enable_irq_wake(wl->irq);
6451 	if (!ret) {
6452 		wl->irq_wake_enabled = true;
6453 		device_init_wakeup(wl->dev, 1);
6454 		if (pdev_data->pwr_in_suspend)
6455 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6456 	}
6457 #endif
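	/*
	 * The chip was only powered up to read the HW info; power it back
	 * off and keep the IRQ disabled until the interface is started.
	 */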
6458 	disable_irq(wl->irq);
6459 	wl1271_power_off(wl);
6460 
6461 	ret = wl->ops->identify_chip(wl);
6462 	if (ret < 0)
6463 		goto out_irq;
6464 
6465 	ret = wl1271_init_ieee80211(wl);
6466 	if (ret)
6467 		goto out_irq;
6468 
6469 	ret = wl1271_register_hw(wl);
6470 	if (ret)
6471 		goto out_irq;
6472 
6473 	ret = wlcore_sysfs_init(wl);
6474 	if (ret)
6475 		goto out_unreg;
6476 
6477 	wl->initialized = true;
6478 	goto out;
6479 
6480 out_unreg:
6481 	wl1271_unregister_hw(wl);
6482 
6483 out_irq:
6484 	free_irq(wl->irq, wl);
6485 
6486 out_free_nvs:
6487 	kfree(wl->nvs);
6488 
6489 out:
6490 	release_firmware(fw);
6491 	complete_all(&wl->nvs_loading_complete);
6492 }
6493 
6494 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6495 {
6496 	int ret;
6497 
6498 	if (!wl->ops || !wl->ptable)
6499 		return -EINVAL;
6500 
6501 	wl->dev = &pdev->dev;
6502 	wl->pdev = pdev;
6503 	platform_set_drvdata(pdev, wl);
6504 
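	/*
	 * The NVS file is requested asynchronously; the remainder of the
	 * probe sequence runs in wlcore_nvs_cb() once the firmware loader
	 * calls back (with or without an NVS image).
	 */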
6505 	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6506 				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6507 				      wl, wlcore_nvs_cb);
6508 	if (ret < 0) {
6509 		wl1271_error("request_firmware_nowait failed: %d", ret);
6510 		complete_all(&wl->nvs_loading_complete);
6511 	}
6512 
6513 	return ret;
6514 }
6515 EXPORT_SYMBOL_GPL(wlcore_probe);
6516 
6517 int wlcore_remove(struct platform_device *pdev)
6518 {
6519 	struct wl1271 *wl = platform_get_drvdata(pdev);
6520 
6521 	wait_for_completion(&wl->nvs_loading_complete);
6522 	if (!wl->initialized)
6523 		return 0;
6524 
6525 	if (wl->irq_wake_enabled) {
6526 		device_init_wakeup(wl->dev, 0);
6527 		disable_irq_wake(wl->irq);
6528 	}
6529 	wl1271_unregister_hw(wl);
6530 	free_irq(wl->irq, wl);
6531 	wlcore_free_hw(wl);
6532 
6533 	return 0;
6534 }
6535 EXPORT_SYMBOL_GPL(wlcore_remove);
6536 
6537 u32 wl12xx_debug_level = DEBUG_NONE;
6538 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6539 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6540 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6541 
6542 module_param_named(fwlog, fwlog_param, charp, 0);
6543 MODULE_PARM_DESC(fwlog,
6544 		 "FW logger options: continuous, ondemand, dbgpins or disable");
6545 
6546 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6547 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6548 
6549 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6550 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6551 
6552 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6553 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6554 
6555 MODULE_LICENSE("GPL");
6556 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6557 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6558 MODULE_FIRMWARE(WL12XX_NVS_NAME);
6559