xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/main.c (revision 23c2b932)
1 /*
2  * This file is part of wlcore
3  *
4  * Copyright (C) 2008-2010 Nokia Corporation
5  * Copyright (C) 2011-2013 Texas Instruments Inc.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * version 2 as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19  * 02110-1301 USA
20  *
21  */
22 
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 
30 #include "wlcore.h"
31 #include "debug.h"
32 #include "wl12xx_80211.h"
33 #include "io.h"
34 #include "tx.h"
35 #include "ps.h"
36 #include "init.h"
37 #include "debugfs.h"
38 #include "testmode.h"
39 #include "vendor_cmd.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43 
44 #define WL1271_BOOT_RETRIES 3
45 
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery     = -1;
50 
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 					 struct ieee80211_vif *vif,
53 					 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
56 
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
58 {
59 	int ret;
60 
61 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
62 		return -EINVAL;
63 
64 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
65 		return 0;
66 
67 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
68 		return 0;
69 
70 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
71 	if (ret < 0)
72 		return ret;
73 
74 	wl1271_info("Association completed.");
75 	return 0;
76 }
77 
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 			      struct regulatory_request *request)
80 {
81 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
82 	struct wl1271 *wl = hw->priv;
83 
84 	/* copy the current dfs region */
85 	if (request)
86 		wl->dfs_region = request->dfs_region;
87 
88 	wlcore_regdomain_config(wl);
89 }
90 
91 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
92 				   bool enable)
93 {
94 	int ret = 0;
95 
96 	/* we should hold wl->mutex */
97 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
98 	if (ret < 0)
99 		goto out;
100 
101 	if (enable)
102 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
103 	else
104 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
105 out:
106 	return ret;
107 }
108 
109 /*
110  * this function is called when the rx_streaming interval
111  * has been changed or rx_streaming should be disabled
112  */
113 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
114 {
115 	int ret = 0;
116 	int period = wl->conf.rx_streaming.interval;
117 
118 	/* don't reconfigure if rx_streaming is disabled */
119 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
120 		goto out;
121 
122 	/* reconfigure/disable according to new streaming_period */
123 	if (period &&
124 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
125 	    (wl->conf.rx_streaming.always ||
126 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
127 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
128 	else {
129 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
130 		/* don't cancel_work_sync since we might deadlock */
131 		del_timer_sync(&wlvif->rx_streaming_timer);
132 	}
133 out:
134 	return ret;
135 }
136 
137 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
138 {
139 	int ret;
140 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
141 						rx_streaming_enable_work);
142 	struct wl1271 *wl = wlvif->wl;
143 
144 	mutex_lock(&wl->mutex);
145 
146 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
147 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
148 	    (!wl->conf.rx_streaming.always &&
149 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 		goto out;
151 
152 	if (!wl->conf.rx_streaming.interval)
153 		goto out;
154 
155 	ret = wl1271_ps_elp_wakeup(wl);
156 	if (ret < 0)
157 		goto out;
158 
159 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
160 	if (ret < 0)
161 		goto out_sleep;
162 
163 	/* stop it after some time of inactivity */
164 	mod_timer(&wlvif->rx_streaming_timer,
165 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
166 
167 out_sleep:
168 	wl1271_ps_elp_sleep(wl);
169 out:
170 	mutex_unlock(&wl->mutex);
171 }
172 
173 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
174 {
175 	int ret;
176 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
177 						rx_streaming_disable_work);
178 	struct wl1271 *wl = wlvif->wl;
179 
180 	mutex_lock(&wl->mutex);
181 
182 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
183 		goto out;
184 
185 	ret = wl1271_ps_elp_wakeup(wl);
186 	if (ret < 0)
187 		goto out;
188 
189 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
190 	if (ret)
191 		goto out_sleep;
192 
193 out_sleep:
194 	wl1271_ps_elp_sleep(wl);
195 out:
196 	mutex_unlock(&wl->mutex);
197 }
198 
199 static void wl1271_rx_streaming_timer(unsigned long data)
200 {
201 	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
202 	struct wl1271 *wl = wlvif->wl;
203 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
204 }
205 
206 /* wl->mutex must be taken */
207 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
208 {
209 	/* if the watchdog is not armed, don't do anything */
210 	if (wl->tx_allocated_blocks == 0)
211 		return;
212 
213 	cancel_delayed_work(&wl->tx_watchdog_work);
214 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
215 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
216 }
217 
218 static void wlcore_rc_update_work(struct work_struct *work)
219 {
220 	int ret;
221 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
222 						rc_update_work);
223 	struct wl1271 *wl = wlvif->wl;
224 
225 	mutex_lock(&wl->mutex);
226 
227 	if (unlikely(wl->state != WLCORE_STATE_ON))
228 		goto out;
229 
230 	ret = wl1271_ps_elp_wakeup(wl);
231 	if (ret < 0)
232 		goto out;
233 
234 	wlcore_hw_sta_rc_update(wl, wlvif);
235 
236 	wl1271_ps_elp_sleep(wl);
237 out:
238 	mutex_unlock(&wl->mutex);
239 }
240 
241 static void wl12xx_tx_watchdog_work(struct work_struct *work)
242 {
243 	struct delayed_work *dwork;
244 	struct wl1271 *wl;
245 
246 	dwork = to_delayed_work(work);
247 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
248 
249 	mutex_lock(&wl->mutex);
250 
251 	if (unlikely(wl->state != WLCORE_STATE_ON))
252 		goto out;
253 
254 	/* Tx went out in the meantime - everything is ok */
255 	if (unlikely(wl->tx_allocated_blocks == 0))
256 		goto out;
257 
258 	/*
259 	 * if a ROC is in progress, we might not have any Tx for a long
260 	 * time (e.g. pending Tx on the non-ROC channels)
261 	 */
262 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
263 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
264 			     wl->conf.tx.tx_watchdog_timeout);
265 		wl12xx_rearm_tx_watchdog_locked(wl);
266 		goto out;
267 	}
268 
269 	/*
270 	 * if a scan is in progress, we might not have any Tx for a long
271 	 * time
272 	 */
273 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
274 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
275 			     wl->conf.tx.tx_watchdog_timeout);
276 		wl12xx_rearm_tx_watchdog_locked(wl);
277 		goto out;
278 	}
279 
280 	/*
281 	* AP might cache a frame for a long time for a sleeping station,
282 	* so rearm the timer if there's an AP interface with stations. If
283 	* Tx is genuinely stuck, we will most likely discover it when all
284 	* stations are removed due to inactivity.
285 	*/
286 	if (wl->active_sta_count) {
287 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
288 			     "%d stations",
289 			      wl->conf.tx.tx_watchdog_timeout,
290 			      wl->active_sta_count);
291 		wl12xx_rearm_tx_watchdog_locked(wl);
292 		goto out;
293 	}
294 
295 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 		     wl->conf.tx.tx_watchdog_timeout);
297 	wl12xx_queue_recovery_work(wl);
298 
299 out:
300 	mutex_unlock(&wl->mutex);
301 }
302 
303 static void wlcore_adjust_conf(struct wl1271 *wl)
304 {
305 
306 	if (fwlog_param) {
307 		if (!strcmp(fwlog_param, "continuous")) {
308 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
310 		} else if (!strcmp(fwlog_param, "dbgpins")) {
311 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
312 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
313 		} else if (!strcmp(fwlog_param, "disable")) {
314 			wl->conf.fwlog.mem_blocks = 0;
315 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
316 		} else {
317 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
318 		}
319 	}
320 
321 	if (bug_on_recovery != -1)
322 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
323 
324 	if (no_recovery != -1)
325 		wl->conf.recovery.no_recovery = (u8) no_recovery;
326 }
327 
328 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
329 					struct wl12xx_vif *wlvif,
330 					u8 hlid, u8 tx_pkts)
331 {
332 	bool fw_ps;
333 
334 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
335 
336 	/*
337 	 * Wake up from high-level PS if the STA is asleep with too few
338 	 * packets in FW or if the STA is awake.
339 	 */
340 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
341 		wl12xx_ps_link_end(wl, wlvif, hlid);
342 
343 	/*
344 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
345 	 * Make an exception if this is the only connected link. In this
346 	 * case FW-memory congestion is less of a problem.
347 	 * Note that a single connected STA means 2*ap_count + 1 active links,
348 	 * since we must account for the global and broadcast AP links
349 	 * for each AP. The "fw_ps" check assures us the other link is a STA
350 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
351 	 */
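	/*
	 * For illustration: with a single AP role (ap_count = 1) and one
	 * connected STA, active_link_count is 3 (global + broadcast + the
	 * STA link), so the "> ap_count*2 + 1" check below only passes once
	 * an additional link is connected.
	 */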
352 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
353 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
354 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
355 }
356 
357 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
358 					   struct wl12xx_vif *wlvif,
359 					   struct wl_fw_status *status)
360 {
361 	unsigned long cur_fw_ps_map;
362 	u8 hlid;
363 
364 	cur_fw_ps_map = status->link_ps_bitmap;
365 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
366 		wl1271_debug(DEBUG_PSM,
367 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
368 			     wl->ap_fw_ps_map, cur_fw_ps_map,
369 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
370 
371 		wl->ap_fw_ps_map = cur_fw_ps_map;
372 	}
373 
374 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
375 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
376 					    wl->links[hlid].allocated_pkts);
377 }
378 
379 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
380 {
381 	struct wl12xx_vif *wlvif;
382 	struct timespec ts;
383 	u32 old_tx_blk_count = wl->tx_blocks_available;
384 	int avail, freed_blocks;
385 	int i;
386 	int ret;
387 	struct wl1271_link *lnk;
388 
389 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
390 				   wl->raw_fw_status,
391 				   wl->fw_status_len, false);
392 	if (ret < 0)
393 		return ret;
394 
395 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
396 
397 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
398 		     "drv_rx_counter = %d, tx_results_counter = %d)",
399 		     status->intr,
400 		     status->fw_rx_counter,
401 		     status->drv_rx_counter,
402 		     status->tx_results_counter);
403 
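	/*
	 * The freed-packets counters are handled as 8-bit values: deltas are
	 * taken modulo 256 via the "& 0xff" masks. For example, going from
	 * 0xfe to 0x03 yields (0x03 - 0xfe) & 0xff = 5 freed packets, which
	 * stays correct across the wrap. The same applies to the per-link
	 * loop below.
	 */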
404 	for (i = 0; i < NUM_TX_QUEUES; i++) {
405 		/* prevent wrap-around in freed-packets counter */
406 		wl->tx_allocated_pkts[i] -=
407 				(status->counters.tx_released_pkts[i] -
408 				wl->tx_pkts_freed[i]) & 0xff;
409 
410 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
411 	}
412 
413 
414 	for_each_set_bit(i, wl->links_map, wl->num_links) {
415 		u8 diff;
416 		lnk = &wl->links[i];
417 
418 		/* prevent wrap-around in freed-packets counter */
419 		diff = (status->counters.tx_lnk_free_pkts[i] -
420 		       lnk->prev_freed_pkts) & 0xff;
421 
422 		if (diff == 0)
423 			continue;
424 
425 		lnk->allocated_pkts -= diff;
426 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
427 
428 		/* accumulate the prev_freed_pkts counter */
429 		lnk->total_freed_pkts += diff;
430 	}
431 
432 	/* prevent wrap-around in total blocks counter */
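	/*
	 * e.g. tx_blocks_freed = 0xfffffff0 and total_released_blks = 0x10
	 * gives freed_blocks = 0x100000000 - 0xfffffff0 + 0x10 = 32, so the
	 * delta stays correct across a 32-bit wrap of the counter.
	 */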
433 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
434 		freed_blocks = status->total_released_blks -
435 			       wl->tx_blocks_freed;
436 	else
437 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
438 			       status->total_released_blks;
439 
440 	wl->tx_blocks_freed = status->total_released_blks;
441 
442 	wl->tx_allocated_blocks -= freed_blocks;
443 
444 	/*
445 	 * If the FW freed some blocks and we still have allocated blocks,
446 	 * re-arm the timer - Tx is not stuck. Otherwise, cancel the timer
447 	 * (no Tx is currently outstanding).
448 	 */
449 	if (freed_blocks) {
450 		if (wl->tx_allocated_blocks)
451 			wl12xx_rearm_tx_watchdog_locked(wl);
452 		else
453 			cancel_delayed_work(&wl->tx_watchdog_work);
454 	}
455 
456 	avail = status->tx_total - wl->tx_allocated_blocks;
457 
458 	/*
459 	 * The FW might change the total number of TX memblocks before
460 	 * we get a notification about blocks being released. Thus, the
461 	 * available blocks calculation might yield a temporary result
462 	 * which is lower than the actual available blocks. Keeping in
463 	 * mind that only blocks that were allocated can be moved from
464 	 * TX to RX, tx_blocks_available should never decrease here.
465 	 */
466 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
467 				      avail);
468 
469 	/* if more blocks are available now, tx work can be scheduled */
470 	if (wl->tx_blocks_available > old_tx_blk_count)
471 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
472 
473 	/* for AP update num of allocated TX blocks per link and ps status */
474 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
475 		wl12xx_irq_update_links_status(wl, wlvif, status);
476 	}
477 
478 	/* update the host-chipset time offset */
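	/*
	 * Note: the ">> 10" below is a cheap nanosecond-to-(roughly)
	 * microsecond conversion - it divides by 1024 rather than 1000.
	 */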
479 	getnstimeofday(&ts);
480 	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
481 		(s64)(status->fw_localtime);
482 
483 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
484 
485 	return 0;
486 }
487 
488 static void wl1271_flush_deferred_work(struct wl1271 *wl)
489 {
490 	struct sk_buff *skb;
491 
492 	/* Pass all received frames to the network stack */
493 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
494 		ieee80211_rx_ni(wl->hw, skb);
495 
496 	/* Return sent skbs to the network stack */
497 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
498 		ieee80211_tx_status_ni(wl->hw, skb);
499 }
500 
501 static void wl1271_netstack_work(struct work_struct *work)
502 {
503 	struct wl1271 *wl =
504 		container_of(work, struct wl1271, netstack_work);
505 
506 	do {
507 		wl1271_flush_deferred_work(wl);
508 	} while (skb_queue_len(&wl->deferred_rx_queue));
509 }
510 
511 #define WL1271_IRQ_MAX_LOOPS 256
512 
513 static int wlcore_irq_locked(struct wl1271 *wl)
514 {
515 	int ret = 0;
516 	u32 intr;
517 	int loopcount = WL1271_IRQ_MAX_LOOPS;
518 	bool done = false;
519 	unsigned int defer_count;
520 	unsigned long flags;
521 
522 	/*
523 	 * If an edge-triggered interrupt must be used, we cannot iterate
524 	 * more than once without introducing race conditions with the hardirq.
525 	 */
526 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
527 		loopcount = 1;
528 
529 	wl1271_debug(DEBUG_IRQ, "IRQ work");
530 
531 	if (unlikely(wl->state != WLCORE_STATE_ON))
532 		goto out;
533 
534 	ret = wl1271_ps_elp_wakeup(wl);
535 	if (ret < 0)
536 		goto out;
537 
538 	while (!done && loopcount--) {
539 		/*
540 		 * In order to avoid a race with the hardirq, clear the flag
541 		 * before acknowledging the chip. Since the mutex is held,
542 		 * wl1271_ps_elp_wakeup cannot be called concurrently.
543 		 */
544 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
545 		smp_mb__after_atomic();
546 
547 		ret = wlcore_fw_status(wl, wl->fw_status);
548 		if (ret < 0)
549 			goto out;
550 
551 		wlcore_hw_tx_immediate_compl(wl);
552 
553 		intr = wl->fw_status->intr;
554 		intr &= WLCORE_ALL_INTR_MASK;
555 		if (!intr) {
556 			done = true;
557 			continue;
558 		}
559 
560 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
561 			wl1271_error("HW watchdog interrupt received! starting recovery.");
562 			wl->watchdog_recovery = true;
563 			ret = -EIO;
564 
565 			/* restarting the chip. ignore any other interrupt. */
566 			goto out;
567 		}
568 
569 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
570 			wl1271_error("SW watchdog interrupt received! "
571 				     "starting recovery.");
572 			wl->watchdog_recovery = true;
573 			ret = -EIO;
574 
575 			/* restarting the chip. ignore any other interrupt. */
576 			goto out;
577 		}
578 
579 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
580 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
581 
582 			ret = wlcore_rx(wl, wl->fw_status);
583 			if (ret < 0)
584 				goto out;
585 
586 			/* Check if any tx blocks were freed */
587 			spin_lock_irqsave(&wl->wl_lock, flags);
588 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
589 			    wl1271_tx_total_queue_count(wl) > 0) {
590 				spin_unlock_irqrestore(&wl->wl_lock, flags);
591 				/*
592 				 * In order to avoid starvation of the TX path,
593 				 * call the work function directly.
594 				 */
595 				ret = wlcore_tx_work_locked(wl);
596 				if (ret < 0)
597 					goto out;
598 			} else {
599 				spin_unlock_irqrestore(&wl->wl_lock, flags);
600 			}
601 
602 			/* check for tx results */
603 			ret = wlcore_hw_tx_delayed_compl(wl);
604 			if (ret < 0)
605 				goto out;
606 
607 			/* Make sure the deferred queues don't get too long */
608 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
609 				      skb_queue_len(&wl->deferred_rx_queue);
610 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
611 				wl1271_flush_deferred_work(wl);
612 		}
613 
614 		if (intr & WL1271_ACX_INTR_EVENT_A) {
615 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
616 			ret = wl1271_event_handle(wl, 0);
617 			if (ret < 0)
618 				goto out;
619 		}
620 
621 		if (intr & WL1271_ACX_INTR_EVENT_B) {
622 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
623 			ret = wl1271_event_handle(wl, 1);
624 			if (ret < 0)
625 				goto out;
626 		}
627 
628 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
629 			wl1271_debug(DEBUG_IRQ,
630 				     "WL1271_ACX_INTR_INIT_COMPLETE");
631 
632 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
633 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
634 	}
635 
636 	wl1271_ps_elp_sleep(wl);
637 
638 out:
639 	return ret;
640 }
641 
642 static irqreturn_t wlcore_irq(int irq, void *cookie)
643 {
644 	int ret;
645 	unsigned long flags;
646 	struct wl1271 *wl = cookie;
647 
648 	/* complete any pending ELP wakeup completion */
649 	spin_lock_irqsave(&wl->wl_lock, flags);
650 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
651 	if (wl->elp_compl) {
652 		complete(wl->elp_compl);
653 		wl->elp_compl = NULL;
654 	}
655 
656 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
657 		/* don't enqueue work right now; mark it as pending */
658 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
659 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
660 		disable_irq_nosync(wl->irq);
661 		pm_wakeup_event(wl->dev, 0);
662 		spin_unlock_irqrestore(&wl->wl_lock, flags);
663 		return IRQ_HANDLED;
664 	}
665 	spin_unlock_irqrestore(&wl->wl_lock, flags);
666 
667 	/* TX might be handled here, avoid redundant work */
668 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
669 	cancel_work_sync(&wl->tx_work);
670 
671 	mutex_lock(&wl->mutex);
672 
673 	ret = wlcore_irq_locked(wl);
674 	if (ret)
675 		wl12xx_queue_recovery_work(wl);
676 
677 	spin_lock_irqsave(&wl->wl_lock, flags);
678 	/* In case TX was not handled here, queue TX work */
679 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
680 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
681 	    wl1271_tx_total_queue_count(wl) > 0)
682 		ieee80211_queue_work(wl->hw, &wl->tx_work);
683 	spin_unlock_irqrestore(&wl->wl_lock, flags);
684 
685 	mutex_unlock(&wl->mutex);
686 
687 	return IRQ_HANDLED;
688 }
689 
690 struct vif_counter_data {
691 	u8 counter;
692 
693 	struct ieee80211_vif *cur_vif;
694 	bool cur_vif_running;
695 };
696 
697 static void wl12xx_vif_count_iter(void *data, u8 *mac,
698 				  struct ieee80211_vif *vif)
699 {
700 	struct vif_counter_data *counter = data;
701 
702 	counter->counter++;
703 	if (counter->cur_vif == vif)
704 		counter->cur_vif_running = true;
705 }
706 
707 /* caller must not hold wl->mutex, as it might deadlock */
708 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
709 			       struct ieee80211_vif *cur_vif,
710 			       struct vif_counter_data *data)
711 {
712 	memset(data, 0, sizeof(*data));
713 	data->cur_vif = cur_vif;
714 
715 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
716 					    wl12xx_vif_count_iter, data);
717 }
718 
719 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
720 {
721 	const struct firmware *fw;
722 	const char *fw_name;
723 	enum wl12xx_fw_type fw_type;
724 	int ret;
725 
726 	if (plt) {
727 		fw_type = WL12XX_FW_TYPE_PLT;
728 		fw_name = wl->plt_fw_name;
729 	} else {
730 		/*
731 		 * we can't call wl12xx_get_vif_count() here because
732 		 * wl->mutex is taken, so use the cached last_vif_count value
733 		 */
734 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
735 			fw_type = WL12XX_FW_TYPE_MULTI;
736 			fw_name = wl->mr_fw_name;
737 		} else {
738 			fw_type = WL12XX_FW_TYPE_NORMAL;
739 			fw_name = wl->sr_fw_name;
740 		}
741 	}
742 
743 	if (wl->fw_type == fw_type)
744 		return 0;
745 
746 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
747 
748 	ret = request_firmware(&fw, fw_name, wl->dev);
749 
750 	if (ret < 0) {
751 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
752 		return ret;
753 	}
754 
755 	if (fw->size % 4) {
756 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
757 			     fw->size);
758 		ret = -EILSEQ;
759 		goto out;
760 	}
761 
762 	vfree(wl->fw);
763 	wl->fw_type = WL12XX_FW_TYPE_NONE;
764 	wl->fw_len = fw->size;
765 	wl->fw = vmalloc(wl->fw_len);
766 
767 	if (!wl->fw) {
768 		wl1271_error("could not allocate memory for the firmware");
769 		ret = -ENOMEM;
770 		goto out;
771 	}
772 
773 	memcpy(wl->fw, fw->data, wl->fw_len);
774 	ret = 0;
775 	wl->fw_type = fw_type;
776 out:
777 	release_firmware(fw);
778 
779 	return ret;
780 }
781 
782 void wl12xx_queue_recovery_work(struct wl1271 *wl)
783 {
784 	/* Avoid a recursive recovery */
785 	if (wl->state == WLCORE_STATE_ON) {
786 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
787 				  &wl->flags));
788 
789 		wl->state = WLCORE_STATE_RESTARTING;
790 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
791 		wl1271_ps_elp_wakeup(wl);
792 		wlcore_disable_interrupts_nosync(wl);
793 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
794 	}
795 }
796 
797 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
798 {
799 	size_t len;
800 
801 	/* Make sure we have enough room */
802 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
803 
804 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
805 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
806 	wl->fwlog_size += len;
807 
808 	return len;
809 }
810 
811 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
812 {
813 	u32 end_of_log = 0;
814 
815 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
816 		return;
817 
818 	wl1271_info("Reading FW panic log");
819 
820 	/*
821 	 * Make sure the chip is awake and the logger isn't active.
822 	 * Do not send a stop fwlog command if the fw is hung or if
823 	 * dbgpins are used (due to some fw bug).
824 	 */
825 	if (wl1271_ps_elp_wakeup(wl))
826 		return;
827 	if (!wl->watchdog_recovery &&
828 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
829 		wl12xx_cmd_stop_fwlog(wl);
830 
831 	/* Traverse the memory blocks linked list */
832 	do {
833 		end_of_log = wlcore_event_fw_logger(wl);
834 		if (end_of_log == 0) {
835 			msleep(100);
836 			end_of_log = wlcore_event_fw_logger(wl);
837 		}
838 	} while (end_of_log != 0);
839 }
840 
841 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
842 				   u8 hlid, struct ieee80211_sta *sta)
843 {
844 	struct wl1271_station *wl_sta;
845 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
846 
847 	wl_sta = (void *)sta->drv_priv;
848 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
849 
850 	/*
851 	 * increment the initial seq number on recovery to account for
852 	 * transmitted packets that haven't yet shown up in the FW status
853 	 */
854 	if (wlvif->encryption_type == KEY_GEM)
855 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
856 
857 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
858 		wl_sta->total_freed_pkts += sqn_recovery_padding;
859 }
860 
861 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
862 					struct wl12xx_vif *wlvif,
863 					u8 hlid, const u8 *addr)
864 {
865 	struct ieee80211_sta *sta;
866 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
867 
868 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
869 		    is_zero_ether_addr(addr)))
870 		return;
871 
872 	rcu_read_lock();
873 	sta = ieee80211_find_sta(vif, addr);
874 	if (sta)
875 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
876 	rcu_read_unlock();
877 }
878 
879 static void wlcore_print_recovery(struct wl1271 *wl)
880 {
881 	u32 pc = 0;
882 	u32 hint_sts = 0;
883 	int ret;
884 
885 	wl1271_info("Hardware recovery in progress. FW ver: %s",
886 		    wl->chip.fw_ver_str);
887 
888 	/* change partitions momentarily so we can read the FW pc */
889 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
890 	if (ret < 0)
891 		return;
892 
893 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
894 	if (ret < 0)
895 		return;
896 
897 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
898 	if (ret < 0)
899 		return;
900 
901 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
902 				pc, hint_sts, ++wl->recovery_count);
903 
904 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
905 }
906 
907 
908 static void wl1271_recovery_work(struct work_struct *work)
909 {
910 	struct wl1271 *wl =
911 		container_of(work, struct wl1271, recovery_work);
912 	struct wl12xx_vif *wlvif;
913 	struct ieee80211_vif *vif;
914 
915 	mutex_lock(&wl->mutex);
916 
917 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
918 		goto out_unlock;
919 
920 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
921 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
922 			wl12xx_read_fwlog_panic(wl);
923 		wlcore_print_recovery(wl);
924 	}
925 
926 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
927 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
928 
929 	if (wl->conf.recovery.no_recovery) {
930 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
931 		goto out_unlock;
932 	}
933 
934 	/* Prevent spurious TX during FW restart */
935 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
936 
937 	/* reboot the chipset */
938 	while (!list_empty(&wl->wlvif_list)) {
939 		wlvif = list_first_entry(&wl->wlvif_list,
940 				       struct wl12xx_vif, list);
941 		vif = wl12xx_wlvif_to_vif(wlvif);
942 
943 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
944 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
945 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
946 						    vif->bss_conf.bssid);
947 		}
948 
949 		__wl1271_op_remove_interface(wl, vif, false);
950 	}
951 
952 	wlcore_op_stop_locked(wl);
953 
954 	ieee80211_restart_hw(wl->hw);
955 
956 	/*
957 	 * It's safe to enable TX now - the queues are stopped after a request
958 	 * to restart the HW.
959 	 */
960 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
961 
962 out_unlock:
963 	wl->watchdog_recovery = false;
964 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
965 	mutex_unlock(&wl->mutex);
966 }
967 
968 static int wlcore_fw_wakeup(struct wl1271 *wl)
969 {
970 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
971 }
972 
973 static int wl1271_setup(struct wl1271 *wl)
974 {
975 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
976 	if (!wl->raw_fw_status)
977 		goto err;
978 
979 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
980 	if (!wl->fw_status)
981 		goto err;
982 
983 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
984 	if (!wl->tx_res_if)
985 		goto err;
986 
987 	return 0;
988 err:
989 	kfree(wl->fw_status);
990 	kfree(wl->raw_fw_status);
991 	return -ENOMEM;
992 }
993 
994 static int wl12xx_set_power_on(struct wl1271 *wl)
995 {
996 	int ret;
997 
998 	msleep(WL1271_PRE_POWER_ON_SLEEP);
999 	ret = wl1271_power_on(wl);
1000 	if (ret < 0)
1001 		goto out;
1002 	msleep(WL1271_POWER_ON_SLEEP);
1003 	wl1271_io_reset(wl);
1004 	wl1271_io_init(wl);
1005 
1006 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1007 	if (ret < 0)
1008 		goto fail;
1009 
1010 	/* ELP module wake up */
1011 	ret = wlcore_fw_wakeup(wl);
1012 	if (ret < 0)
1013 		goto fail;
1014 
1015 out:
1016 	return ret;
1017 
1018 fail:
1019 	wl1271_power_off(wl);
1020 	return ret;
1021 }
1022 
1023 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1024 {
1025 	int ret = 0;
1026 
1027 	ret = wl12xx_set_power_on(wl);
1028 	if (ret < 0)
1029 		goto out;
1030 
1031 	/*
1032 	 * For wl127x based devices we could use the default block
1033 	 * size (512 bytes), but due to a bug in the sdio driver, we
1034 	 * need to set it explicitly after the chip is powered on.  To
1035 	 * simplify the code and since the performance impact is
1036 	 * negligible, we use the same block size for all different
1037 	 * chip types.
1038 	 *
1039 	 * Check if the bus supports blocksize alignment and, if it
1040 	 * doesn't, make sure we don't have the quirk.
1041 	 */
1042 	if (!wl1271_set_block_size(wl))
1043 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1044 
1045 	/* TODO: make sure the lower driver has set things up correctly */
1046 
1047 	ret = wl1271_setup(wl);
1048 	if (ret < 0)
1049 		goto out;
1050 
1051 	ret = wl12xx_fetch_firmware(wl, plt);
1052 	if (ret < 0)
1053 		goto out;
1054 
1055 out:
1056 	return ret;
1057 }
1058 
1059 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1060 {
1061 	int retries = WL1271_BOOT_RETRIES;
1062 	struct wiphy *wiphy = wl->hw->wiphy;
1063 
1064 	static const char* const PLT_MODE[] = {
1065 		"PLT_OFF",
1066 		"PLT_ON",
1067 		"PLT_FEM_DETECT",
1068 		"PLT_CHIP_AWAKE"
1069 	};
1070 
1071 	int ret;
1072 
1073 	mutex_lock(&wl->mutex);
1074 
1075 	wl1271_notice("power up");
1076 
1077 	if (wl->state != WLCORE_STATE_OFF) {
1078 		wl1271_error("cannot go into PLT state because not "
1079 			     "in off state: %d", wl->state);
1080 		ret = -EBUSY;
1081 		goto out;
1082 	}
1083 
1084 	/* Indicate to lower levels that we are now in PLT mode */
1085 	wl->plt = true;
1086 	wl->plt_mode = plt_mode;
1087 
1088 	while (retries) {
1089 		retries--;
1090 		ret = wl12xx_chip_wakeup(wl, true);
1091 		if (ret < 0)
1092 			goto power_off;
1093 
1094 		if (plt_mode != PLT_CHIP_AWAKE) {
1095 			ret = wl->ops->plt_init(wl);
1096 			if (ret < 0)
1097 				goto power_off;
1098 		}
1099 
1100 		wl->state = WLCORE_STATE_ON;
1101 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1102 			      PLT_MODE[plt_mode],
1103 			      wl->chip.fw_ver_str);
1104 
1105 		/* update hw/fw version info in wiphy struct */
1106 		wiphy->hw_version = wl->chip.id;
1107 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1108 			sizeof(wiphy->fw_version));
1109 
1110 		goto out;
1111 
1112 power_off:
1113 		wl1271_power_off(wl);
1114 	}
1115 
1116 	wl->plt = false;
1117 	wl->plt_mode = PLT_OFF;
1118 
1119 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1120 		     WL1271_BOOT_RETRIES);
1121 out:
1122 	mutex_unlock(&wl->mutex);
1123 
1124 	return ret;
1125 }
1126 
1127 int wl1271_plt_stop(struct wl1271 *wl)
1128 {
1129 	int ret = 0;
1130 
1131 	wl1271_notice("power down");
1132 
1133 	/*
1134 	 * Interrupts must be disabled before setting the state to OFF.
1135 	 * Otherwise, the interrupt handler might be called and exit without
1136 	 * reading the interrupt status.
1137 	 */
1138 	wlcore_disable_interrupts(wl);
1139 	mutex_lock(&wl->mutex);
1140 	if (!wl->plt) {
1141 		mutex_unlock(&wl->mutex);
1142 
1143 		/*
1144 		 * This will not necessarily enable interrupts as interrupts
1145 		 * may have been disabled when op_stop was called. It will,
1146 		 * however, balance the above call to disable_interrupts().
1147 		 */
1148 		wlcore_enable_interrupts(wl);
1149 
1150 		wl1271_error("cannot power down because not in PLT "
1151 			     "state: %d", wl->state);
1152 		ret = -EBUSY;
1153 		goto out;
1154 	}
1155 
1156 	mutex_unlock(&wl->mutex);
1157 
1158 	wl1271_flush_deferred_work(wl);
1159 	cancel_work_sync(&wl->netstack_work);
1160 	cancel_work_sync(&wl->recovery_work);
1161 	cancel_delayed_work_sync(&wl->elp_work);
1162 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1163 
1164 	mutex_lock(&wl->mutex);
1165 	wl1271_power_off(wl);
1166 	wl->flags = 0;
1167 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1168 	wl->state = WLCORE_STATE_OFF;
1169 	wl->plt = false;
1170 	wl->plt_mode = PLT_OFF;
1171 	wl->rx_counter = 0;
1172 	mutex_unlock(&wl->mutex);
1173 
1174 out:
1175 	return ret;
1176 }
1177 
1178 static void wl1271_op_tx(struct ieee80211_hw *hw,
1179 			 struct ieee80211_tx_control *control,
1180 			 struct sk_buff *skb)
1181 {
1182 	struct wl1271 *wl = hw->priv;
1183 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1184 	struct ieee80211_vif *vif = info->control.vif;
1185 	struct wl12xx_vif *wlvif = NULL;
1186 	unsigned long flags;
1187 	int q, mapping;
1188 	u8 hlid;
1189 
1190 	if (!vif) {
1191 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1192 		ieee80211_free_txskb(hw, skb);
1193 		return;
1194 	}
1195 
1196 	wlvif = wl12xx_vif_to_data(vif);
1197 	mapping = skb_get_queue_mapping(skb);
1198 	q = wl1271_tx_get_queue(mapping);
1199 
1200 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1201 
1202 	spin_lock_irqsave(&wl->wl_lock, flags);
1203 
1204 	/*
1205 	 * drop the packet if the link is invalid or the queue is stopped
1206 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1207 	 * allow these packets through.
1208 	 */
1209 	if (hlid == WL12XX_INVALID_LINK_ID ||
1210 	    (!test_bit(hlid, wlvif->links_map)) ||
1211 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1212 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1213 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1214 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1215 		ieee80211_free_txskb(hw, skb);
1216 		goto out;
1217 	}
1218 
1219 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1220 		     hlid, q, skb->len);
1221 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1222 
1223 	wl->tx_queue_count[q]++;
1224 	wlvif->tx_queue_count[q]++;
1225 
1226 	/*
1227 	 * The workqueue is slow to process the tx_queue, so we need to stop
1228 	 * the queue here, otherwise the queue will get too long.
1229 	 */
1230 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1231 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1232 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1233 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1234 		wlcore_stop_queue_locked(wl, wlvif, q,
1235 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1236 	}
1237 
1238 	/*
1239 	 * The chip specific setup must run before the first TX packet -
1240 	 * before that, the tx_work will not be initialized!
1241 	 */
1242 
1243 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1244 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1245 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1246 
1247 out:
1248 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1249 }
1250 
1251 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1252 {
1253 	unsigned long flags;
1254 	int q;
1255 
1256 	/* no need to queue a new dummy packet if one is already pending */
1257 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1258 		return 0;
1259 
1260 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1261 
1262 	spin_lock_irqsave(&wl->wl_lock, flags);
1263 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1264 	wl->tx_queue_count[q]++;
1265 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1266 
1267 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1268 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1269 		return wlcore_tx_work_locked(wl);
1270 
1271 	/*
1272 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1273 	 * interrupt handler function
1274 	 */
1275 	return 0;
1276 }
1277 
1278 /*
1279  * The size of the dummy packet should be at least 1400 bytes. However, in
1280  * order to minimize the number of bus transactions, aligning it to 512-byte
1281  * boundaries can be beneficial, performance-wise.
1282  */
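/* ALIGN(1400, 512) rounds up to 1536 bytes, i.e. three 512-byte chunks */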
1283 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1284 
1285 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1286 {
1287 	struct sk_buff *skb;
1288 	struct ieee80211_hdr_3addr *hdr;
1289 	unsigned int dummy_packet_size;
1290 
1291 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1292 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1293 
1294 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1295 	if (!skb) {
1296 		wl1271_warning("Failed to allocate a dummy packet skb");
1297 		return NULL;
1298 	}
1299 
1300 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1301 
1302 	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1303 	memset(hdr, 0, sizeof(*hdr));
1304 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1305 					 IEEE80211_STYPE_NULLFUNC |
1306 					 IEEE80211_FCTL_TODS);
1307 
1308 	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1309 
1310 	/* Dummy packets require the TID to be management */
1311 	skb->priority = WL1271_TID_MGMT;
1312 
1313 	/* Initialize all fields that might be used */
1314 	skb_set_queue_mapping(skb, 0);
1315 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1316 
1317 	return skb;
1318 }
1319 
1320 
1321 #ifdef CONFIG_PM
1322 static int
1323 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1324 {
1325 	int num_fields = 0, in_field = 0, fields_size = 0;
1326 	int i, pattern_len = 0;
1327 
1328 	if (!p->mask) {
1329 		wl1271_warning("No mask in WoWLAN pattern");
1330 		return -EINVAL;
1331 	}
1332 
1333 	/*
1334 	 * The pattern is broken up into segments of bytes at different offsets
1335 	 * that need to be checked by the FW filter. Each segment is called
1336 	 * a field in the FW API. We verify that the total number of fields
1337 	 * required for this pattern won't exceed the FW limit (8) and that
1338 	 * the total size of the fields buffer won't exceed the FW limit either.
1339 	 * Note that if there's a pattern which crosses Ethernet/IP header
1340 	 * boundary a new field is required.
1341 	 */
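	/*
	 * For illustration (assuming WL1271_RX_FILTER_ETH_HEADER_SIZE covers
	 * the 14-byte Ethernet header): a mask with bits set at offsets 0-5
	 * and 16-19 produces two fields, one in the Ethernet header and one
	 * in the IP header. A run of set bits that crosses offset 14 is
	 * likewise split into two fields.
	 */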
1342 	for (i = 0; i < p->pattern_len; i++) {
1343 		if (test_bit(i, (unsigned long *)p->mask)) {
1344 			if (!in_field) {
1345 				in_field = 1;
1346 				pattern_len = 1;
1347 			} else {
1348 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1349 					num_fields++;
1350 					fields_size += pattern_len +
1351 						RX_FILTER_FIELD_OVERHEAD;
1352 					pattern_len = 1;
1353 				} else
1354 					pattern_len++;
1355 			}
1356 		} else {
1357 			if (in_field) {
1358 				in_field = 0;
1359 				fields_size += pattern_len +
1360 					RX_FILTER_FIELD_OVERHEAD;
1361 				num_fields++;
1362 			}
1363 		}
1364 	}
1365 
1366 	if (in_field) {
1367 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1368 		num_fields++;
1369 	}
1370 
1371 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1372 		wl1271_warning("RX Filter too complex. Too many segments");
1373 		return -EINVAL;
1374 	}
1375 
1376 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1377 		wl1271_warning("RX filter pattern is too big");
1378 		return -E2BIG;
1379 	}
1380 
1381 	return 0;
1382 }
1383 
1384 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1385 {
1386 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1387 }
1388 
1389 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1390 {
1391 	int i;
1392 
1393 	if (filter == NULL)
1394 		return;
1395 
1396 	for (i = 0; i < filter->num_fields; i++)
1397 		kfree(filter->fields[i].pattern);
1398 
1399 	kfree(filter);
1400 }
1401 
1402 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1403 				 u16 offset, u8 flags,
1404 				 const u8 *pattern, u8 len)
1405 {
1406 	struct wl12xx_rx_filter_field *field;
1407 
1408 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1409 		wl1271_warning("Max fields per RX filter reached; can't alloc another");
1410 		return -EINVAL;
1411 	}
1412 
1413 	field = &filter->fields[filter->num_fields];
1414 
1415 	field->pattern = kzalloc(len, GFP_KERNEL);
1416 	if (!field->pattern) {
1417 		wl1271_warning("Failed to allocate RX filter pattern");
1418 		return -ENOMEM;
1419 	}
1420 
1421 	filter->num_fields++;
1422 
1423 	field->offset = cpu_to_le16(offset);
1424 	field->flags = flags;
1425 	field->len = len;
1426 	memcpy(field->pattern, pattern, len);
1427 
1428 	return 0;
1429 }
1430 
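/*
 * In the flattened (FW) representation each field occupies
 * sizeof(struct wl12xx_rx_filter_field) - sizeof(u8 *) bytes of header
 * followed by the pattern bytes, which are copied in where the pattern
 * pointer would otherwise live - hence the "- sizeof(u8 *)" below and in
 * wl1271_rx_filter_flatten_fields().
 */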
1431 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1432 {
1433 	int i, fields_size = 0;
1434 
1435 	for (i = 0; i < filter->num_fields; i++)
1436 		fields_size += filter->fields[i].len +
1437 			sizeof(struct wl12xx_rx_filter_field) -
1438 			sizeof(u8 *);
1439 
1440 	return fields_size;
1441 }
1442 
1443 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1444 				    u8 *buf)
1445 {
1446 	int i;
1447 	struct wl12xx_rx_filter_field *field;
1448 
1449 	for (i = 0; i < filter->num_fields; i++) {
1450 		field = (struct wl12xx_rx_filter_field *)buf;
1451 
1452 		field->offset = filter->fields[i].offset;
1453 		field->flags = filter->fields[i].flags;
1454 		field->len = filter->fields[i].len;
1455 
1456 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1457 		buf += sizeof(struct wl12xx_rx_filter_field) -
1458 			sizeof(u8 *) + field->len;
1459 	}
1460 }
1461 
1462 /*
1463  * Allocates an RX filter, returned through *f,
1464  * which must be freed using wl1271_rx_filter_free()
1465  */
1466 static int
1467 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1468 					   struct wl12xx_rx_filter **f)
1469 {
1470 	int i, j, ret = 0;
1471 	struct wl12xx_rx_filter *filter;
1472 	u16 offset;
1473 	u8 flags, len;
1474 
1475 	filter = wl1271_rx_filter_alloc();
1476 	if (!filter) {
1477 		wl1271_warning("Failed to alloc rx filter");
1478 		ret = -ENOMEM;
1479 		goto err;
1480 	}
1481 
1482 	i = 0;
1483 	while (i < p->pattern_len) {
1484 		if (!test_bit(i, (unsigned long *)p->mask)) {
1485 			i++;
1486 			continue;
1487 		}
1488 
1489 		for (j = i; j < p->pattern_len; j++) {
1490 			if (!test_bit(j, (unsigned long *)p->mask))
1491 				break;
1492 
1493 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1494 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1495 				break;
1496 		}
1497 
1498 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1499 			offset = i;
1500 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1501 		} else {
1502 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1503 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1504 		}
1505 
1506 		len = j - i;
1507 
1508 		ret = wl1271_rx_filter_alloc_field(filter,
1509 						   offset,
1510 						   flags,
1511 						   &p->pattern[i], len);
1512 		if (ret)
1513 			goto err;
1514 
1515 		i = j;
1516 	}
1517 
1518 	filter->action = FILTER_SIGNAL;
1519 
1520 	*f = filter;
1521 	return 0;
1522 
1523 err:
1524 	wl1271_rx_filter_free(filter);
1525 	*f = NULL;
1526 
1527 	return ret;
1528 }
1529 
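/*
 * Roughly, the flow below: with no (or "any") WoWLAN configuration the
 * default RX filter is disabled and all per-pattern filters are cleared;
 * otherwise each pattern is translated into an RX filter that signals the
 * host, and the default action is set to drop everything else.
 */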
1530 static int wl1271_configure_wowlan(struct wl1271 *wl,
1531 				   struct cfg80211_wowlan *wow)
1532 {
1533 	int i, ret;
1534 
1535 	if (!wow || wow->any || !wow->n_patterns) {
1536 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1537 							  FILTER_SIGNAL);
1538 		if (ret)
1539 			goto out;
1540 
1541 		ret = wl1271_rx_filter_clear_all(wl);
1542 		if (ret)
1543 			goto out;
1544 
1545 		return 0;
1546 	}
1547 
1548 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1549 		return -EINVAL;
1550 
1551 	/* Validate all incoming patterns before clearing current FW state */
1552 	for (i = 0; i < wow->n_patterns; i++) {
1553 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1554 		if (ret) {
1555 			wl1271_warning("Bad wowlan pattern %d", i);
1556 			return ret;
1557 		}
1558 	}
1559 
1560 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1561 	if (ret)
1562 		goto out;
1563 
1564 	ret = wl1271_rx_filter_clear_all(wl);
1565 	if (ret)
1566 		goto out;
1567 
1568 	/* Translate WoWLAN patterns into filters */
1569 	for (i = 0; i < wow->n_patterns; i++) {
1570 		struct cfg80211_pkt_pattern *p;
1571 		struct wl12xx_rx_filter *filter = NULL;
1572 
1573 		p = &wow->patterns[i];
1574 
1575 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1576 		if (ret) {
1577 			wl1271_warning("Failed to create an RX filter from "
1578 				       "wowlan pattern %d", i);
1579 			goto out;
1580 		}
1581 
1582 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1583 
1584 		wl1271_rx_filter_free(filter);
1585 		if (ret)
1586 			goto out;
1587 	}
1588 
1589 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1590 
1591 out:
1592 	return ret;
1593 }
1594 
1595 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1596 					struct wl12xx_vif *wlvif,
1597 					struct cfg80211_wowlan *wow)
1598 {
1599 	int ret = 0;
1600 
1601 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1602 		goto out;
1603 
1604 	ret = wl1271_configure_wowlan(wl, wow);
1605 	if (ret < 0)
1606 		goto out;
1607 
1608 	if ((wl->conf.conn.suspend_wake_up_event ==
1609 	     wl->conf.conn.wake_up_event) &&
1610 	    (wl->conf.conn.suspend_listen_interval ==
1611 	     wl->conf.conn.listen_interval))
1612 		goto out;
1613 
1614 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1615 				    wl->conf.conn.suspend_wake_up_event,
1616 				    wl->conf.conn.suspend_listen_interval);
1617 
1618 	if (ret < 0)
1619 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1620 out:
1621 	return ret;
1622 
1623 }
1624 
1625 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1626 					struct wl12xx_vif *wlvif,
1627 					struct cfg80211_wowlan *wow)
1628 {
1629 	int ret = 0;
1630 
1631 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1632 		goto out;
1633 
1634 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1635 	if (ret < 0)
1636 		goto out;
1637 
1638 	ret = wl1271_configure_wowlan(wl, wow);
1639 	if (ret < 0)
1640 		goto out;
1641 
1642 out:
1643 	return ret;
1644 
1645 }
1646 
1647 static int wl1271_configure_suspend(struct wl1271 *wl,
1648 				    struct wl12xx_vif *wlvif,
1649 				    struct cfg80211_wowlan *wow)
1650 {
1651 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1652 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1653 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1654 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1655 	return 0;
1656 }
1657 
1658 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1659 {
1660 	int ret = 0;
1661 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1662 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1663 
1664 	if ((!is_ap) && (!is_sta))
1665 		return;
1666 
1667 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1668 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1669 		return;
1670 
1671 	wl1271_configure_wowlan(wl, NULL);
1672 
1673 	if (is_sta) {
1674 		if ((wl->conf.conn.suspend_wake_up_event ==
1675 		     wl->conf.conn.wake_up_event) &&
1676 		    (wl->conf.conn.suspend_listen_interval ==
1677 		     wl->conf.conn.listen_interval))
1678 			return;
1679 
1680 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1681 				    wl->conf.conn.wake_up_event,
1682 				    wl->conf.conn.listen_interval);
1683 
1684 		if (ret < 0)
1685 			wl1271_error("resume: wake up conditions failed: %d",
1686 				     ret);
1687 
1688 	} else if (is_ap) {
1689 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1690 	}
1691 }
1692 
1693 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1694 			    struct cfg80211_wowlan *wow)
1695 {
1696 	struct wl1271 *wl = hw->priv;
1697 	struct wl12xx_vif *wlvif;
1698 	int ret;
1699 
1700 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1701 	WARN_ON(!wow);
1702 
1703 	/* we want to perform the recovery before suspending */
1704 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1705 		wl1271_warning("postponing suspend to perform recovery");
1706 		return -EBUSY;
1707 	}
1708 
1709 	wl1271_tx_flush(wl);
1710 
1711 	mutex_lock(&wl->mutex);
1712 
1713 	ret = wl1271_ps_elp_wakeup(wl);
1714 	if (ret < 0) {
1715 		mutex_unlock(&wl->mutex);
1716 		return ret;
1717 	}
1718 
1719 	wl->wow_enabled = true;
1720 	wl12xx_for_each_wlvif(wl, wlvif) {
1721 		if (wlcore_is_p2p_mgmt(wlvif))
1722 			continue;
1723 
1724 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1725 		if (ret < 0) {
1726 			mutex_unlock(&wl->mutex);
1727 			wl1271_warning("couldn't prepare device to suspend");
1728 			return ret;
1729 		}
1730 	}
1731 
1732 	/* disable fast link flow control notifications from FW */
1733 	ret = wlcore_hw_interrupt_notify(wl, false);
1734 	if (ret < 0)
1735 		goto out_sleep;
1736 
1737 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1738 	ret = wlcore_hw_rx_ba_filter(wl,
1739 				     !!wl->conf.conn.suspend_rx_ba_activity);
1740 	if (ret < 0)
1741 		goto out_sleep;
1742 
1743 out_sleep:
1744 	wl1271_ps_elp_sleep(wl);
1745 	mutex_unlock(&wl->mutex);
1746 
1747 	if (ret < 0) {
1748 		wl1271_warning("couldn't prepare device to suspend");
1749 		return ret;
1750 	}
1751 
1752 	/* flush any remaining work */
1753 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1754 
1755 	/*
1756 	 * disable and re-enable interrupts in order to flush
1757 	 * the threaded_irq
1758 	 */
1759 	wlcore_disable_interrupts(wl);
1760 
1761 	/*
1762 	 * set suspended flag to avoid triggering a new threaded_irq
1763 	 * work. no need for spinlock as interrupts are disabled.
1764 	 */
1765 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1766 
1767 	wlcore_enable_interrupts(wl);
1768 	flush_work(&wl->tx_work);
1769 	flush_delayed_work(&wl->elp_work);
1770 
1771 	/*
1772 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1773 	 * it on resume anyway.
1774 	 */
1775 	cancel_delayed_work(&wl->tx_watchdog_work);
1776 
1777 	return 0;
1778 }
1779 
1780 static int wl1271_op_resume(struct ieee80211_hw *hw)
1781 {
1782 	struct wl1271 *wl = hw->priv;
1783 	struct wl12xx_vif *wlvif;
1784 	unsigned long flags;
1785 	bool run_irq_work = false, pending_recovery;
1786 	int ret;
1787 
1788 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1789 		     wl->wow_enabled);
1790 	WARN_ON(!wl->wow_enabled);
1791 
1792 	/*
1793 	 * re-enable irq_work enqueuing, and call irq_work directly if
1794 	 * there is pending work.
1795 	 */
1796 	spin_lock_irqsave(&wl->wl_lock, flags);
1797 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1798 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1799 		run_irq_work = true;
1800 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1801 
1802 	mutex_lock(&wl->mutex);
1803 
1804 	/* test the recovery flag before calling any SDIO functions */
1805 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1806 				    &wl->flags);
1807 
1808 	if (run_irq_work) {
1809 		wl1271_debug(DEBUG_MAC80211,
1810 			     "run postponed irq_work directly");
1811 
1812 		/* don't talk to the HW if recovery is pending */
1813 		if (!pending_recovery) {
1814 			ret = wlcore_irq_locked(wl);
1815 			if (ret)
1816 				wl12xx_queue_recovery_work(wl);
1817 		}
1818 
1819 		wlcore_enable_interrupts(wl);
1820 	}
1821 
1822 	if (pending_recovery) {
1823 		wl1271_warning("queuing forgotten recovery on resume");
1824 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1825 		goto out_sleep;
1826 	}
1827 
1828 	ret = wl1271_ps_elp_wakeup(wl);
1829 	if (ret < 0)
1830 		goto out;
1831 
1832 	wl12xx_for_each_wlvif(wl, wlvif) {
1833 		if (wlcore_is_p2p_mgmt(wlvif))
1834 			continue;
1835 
1836 		wl1271_configure_resume(wl, wlvif);
1837 	}
1838 
1839 	ret = wlcore_hw_interrupt_notify(wl, true);
1840 	if (ret < 0)
1841 		goto out_sleep;
1842 
1843 	/* disable the RX BA filtering that may have been enabled on suspend */
1844 	ret = wlcore_hw_rx_ba_filter(wl, false);
1845 	if (ret < 0)
1846 		goto out_sleep;
1847 
1848 out_sleep:
1849 	wl1271_ps_elp_sleep(wl);
1850 
1851 out:
1852 	wl->wow_enabled = false;
1853 
1854 	/*
1855 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1856 	 * That way we avoid possible conditions where Tx-complete interrupts
1857 	 * fail to arrive and we perform a spurious recovery.
1858 	 */
1859 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1860 	mutex_unlock(&wl->mutex);
1861 
1862 	return 0;
1863 }
1864 #endif
1865 
1866 static int wl1271_op_start(struct ieee80211_hw *hw)
1867 {
1868 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1869 
1870 	/*
1871 	 * We have to delay the booting of the hardware because
1872 	 * we need to know the local MAC address before downloading and
1873 	 * initializing the firmware. The MAC address cannot be changed
1874 	 * after boot, and without the proper MAC address, the firmware
1875 	 * will not function properly.
1876 	 *
1877 	 * The MAC address is first known when the corresponding interface
1878 	 * is added. That is where we will initialize the hardware.
1879 	 */
1880 
1881 	return 0;
1882 }
1883 
1884 static void wlcore_op_stop_locked(struct wl1271 *wl)
1885 {
1886 	int i;
1887 
1888 	if (wl->state == WLCORE_STATE_OFF) {
1889 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1890 					&wl->flags))
1891 			wlcore_enable_interrupts(wl);
1892 
1893 		return;
1894 	}
1895 
1896 	/*
1897 	 * this must be before the cancel_work calls below, so that the work
1898 	 * functions don't perform further work.
1899 	 */
1900 	wl->state = WLCORE_STATE_OFF;
1901 
1902 	/*
1903 	 * Use the nosync variant to disable interrupts, so the mutex can be
1904 	 * held while doing so without deadlocking.
1905 	 */
1906 	wlcore_disable_interrupts_nosync(wl);
1907 
1908 	mutex_unlock(&wl->mutex);
1909 
1910 	wlcore_synchronize_interrupts(wl);
1911 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1912 		cancel_work_sync(&wl->recovery_work);
1913 	wl1271_flush_deferred_work(wl);
1914 	cancel_delayed_work_sync(&wl->scan_complete_work);
1915 	cancel_work_sync(&wl->netstack_work);
1916 	cancel_work_sync(&wl->tx_work);
1917 	cancel_delayed_work_sync(&wl->elp_work);
1918 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1919 
1920 	/* let's notify MAC80211 about the remaining pending TX frames */
1921 	mutex_lock(&wl->mutex);
1922 	wl12xx_tx_reset(wl);
1923 
1924 	wl1271_power_off(wl);
1925 	/*
1926 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1927 	 * an interrupt storm. Now that the power is down, it is safe to
1928 	 * re-enable interrupts to balance the disable depth.
1929 	 */
1930 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1931 		wlcore_enable_interrupts(wl);
1932 
1933 	wl->band = NL80211_BAND_2GHZ;
1934 
1935 	wl->rx_counter = 0;
1936 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1937 	wl->channel_type = NL80211_CHAN_NO_HT;
1938 	wl->tx_blocks_available = 0;
1939 	wl->tx_allocated_blocks = 0;
1940 	wl->tx_results_count = 0;
1941 	wl->tx_packets_count = 0;
1942 	wl->time_offset = 0;
1943 	wl->ap_fw_ps_map = 0;
1944 	wl->ap_ps_map = 0;
1945 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1946 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1947 	memset(wl->links_map, 0, sizeof(wl->links_map));
1948 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1949 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1950 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1951 	wl->active_sta_count = 0;
1952 	wl->active_link_count = 0;
1953 
1954 	/* The system link is always allocated */
1955 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1956 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1957 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1958 
1959 	/*
1960 	 * this is performed after the cancel_work calls and the associated
1961 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1962 	 * get executed before all these vars have been reset.
1963 	 */
1964 	wl->flags = 0;
1965 
1966 	wl->tx_blocks_freed = 0;
1967 
1968 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1969 		wl->tx_pkts_freed[i] = 0;
1970 		wl->tx_allocated_pkts[i] = 0;
1971 	}
1972 
1973 	wl1271_debugfs_reset(wl);
1974 
1975 	kfree(wl->raw_fw_status);
1976 	wl->raw_fw_status = NULL;
1977 	kfree(wl->fw_status);
1978 	wl->fw_status = NULL;
1979 	kfree(wl->tx_res_if);
1980 	wl->tx_res_if = NULL;
1981 	kfree(wl->target_mem_map);
1982 	wl->target_mem_map = NULL;
1983 
1984 	/*
1985 	 * FW channels must be re-calibrated after recovery, so save the
1986 	 * current Reg-Domain channel configuration and clear it.
1987 	 */
1988 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
1989 	       sizeof(wl->reg_ch_conf_pending));
1990 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1991 }
1992 
1993 static void wlcore_op_stop(struct ieee80211_hw *hw)
1994 {
1995 	struct wl1271 *wl = hw->priv;
1996 
1997 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1998 
1999 	mutex_lock(&wl->mutex);
2000 
2001 	wlcore_op_stop_locked(wl);
2002 
2003 	mutex_unlock(&wl->mutex);
2004 }
2005 
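/*
 * Channel-switch timeout: this delayed work is armed when a channel switch is
 * started. If it runs while WLVIF_FLAG_CS_PROGRESS is still set, the switch
 * is treated as failed - report it to mac80211 and tell the fw to stop the
 * switch.
 */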
2006 static void wlcore_channel_switch_work(struct work_struct *work)
2007 {
2008 	struct delayed_work *dwork;
2009 	struct wl1271 *wl;
2010 	struct ieee80211_vif *vif;
2011 	struct wl12xx_vif *wlvif;
2012 	int ret;
2013 
2014 	dwork = to_delayed_work(work);
2015 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2016 	wl = wlvif->wl;
2017 
2018 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2019 
2020 	mutex_lock(&wl->mutex);
2021 
2022 	if (unlikely(wl->state != WLCORE_STATE_ON))
2023 		goto out;
2024 
2025 	/* check the channel switch is still ongoing */
2026 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2027 		goto out;
2028 
2029 	vif = wl12xx_wlvif_to_vif(wlvif);
2030 	ieee80211_chswitch_done(vif, false);
2031 
2032 	ret = wl1271_ps_elp_wakeup(wl);
2033 	if (ret < 0)
2034 		goto out;
2035 
2036 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2037 
2038 	wl1271_ps_elp_sleep(wl);
2039 out:
2040 	mutex_unlock(&wl->mutex);
2041 }
2042 
2043 static void wlcore_connection_loss_work(struct work_struct *work)
2044 {
2045 	struct delayed_work *dwork;
2046 	struct wl1271 *wl;
2047 	struct ieee80211_vif *vif;
2048 	struct wl12xx_vif *wlvif;
2049 
2050 	dwork = to_delayed_work(work);
2051 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2052 	wl = wlvif->wl;
2053 
2054 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2055 
2056 	mutex_lock(&wl->mutex);
2057 
2058 	if (unlikely(wl->state != WLCORE_STATE_ON))
2059 		goto out;
2060 
2061 	/* Call mac80211 connection loss */
2062 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2063 		goto out;
2064 
2065 	vif = wl12xx_wlvif_to_vif(wlvif);
2066 	ieee80211_connection_loss(vif);
2067 out:
2068 	mutex_unlock(&wl->mutex);
2069 }
2070 
2071 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2072 {
2073 	struct delayed_work *dwork;
2074 	struct wl1271 *wl;
2075 	struct wl12xx_vif *wlvif;
2076 	unsigned long time_spare;
2077 	int ret;
2078 
2079 	dwork = to_delayed_work(work);
2080 	wlvif = container_of(dwork, struct wl12xx_vif,
2081 			     pending_auth_complete_work);
2082 	wl = wlvif->wl;
2083 
2084 	mutex_lock(&wl->mutex);
2085 
2086 	if (unlikely(wl->state != WLCORE_STATE_ON))
2087 		goto out;
2088 
2089 	/*
2090 	 * Make sure a second really passed since the last auth reply. Maybe
2091 	 * a second auth reply arrived while we were stuck on the mutex.
2092 	 * Check for a little less than the timeout to protect from scheduler
2093 	 * irregularities.
2094 	 */
2095 	time_spare = jiffies +
2096 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2097 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2098 		goto out;
2099 
2100 	ret = wl1271_ps_elp_wakeup(wl);
2101 	if (ret < 0)
2102 		goto out;
2103 
2104 	/* cancel the ROC if active */
2105 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2106 
2107 	wl1271_ps_elp_sleep(wl);
2108 out:
2109 	mutex_unlock(&wl->mutex);
2110 }
2111 
2112 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2113 {
2114 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2115 					WL12XX_MAX_RATE_POLICIES);
2116 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2117 		return -EBUSY;
2118 
2119 	__set_bit(policy, wl->rate_policies_map);
2120 	*idx = policy;
2121 	return 0;
2122 }
2123 
2124 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2125 {
2126 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2127 		return;
2128 
2129 	__clear_bit(*idx, wl->rate_policies_map);
2130 	*idx = WL12XX_MAX_RATE_POLICIES;
2131 }
2132 
2133 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2134 {
2135 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2136 					WLCORE_MAX_KLV_TEMPLATES);
2137 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2138 		return -EBUSY;
2139 
2140 	__set_bit(policy, wl->klv_templates_map);
2141 	*idx = policy;
2142 	return 0;
2143 }
2144 
2145 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2146 {
2147 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2148 		return;
2149 
2150 	__clear_bit(*idx, wl->klv_templates_map);
2151 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2152 }
2153 
2154 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2155 {
2156 	switch (wlvif->bss_type) {
2157 	case BSS_TYPE_AP_BSS:
2158 		if (wlvif->p2p)
2159 			return WL1271_ROLE_P2P_GO;
2160 		else
2161 			return WL1271_ROLE_AP;
2162 
2163 	case BSS_TYPE_STA_BSS:
2164 		if (wlvif->p2p)
2165 			return WL1271_ROLE_P2P_CL;
2166 		else
2167 			return WL1271_ROLE_STA;
2168 
2169 	case BSS_TYPE_IBSS:
2170 		return WL1271_ROLE_IBSS;
2171 
2172 	default:
2173 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2174 	}
2175 	return WL12XX_INVALID_ROLE_TYPE;
2176 }
2177 
2178 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2179 {
2180 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2181 	int i;
2182 
2183 	/* clear everything but the persistent data */
2184 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2185 
2186 	switch (ieee80211_vif_type_p2p(vif)) {
2187 	case NL80211_IFTYPE_P2P_CLIENT:
2188 		wlvif->p2p = 1;
2189 		/* fall-through */
2190 	case NL80211_IFTYPE_STATION:
2191 	case NL80211_IFTYPE_P2P_DEVICE:
2192 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2193 		break;
2194 	case NL80211_IFTYPE_ADHOC:
2195 		wlvif->bss_type = BSS_TYPE_IBSS;
2196 		break;
2197 	case NL80211_IFTYPE_P2P_GO:
2198 		wlvif->p2p = 1;
2199 		/* fall-through */
2200 	case NL80211_IFTYPE_AP:
2201 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2202 		break;
2203 	default:
2204 		wlvif->bss_type = MAX_BSS_TYPE;
2205 		return -EOPNOTSUPP;
2206 	}
2207 
2208 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2209 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2210 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2211 
2212 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2213 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2214 		/* init sta/ibss data */
2215 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2216 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2217 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2218 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2219 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2220 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2221 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2222 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2223 	} else {
2224 		/* init ap data */
2225 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2226 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2227 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2228 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2229 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2230 			wl12xx_allocate_rate_policy(wl,
2231 						&wlvif->ap.ucast_rate_idx[i]);
2232 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2233 		/*
2234 		 * TODO: check if basic_rate shouldn't be
2235 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2236 		 * instead (the same thing for STA above).
2237 		 */
2238 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2239 		/* TODO: this seems to be used only for STA, check it */
2240 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2241 	}
2242 
2243 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2244 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2245 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2246 
2247 	/*
2248 	 * mac80211 configures some values globally, while we treat them
2249 	 * per-interface. thus, on init, we have to copy them from wl
2250 	 */
2251 	wlvif->band = wl->band;
2252 	wlvif->channel = wl->channel;
2253 	wlvif->power_level = wl->power_level;
2254 	wlvif->channel_type = wl->channel_type;
2255 
2256 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2257 		  wl1271_rx_streaming_enable_work);
2258 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2259 		  wl1271_rx_streaming_disable_work);
2260 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2261 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2262 			  wlcore_channel_switch_work);
2263 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2264 			  wlcore_connection_loss_work);
2265 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2266 			  wlcore_pending_auth_complete_work);
2267 	INIT_LIST_HEAD(&wlvif->list);
2268 
2269 	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2270 		    (unsigned long) wlvif);
2271 	return 0;
2272 }
2273 
2274 static int wl12xx_init_fw(struct wl1271 *wl)
2275 {
2276 	int retries = WL1271_BOOT_RETRIES;
2277 	bool booted = false;
2278 	struct wiphy *wiphy = wl->hw->wiphy;
2279 	int ret;
2280 
2281 	while (retries) {
2282 		retries--;
2283 		ret = wl12xx_chip_wakeup(wl, false);
2284 		if (ret < 0)
2285 			goto power_off;
2286 
2287 		ret = wl->ops->boot(wl);
2288 		if (ret < 0)
2289 			goto power_off;
2290 
2291 		ret = wl1271_hw_init(wl);
2292 		if (ret < 0)
2293 			goto irq_disable;
2294 
2295 		booted = true;
2296 		break;
2297 
2298 irq_disable:
2299 		mutex_unlock(&wl->mutex);
2300 		/* Unlocking the mutex in the middle of handling is
2301 		   inherently unsafe. In this case we deem it safe to do,
2302 		   because we need to let any possibly pending IRQ out of
2303 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2304 		   work function will not do anything.) Also, any other
2305 		   possible concurrent operations will fail due to the
2306 		   current state, hence the wl1271 struct should be safe. */
2307 		wlcore_disable_interrupts(wl);
2308 		wl1271_flush_deferred_work(wl);
2309 		cancel_work_sync(&wl->netstack_work);
2310 		mutex_lock(&wl->mutex);
2311 power_off:
2312 		wl1271_power_off(wl);
2313 	}
2314 
2315 	if (!booted) {
2316 		wl1271_error("firmware boot failed despite %d retries",
2317 			     WL1271_BOOT_RETRIES);
2318 		goto out;
2319 	}
2320 
2321 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2322 
2323 	/* update hw/fw version info in wiphy struct */
2324 	wiphy->hw_version = wl->chip.id;
2325 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2326 		sizeof(wiphy->fw_version));
2327 
2328 	/*
2329 	 * Now we know if 11a is supported (info from the NVS), so disable
2330 	 * 11a channels if not supported
2331 	 */
2332 	if (!wl->enable_11a)
2333 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2334 
2335 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2336 		     wl->enable_11a ? "" : "not ");
2337 
2338 	wl->state = WLCORE_STATE_ON;
2339 out:
2340 	return ret;
2341 }
2342 
2343 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2344 {
2345 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2346 }
2347 
2348 /*
2349  * Check whether a fw switch (i.e. moving from one loaded
2350  * fw to another) is needed. This function is also responsible
2351  * for updating wl->last_vif_count, so it must be called before
2352  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2353  * will be used).
2354  */
2355 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2356 				  struct vif_counter_data vif_counter_data,
2357 				  bool add)
2358 {
2359 	enum wl12xx_fw_type current_fw = wl->fw_type;
2360 	u8 vif_count = vif_counter_data.counter;
2361 
2362 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2363 		return false;
2364 
2365 	/* increase the vif count if this is a new vif */
2366 	if (add && !vif_counter_data.cur_vif_running)
2367 		vif_count++;
2368 
2369 	wl->last_vif_count = vif_count;
2370 
2371 	/* no need for fw change if the device is OFF */
2372 	if (wl->state == WLCORE_STATE_OFF)
2373 		return false;
2374 
2375 	/* no need for fw change if a single fw is used */
2376 	if (!wl->mr_fw_name)
2377 		return false;
2378 
2379 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2380 		return true;
2381 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2382 		return true;
2383 
2384 	return false;
2385 }
2386 
2387 /*
2388  * Enter "forced psm". Make sure the sta is in psm against the ap,
2389  * to make the fw switch a bit more disconnection-persistent.
2390  */
2391 static void wl12xx_force_active_psm(struct wl1271 *wl)
2392 {
2393 	struct wl12xx_vif *wlvif;
2394 
2395 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2396 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2397 	}
2398 }
2399 
2400 struct wlcore_hw_queue_iter_data {
2401 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2402 	/* current vif */
2403 	struct ieee80211_vif *vif;
2404 	/* is the current vif among those iterated */
2405 	bool cur_running;
2406 };
2407 
2408 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2409 				 struct ieee80211_vif *vif)
2410 {
2411 	struct wlcore_hw_queue_iter_data *iter_data = data;
2412 
2413 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2414 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2415 		return;
2416 
2417 	if (iter_data->cur_running || vif == iter_data->vif) {
2418 		iter_data->cur_running = true;
2419 		return;
2420 	}
2421 
2422 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2423 }
2424 
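/*
 * Each interface owns a contiguous group of NUM_TX_QUEUES mac80211 hw queues.
 * The iterator above marks the groups already claimed by running interfaces;
 * here we either reuse the group mac80211 has already assigned to this vif
 * (resume/recovery) or hand out the first free group.
 */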
2425 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2426 					 struct wl12xx_vif *wlvif)
2427 {
2428 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2429 	struct wlcore_hw_queue_iter_data iter_data = {};
2430 	int i, q_base;
2431 
2432 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2433 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2434 		return 0;
2435 	}
2436 
2437 	iter_data.vif = vif;
2438 
2439 	/* mark all bits taken by active interfaces */
2440 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2441 					IEEE80211_IFACE_ITER_RESUME_ALL,
2442 					wlcore_hw_queue_iter, &iter_data);
2443 
2444 	/* the current vif is already running in mac80211 (resume/recovery) */
2445 	if (iter_data.cur_running) {
2446 		wlvif->hw_queue_base = vif->hw_queue[0];
2447 		wl1271_debug(DEBUG_MAC80211,
2448 			     "using pre-allocated hw queue base %d",
2449 			     wlvif->hw_queue_base);
2450 
2451 		/* the interface type might have changed */
2452 		goto adjust_cab_queue;
2453 	}
2454 
2455 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2456 				     WLCORE_NUM_MAC_ADDRESSES);
2457 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2458 		return -EBUSY;
2459 
2460 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2461 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2462 		     wlvif->hw_queue_base);
2463 
2464 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2465 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2466 		/* register hw queues in mac80211 */
2467 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2468 	}
2469 
2470 adjust_cab_queue:
2471 	/* the last places are reserved for cab queues per interface */
2472 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2473 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2474 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2475 	else
2476 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2477 
2478 	return 0;
2479 }
2480 
2481 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2482 				   struct ieee80211_vif *vif)
2483 {
2484 	struct wl1271 *wl = hw->priv;
2485 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2486 	struct vif_counter_data vif_count;
2487 	int ret = 0;
2488 	u8 role_type;
2489 
2490 	if (wl->plt) {
2491 		wl1271_error("Adding Interface not allowed while in PLT mode");
2492 		return -EBUSY;
2493 	}
2494 
2495 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2496 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2497 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2498 
2499 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2500 		     ieee80211_vif_type_p2p(vif), vif->addr);
2501 
2502 	wl12xx_get_vif_count(hw, vif, &vif_count);
2503 
2504 	mutex_lock(&wl->mutex);
2505 	ret = wl1271_ps_elp_wakeup(wl);
2506 	if (ret < 0)
2507 		goto out_unlock;
2508 
2509 	/*
2510 	 * in some rare corner-case HW recovery scenarios it's possible to
2511 	 * get here before __wl1271_op_remove_interface is complete, so
2512 	 * opt out if that is the case.
2513 	 */
2514 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2515 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2516 		ret = -EBUSY;
2517 		goto out;
2518 	}
2519 
2520 
2521 	ret = wl12xx_init_vif_data(wl, vif);
2522 	if (ret < 0)
2523 		goto out;
2524 
2525 	wlvif->wl = wl;
2526 	role_type = wl12xx_get_role_type(wl, wlvif);
2527 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2528 		ret = -EINVAL;
2529 		goto out;
2530 	}
2531 
2532 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2533 	if (ret < 0)
2534 		goto out;
2535 
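	/*
	 * A single-role <-> multi-role firmware switch is done as an intended
	 * recovery: run the recovery work synchronously so the matching fw is
	 * loaded on the next boot, after which mac80211 restarts the hw and
	 * re-adds the interfaces.
	 */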
2536 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2537 		wl12xx_force_active_psm(wl);
2538 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2539 		mutex_unlock(&wl->mutex);
2540 		wl1271_recovery_work(&wl->recovery_work);
2541 		return 0;
2542 	}
2543 
2544 	/*
2545 	 * TODO: once the nvs issue is solved, move this block
2546 	 * to start(), and make sure the driver is ON here.
2547 	 */
2548 	if (wl->state == WLCORE_STATE_OFF) {
2549 		/*
2550 		 * we still need this in order to configure the fw
2551 		 * while uploading the nvs
2552 		 */
2553 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2554 
2555 		ret = wl12xx_init_fw(wl);
2556 		if (ret < 0)
2557 			goto out;
2558 	}
2559 
2560 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2561 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2562 					     role_type, &wlvif->role_id);
2563 		if (ret < 0)
2564 			goto out;
2565 
2566 		ret = wl1271_init_vif_specific(wl, vif);
2567 		if (ret < 0)
2568 			goto out;
2569 
2570 	} else {
2571 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2572 					     &wlvif->dev_role_id);
2573 		if (ret < 0)
2574 			goto out;
2575 
2576 		/* needed mainly for configuring rate policies */
2577 		ret = wl1271_sta_hw_init(wl, wlvif);
2578 		if (ret < 0)
2579 			goto out;
2580 	}
2581 
2582 	list_add(&wlvif->list, &wl->wlvif_list);
2583 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2584 
2585 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2586 		wl->ap_count++;
2587 	else
2588 		wl->sta_count++;
2589 out:
2590 	wl1271_ps_elp_sleep(wl);
2591 out_unlock:
2592 	mutex_unlock(&wl->mutex);
2593 
2594 	return ret;
2595 }
2596 
2597 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2598 					 struct ieee80211_vif *vif,
2599 					 bool reset_tx_queues)
2600 {
2601 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2602 	int i, ret;
2603 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2604 
2605 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2606 
2607 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2608 		return;
2609 
2610 	/* because of hardware recovery, we may get here twice */
2611 	if (wl->state == WLCORE_STATE_OFF)
2612 		return;
2613 
2614 	wl1271_info("down");
2615 
2616 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2617 	    wl->scan_wlvif == wlvif) {
2618 		/*
2619 		 * Rearm the tx watchdog just before idling scan. This
2620 		 * prevents just-finished scans from triggering the watchdog
2621 		 */
2622 		wl12xx_rearm_tx_watchdog_locked(wl);
2623 
2624 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2625 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2626 		wl->scan_wlvif = NULL;
2627 		wl->scan.req = NULL;
2628 		ieee80211_scan_completed(wl->hw, true);
2629 	}
2630 
2631 	if (wl->sched_vif == wlvif)
2632 		wl->sched_vif = NULL;
2633 
2634 	if (wl->roc_vif == vif) {
2635 		wl->roc_vif = NULL;
2636 		ieee80211_remain_on_channel_expired(wl->hw);
2637 	}
2638 
2639 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2640 		/* disable active roles */
2641 		ret = wl1271_ps_elp_wakeup(wl);
2642 		if (ret < 0)
2643 			goto deinit;
2644 
2645 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2646 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2647 			if (wl12xx_dev_role_started(wlvif))
2648 				wl12xx_stop_dev(wl, wlvif);
2649 		}
2650 
2651 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2652 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2653 			if (ret < 0)
2654 				goto deinit;
2655 		} else {
2656 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2657 			if (ret < 0)
2658 				goto deinit;
2659 		}
2660 
2661 		wl1271_ps_elp_sleep(wl);
2662 	}
2663 deinit:
2664 	wl12xx_tx_reset_wlvif(wl, wlvif);
2665 
2666 	/* clear all hlids (except system_hlid) */
2667 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2668 
2669 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2670 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2671 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2672 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2673 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2674 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2675 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2676 	} else {
2677 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2678 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2679 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2680 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2681 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2682 			wl12xx_free_rate_policy(wl,
2683 						&wlvif->ap.ucast_rate_idx[i]);
2684 		wl1271_free_ap_keys(wl, wlvif);
2685 	}
2686 
2687 	dev_kfree_skb(wlvif->probereq);
2688 	wlvif->probereq = NULL;
2689 	if (wl->last_wlvif == wlvif)
2690 		wl->last_wlvif = NULL;
2691 	list_del(&wlvif->list);
2692 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2693 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2694 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2695 
2696 	if (is_ap)
2697 		wl->ap_count--;
2698 	else
2699 		wl->sta_count--;
2700 
2701 	/*
2702 	 * Last AP removed, but stations remain: configure sleep auth according
2703 	 * to the STA setting. Don't do this on unintended recovery.
2704 	 */
2705 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2706 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2707 		goto unlock;
2708 
2709 	if (wl->ap_count == 0 && is_ap) {
2710 		/* mask ap events */
2711 		wl->event_mask &= ~wl->ap_event_mask;
2712 		wl1271_event_unmask(wl);
2713 	}
2714 
2715 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2716 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2717 		/* Configure for power according to debugfs */
2718 		if (sta_auth != WL1271_PSM_ILLEGAL)
2719 			wl1271_acx_sleep_auth(wl, sta_auth);
2720 		/* Configure for ELP power saving */
2721 		else
2722 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2723 	}
2724 
2725 unlock:
2726 	mutex_unlock(&wl->mutex);
2727 
2728 	del_timer_sync(&wlvif->rx_streaming_timer);
2729 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2730 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2731 	cancel_work_sync(&wlvif->rc_update_work);
2732 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2733 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2734 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2735 
2736 	mutex_lock(&wl->mutex);
2737 }
2738 
2739 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2740 				       struct ieee80211_vif *vif)
2741 {
2742 	struct wl1271 *wl = hw->priv;
2743 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2744 	struct wl12xx_vif *iter;
2745 	struct vif_counter_data vif_count;
2746 
2747 	wl12xx_get_vif_count(hw, vif, &vif_count);
2748 	mutex_lock(&wl->mutex);
2749 
2750 	if (wl->state == WLCORE_STATE_OFF ||
2751 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2752 		goto out;
2753 
2754 	/*
2755 	 * wl->vif can be null here if someone shuts down the interface
2756 	 * just when hardware recovery has been started.
2757 	 */
2758 	wl12xx_for_each_wlvif(wl, iter) {
2759 		if (iter != wlvif)
2760 			continue;
2761 
2762 		__wl1271_op_remove_interface(wl, vif, true);
2763 		break;
2764 	}
2765 	WARN_ON(iter != wlvif);
2766 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2767 		wl12xx_force_active_psm(wl);
2768 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2769 		wl12xx_queue_recovery_work(wl);
2770 	}
2771 out:
2772 	mutex_unlock(&wl->mutex);
2773 }
2774 
2775 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2776 				      struct ieee80211_vif *vif,
2777 				      enum nl80211_iftype new_type, bool p2p)
2778 {
2779 	struct wl1271 *wl = hw->priv;
2780 	int ret;
2781 
2782 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2783 	wl1271_op_remove_interface(hw, vif);
2784 
2785 	vif->type = new_type;
2786 	vif->p2p = p2p;
2787 	ret = wl1271_op_add_interface(hw, vif);
2788 
2789 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2790 	return ret;
2791 }
2792 
2793 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2794 {
2795 	int ret;
2796 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2797 
2798 	/*
2799 	 * One of the side effects of the JOIN command is that it clears
2800 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2801 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2802 	 * Currently the only valid scenario for JOIN during association
2803 	 * is on roaming, in which case we will also be given new keys.
2804 	 * Keep the below message for now, unless it starts bothering
2805 	 * users who really like to roam a lot :)
2806 	 */
2807 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2808 		wl1271_info("JOIN while associated.");
2809 
2810 	/* clear encryption type */
2811 	wlvif->encryption_type = KEY_NONE;
2812 
2813 	if (is_ibss)
2814 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2815 	else {
2816 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2817 			/*
2818 			 * TODO: this is an ugly workaround for a wl12xx fw
2819 			 * bug - we are not able to tx/rx after the first
2820 			 * start_sta, so make dummy start+stop calls,
2821 			 * and then call start_sta again.
2822 			 * This should be fixed in the fw.
2823 			 */
2824 			wl12xx_cmd_role_start_sta(wl, wlvif);
2825 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2826 		}
2827 
2828 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2829 	}
2830 
2831 	return ret;
2832 }
2833 
2834 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2835 			    int offset)
2836 {
2837 	u8 ssid_len;
2838 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2839 					 skb->len - offset);
2840 
2841 	if (!ptr) {
2842 		wl1271_error("No SSID in IEs!");
2843 		return -ENOENT;
2844 	}
2845 
2846 	ssid_len = ptr[1];
2847 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2848 		wl1271_error("SSID is too long!");
2849 		return -EINVAL;
2850 	}
2851 
2852 	wlvif->ssid_len = ssid_len;
2853 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2854 	return 0;
2855 }
2856 
2857 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2858 {
2859 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2860 	struct sk_buff *skb;
2861 	int ieoffset;
2862 
2863 	/* we currently only support setting the ssid from the ap probe req */
2864 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2865 		return -EINVAL;
2866 
2867 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2868 	if (!skb)
2869 		return -EINVAL;
2870 
2871 	ieoffset = offsetof(struct ieee80211_mgmt,
2872 			    u.probe_req.variable);
2873 	wl1271_ssid_set(wlvif, skb, ieoffset);
2874 	dev_kfree_skb(skb);
2875 
2876 	return 0;
2877 }
2878 
2879 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2880 			    struct ieee80211_bss_conf *bss_conf,
2881 			    u32 sta_rate_set)
2882 {
2883 	int ieoffset;
2884 	int ret;
2885 
2886 	wlvif->aid = bss_conf->aid;
2887 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2888 	wlvif->beacon_int = bss_conf->beacon_int;
2889 	wlvif->wmm_enabled = bss_conf->qos;
2890 
2891 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2892 
2893 	/*
2894 	 * with wl1271, we don't need to update the
2895 	 * beacon_int and dtim_period, because the firmware
2896 	 * updates them by itself when the first beacon is
2897 	 * received after a join.
2898 	 */
2899 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2900 	if (ret < 0)
2901 		return ret;
2902 
2903 	/*
2904 	 * Get a template for hardware connection maintenance
2905 	 */
2906 	dev_kfree_skb(wlvif->probereq);
2907 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2908 							wlvif,
2909 							NULL);
2910 	ieoffset = offsetof(struct ieee80211_mgmt,
2911 			    u.probe_req.variable);
2912 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2913 
2914 	/* enable the connection monitoring feature */
2915 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2916 	if (ret < 0)
2917 		return ret;
2918 
2919 	/*
2920 	 * The join command disables the keep-alive mode, shuts down its process,
2921 	 * and also clears the template config, so we need to reset it all after
2922 	 * the join. The acx_aid starts the keep-alive process, and the order
2923 	 * of the commands below is relevant.
2924 	 */
2925 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2926 	if (ret < 0)
2927 		return ret;
2928 
2929 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2930 	if (ret < 0)
2931 		return ret;
2932 
2933 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2934 	if (ret < 0)
2935 		return ret;
2936 
2937 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2938 					   wlvif->sta.klv_template_id,
2939 					   ACX_KEEP_ALIVE_TPL_VALID);
2940 	if (ret < 0)
2941 		return ret;
2942 
2943 	/*
2944 	 * The default fw psm configuration is AUTO, while mac80211 default
2945 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2946 	 */
2947 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2948 	if (ret < 0)
2949 		return ret;
2950 
2951 	if (sta_rate_set) {
2952 		wlvif->rate_set =
2953 			wl1271_tx_enabled_rates_get(wl,
2954 						    sta_rate_set,
2955 						    wlvif->band);
2956 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2957 		if (ret < 0)
2958 			return ret;
2959 	}
2960 
2961 	return ret;
2962 }
2963 
2964 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2965 {
2966 	int ret;
2967 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2968 
2969 	/* make sure we are associated (sta) */
2970 	if (sta &&
2971 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2972 		return false;
2973 
2974 	/* make sure we are joined (ibss) */
2975 	if (!sta &&
2976 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2977 		return false;
2978 
2979 	if (sta) {
2980 		/* use defaults when not associated */
2981 		wlvif->aid = 0;
2982 
2983 		/* free probe-request template */
2984 		dev_kfree_skb(wlvif->probereq);
2985 		wlvif->probereq = NULL;
2986 
2987 		/* disable connection monitor features */
2988 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2989 		if (ret < 0)
2990 			return ret;
2991 
2992 		/* Disable the keep-alive feature */
2993 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2994 		if (ret < 0)
2995 			return ret;
2996 
2997 		/* disable beacon filtering */
2998 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
2999 		if (ret < 0)
3000 			return ret;
3001 	}
3002 
3003 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3004 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3005 
3006 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3007 		ieee80211_chswitch_done(vif, false);
3008 		cancel_delayed_work(&wlvif->channel_switch_work);
3009 	}
3010 
3011 	/* invalidate keep-alive template */
3012 	wl1271_acx_keep_alive_config(wl, wlvif,
3013 				     wlvif->sta.klv_template_id,
3014 				     ACX_KEEP_ALIVE_TPL_INVALID);
3015 
3016 	return 0;
3017 }
3018 
3019 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3020 {
3021 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3022 	wlvif->rate_set = wlvif->basic_rate_set;
3023 }
3024 
3025 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3026 				   bool idle)
3027 {
3028 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3029 
3030 	if (idle == cur_idle)
3031 		return;
3032 
3033 	if (idle) {
3034 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3035 	} else {
3036 		/* The current firmware only supports sched_scan in idle */
3037 		if (wl->sched_vif == wlvif)
3038 			wl->ops->sched_scan_stop(wl, wlvif);
3039 
3040 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3041 	}
3042 }
3043 
3044 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3045 			     struct ieee80211_conf *conf, u32 changed)
3046 {
3047 	int ret;
3048 
3049 	if (wlcore_is_p2p_mgmt(wlvif))
3050 		return 0;
3051 
3052 	if (conf->power_level != wlvif->power_level) {
3053 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3054 		if (ret < 0)
3055 			return ret;
3056 
3057 		wlvif->power_level = conf->power_level;
3058 	}
3059 
3060 	return 0;
3061 }
3062 
3063 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3064 {
3065 	struct wl1271 *wl = hw->priv;
3066 	struct wl12xx_vif *wlvif;
3067 	struct ieee80211_conf *conf = &hw->conf;
3068 	int ret = 0;
3069 
3070 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3071 		     " changed 0x%x",
3072 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3073 		     conf->power_level,
3074 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3075 			 changed);
3076 
3077 	mutex_lock(&wl->mutex);
3078 
3079 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3080 		wl->power_level = conf->power_level;
3081 
3082 	if (unlikely(wl->state != WLCORE_STATE_ON))
3083 		goto out;
3084 
3085 	ret = wl1271_ps_elp_wakeup(wl);
3086 	if (ret < 0)
3087 		goto out;
3088 
3089 	/* configure each interface */
3090 	wl12xx_for_each_wlvif(wl, wlvif) {
3091 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3092 		if (ret < 0)
3093 			goto out_sleep;
3094 	}
3095 
3096 out_sleep:
3097 	wl1271_ps_elp_sleep(wl);
3098 
3099 out:
3100 	mutex_unlock(&wl->mutex);
3101 
3102 	return ret;
3103 }
3104 
3105 struct wl1271_filter_params {
3106 	bool enabled;
3107 	int mc_list_length;
3108 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3109 };
3110 
3111 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3112 				       struct netdev_hw_addr_list *mc_list)
3113 {
3114 	struct wl1271_filter_params *fp;
3115 	struct netdev_hw_addr *ha;
3116 
3117 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3118 	if (!fp) {
3119 		wl1271_error("Out of memory setting filters.");
3120 		return 0;
3121 	}
3122 
3123 	/* update multicast filtering parameters */
3124 	fp->mc_list_length = 0;
3125 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3126 		fp->enabled = false;
3127 	} else {
3128 		fp->enabled = true;
3129 		netdev_hw_addr_list_for_each(ha, mc_list) {
3130 			memcpy(fp->mc_list[fp->mc_list_length],
3131 					ha->addr, ETH_ALEN);
3132 			fp->mc_list_length++;
3133 		}
3134 	}
3135 
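	/*
	 * mac80211 passes this opaque value back as the 'multicast' argument
	 * of configure_filter(), which casts it back to the filter params and
	 * frees it.
	 */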
3136 	return (u64)(unsigned long)fp;
3137 }
3138 
3139 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3140 				  FIF_FCSFAIL | \
3141 				  FIF_BCN_PRBRESP_PROMISC | \
3142 				  FIF_CONTROL | \
3143 				  FIF_OTHER_BSS)
3144 
3145 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3146 				       unsigned int changed,
3147 				       unsigned int *total, u64 multicast)
3148 {
3149 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3150 	struct wl1271 *wl = hw->priv;
3151 	struct wl12xx_vif *wlvif;
3152 
3153 	int ret;
3154 
3155 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3156 		     " total %x", changed, *total);
3157 
3158 	mutex_lock(&wl->mutex);
3159 
3160 	*total &= WL1271_SUPPORTED_FILTERS;
3161 	changed &= WL1271_SUPPORTED_FILTERS;
3162 
3163 	if (unlikely(wl->state != WLCORE_STATE_ON))
3164 		goto out;
3165 
3166 	ret = wl1271_ps_elp_wakeup(wl);
3167 	if (ret < 0)
3168 		goto out;
3169 
3170 	wl12xx_for_each_wlvif(wl, wlvif) {
3171 		if (wlcore_is_p2p_mgmt(wlvif))
3172 			continue;
3173 
3174 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3175 			if (*total & FIF_ALLMULTI)
3176 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3177 								   false,
3178 								   NULL, 0);
3179 			else if (fp)
3180 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3181 							fp->enabled,
3182 							fp->mc_list,
3183 							fp->mc_list_length);
3184 			if (ret < 0)
3185 				goto out_sleep;
3186 		}
3187 	}
3188 
3189 	/*
3190 	 * the fw doesn't provide an api to configure the filters. Instead,
3191 	 * the filter configuration is based on the active roles / ROC
3192 	 * state.
3193 	 */
3194 
3195 out_sleep:
3196 	wl1271_ps_elp_sleep(wl);
3197 
3198 out:
3199 	mutex_unlock(&wl->mutex);
3200 	kfree(fp);
3201 }
3202 
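/*
 * AP keys configured before the AP role has been started cannot be programmed
 * into the fw yet, so wl1271_set_key() records them here and
 * wl1271_ap_init_hwenc() replays them once the AP is up.
 */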
3203 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3204 				u8 id, u8 key_type, u8 key_size,
3205 				const u8 *key, u8 hlid, u32 tx_seq_32,
3206 				u16 tx_seq_16)
3207 {
3208 	struct wl1271_ap_key *ap_key;
3209 	int i;
3210 
3211 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3212 
3213 	if (key_size > MAX_KEY_SIZE)
3214 		return -EINVAL;
3215 
3216 	/*
3217 	 * Find next free entry in ap_keys. Also check we are not replacing
3218 	 * an existing key.
3219 	 */
3220 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3221 		if (wlvif->ap.recorded_keys[i] == NULL)
3222 			break;
3223 
3224 		if (wlvif->ap.recorded_keys[i]->id == id) {
3225 			wl1271_warning("trying to record key replacement");
3226 			return -EINVAL;
3227 		}
3228 	}
3229 
3230 	if (i == MAX_NUM_KEYS)
3231 		return -EBUSY;
3232 
3233 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3234 	if (!ap_key)
3235 		return -ENOMEM;
3236 
3237 	ap_key->id = id;
3238 	ap_key->key_type = key_type;
3239 	ap_key->key_size = key_size;
3240 	memcpy(ap_key->key, key, key_size);
3241 	ap_key->hlid = hlid;
3242 	ap_key->tx_seq_32 = tx_seq_32;
3243 	ap_key->tx_seq_16 = tx_seq_16;
3244 
3245 	wlvif->ap.recorded_keys[i] = ap_key;
3246 	return 0;
3247 }
3248 
3249 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3250 {
3251 	int i;
3252 
3253 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3254 		kfree(wlvif->ap.recorded_keys[i]);
3255 		wlvif->ap.recorded_keys[i] = NULL;
3256 	}
3257 }
3258 
3259 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3260 {
3261 	int i, ret = 0;
3262 	struct wl1271_ap_key *key;
3263 	bool wep_key_added = false;
3264 
3265 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3266 		u8 hlid;
3267 		if (wlvif->ap.recorded_keys[i] == NULL)
3268 			break;
3269 
3270 		key = wlvif->ap.recorded_keys[i];
3271 		hlid = key->hlid;
3272 		if (hlid == WL12XX_INVALID_LINK_ID)
3273 			hlid = wlvif->ap.bcast_hlid;
3274 
3275 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3276 					    key->id, key->key_type,
3277 					    key->key_size, key->key,
3278 					    hlid, key->tx_seq_32,
3279 					    key->tx_seq_16);
3280 		if (ret < 0)
3281 			goto out;
3282 
3283 		if (key->key_type == KEY_WEP)
3284 			wep_key_added = true;
3285 	}
3286 
3287 	if (wep_key_added) {
3288 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3289 						     wlvif->ap.bcast_hlid);
3290 		if (ret < 0)
3291 			goto out;
3292 	}
3293 
3294 out:
3295 	wl1271_free_ap_keys(wl, wlvif);
3296 	return ret;
3297 }
3298 
3299 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3300 		       u16 action, u8 id, u8 key_type,
3301 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3302 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3303 {
3304 	int ret;
3305 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3306 
3307 	if (is_ap) {
3308 		struct wl1271_station *wl_sta;
3309 		u8 hlid;
3310 
3311 		if (sta) {
3312 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3313 			hlid = wl_sta->hlid;
3314 		} else {
3315 			hlid = wlvif->ap.bcast_hlid;
3316 		}
3317 
3318 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3319 			/*
3320 			 * We do not support removing keys after AP shutdown.
3321 			 * Pretend we do to make mac80211 happy.
3322 			 */
3323 			if (action != KEY_ADD_OR_REPLACE)
3324 				return 0;
3325 
3326 			ret = wl1271_record_ap_key(wl, wlvif, id,
3327 					     key_type, key_size,
3328 					     key, hlid, tx_seq_32,
3329 					     tx_seq_16);
3330 		} else {
3331 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3332 					     id, key_type, key_size,
3333 					     key, hlid, tx_seq_32,
3334 					     tx_seq_16);
3335 		}
3336 
3337 		if (ret < 0)
3338 			return ret;
3339 	} else {
3340 		const u8 *addr;
3341 		static const u8 bcast_addr[ETH_ALEN] = {
3342 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3343 		};
3344 
3345 		addr = sta ? sta->addr : bcast_addr;
3346 
3347 		if (is_zero_ether_addr(addr)) {
3348 			/* We don't support TX-only encryption */
3349 			return -EOPNOTSUPP;
3350 		}
3351 
3352 		/* The wl1271 does not allow removing unicast keys - they
3353 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3354 		   request silently, as we don't want mac80211 to emit
3355 		   an error message. */
3356 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3357 			return 0;
3358 
3359 		/* don't remove key if hlid was already deleted */
3360 		if (action == KEY_REMOVE &&
3361 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3362 			return 0;
3363 
3364 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3365 					     id, key_type, key_size,
3366 					     key, addr, tx_seq_32,
3367 					     tx_seq_16);
3368 		if (ret < 0)
3369 			return ret;
3370 
3371 	}
3372 
3373 	return 0;
3374 }
3375 
3376 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3377 			     struct ieee80211_vif *vif,
3378 			     struct ieee80211_sta *sta,
3379 			     struct ieee80211_key_conf *key_conf)
3380 {
3381 	struct wl1271 *wl = hw->priv;
3382 	int ret;
3383 	bool might_change_spare =
3384 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3385 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3386 
3387 	if (might_change_spare) {
3388 		/*
3389 		 * stop the queues and flush to ensure the next packets are
3390 		 * in sync with FW spare block accounting
3391 		 */
3392 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3393 		wl1271_tx_flush(wl);
3394 	}
3395 
3396 	mutex_lock(&wl->mutex);
3397 
3398 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3399 		ret = -EAGAIN;
3400 		goto out_wake_queues;
3401 	}
3402 
3403 	ret = wl1271_ps_elp_wakeup(wl);
3404 	if (ret < 0)
3405 		goto out_wake_queues;
3406 
3407 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3408 
3409 	wl1271_ps_elp_sleep(wl);
3410 
3411 out_wake_queues:
3412 	if (might_change_spare)
3413 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3414 
3415 	mutex_unlock(&wl->mutex);
3416 
3417 	return ret;
3418 }
3419 
3420 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3421 		   struct ieee80211_vif *vif,
3422 		   struct ieee80211_sta *sta,
3423 		   struct ieee80211_key_conf *key_conf)
3424 {
3425 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3426 	int ret;
3427 	u32 tx_seq_32 = 0;
3428 	u16 tx_seq_16 = 0;
3429 	u8 key_type;
3430 	u8 hlid;
3431 
3432 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3433 
3434 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3435 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3436 		     key_conf->cipher, key_conf->keyidx,
3437 		     key_conf->keylen, key_conf->flags);
3438 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3439 
3440 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3441 		if (sta) {
3442 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3443 			hlid = wl_sta->hlid;
3444 		} else {
3445 			hlid = wlvif->ap.bcast_hlid;
3446 		}
3447 	else
3448 		hlid = wlvif->sta.hlid;
3449 
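	/*
	 * The per-link freed-packet counter doubles as the TX security
	 * sequence number, so seed the new key's 48-bit PN/IV from it, split
	 * into the 32-bit and 16-bit halves the fw command expects.
	 */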
3450 	if (hlid != WL12XX_INVALID_LINK_ID) {
3451 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3452 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3453 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3454 	}
3455 
3456 	switch (key_conf->cipher) {
3457 	case WLAN_CIPHER_SUITE_WEP40:
3458 	case WLAN_CIPHER_SUITE_WEP104:
3459 		key_type = KEY_WEP;
3460 
3461 		key_conf->hw_key_idx = key_conf->keyidx;
3462 		break;
3463 	case WLAN_CIPHER_SUITE_TKIP:
3464 		key_type = KEY_TKIP;
3465 		key_conf->hw_key_idx = key_conf->keyidx;
3466 		break;
3467 	case WLAN_CIPHER_SUITE_CCMP:
3468 		key_type = KEY_AES;
3469 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3470 		break;
3471 	case WL1271_CIPHER_SUITE_GEM:
3472 		key_type = KEY_GEM;
3473 		break;
3474 	default:
3475 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3476 
3477 		return -EOPNOTSUPP;
3478 	}
3479 
3480 	switch (cmd) {
3481 	case SET_KEY:
3482 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3483 				 key_conf->keyidx, key_type,
3484 				 key_conf->keylen, key_conf->key,
3485 				 tx_seq_32, tx_seq_16, sta);
3486 		if (ret < 0) {
3487 			wl1271_error("Could not add or replace key");
3488 			return ret;
3489 		}
3490 
3491 		/*
3492 		 * reconfiguring arp response if the unicast (or common)
3493 		 * encryption key type was changed
3494 		 */
3495 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3496 		    (sta || key_type == KEY_WEP) &&
3497 		    wlvif->encryption_type != key_type) {
3498 			wlvif->encryption_type = key_type;
3499 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3500 			if (ret < 0) {
3501 				wl1271_warning("build arp rsp failed: %d", ret);
3502 				return ret;
3503 			}
3504 		}
3505 		break;
3506 
3507 	case DISABLE_KEY:
3508 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3509 				     key_conf->keyidx, key_type,
3510 				     key_conf->keylen, key_conf->key,
3511 				     0, 0, sta);
3512 		if (ret < 0) {
3513 			wl1271_error("Could not remove key");
3514 			return ret;
3515 		}
3516 		break;
3517 
3518 	default:
3519 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3520 		return -EOPNOTSUPP;
3521 	}
3522 
3523 	return ret;
3524 }
3525 EXPORT_SYMBOL_GPL(wlcore_set_key);
3526 
3527 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3528 					  struct ieee80211_vif *vif,
3529 					  int key_idx)
3530 {
3531 	struct wl1271 *wl = hw->priv;
3532 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3533 	int ret;
3534 
3535 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3536 		     key_idx);
3537 
3538 	/* we don't handle unsetting of default key */
3539 	if (key_idx == -1)
3540 		return;
3541 
3542 	mutex_lock(&wl->mutex);
3543 
3544 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3545 		ret = -EAGAIN;
3546 		goto out_unlock;
3547 	}
3548 
3549 	ret = wl1271_ps_elp_wakeup(wl);
3550 	if (ret < 0)
3551 		goto out_unlock;
3552 
3553 	wlvif->default_key = key_idx;
3554 
3555 	/* the default WEP key needs to be configured at least once */
3556 	if (wlvif->encryption_type == KEY_WEP) {
3557 		ret = wl12xx_cmd_set_default_wep_key(wl,
3558 				key_idx,
3559 				wlvif->sta.hlid);
3560 		if (ret < 0)
3561 			goto out_sleep;
3562 	}
3563 
3564 out_sleep:
3565 	wl1271_ps_elp_sleep(wl);
3566 
3567 out_unlock:
3568 	mutex_unlock(&wl->mutex);
3569 }
3570 
3571 void wlcore_regdomain_config(struct wl1271 *wl)
3572 {
3573 	int ret;
3574 
3575 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3576 		return;
3577 
3578 	mutex_lock(&wl->mutex);
3579 
3580 	if (unlikely(wl->state != WLCORE_STATE_ON))
3581 		goto out;
3582 
3583 	ret = wl1271_ps_elp_wakeup(wl);
3584 	if (ret < 0)
3585 		goto out;
3586 
3587 	ret = wlcore_cmd_regdomain_config_locked(wl);
3588 	if (ret < 0) {
3589 		wl12xx_queue_recovery_work(wl);
3590 		goto out;
3591 	}
3592 
3593 	wl1271_ps_elp_sleep(wl);
3594 out:
3595 	mutex_unlock(&wl->mutex);
3596 }
3597 
3598 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3599 			     struct ieee80211_vif *vif,
3600 			     struct ieee80211_scan_request *hw_req)
3601 {
3602 	struct cfg80211_scan_request *req = &hw_req->req;
3603 	struct wl1271 *wl = hw->priv;
3604 	int ret;
3605 	u8 *ssid = NULL;
3606 	size_t len = 0;
3607 
3608 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3609 
3610 	if (req->n_ssids) {
3611 		ssid = req->ssids[0].ssid;
3612 		len = req->ssids[0].ssid_len;
3613 	}
3614 
3615 	mutex_lock(&wl->mutex);
3616 
3617 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3618 		/*
3619 		 * We cannot return -EBUSY here because cfg80211 will expect
3620 		 * a call to ieee80211_scan_completed if we do - in this case
3621 		 * there won't be any call.
3622 		 */
3623 		ret = -EAGAIN;
3624 		goto out;
3625 	}
3626 
3627 	ret = wl1271_ps_elp_wakeup(wl);
3628 	if (ret < 0)
3629 		goto out;
3630 
3631 	/* fail if there is any role in ROC */
3632 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3633 		/* don't allow scanning right now */
3634 		ret = -EBUSY;
3635 		goto out_sleep;
3636 	}
3637 
3638 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3639 out_sleep:
3640 	wl1271_ps_elp_sleep(wl);
3641 out:
3642 	mutex_unlock(&wl->mutex);
3643 
3644 	return ret;
3645 }
3646 
3647 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3648 				     struct ieee80211_vif *vif)
3649 {
3650 	struct wl1271 *wl = hw->priv;
3651 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3652 	int ret;
3653 
3654 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3655 
3656 	mutex_lock(&wl->mutex);
3657 
3658 	if (unlikely(wl->state != WLCORE_STATE_ON))
3659 		goto out;
3660 
3661 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3662 		goto out;
3663 
3664 	ret = wl1271_ps_elp_wakeup(wl);
3665 	if (ret < 0)
3666 		goto out;
3667 
3668 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3669 		ret = wl->ops->scan_stop(wl, wlvif);
3670 		if (ret < 0)
3671 			goto out_sleep;
3672 	}
3673 
3674 	/*
3675 	 * Rearm the tx watchdog just before idling scan. This
3676 	 * prevents just-finished scans from triggering the watchdog
3677 	 */
3678 	wl12xx_rearm_tx_watchdog_locked(wl);
3679 
3680 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3681 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3682 	wl->scan_wlvif = NULL;
3683 	wl->scan.req = NULL;
3684 	ieee80211_scan_completed(wl->hw, true);
3685 
3686 out_sleep:
3687 	wl1271_ps_elp_sleep(wl);
3688 out:
3689 	mutex_unlock(&wl->mutex);
3690 
3691 	cancel_delayed_work_sync(&wl->scan_complete_work);
3692 }
3693 
3694 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3695 				      struct ieee80211_vif *vif,
3696 				      struct cfg80211_sched_scan_request *req,
3697 				      struct ieee80211_scan_ies *ies)
3698 {
3699 	struct wl1271 *wl = hw->priv;
3700 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3701 	int ret;
3702 
3703 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3704 
3705 	mutex_lock(&wl->mutex);
3706 
3707 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3708 		ret = -EAGAIN;
3709 		goto out;
3710 	}
3711 
3712 	ret = wl1271_ps_elp_wakeup(wl);
3713 	if (ret < 0)
3714 		goto out;
3715 
3716 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3717 	if (ret < 0)
3718 		goto out_sleep;
3719 
3720 	wl->sched_vif = wlvif;
3721 
3722 out_sleep:
3723 	wl1271_ps_elp_sleep(wl);
3724 out:
3725 	mutex_unlock(&wl->mutex);
3726 	return ret;
3727 }
3728 
3729 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3730 				     struct ieee80211_vif *vif)
3731 {
3732 	struct wl1271 *wl = hw->priv;
3733 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3734 	int ret;
3735 
3736 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3737 
3738 	mutex_lock(&wl->mutex);
3739 
3740 	if (unlikely(wl->state != WLCORE_STATE_ON))
3741 		goto out;
3742 
3743 	ret = wl1271_ps_elp_wakeup(wl);
3744 	if (ret < 0)
3745 		goto out;
3746 
3747 	wl->ops->sched_scan_stop(wl, wlvif);
3748 
3749 	wl1271_ps_elp_sleep(wl);
3750 out:
3751 	mutex_unlock(&wl->mutex);
3752 
3753 	return 0;
3754 }
3755 
3756 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3757 {
3758 	struct wl1271 *wl = hw->priv;
3759 	int ret = 0;
3760 
3761 	mutex_lock(&wl->mutex);
3762 
3763 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3764 		ret = -EAGAIN;
3765 		goto out;
3766 	}
3767 
3768 	ret = wl1271_ps_elp_wakeup(wl);
3769 	if (ret < 0)
3770 		goto out;
3771 
3772 	ret = wl1271_acx_frag_threshold(wl, value);
3773 	if (ret < 0)
3774 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3775 
3776 	wl1271_ps_elp_sleep(wl);
3777 
3778 out:
3779 	mutex_unlock(&wl->mutex);
3780 
3781 	return ret;
3782 }
3783 
3784 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3785 {
3786 	struct wl1271 *wl = hw->priv;
3787 	struct wl12xx_vif *wlvif;
3788 	int ret = 0;
3789 
3790 	mutex_lock(&wl->mutex);
3791 
3792 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3793 		ret = -EAGAIN;
3794 		goto out;
3795 	}
3796 
3797 	ret = wl1271_ps_elp_wakeup(wl);
3798 	if (ret < 0)
3799 		goto out;
3800 
3801 	wl12xx_for_each_wlvif(wl, wlvif) {
3802 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3803 		if (ret < 0)
3804 			wl1271_warning("set rts threshold failed: %d", ret);
3805 	}
3806 	wl1271_ps_elp_sleep(wl);
3807 
3808 out:
3809 	mutex_unlock(&wl->mutex);
3810 
3811 	return ret;
3812 }
3813 
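/*
 * Template IE-stripping helpers: find the requested IE, move the rest of the
 * frame over it and trim the skb by the IE's total length (2-byte header plus
 * payload).
 */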
3814 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3815 {
3816 	int len;
3817 	const u8 *next, *end = skb->data + skb->len;
3818 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3819 					skb->len - ieoffset);
3820 	if (!ie)
3821 		return;
3822 	len = ie[1] + 2;
3823 	next = ie + len;
3824 	memmove(ie, next, end - next);
3825 	skb_trim(skb, skb->len - len);
3826 }
3827 
3828 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3829 					    unsigned int oui, u8 oui_type,
3830 					    int ieoffset)
3831 {
3832 	int len;
3833 	const u8 *next, *end = skb->data + skb->len;
3834 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3835 					       skb->data + ieoffset,
3836 					       skb->len - ieoffset);
3837 	if (!ie)
3838 		return;
3839 	len = ie[1] + 2;
3840 	next = ie + len;
3841 	memmove(ie, next, end - next);
3842 	skb_trim(skb, skb->len - len);
3843 }
3844 
3845 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3846 					 struct ieee80211_vif *vif)
3847 {
3848 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3849 	struct sk_buff *skb;
3850 	int ret;
3851 
3852 	skb = ieee80211_proberesp_get(wl->hw, vif);
3853 	if (!skb)
3854 		return -EOPNOTSUPP;
3855 
3856 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3857 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3858 				      skb->data,
3859 				      skb->len, 0,
3860 				      rates);
3861 	dev_kfree_skb(skb);
3862 
3863 	if (ret < 0)
3864 		goto out;
3865 
3866 	wl1271_debug(DEBUG_AP, "probe response updated");
3867 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3868 
3869 out:
3870 	return ret;
3871 }
3872 
3873 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3874 					     struct ieee80211_vif *vif,
3875 					     u8 *probe_rsp_data,
3876 					     size_t probe_rsp_len,
3877 					     u32 rates)
3878 {
3879 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3880 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3881 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3882 	int ssid_ie_offset, ie_offset, templ_len;
3883 	const u8 *ptr;
3884 
3885 	/* no need to change probe response if the SSID is set correctly */
3886 	if (wlvif->ssid_len > 0)
3887 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3888 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3889 					       probe_rsp_data,
3890 					       probe_rsp_len, 0,
3891 					       rates);
3892 
3893 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3894 		wl1271_error("probe_rsp template too big");
3895 		return -EINVAL;
3896 	}
3897 
3898 	/* start searching from IE offset */
3899 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3900 
3901 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3902 			       probe_rsp_len - ie_offset);
3903 	if (!ptr) {
3904 		wl1271_error("No SSID in beacon!");
3905 		return -EINVAL;
3906 	}
3907 
3908 	ssid_ie_offset = ptr - probe_rsp_data;
3909 	ptr += (ptr[1] + 2);
3910 
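	/*
	 * Rebuild the template as: [header + IEs before the SSID IE]
	 * [SSID IE filled from bss_conf] [IEs following the original
	 * SSID IE].
	 */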
3911 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3912 
3913 	/* insert SSID from bss_conf */
3914 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3915 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3916 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3917 	       bss_conf->ssid, bss_conf->ssid_len);
3918 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3919 
3920 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3921 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3922 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3923 
3924 	return wl1271_cmd_template_set(wl, wlvif->role_id,
3925 				       CMD_TEMPL_AP_PROBE_RESPONSE,
3926 				       probe_rsp_templ,
3927 				       templ_len, 0,
3928 				       rates);
3929 }
3930 
3931 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3932 				       struct ieee80211_vif *vif,
3933 				       struct ieee80211_bss_conf *bss_conf,
3934 				       u32 changed)
3935 {
3936 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3937 	int ret = 0;
3938 
3939 	if (changed & BSS_CHANGED_ERP_SLOT) {
3940 		if (bss_conf->use_short_slot)
3941 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3942 		else
3943 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3944 		if (ret < 0) {
3945 			wl1271_warning("Set slot time failed %d", ret);
3946 			goto out;
3947 		}
3948 	}
3949 
3950 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3951 		if (bss_conf->use_short_preamble)
3952 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3953 		else
3954 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3955 	}
3956 
3957 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3958 		if (bss_conf->use_cts_prot)
3959 			ret = wl1271_acx_cts_protect(wl, wlvif,
3960 						     CTSPROTECT_ENABLE);
3961 		else
3962 			ret = wl1271_acx_cts_protect(wl, wlvif,
3963 						     CTSPROTECT_DISABLE);
3964 		if (ret < 0) {
3965 			wl1271_warning("Set ctsprotect failed %d", ret);
3966 			goto out;
3967 		}
3968 	}
3969 
3970 out:
3971 	return ret;
3972 }
3973 
3974 static int wlcore_set_beacon_template(struct wl1271 *wl,
3975 				      struct ieee80211_vif *vif,
3976 				      bool is_ap)
3977 {
3978 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3979 	struct ieee80211_hdr *hdr;
3980 	u32 min_rate;
3981 	int ret;
3982 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3983 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3984 	u16 tmpl_id;
3985 
3986 	if (!beacon) {
3987 		ret = -EINVAL;
3988 		goto out;
3989 	}
3990 
3991 	wl1271_debug(DEBUG_MASTER, "beacon updated");
3992 
3993 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3994 	if (ret < 0) {
3995 		dev_kfree_skb(beacon);
3996 		goto out;
3997 	}
3998 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3999 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4000 		CMD_TEMPL_BEACON;
4001 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4002 				      beacon->data,
4003 				      beacon->len, 0,
4004 				      min_rate);
4005 	if (ret < 0) {
4006 		dev_kfree_skb(beacon);
4007 		goto out;
4008 	}
4009 
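	/*
	 * WMM is considered enabled iff the beacon carries the
	 * Microsoft WMM vendor IE.
	 */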
4010 	wlvif->wmm_enabled =
4011 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4012 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4013 					beacon->data + ieoffset,
4014 					beacon->len - ieoffset);
4015 
4016 	/*
4017 	 * In case userspace has already set a probe-response template
4018 	 * explicitly, don't derive one from the beacon data.
4019 	 */
4020 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4021 		goto end_bcn;
4022 
4023 	/* remove TIM ie from probe response */
4024 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4025 
4026 	/*
4027 	 * Remove the P2P IE from the probe response: the FW answers
4028 	 * probe requests that don't include a P2P IE, while probe
4029 	 * requests that do include one are passed up and answered
4030 	 * by the supplicant (the spec forbids including the P2P IE
4031 	 * when responding to probe requests that didn't include
4032 	 * it).
4033 	 */
4034 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4035 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4036 
4037 	hdr = (struct ieee80211_hdr *) beacon->data;
4038 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4039 					 IEEE80211_STYPE_PROBE_RESP);
4040 	if (is_ap)
4041 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4042 							   beacon->data,
4043 							   beacon->len,
4044 							   min_rate);
4045 	else
4046 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4047 					      CMD_TEMPL_PROBE_RESPONSE,
4048 					      beacon->data,
4049 					      beacon->len, 0,
4050 					      min_rate);
4051 end_bcn:
4052 	dev_kfree_skb(beacon);
4053 	if (ret < 0)
4054 		goto out;
4055 
4056 out:
4057 	return ret;
4058 }
4059 
4060 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4061 					  struct ieee80211_vif *vif,
4062 					  struct ieee80211_bss_conf *bss_conf,
4063 					  u32 changed)
4064 {
4065 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4066 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4067 	int ret = 0;
4068 
4069 	if (changed & BSS_CHANGED_BEACON_INT) {
4070 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4071 			bss_conf->beacon_int);
4072 
4073 		wlvif->beacon_int = bss_conf->beacon_int;
4074 	}
4075 
4076 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4077 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4078 
4079 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4080 	}
4081 
4082 	if (changed & BSS_CHANGED_BEACON) {
4083 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4084 		if (ret < 0)
4085 			goto out;
4086 
4087 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4088 				       &wlvif->flags)) {
4089 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4090 			if (ret < 0)
4091 				goto out;
4092 		}
4093 	}
4094 out:
4095 	if (ret != 0)
4096 		wl1271_error("beacon info change failed: %d", ret);
4097 	return ret;
4098 }
4099 
4100 /* AP mode changes */
4101 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4102 				       struct ieee80211_vif *vif,
4103 				       struct ieee80211_bss_conf *bss_conf,
4104 				       u32 changed)
4105 {
4106 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4107 	int ret = 0;
4108 
4109 	if (changed & BSS_CHANGED_BASIC_RATES) {
4110 		u32 rates = bss_conf->basic_rates;
4111 
4112 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4113 								 wlvif->band);
4114 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4115 							wlvif->basic_rate_set);
4116 
4117 		ret = wl1271_init_ap_rates(wl, wlvif);
4118 		if (ret < 0) {
4119 			wl1271_error("AP rate policy change failed %d", ret);
4120 			goto out;
4121 		}
4122 
4123 		ret = wl1271_ap_init_templates(wl, vif);
4124 		if (ret < 0)
4125 			goto out;
4126 
4127 		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4128 		if (ret < 0)
4129 			goto out;
4130 
4131 		ret = wlcore_set_beacon_template(wl, vif, true);
4132 		if (ret < 0)
4133 			goto out;
4134 	}
4135 
4136 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4137 	if (ret < 0)
4138 		goto out;
4139 
4140 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4141 		if (bss_conf->enable_beacon) {
4142 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4143 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4144 				if (ret < 0)
4145 					goto out;
4146 
4147 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4148 				if (ret < 0)
4149 					goto out;
4150 
4151 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4152 				wl1271_debug(DEBUG_AP, "started AP");
4153 			}
4154 		} else {
4155 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4156 				/*
4157 				 * AP might be in ROC in case we have just
4158 				 * sent an auth reply; handle it.
4159 				 */
4160 				if (test_bit(wlvif->role_id, wl->roc_map))
4161 					wl12xx_croc(wl, wlvif->role_id);
4162 
4163 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4164 				if (ret < 0)
4165 					goto out;
4166 
4167 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4168 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4169 					  &wlvif->flags);
4170 				wl1271_debug(DEBUG_AP, "stopped AP");
4171 			}
4172 		}
4173 	}
4174 
4175 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4176 	if (ret < 0)
4177 		goto out;
4178 
4179 	/* Handle HT information change */
4180 	if ((changed & BSS_CHANGED_HT) &&
4181 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4182 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4183 					bss_conf->ht_operation_mode);
4184 		if (ret < 0) {
4185 			wl1271_warning("Set ht information failed %d", ret);
4186 			goto out;
4187 		}
4188 	}
4189 
4190 out:
4191 	return;
4192 }
4193 
4194 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4195 			    struct ieee80211_bss_conf *bss_conf,
4196 			    u32 sta_rate_set)
4197 {
4198 	u32 rates;
4199 	int ret;
4200 
4201 	wl1271_debug(DEBUG_MAC80211,
4202 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4203 	     bss_conf->bssid, bss_conf->aid,
4204 	     bss_conf->beacon_int,
4205 	     bss_conf->basic_rates, sta_rate_set);
4206 
4207 	wlvif->beacon_int = bss_conf->beacon_int;
4208 	rates = bss_conf->basic_rates;
4209 	wlvif->basic_rate_set =
4210 		wl1271_tx_enabled_rates_get(wl, rates,
4211 					    wlvif->band);
4212 	wlvif->basic_rate =
4213 		wl1271_tx_min_rate_get(wl,
4214 				       wlvif->basic_rate_set);
4215 
4216 	if (sta_rate_set)
4217 		wlvif->rate_set =
4218 			wl1271_tx_enabled_rates_get(wl,
4219 						sta_rate_set,
4220 						wlvif->band);
4221 
4222 	/* we only support sched_scan while not connected */
4223 	if (wl->sched_vif == wlvif)
4224 		wl->ops->sched_scan_stop(wl, wlvif);
4225 
4226 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4227 	if (ret < 0)
4228 		return ret;
4229 
4230 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4231 	if (ret < 0)
4232 		return ret;
4233 
4234 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4235 	if (ret < 0)
4236 		return ret;
4237 
4238 	wlcore_set_ssid(wl, wlvif);
4239 
4240 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4241 
4242 	return 0;
4243 }
4244 
4245 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4246 {
4247 	int ret;
4248 
4249 	/* revert back to minimum rates for the current band */
4250 	wl1271_set_band_rate(wl, wlvif);
4251 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4252 
4253 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4254 	if (ret < 0)
4255 		return ret;
4256 
4257 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4258 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4259 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4260 		if (ret < 0)
4261 			return ret;
4262 	}
4263 
4264 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4265 	return 0;
4266 }
4267 /* STA/IBSS mode changes */
4268 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4269 					struct ieee80211_vif *vif,
4270 					struct ieee80211_bss_conf *bss_conf,
4271 					u32 changed)
4272 {
4273 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4274 	bool do_join = false;
4275 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4276 	bool ibss_joined = false;
4277 	u32 sta_rate_set = 0;
4278 	int ret;
4279 	struct ieee80211_sta *sta;
4280 	bool sta_exists = false;
4281 	struct ieee80211_sta_ht_cap sta_ht_cap;
4282 
4283 	if (is_ibss) {
4284 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4285 						     changed);
4286 		if (ret < 0)
4287 			goto out;
4288 	}
4289 
4290 	if (changed & BSS_CHANGED_IBSS) {
4291 		if (bss_conf->ibss_joined) {
4292 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4293 			ibss_joined = true;
4294 		} else {
4295 			wlcore_unset_assoc(wl, wlvif);
4296 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4297 		}
4298 	}
4299 
4300 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4301 		do_join = true;
4302 
4303 	/* Need to update the SSID (for filtering etc) */
4304 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4305 		do_join = true;
4306 
4307 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4308 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4309 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4310 
4311 		do_join = true;
4312 	}
4313 
4314 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4315 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4316 
4317 	if (changed & BSS_CHANGED_CQM) {
4318 		bool enable = false;
4319 		if (bss_conf->cqm_rssi_thold)
4320 			enable = true;
4321 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4322 						  bss_conf->cqm_rssi_thold,
4323 						  bss_conf->cqm_rssi_hyst);
4324 		if (ret < 0)
4325 			goto out;
4326 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4327 	}
4328 
4329 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4330 		       BSS_CHANGED_ASSOC)) {
4331 		rcu_read_lock();
4332 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4333 		if (sta) {
4334 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4335 
4336 			/* save the supp_rates of the ap */
4337 			sta_rate_set = sta->supp_rates[wlvif->band];
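			/*
			 * Fold the HT MCS 0-7 and MCS 8-15 rx masks into
			 * the rate bitmap, above the legacy rate bits.
			 */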
4338 			if (sta->ht_cap.ht_supported)
4339 				sta_rate_set |=
4340 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4341 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4342 			sta_ht_cap = sta->ht_cap;
4343 			sta_exists = true;
4344 		}
4345 
4346 		rcu_read_unlock();
4347 	}
4348 
4349 	if (changed & BSS_CHANGED_BSSID) {
4350 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4351 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4352 					       sta_rate_set);
4353 			if (ret < 0)
4354 				goto out;
4355 
4356 			/* Need to update the BSSID (for filtering etc) */
4357 			do_join = true;
4358 		} else {
4359 			ret = wlcore_clear_bssid(wl, wlvif);
4360 			if (ret < 0)
4361 				goto out;
4362 		}
4363 	}
4364 
4365 	if (changed & BSS_CHANGED_IBSS) {
4366 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4367 			     bss_conf->ibss_joined);
4368 
4369 		if (bss_conf->ibss_joined) {
4370 			u32 rates = bss_conf->basic_rates;
4371 			wlvif->basic_rate_set =
4372 				wl1271_tx_enabled_rates_get(wl, rates,
4373 							    wlvif->band);
4374 			wlvif->basic_rate =
4375 				wl1271_tx_min_rate_get(wl,
4376 						       wlvif->basic_rate_set);
4377 
4378 			/* by default, use 11b + OFDM rates */
4379 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4380 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4381 			if (ret < 0)
4382 				goto out;
4383 		}
4384 	}
4385 
4386 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4387 		/* enable beacon filtering */
4388 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4389 		if (ret < 0)
4390 			goto out;
4391 	}
4392 
4393 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4394 	if (ret < 0)
4395 		goto out;
4396 
4397 	if (do_join) {
4398 		ret = wlcore_join(wl, wlvif);
4399 		if (ret < 0) {
4400 			wl1271_warning("cmd join failed %d", ret);
4401 			goto out;
4402 		}
4403 	}
4404 
4405 	if (changed & BSS_CHANGED_ASSOC) {
4406 		if (bss_conf->assoc) {
4407 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4408 					       sta_rate_set);
4409 			if (ret < 0)
4410 				goto out;
4411 
4412 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4413 				wl12xx_set_authorized(wl, wlvif);
4414 		} else {
4415 			wlcore_unset_assoc(wl, wlvif);
4416 		}
4417 	}
4418 
4419 	if (changed & BSS_CHANGED_PS) {
4420 		if ((bss_conf->ps) &&
4421 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4422 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4423 			int ps_mode;
4424 			char *ps_mode_str;
4425 
4426 			if (wl->conf.conn.forced_ps) {
4427 				ps_mode = STATION_POWER_SAVE_MODE;
4428 				ps_mode_str = "forced";
4429 			} else {
4430 				ps_mode = STATION_AUTO_PS_MODE;
4431 				ps_mode_str = "auto";
4432 			}
4433 
4434 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4435 
4436 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4437 			if (ret < 0)
4438 				wl1271_warning("enter %s ps failed %d",
4439 					       ps_mode_str, ret);
4440 		} else if (!bss_conf->ps &&
4441 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4442 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4443 
4444 			ret = wl1271_ps_set_mode(wl, wlvif,
4445 						 STATION_ACTIVE_MODE);
4446 			if (ret < 0)
4447 				wl1271_warning("exit auto ps failed %d", ret);
4448 		}
4449 	}
4450 
4451 	/* Handle new association with HT. Do this after join. */
4452 	if (sta_exists) {
4453 		bool enabled =
4454 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4455 
4456 		ret = wlcore_hw_set_peer_cap(wl,
4457 					     &sta_ht_cap,
4458 					     enabled,
4459 					     wlvif->rate_set,
4460 					     wlvif->sta.hlid);
4461 		if (ret < 0) {
4462 			wl1271_warning("Set ht cap failed %d", ret);
4463 			goto out;
4464 
4465 		}
4466 
4467 		if (enabled) {
4468 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4469 						bss_conf->ht_operation_mode);
4470 			if (ret < 0) {
4471 				wl1271_warning("Set ht information failed %d",
4472 					       ret);
4473 				goto out;
4474 			}
4475 		}
4476 	}
4477 
4478 	/* Handle arp filtering. Done after join. */
4479 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4480 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4481 		__be32 addr = bss_conf->arp_addr_list[0];
4482 		wlvif->sta.qos = bss_conf->qos;
4483 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4484 
4485 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4486 			wlvif->ip_addr = addr;
4487 			/*
4488 			 * The template should only need to be configured upon
4489 			 * association. However, the correct IP doesn't seem to
4490 			 * be set when sending, so we have to reconfigure the
4491 			 * template on every IP change.
4492 			 */
4493 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4494 			if (ret < 0) {
4495 				wl1271_warning("build arp rsp failed: %d", ret);
4496 				goto out;
4497 			}
4498 
4499 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4500 				(ACX_ARP_FILTER_ARP_FILTERING |
4501 				 ACX_ARP_FILTER_AUTO_ARP),
4502 				addr);
4503 		} else {
4504 			wlvif->ip_addr = 0;
4505 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4506 		}
4507 
4508 		if (ret < 0)
4509 			goto out;
4510 	}
4511 
4512 out:
4513 	return;
4514 }
4515 
4516 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4517 				       struct ieee80211_vif *vif,
4518 				       struct ieee80211_bss_conf *bss_conf,
4519 				       u32 changed)
4520 {
4521 	struct wl1271 *wl = hw->priv;
4522 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4523 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4524 	int ret;
4525 
4526 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4527 		     wlvif->role_id, (int)changed);
4528 
4529 	/*
4530 	 * make sure to cancel pending disconnections if our association
4531 	 * state changed
4532 	 */
4533 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4534 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4535 
4536 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4537 	    !bss_conf->enable_beacon)
4538 		wl1271_tx_flush(wl);
4539 
4540 	mutex_lock(&wl->mutex);
4541 
4542 	if (unlikely(wl->state != WLCORE_STATE_ON))
4543 		goto out;
4544 
4545 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4546 		goto out;
4547 
4548 	ret = wl1271_ps_elp_wakeup(wl);
4549 	if (ret < 0)
4550 		goto out;
4551 
4552 	if ((changed & BSS_CHANGED_TXPOWER) &&
4553 	    bss_conf->txpower != wlvif->power_level) {
4554 
4555 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4556 		if (ret < 0)
4557 			goto out;
4558 
4559 		wlvif->power_level = bss_conf->txpower;
4560 	}
4561 
4562 	if (is_ap)
4563 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4564 	else
4565 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4566 
4567 	wl1271_ps_elp_sleep(wl);
4568 
4569 out:
4570 	mutex_unlock(&wl->mutex);
4571 }
4572 
4573 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4574 				 struct ieee80211_chanctx_conf *ctx)
4575 {
4576 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4577 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4578 		     cfg80211_get_chandef_type(&ctx->def));
4579 	return 0;
4580 }
4581 
4582 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4583 				     struct ieee80211_chanctx_conf *ctx)
4584 {
4585 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4586 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4587 		     cfg80211_get_chandef_type(&ctx->def));
4588 }
4589 
4590 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4591 				     struct ieee80211_chanctx_conf *ctx,
4592 				     u32 changed)
4593 {
4594 	struct wl1271 *wl = hw->priv;
4595 	struct wl12xx_vif *wlvif;
4596 	int ret;
4597 	int channel = ieee80211_frequency_to_channel(
4598 		ctx->def.chan->center_freq);
4599 
4600 	wl1271_debug(DEBUG_MAC80211,
4601 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4602 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4603 
4604 	mutex_lock(&wl->mutex);
4605 
4606 	ret = wl1271_ps_elp_wakeup(wl);
4607 	if (ret < 0)
4608 		goto out;
4609 
4610 	wl12xx_for_each_wlvif(wl, wlvif) {
4611 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4612 
4613 		rcu_read_lock();
4614 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4615 			rcu_read_unlock();
4616 			continue;
4617 		}
4618 		rcu_read_unlock();
4619 
4620 		/* start radar if needed */
4621 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4622 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4623 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4624 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4625 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4626 			wlcore_hw_set_cac(wl, wlvif, true);
4627 			wlvif->radar_enabled = true;
4628 		}
4629 	}
4630 
4631 	wl1271_ps_elp_sleep(wl);
4632 out:
4633 	mutex_unlock(&wl->mutex);
4634 }
4635 
4636 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4637 					struct ieee80211_vif *vif,
4638 					struct ieee80211_chanctx_conf *ctx)
4639 {
4640 	struct wl1271 *wl = hw->priv;
4641 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4642 	int channel = ieee80211_frequency_to_channel(
4643 		ctx->def.chan->center_freq);
4644 	int ret = -EINVAL;
4645 
4646 	wl1271_debug(DEBUG_MAC80211,
4647 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4648 		     wlvif->role_id, channel,
4649 		     cfg80211_get_chandef_type(&ctx->def),
4650 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4651 
4652 	mutex_lock(&wl->mutex);
4653 
4654 	if (unlikely(wl->state != WLCORE_STATE_ON))
4655 		goto out;
4656 
4657 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4658 		goto out;
4659 
4660 	ret = wl1271_ps_elp_wakeup(wl);
4661 	if (ret < 0)
4662 		goto out;
4663 
4664 	wlvif->band = ctx->def.chan->band;
4665 	wlvif->channel = channel;
4666 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4667 
4668 	/* update default rates according to the band */
4669 	wl1271_set_band_rate(wl, wlvif);
4670 
4671 	if (ctx->radar_enabled &&
4672 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4673 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4674 		wlcore_hw_set_cac(wl, wlvif, true);
4675 		wlvif->radar_enabled = true;
4676 	}
4677 
4678 	wl1271_ps_elp_sleep(wl);
4679 out:
4680 	mutex_unlock(&wl->mutex);
4681 
4682 	return 0;
4683 }
4684 
4685 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4686 					   struct ieee80211_vif *vif,
4687 					   struct ieee80211_chanctx_conf *ctx)
4688 {
4689 	struct wl1271 *wl = hw->priv;
4690 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4691 	int ret;
4692 
4693 	wl1271_debug(DEBUG_MAC80211,
4694 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4695 		     wlvif->role_id,
4696 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4697 		     cfg80211_get_chandef_type(&ctx->def));
4698 
4699 	wl1271_tx_flush(wl);
4700 
4701 	mutex_lock(&wl->mutex);
4702 
4703 	if (unlikely(wl->state != WLCORE_STATE_ON))
4704 		goto out;
4705 
4706 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4707 		goto out;
4708 
4709 	ret = wl1271_ps_elp_wakeup(wl);
4710 	if (ret < 0)
4711 		goto out;
4712 
4713 	if (wlvif->radar_enabled) {
4714 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4715 		wlcore_hw_set_cac(wl, wlvif, false);
4716 		wlvif->radar_enabled = false;
4717 	}
4718 
4719 	wl1271_ps_elp_sleep(wl);
4720 out:
4721 	mutex_unlock(&wl->mutex);
4722 }
4723 
4724 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4725 				    struct wl12xx_vif *wlvif,
4726 				    struct ieee80211_chanctx_conf *new_ctx)
4727 {
4728 	int channel = ieee80211_frequency_to_channel(
4729 		new_ctx->def.chan->center_freq);
4730 
4731 	wl1271_debug(DEBUG_MAC80211,
4732 		     "switch vif (role %d) %d -> %d chan_type: %d",
4733 		     wlvif->role_id, wlvif->channel, channel,
4734 		     cfg80211_get_chandef_type(&new_ctx->def));
4735 
4736 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4737 		return 0;
4738 
4739 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4740 
4741 	if (wlvif->radar_enabled) {
4742 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4743 		wlcore_hw_set_cac(wl, wlvif, false);
4744 		wlvif->radar_enabled = false;
4745 	}
4746 
4747 	wlvif->band = new_ctx->def.chan->band;
4748 	wlvif->channel = channel;
4749 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4750 
4751 	/* start radar if needed */
4752 	if (new_ctx->radar_enabled) {
4753 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4754 		wlcore_hw_set_cac(wl, wlvif, true);
4755 		wlvif->radar_enabled = true;
4756 	}
4757 
4758 	return 0;
4759 }
4760 
4761 static int
4762 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4763 			     struct ieee80211_vif_chanctx_switch *vifs,
4764 			     int n_vifs,
4765 			     enum ieee80211_chanctx_switch_mode mode)
4766 {
4767 	struct wl1271 *wl = hw->priv;
4768 	int i, ret;
4769 
4770 	wl1271_debug(DEBUG_MAC80211,
4771 		     "mac80211 switch chanctx n_vifs %d mode %d",
4772 		     n_vifs, mode);
4773 
4774 	mutex_lock(&wl->mutex);
4775 
4776 	ret = wl1271_ps_elp_wakeup(wl);
4777 	if (ret < 0)
4778 		goto out;
4779 
4780 	for (i = 0; i < n_vifs; i++) {
4781 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4782 
4783 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4784 		if (ret)
4785 			goto out_sleep;
4786 	}
4787 out_sleep:
4788 	wl1271_ps_elp_sleep(wl);
4789 out:
4790 	mutex_unlock(&wl->mutex);
4791 
4792 	return 0;
4793 }
4794 
4795 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4796 			     struct ieee80211_vif *vif, u16 queue,
4797 			     const struct ieee80211_tx_queue_params *params)
4798 {
4799 	struct wl1271 *wl = hw->priv;
4800 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4801 	u8 ps_scheme;
4802 	int ret = 0;
4803 
4804 	if (wlcore_is_p2p_mgmt(wlvif))
4805 		return 0;
4806 
4807 	mutex_lock(&wl->mutex);
4808 
4809 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4810 
4811 	if (params->uapsd)
4812 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4813 	else
4814 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4815 
4816 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4817 		goto out;
4818 
4819 	ret = wl1271_ps_elp_wakeup(wl);
4820 	if (ret < 0)
4821 		goto out;
4822 
4823 	/*
4824 	 * mac80211 configures the TXOP in units of 32us, but the
4825 	 * firmware expects microseconds, hence the << 5 below.
4826 	 */
4827 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4828 				params->cw_min, params->cw_max,
4829 				params->aifs, params->txop << 5);
4830 	if (ret < 0)
4831 		goto out_sleep;
4832 
4833 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4834 				 CONF_CHANNEL_TYPE_EDCF,
4835 				 wl1271_tx_get_queue(queue),
4836 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4837 				 0, 0);
4838 
4839 out_sleep:
4840 	wl1271_ps_elp_sleep(wl);
4841 
4842 out:
4843 	mutex_unlock(&wl->mutex);
4844 
4845 	return ret;
4846 }
4847 
4848 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4849 			     struct ieee80211_vif *vif)
4850 {
4851 
4852 	struct wl1271 *wl = hw->priv;
4853 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4854 	u64 mactime = ULLONG_MAX;
4855 	int ret;
4856 
4857 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4858 
4859 	mutex_lock(&wl->mutex);
4860 
4861 	if (unlikely(wl->state != WLCORE_STATE_ON))
4862 		goto out;
4863 
4864 	ret = wl1271_ps_elp_wakeup(wl);
4865 	if (ret < 0)
4866 		goto out;
4867 
4868 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4869 	if (ret < 0)
4870 		goto out_sleep;
4871 
4872 out_sleep:
4873 	wl1271_ps_elp_sleep(wl);
4874 
4875 out:
4876 	mutex_unlock(&wl->mutex);
4877 	return mactime;
4878 }
4879 
4880 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4881 				struct survey_info *survey)
4882 {
4883 	struct ieee80211_conf *conf = &hw->conf;
4884 
4885 	if (idx != 0)
4886 		return -ENOENT;
4887 
4888 	survey->channel = conf->chandef.chan;
4889 	survey->filled = 0;
4890 	return 0;
4891 }
4892 
4893 static int wl1271_allocate_sta(struct wl1271 *wl,
4894 			     struct wl12xx_vif *wlvif,
4895 			     struct ieee80211_sta *sta)
4896 {
4897 	struct wl1271_station *wl_sta;
4898 	int ret;
4899 
4900 
4901 	if (wl->active_sta_count >= wl->max_ap_stations) {
4902 		wl1271_warning("could not allocate HLID - too many stations");
4903 		return -EBUSY;
4904 	}
4905 
4906 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4907 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4908 	if (ret < 0) {
4909 		wl1271_warning("could not allocate HLID - too many links");
4910 		return -EBUSY;
4911 	}
4912 
4913 	/* use the previous security seq, if this is a recovery/resume */
4914 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4915 
4916 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4917 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4918 	wl->active_sta_count++;
4919 	return 0;
4920 }
4921 
4922 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4923 {
4924 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4925 		return;
4926 
4927 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4928 	__clear_bit(hlid, &wl->ap_ps_map);
4929 	__clear_bit(hlid, &wl->ap_fw_ps_map);
4930 
4931 	/*
4932 	 * save the last used PN in the private part of ieee80211_sta,
4933 	 * in case of recovery/suspend
4934 	 */
4935 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4936 
4937 	wl12xx_free_link(wl, wlvif, &hlid);
4938 	wl->active_sta_count--;
4939 
4940 	/*
4941 	 * rearm the tx watchdog when the last STA is freed - give the FW a
4942 	 * chance to return STA-buffered packets before complaining.
4943 	 */
4944 	if (wl->active_sta_count == 0)
4945 		wl12xx_rearm_tx_watchdog_locked(wl);
4946 }
4947 
4948 static int wl12xx_sta_add(struct wl1271 *wl,
4949 			  struct wl12xx_vif *wlvif,
4950 			  struct ieee80211_sta *sta)
4951 {
4952 	struct wl1271_station *wl_sta;
4953 	int ret = 0;
4954 	u8 hlid;
4955 
4956 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4957 
4958 	ret = wl1271_allocate_sta(wl, wlvif, sta);
4959 	if (ret < 0)
4960 		return ret;
4961 
4962 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4963 	hlid = wl_sta->hlid;
4964 
4965 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4966 	if (ret < 0)
4967 		wl1271_free_sta(wl, wlvif, hlid);
4968 
4969 	return ret;
4970 }
4971 
4972 static int wl12xx_sta_remove(struct wl1271 *wl,
4973 			     struct wl12xx_vif *wlvif,
4974 			     struct ieee80211_sta *sta)
4975 {
4976 	struct wl1271_station *wl_sta;
4977 	int ret = 0, id;
4978 
4979 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4980 
4981 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4982 	id = wl_sta->hlid;
4983 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4984 		return -EINVAL;
4985 
4986 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
4987 	if (ret < 0)
4988 		return ret;
4989 
4990 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4991 	return ret;
4992 }
4993 
4994 static void wlcore_roc_if_possible(struct wl1271 *wl,
4995 				   struct wl12xx_vif *wlvif)
4996 {
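	/* bail out if any role already has an active ROC */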
4997 	if (find_first_bit(wl->roc_map,
4998 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4999 		return;
5000 
5001 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5002 		return;
5003 
5004 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5005 }
5006 
5007 /*
5008  * when wl_sta is NULL, we treat this call as if coming from a
5009  * pending auth reply.
5010  * wl->mutex must be taken and the FW must be awake when the call
5011  * takes place.
5012  */
5013 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5014 			      struct wl1271_station *wl_sta, bool in_conn)
5015 {
5016 	if (in_conn) {
5017 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5018 			return;
5019 
5020 		if (!wlvif->ap_pending_auth_reply &&
5021 		    !wlvif->inconn_count)
5022 			wlcore_roc_if_possible(wl, wlvif);
5023 
5024 		if (wl_sta) {
5025 			wl_sta->in_connection = true;
5026 			wlvif->inconn_count++;
5027 		} else {
5028 			wlvif->ap_pending_auth_reply = true;
5029 		}
5030 	} else {
5031 		if (wl_sta && !wl_sta->in_connection)
5032 			return;
5033 
5034 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5035 			return;
5036 
5037 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5038 			return;
5039 
5040 		if (wl_sta) {
5041 			wl_sta->in_connection = false;
5042 			wlvif->inconn_count--;
5043 		} else {
5044 			wlvif->ap_pending_auth_reply = false;
5045 		}
5046 
5047 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5048 		    test_bit(wlvif->role_id, wl->roc_map))
5049 			wl12xx_croc(wl, wlvif->role_id);
5050 	}
5051 }
5052 
5053 static int wl12xx_update_sta_state(struct wl1271 *wl,
5054 				   struct wl12xx_vif *wlvif,
5055 				   struct ieee80211_sta *sta,
5056 				   enum ieee80211_sta_state old_state,
5057 				   enum ieee80211_sta_state new_state)
5058 {
5059 	struct wl1271_station *wl_sta;
5060 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5061 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5062 	int ret;
5063 
5064 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5065 
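	/*
	 * mac80211 moves stations one step at a time through
	 * NOTEXIST -> NONE -> AUTH -> ASSOC -> AUTHORIZED (and back);
	 * only the transitions the FW cares about are handled below.
	 */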
5066 	/* Add station (AP mode) */
5067 	if (is_ap &&
5068 	    old_state == IEEE80211_STA_NOTEXIST &&
5069 	    new_state == IEEE80211_STA_NONE) {
5070 		ret = wl12xx_sta_add(wl, wlvif, sta);
5071 		if (ret)
5072 			return ret;
5073 
5074 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5075 	}
5076 
5077 	/* Remove station (AP mode) */
5078 	if (is_ap &&
5079 	    old_state == IEEE80211_STA_NONE &&
5080 	    new_state == IEEE80211_STA_NOTEXIST) {
5081 		/* must not fail */
5082 		wl12xx_sta_remove(wl, wlvif, sta);
5083 
5084 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5085 	}
5086 
5087 	/* Authorize station (AP mode) */
5088 	if (is_ap &&
5089 	    new_state == IEEE80211_STA_AUTHORIZED) {
5090 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5091 		if (ret < 0)
5092 			return ret;
5093 
5094 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5095 						     wl_sta->hlid);
5096 		if (ret)
5097 			return ret;
5098 
5099 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5100 	}
5101 
5102 	/* Authorize station */
5103 	if (is_sta &&
5104 	    new_state == IEEE80211_STA_AUTHORIZED) {
5105 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5106 		ret = wl12xx_set_authorized(wl, wlvif);
5107 		if (ret)
5108 			return ret;
5109 	}
5110 
5111 	if (is_sta &&
5112 	    old_state == IEEE80211_STA_AUTHORIZED &&
5113 	    new_state == IEEE80211_STA_ASSOC) {
5114 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5115 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5116 	}
5117 
5118 	/* save seq number on disassoc (suspend) */
5119 	if (is_sta &&
5120 	    old_state == IEEE80211_STA_ASSOC &&
5121 	    new_state == IEEE80211_STA_AUTH) {
5122 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5123 		wlvif->total_freed_pkts = 0;
5124 	}
5125 
5126 	/* restore seq number on assoc (resume) */
5127 	if (is_sta &&
5128 	    old_state == IEEE80211_STA_AUTH &&
5129 	    new_state == IEEE80211_STA_ASSOC) {
5130 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5131 	}
5132 
5133 	/* clear ROCs on failure or authorization */
5134 	if (is_sta &&
5135 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5136 	     new_state == IEEE80211_STA_NOTEXIST)) {
5137 		if (test_bit(wlvif->role_id, wl->roc_map))
5138 			wl12xx_croc(wl, wlvif->role_id);
5139 	}
5140 
5141 	if (is_sta &&
5142 	    old_state == IEEE80211_STA_NOTEXIST &&
5143 	    new_state == IEEE80211_STA_NONE) {
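		/*
		 * A new station entry means a connection attempt is
		 * starting: ROC on the role's channel if nothing else is
		 * ROCing (the CROC is done on authorize/failure above).
		 */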
5144 		if (find_first_bit(wl->roc_map,
5145 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5146 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5147 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5148 				   wlvif->band, wlvif->channel);
5149 		}
5150 	}
5151 	return 0;
5152 }
5153 
5154 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5155 			       struct ieee80211_vif *vif,
5156 			       struct ieee80211_sta *sta,
5157 			       enum ieee80211_sta_state old_state,
5158 			       enum ieee80211_sta_state new_state)
5159 {
5160 	struct wl1271 *wl = hw->priv;
5161 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5162 	int ret;
5163 
5164 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5165 		     sta->aid, old_state, new_state);
5166 
5167 	mutex_lock(&wl->mutex);
5168 
5169 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5170 		ret = -EBUSY;
5171 		goto out;
5172 	}
5173 
5174 	ret = wl1271_ps_elp_wakeup(wl);
5175 	if (ret < 0)
5176 		goto out;
5177 
5178 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5179 
5180 	wl1271_ps_elp_sleep(wl);
5181 out:
5182 	mutex_unlock(&wl->mutex);
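	/* errors are not propagated for state downgrades (teardown) */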
5183 	if (new_state < old_state)
5184 		return 0;
5185 	return ret;
5186 }
5187 
5188 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5189 				  struct ieee80211_vif *vif,
5190 				  struct ieee80211_ampdu_params *params)
5191 {
5192 	struct wl1271 *wl = hw->priv;
5193 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5194 	int ret;
5195 	u8 hlid, *ba_bitmap;
5196 	struct ieee80211_sta *sta = params->sta;
5197 	enum ieee80211_ampdu_mlme_action action = params->action;
5198 	u16 tid = params->tid;
5199 	u16 *ssn = &params->ssn;
5200 
5201 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5202 		     tid);
5203 
5204 	/* sanity check - the fields in FW are only 8 bits wide */
5205 	if (WARN_ON(tid > 0xFF))
5206 		return -ENOTSUPP;
5207 
5208 	mutex_lock(&wl->mutex);
5209 
5210 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5211 		ret = -EAGAIN;
5212 		goto out;
5213 	}
5214 
5215 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5216 		hlid = wlvif->sta.hlid;
5217 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5218 		struct wl1271_station *wl_sta;
5219 
5220 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5221 		hlid = wl_sta->hlid;
5222 	} else {
5223 		ret = -EINVAL;
5224 		goto out;
5225 	}
5226 
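	/* per-link bitmap: bit N set means an RX BA session is active on TID N */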
5227 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5228 
5229 	ret = wl1271_ps_elp_wakeup(wl);
5230 	if (ret < 0)
5231 		goto out;
5232 
5233 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5234 		     tid, action);
5235 
5236 	switch (action) {
5237 	case IEEE80211_AMPDU_RX_START:
5238 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5239 			ret = -ENOTSUPP;
5240 			break;
5241 		}
5242 
5243 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5244 			ret = -EBUSY;
5245 			wl1271_error("exceeded max RX BA sessions");
5246 			break;
5247 		}
5248 
5249 		if (*ba_bitmap & BIT(tid)) {
5250 			ret = -EINVAL;
5251 			wl1271_error("cannot enable RX BA session on active "
5252 				     "tid: %d", tid);
5253 			break;
5254 		}
5255 
5256 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5257 							 hlid);
5258 		if (!ret) {
5259 			*ba_bitmap |= BIT(tid);
5260 			wl->ba_rx_session_count++;
5261 		}
5262 		break;
5263 
5264 	case IEEE80211_AMPDU_RX_STOP:
5265 		if (!(*ba_bitmap & BIT(tid))) {
5266 			/*
5267 			 * this happens on reconfig - so only output a debug
5268 			 * message for now, and don't fail the function.
5269 			 */
5270 			wl1271_debug(DEBUG_MAC80211,
5271 				     "no active RX BA session on tid: %d",
5272 				     tid);
5273 			ret = 0;
5274 			break;
5275 		}
5276 
5277 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5278 							 hlid);
5279 		if (!ret) {
5280 			*ba_bitmap &= ~BIT(tid);
5281 			wl->ba_rx_session_count--;
5282 		}
5283 		break;
5284 
5285 	/*
5286 	 * BA initiator sessions are managed by the FW independently,
5287 	 * so fall through here on purpose for all TX AMPDU actions.
5288 	 */
5289 	case IEEE80211_AMPDU_TX_START:
5290 	case IEEE80211_AMPDU_TX_STOP_CONT:
5291 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5292 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5293 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5294 		ret = -EINVAL;
5295 		break;
5296 
5297 	default:
5298 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5299 		ret = -EINVAL;
5300 	}
5301 
5302 	wl1271_ps_elp_sleep(wl);
5303 
5304 out:
5305 	mutex_unlock(&wl->mutex);
5306 
5307 	return ret;
5308 }
5309 
5310 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5311 				   struct ieee80211_vif *vif,
5312 				   const struct cfg80211_bitrate_mask *mask)
5313 {
5314 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5315 	struct wl1271 *wl = hw->priv;
5316 	int i, ret = 0;
5317 
5318 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5319 		mask->control[NL80211_BAND_2GHZ].legacy,
5320 		mask->control[NL80211_BAND_5GHZ].legacy);
5321 
5322 	mutex_lock(&wl->mutex);
5323 
5324 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5325 		wlvif->bitrate_masks[i] =
5326 			wl1271_tx_enabled_rates_get(wl,
5327 						    mask->control[i].legacy,
5328 						    i);
5329 
5330 	if (unlikely(wl->state != WLCORE_STATE_ON))
5331 		goto out;
5332 
5333 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5334 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5335 
5336 		ret = wl1271_ps_elp_wakeup(wl);
5337 		if (ret < 0)
5338 			goto out;
5339 
5340 		wl1271_set_band_rate(wl, wlvif);
5341 		wlvif->basic_rate =
5342 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5343 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5344 
5345 		wl1271_ps_elp_sleep(wl);
5346 	}
5347 out:
5348 	mutex_unlock(&wl->mutex);
5349 
5350 	return ret;
5351 }
5352 
5353 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5354 				     struct ieee80211_vif *vif,
5355 				     struct ieee80211_channel_switch *ch_switch)
5356 {
5357 	struct wl1271 *wl = hw->priv;
5358 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5359 	int ret;
5360 
5361 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5362 
5363 	wl1271_tx_flush(wl);
5364 
5365 	mutex_lock(&wl->mutex);
5366 
5367 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5368 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5369 			ieee80211_chswitch_done(vif, false);
5370 		goto out;
5371 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5372 		goto out;
5373 	}
5374 
5375 	ret = wl1271_ps_elp_wakeup(wl);
5376 	if (ret < 0)
5377 		goto out;
5378 
5379 	/* TODO: change mac80211 to pass vif as param */
5380 
5381 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5382 		unsigned long delay_usec;
5383 
5384 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5385 		if (ret)
5386 			goto out_sleep;
5387 
5388 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5389 
5390 		/* indicate failure 5 seconds after the expected switch time (CSA count is in beacon intervals) */
5391 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5392 			ch_switch->count;
5393 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5394 					     usecs_to_jiffies(delay_usec) +
5395 					     msecs_to_jiffies(5000));
5396 	}
5397 
5398 out_sleep:
5399 	wl1271_ps_elp_sleep(wl);
5400 
5401 out:
5402 	mutex_unlock(&wl->mutex);
5403 }
5404 
5405 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5406 					struct wl12xx_vif *wlvif,
5407 					u8 eid)
5408 {
5409 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5410 	struct sk_buff *beacon =
5411 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5412 
5413 	if (!beacon)
5414 		return NULL;
5415 
5416 	return cfg80211_find_ie(eid,
5417 				beacon->data + ieoffset,
5418 				beacon->len - ieoffset);
5419 }
5420 
5421 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5422 				u8 *csa_count)
5423 {
5424 	const u8 *ie;
5425 	const struct ieee80211_channel_sw_ie *ie_csa;
5426 
5427 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5428 	if (!ie)
5429 		return -EINVAL;
5430 
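	/* skip the EID and length octets to reach the CSA payload */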
5431 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5432 	*csa_count = ie_csa->count;
5433 
5434 	return 0;
5435 }
5436 
5437 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5438 					    struct ieee80211_vif *vif,
5439 					    struct cfg80211_chan_def *chandef)
5440 {
5441 	struct wl1271 *wl = hw->priv;
5442 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5443 	struct ieee80211_channel_switch ch_switch = {
5444 		.block_tx = true,
5445 		.chandef = *chandef,
5446 	};
5447 	int ret;
5448 
5449 	wl1271_debug(DEBUG_MAC80211,
5450 		     "mac80211 channel switch beacon (role %d)",
5451 		     wlvif->role_id);
5452 
5453 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5454 	if (ret < 0) {
5455 		wl1271_error("error getting beacon (for CSA counter)");
5456 		return;
5457 	}
5458 
5459 	mutex_lock(&wl->mutex);
5460 
5461 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5462 		ret = -EBUSY;
5463 		goto out;
5464 	}
5465 
5466 	ret = wl1271_ps_elp_wakeup(wl);
5467 	if (ret < 0)
5468 		goto out;
5469 
5470 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5471 	if (ret)
5472 		goto out_sleep;
5473 
5474 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5475 
5476 out_sleep:
5477 	wl1271_ps_elp_sleep(wl);
5478 out:
5479 	mutex_unlock(&wl->mutex);
5480 }
5481 
5482 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5483 			    u32 queues, bool drop)
5484 {
5485 	struct wl1271 *wl = hw->priv;
5486 
5487 	wl1271_tx_flush(wl);
5488 }
5489 
5490 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5491 				       struct ieee80211_vif *vif,
5492 				       struct ieee80211_channel *chan,
5493 				       int duration,
5494 				       enum ieee80211_roc_type type)
5495 {
5496 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5497 	struct wl1271 *wl = hw->priv;
5498 	int channel, active_roc, ret = 0;
5499 
5500 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5501 
5502 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5503 		     channel, wlvif->role_id);
5504 
5505 	mutex_lock(&wl->mutex);
5506 
5507 	if (unlikely(wl->state != WLCORE_STATE_ON))
5508 		goto out;
5509 
5510 	/* return -EBUSY if we can't ROC right now */
5511 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5512 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5513 		wl1271_warning("active roc on role %d", active_roc);
5514 		ret = -EBUSY;
5515 		goto out;
5516 	}
5517 
5518 	ret = wl1271_ps_elp_wakeup(wl);
5519 	if (ret < 0)
5520 		goto out;
5521 
5522 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5523 	if (ret < 0)
5524 		goto out_sleep;
5525 
5526 	wl->roc_vif = vif;
5527 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5528 				     msecs_to_jiffies(duration));
5529 out_sleep:
5530 	wl1271_ps_elp_sleep(wl);
5531 out:
5532 	mutex_unlock(&wl->mutex);
5533 	return ret;
5534 }
5535 
5536 static int __wlcore_roc_completed(struct wl1271 *wl)
5537 {
5538 	struct wl12xx_vif *wlvif;
5539 	int ret;
5540 
5541 	/* already completed */
5542 	if (unlikely(!wl->roc_vif))
5543 		return 0;
5544 
5545 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5546 
5547 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5548 		return -EBUSY;
5549 
5550 	ret = wl12xx_stop_dev(wl, wlvif);
5551 	if (ret < 0)
5552 		return ret;
5553 
5554 	wl->roc_vif = NULL;
5555 
5556 	return 0;
5557 }
5558 
5559 static int wlcore_roc_completed(struct wl1271 *wl)
5560 {
5561 	int ret;
5562 
5563 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5564 
5565 	mutex_lock(&wl->mutex);
5566 
5567 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5568 		ret = -EBUSY;
5569 		goto out;
5570 	}
5571 
5572 	ret = wl1271_ps_elp_wakeup(wl);
5573 	if (ret < 0)
5574 		goto out;
5575 
5576 	ret = __wlcore_roc_completed(wl);
5577 
5578 	wl1271_ps_elp_sleep(wl);
5579 out:
5580 	mutex_unlock(&wl->mutex);
5581 
5582 	return ret;
5583 }
5584 
5585 static void wlcore_roc_complete_work(struct work_struct *work)
5586 {
5587 	struct delayed_work *dwork;
5588 	struct wl1271 *wl;
5589 	int ret;
5590 
5591 	dwork = to_delayed_work(work);
5592 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5593 
5594 	ret = wlcore_roc_completed(wl);
5595 	if (!ret)
5596 		ieee80211_remain_on_channel_expired(wl->hw);
5597 }
5598 
5599 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5600 {
5601 	struct wl1271 *wl = hw->priv;
5602 
5603 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5604 
5605 	/* TODO: per-vif */
5606 	wl1271_tx_flush(wl);
5607 
5608 	/*
5609 	 * we can't just flush_work here, because it might deadlock
5610 	 * (as we might get called from the same workqueue)
5611 	 */
5612 	cancel_delayed_work_sync(&wl->roc_complete_work);
5613 	wlcore_roc_completed(wl);
5614 
5615 	return 0;
5616 }
5617 
5618 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5619 				    struct ieee80211_vif *vif,
5620 				    struct ieee80211_sta *sta,
5621 				    u32 changed)
5622 {
5623 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5624 
5625 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5626 
5627 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5628 		return;
5629 
5630 	/* this callback is atomic, so schedule a new work */
5631 	wlvif->rc_update_bw = sta->bandwidth;
5632 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5633 }
5634 
5635 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5636 				     struct ieee80211_vif *vif,
5637 				     struct ieee80211_sta *sta,
5638 				     struct station_info *sinfo)
5639 {
5640 	struct wl1271 *wl = hw->priv;
5641 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5642 	s8 rssi_dbm;
5643 	int ret;
5644 
5645 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5646 
5647 	mutex_lock(&wl->mutex);
5648 
5649 	if (unlikely(wl->state != WLCORE_STATE_ON))
5650 		goto out;
5651 
5652 	ret = wl1271_ps_elp_wakeup(wl);
5653 	if (ret < 0)
5654 		goto out_sleep;
5655 
5656 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5657 	if (ret < 0)
5658 		goto out_sleep;
5659 
5660 	sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5661 	sinfo->signal = rssi_dbm;
5662 
5663 out_sleep:
5664 	wl1271_ps_elp_sleep(wl);
5665 
5666 out:
5667 	mutex_unlock(&wl->mutex);
5668 }
5669 
5670 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5671 {
5672 	struct wl1271 *wl = hw->priv;
5673 	bool ret = false;
5674 
5675 	mutex_lock(&wl->mutex);
5676 
5677 	if (unlikely(wl->state != WLCORE_STATE_ON))
5678 		goto out;
5679 
5680 	/* packets are considered pending if in the TX queue or the FW */
5681 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5682 out:
5683 	mutex_unlock(&wl->mutex);
5684 
5685 	return ret;
5686 }
5687 
5688 /* can't be const, mac80211 writes to this */
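/* bitrates below are in units of 100 kbps (mac80211 convention), e.g. 55 -> 5.5 Mbps */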
5689 static struct ieee80211_rate wl1271_rates[] = {
5690 	{ .bitrate = 10,
5691 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5692 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5693 	{ .bitrate = 20,
5694 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5695 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5696 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5697 	{ .bitrate = 55,
5698 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5699 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5700 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5701 	{ .bitrate = 110,
5702 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5703 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5704 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5705 	{ .bitrate = 60,
5706 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5707 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5708 	{ .bitrate = 90,
5709 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5710 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5711 	{ .bitrate = 120,
5712 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5713 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5714 	{ .bitrate = 180,
5715 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5716 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5717 	{ .bitrate = 240,
5718 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5719 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5720 	{ .bitrate = 360,
5721 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5722 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5723 	{ .bitrate = 480,
5724 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5725 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5726 	{ .bitrate = 540,
5727 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5728 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5729 };
5730 
5731 /* can't be const, mac80211 writes to this */
5732 static struct ieee80211_channel wl1271_channels[] = {
5733 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5734 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5735 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5736 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5737 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5738 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5739 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5740 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5741 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5742 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5743 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5744 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5745 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5746 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5747 };
5748 
5749 /* can't be const, mac80211 writes to this */
5750 static struct ieee80211_supported_band wl1271_band_2ghz = {
5751 	.channels = wl1271_channels,
5752 	.n_channels = ARRAY_SIZE(wl1271_channels),
5753 	.bitrates = wl1271_rates,
5754 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5755 };
5756 
5757 /* 5 GHz data rates for WL1273 */
5758 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5759 	{ .bitrate = 60,
5760 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5761 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5762 	{ .bitrate = 90,
5763 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5764 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5765 	{ .bitrate = 120,
5766 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5767 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5768 	{ .bitrate = 180,
5769 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5770 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5771 	{ .bitrate = 240,
5772 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5773 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5774 	{ .bitrate = 360,
5775 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5776 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5777 	{ .bitrate = 480,
5778 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5779 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5780 	{ .bitrate = 540,
5781 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5782 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5783 };
5784 
5785 /* 5 GHz band channels for WL1273 */
5786 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5787 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5788 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5789 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5790 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5791 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5792 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5793 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5794 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5795 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5796 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5797 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5798 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5799 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5800 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5801 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5802 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5803 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5804 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5805 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5806 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5807 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5808 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5809 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5810 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5811 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5812 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5813 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5814 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5815 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5816 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5817 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5818 };
5819 
5820 static struct ieee80211_supported_band wl1271_band_5ghz = {
5821 	.channels = wl1271_channels_5ghz,
5822 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5823 	.bitrates = wl1271_rates_5ghz,
5824 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5825 };
5826 
5827 static const struct ieee80211_ops wl1271_ops = {
5828 	.start = wl1271_op_start,
5829 	.stop = wlcore_op_stop,
5830 	.add_interface = wl1271_op_add_interface,
5831 	.remove_interface = wl1271_op_remove_interface,
5832 	.change_interface = wl12xx_op_change_interface,
5833 #ifdef CONFIG_PM
5834 	.suspend = wl1271_op_suspend,
5835 	.resume = wl1271_op_resume,
5836 #endif
5837 	.config = wl1271_op_config,
5838 	.prepare_multicast = wl1271_op_prepare_multicast,
5839 	.configure_filter = wl1271_op_configure_filter,
5840 	.tx = wl1271_op_tx,
5841 	.set_key = wlcore_op_set_key,
5842 	.hw_scan = wl1271_op_hw_scan,
5843 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5844 	.sched_scan_start = wl1271_op_sched_scan_start,
5845 	.sched_scan_stop = wl1271_op_sched_scan_stop,
5846 	.bss_info_changed = wl1271_op_bss_info_changed,
5847 	.set_frag_threshold = wl1271_op_set_frag_threshold,
5848 	.set_rts_threshold = wl1271_op_set_rts_threshold,
5849 	.conf_tx = wl1271_op_conf_tx,
5850 	.get_tsf = wl1271_op_get_tsf,
5851 	.get_survey = wl1271_op_get_survey,
5852 	.sta_state = wl12xx_op_sta_state,
5853 	.ampdu_action = wl1271_op_ampdu_action,
5854 	.tx_frames_pending = wl1271_tx_frames_pending,
5855 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5856 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
5857 	.channel_switch = wl12xx_op_channel_switch,
5858 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
5859 	.flush = wlcore_op_flush,
5860 	.remain_on_channel = wlcore_op_remain_on_channel,
5861 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5862 	.add_chanctx = wlcore_op_add_chanctx,
5863 	.remove_chanctx = wlcore_op_remove_chanctx,
5864 	.change_chanctx = wlcore_op_change_chanctx,
5865 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5866 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5867 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5868 	.sta_rc_update = wlcore_op_sta_rc_update,
5869 	.sta_statistics = wlcore_op_sta_statistics,
5870 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5871 };
5872 
5873 
5874 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
5875 {
5876 	u8 idx;
5877 
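	/* only the 2.4 GHz and 5 GHz bands (indices 0 and 1) are handled here */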
5878 	BUG_ON(band >= 2);
5879 
5880 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5881 		wl1271_error("Illegal RX rate from HW: %d", rate);
5882 		return 0;
5883 	}
5884 
5885 	idx = wl->band_rate_to_idx[band][rate];
5886 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5887 		wl1271_error("Unsupported RX rate from HW: %d", rate);
5888 		return 0;
5889 	}
5890 
5891 	return idx;
5892 }
5893 
5894 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5895 {
5896 	int i;
5897 
5898 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5899 		     oui, nic);
5900 
5901 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5902 		wl1271_warning("NIC part of the MAC address wraps around!");
5903 
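	/*
	 * Build consecutive addresses from the base; e.g. with the
	 * hypothetical base oui 0x080028 / nic 0x000001 this yields
	 * 08:00:28:00:00:01, 08:00:28:00:00:02, and so on.
	 */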
5904 	for (i = 0; i < wl->num_mac_addr; i++) {
5905 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
5906 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
5907 		wl->addresses[i].addr[2] = (u8) oui;
5908 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
5909 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
5910 		wl->addresses[i].addr[5] = (u8) nic;
5911 		nic++;
5912 	}
5913 
5914 	/* at most, we may be one address short */
5915 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5916 
5917 	/*
5918 	 * turn on the LAA bit in the first address and use it as
5919 	 * the last address.
5920 	 */
5921 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5922 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5923 		memcpy(&wl->addresses[idx], &wl->addresses[0],
5924 		       sizeof(wl->addresses[0]));
5925 		/* set the locally administered (LAA) bit in the first octet */
5926 		wl->addresses[idx].addr[0] |= BIT(1);
5927 	}
5928 
5929 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5930 	wl->hw->wiphy->addresses = wl->addresses;
5931 }
5932 
5933 static int wl12xx_get_hw_info(struct wl1271 *wl)
5934 {
5935 	int ret;
5936 
5937 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5938 	if (ret < 0)
5939 		goto out;
5940 
5941 	wl->fuse_oui_addr = 0;
5942 	wl->fuse_nic_addr = 0;
5943 
5944 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5945 	if (ret < 0)
5946 		goto out;
5947 
5948 	if (wl->ops->get_mac)
5949 		ret = wl->ops->get_mac(wl);
5950 
5951 out:
5952 	return ret;
5953 }
5954 
5955 static int wl1271_register_hw(struct wl1271 *wl)
5956 {
5957 	int ret;
5958 	u32 oui_addr = 0, nic_addr = 0;
5959 
5960 	if (wl->mac80211_registered)
5961 		return 0;
5962 
5963 	if (wl->nvs_len >= 12) {
5964 		/* NOTE: The wl->nvs->nvs element must be first; to
5965 		 * simplify the cast below, we assume it is at the
5966 		 * beginning of the wl->nvs structure.
5967 		 */
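		/*
		 * The OUI part of the address is read from
		 * nvs[11]:nvs[10]:nvs[6] and the NIC part from
		 * nvs[5]:nvs[4]:nvs[3] below.
		 */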
5968 		u8 *nvs_ptr = (u8 *)wl->nvs;
5969 
5970 		oui_addr =
5971 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5972 		nic_addr =
5973 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5974 	}
5975 
5976 	/* if the MAC address is zeroed in the NVS derive from fuse */
5977 	if (oui_addr == 0 && nic_addr == 0) {
5978 		oui_addr = wl->fuse_oui_addr;
5979 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
5980 		nic_addr = wl->fuse_nic_addr + 1;
5981 	}
5982 
5983 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5984 
5985 	ret = ieee80211_register_hw(wl->hw);
5986 	if (ret < 0) {
5987 		wl1271_error("unable to register mac80211 hw: %d", ret);
5988 		goto out;
5989 	}
5990 
5991 	wl->mac80211_registered = true;
5992 
5993 	wl1271_debugfs_init(wl);
5994 
5995 	wl1271_notice("loaded");
5996 
5997 out:
5998 	return ret;
5999 }
6000 
6001 static void wl1271_unregister_hw(struct wl1271 *wl)
6002 {
6003 	if (wl->plt)
6004 		wl1271_plt_stop(wl);
6005 
6006 	ieee80211_unregister_hw(wl->hw);
6007 	wl->mac80211_registered = false;
6008 
6009 }
6010 
6011 static int wl1271_init_ieee80211(struct wl1271 *wl)
6012 {
6013 	int i;
6014 	static const u32 cipher_suites[] = {
6015 		WLAN_CIPHER_SUITE_WEP40,
6016 		WLAN_CIPHER_SUITE_WEP104,
6017 		WLAN_CIPHER_SUITE_TKIP,
6018 		WLAN_CIPHER_SUITE_CCMP,
6019 		WL1271_CIPHER_SUITE_GEM,
6020 	};
6021 
6022 	/* The tx descriptor buffer */
6023 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6024 
6025 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6026 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6027 
6028 	/* unit: beacon intervals */
6029 	/* FIXME: find a proper value */
6030 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6031 
6032 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6033 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6034 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6035 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6036 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6037 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6038 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6039 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6040 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6041 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6042 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6043 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6044 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6045 
6046 	wl->hw->wiphy->cipher_suites = cipher_suites;
6047 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6048 
6049 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6050 					 BIT(NL80211_IFTYPE_AP) |
6051 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6052 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6053 					 BIT(NL80211_IFTYPE_P2P_GO);
6054 	wl->hw->wiphy->max_scan_ssids = 1;
6055 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6056 	wl->hw->wiphy->max_match_sets = 16;
6057 	/*
6058 	 * The maximum length of elements in scan probe request templates
6059 	 * should be the maximum possible template length, minus the
6060 	 * IEEE 802.11 header of the template.
6061 	 */
6062 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6063 			sizeof(struct ieee80211_header);
6064 
6065 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6066 		sizeof(struct ieee80211_header);
6067 
6068 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6069 
6070 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6071 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6072 				WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6073 				WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6074 
6075 	/* make sure all our channels fit in the scanned_ch bitmask */
6076 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6077 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6078 		     WL1271_MAX_CHANNELS);
6079 	/*
6080 	 * clear channel flags from the previous usage
6081 	 * and restore max_power & max_antenna_gain values.
6082 	 */
6083 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6084 		wl1271_band_2ghz.channels[i].flags = 0;
6085 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6086 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6087 	}
6088 
6089 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6090 		wl1271_band_5ghz.channels[i].flags = 0;
6091 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6092 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6093 	}
6094 
6095 	/*
6096 	 * We keep local copies of the band structs because we need to
6097 	 * modify them on a per-device basis.
6098 	 */
6099 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6100 	       sizeof(wl1271_band_2ghz));
6101 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6102 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6103 	       sizeof(*wl->ht_cap));
6104 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6105 	       sizeof(wl1271_band_5ghz));
6106 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6107 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6108 	       sizeof(*wl->ht_cap));
6109 
6110 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6111 		&wl->bands[NL80211_BAND_2GHZ];
6112 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6113 		&wl->bands[NL80211_BAND_5GHZ];
6114 
6115 	/*
6116 	 * allow 4 queues per supported mac address, plus 1 cab queue
6117 	 * per mac, plus one global off-channel Tx queue
6118 	 */
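	/*
	 * For example, assuming NUM_TX_QUEUES == 4 and
	 * WLCORE_NUM_MAC_ADDRESSES == 3, this works out to
	 * (4 + 1) * 3 + 1 = 16 hardware queues.
	 */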
6119 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6120 
6121 	/* the last queue is the offchannel queue */
6122 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6123 	wl->hw->max_rates = 1;
6124 
6125 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6126 
6127 	/* the FW answers probe-requests in AP-mode */
6128 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6129 	wl->hw->wiphy->probe_resp_offload =
6130 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6131 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6132 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6133 
6134 	/* allowed interface combinations */
6135 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6136 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6137 
6138 	/* register vendor commands */
6139 	wlcore_set_vendor_commands(wl->hw->wiphy);
6140 
6141 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6142 
6143 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6144 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6145 
6146 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6147 
6148 	return 0;
6149 }
6150 
6151 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6152 				     u32 mbox_size)
6153 {
6154 	struct ieee80211_hw *hw;
6155 	struct wl1271 *wl;
6156 	int i, j, ret;
6157 	unsigned int order;
6158 
6159 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6160 	if (!hw) {
6161 		wl1271_error("could not alloc ieee80211_hw");
6162 		ret = -ENOMEM;
6163 		goto err_hw_alloc;
6164 	}
6165 
6166 	wl = hw->priv;
6167 	memset(wl, 0, sizeof(*wl));
6168 
6169 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6170 	if (!wl->priv) {
6171 		wl1271_error("could not alloc wl priv");
6172 		ret = -ENOMEM;
6173 		goto err_priv_alloc;
6174 	}
6175 
6176 	INIT_LIST_HEAD(&wl->wlvif_list);
6177 
6178 	wl->hw = hw;
6179 
6180 	/*
6181 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6182 	 * We don't allocate any additional resources here, so that's fine.
6183 	 */
6184 	for (i = 0; i < NUM_TX_QUEUES; i++)
6185 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6186 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6187 
6188 	skb_queue_head_init(&wl->deferred_rx_queue);
6189 	skb_queue_head_init(&wl->deferred_tx_queue);
6190 
6191 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6192 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6193 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6194 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6195 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6196 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6197 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6198 
6199 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6200 	if (!wl->freezable_wq) {
6201 		ret = -ENOMEM;
6202 		goto err_hw;
6203 	}
6204 
6205 	wl->channel = 0;
6206 	wl->rx_counter = 0;
6207 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6208 	wl->band = NL80211_BAND_2GHZ;
6209 	wl->channel_type = NL80211_CHAN_NO_HT;
6210 	wl->flags = 0;
6211 	wl->sg_enabled = true;
6212 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6213 	wl->recovery_count = 0;
6214 	wl->hw_pg_ver = -1;
6215 	wl->ap_ps_map = 0;
6216 	wl->ap_fw_ps_map = 0;
6217 	wl->quirks = 0;
6218 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6219 	wl->active_sta_count = 0;
6220 	wl->active_link_count = 0;
6221 	wl->fwlog_size = 0;
6222 
6223 	/* The system link is always allocated */
6224 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6225 
6226 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6227 	for (i = 0; i < wl->num_tx_desc; i++)
6228 		wl->tx_frames[i] = NULL;
6229 
6230 	spin_lock_init(&wl->wl_lock);
6231 
6232 	wl->state = WLCORE_STATE_OFF;
6233 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6234 	mutex_init(&wl->mutex);
6235 	mutex_init(&wl->flush_mutex);
6236 	init_completion(&wl->nvs_loading_complete);
6237 
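	/*
	 * The aggregation buffer is allocated in whole pages; e.g. with
	 * 4 KiB pages, a 12 KiB aggr_buf_size gives order 2 (16 KiB).
	 */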
6238 	order = get_order(aggr_buf_size);
6239 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6240 	if (!wl->aggr_buf) {
6241 		ret = -ENOMEM;
6242 		goto err_wq;
6243 	}
6244 	wl->aggr_buf_size = aggr_buf_size;
6245 
6246 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6247 	if (!wl->dummy_packet) {
6248 		ret = -ENOMEM;
6249 		goto err_aggr;
6250 	}
6251 
6252 	/* Allocate one page for the FW log */
6253 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6254 	if (!wl->fwlog) {
6255 		ret = -ENOMEM;
6256 		goto err_dummy_packet;
6257 	}
6258 
6259 	wl->mbox_size = mbox_size;
6260 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6261 	if (!wl->mbox) {
6262 		ret = -ENOMEM;
6263 		goto err_fwlog;
6264 	}
6265 
6266 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6267 	if (!wl->buffer_32) {
6268 		ret = -ENOMEM;
6269 		goto err_mbox;
6270 	}
6271 
6272 	return hw;
6273 
6274 err_mbox:
6275 	kfree(wl->mbox);
6276 
6277 err_fwlog:
6278 	free_page((unsigned long)wl->fwlog);
6279 
6280 err_dummy_packet:
6281 	dev_kfree_skb(wl->dummy_packet);
6282 
6283 err_aggr:
6284 	free_pages((unsigned long)wl->aggr_buf, order);
6285 
6286 err_wq:
6287 	destroy_workqueue(wl->freezable_wq);
6288 
6289 err_hw:
6290 	wl1271_debugfs_exit(wl);
6291 	kfree(wl->priv);
6292 
6293 err_priv_alloc:
6294 	ieee80211_free_hw(hw);
6295 
6296 err_hw_alloc:
6297 
6298 	return ERR_PTR(ret);
6299 }
6300 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6301 
6302 int wlcore_free_hw(struct wl1271 *wl)
6303 {
6304 	/* Unblock any fwlog readers */
6305 	mutex_lock(&wl->mutex);
6306 	wl->fwlog_size = -1;
6307 	mutex_unlock(&wl->mutex);
6308 
6309 	wlcore_sysfs_free(wl);
6310 
6311 	kfree(wl->buffer_32);
6312 	kfree(wl->mbox);
6313 	free_page((unsigned long)wl->fwlog);
6314 	dev_kfree_skb(wl->dummy_packet);
6315 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6316 
6317 	wl1271_debugfs_exit(wl);
6318 
6319 	vfree(wl->fw);
6320 	wl->fw = NULL;
6321 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6322 	kfree(wl->nvs);
6323 	wl->nvs = NULL;
6324 
6325 	kfree(wl->raw_fw_status);
6326 	kfree(wl->fw_status);
6327 	kfree(wl->tx_res_if);
6328 	destroy_workqueue(wl->freezable_wq);
6329 
6330 	kfree(wl->priv);
6331 	ieee80211_free_hw(wl->hw);
6332 
6333 	return 0;
6334 }
6335 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6336 
6337 #ifdef CONFIG_PM
6338 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6339 	.flags = WIPHY_WOWLAN_ANY,
6340 	.n_patterns = WL1271_MAX_RX_FILTERS,
6341 	.pattern_min_len = 1,
6342 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6343 };
6344 #endif
6345 
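/*
 * Minimal primary handler used for edge-triggered interrupts: it does
 * no I/O and simply defers all work to the threaded handler.
 */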
6346 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6347 {
6348 	return IRQ_WAKE_THREAD;
6349 }
6350 
6351 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6352 {
6353 	struct wl1271 *wl = context;
6354 	struct platform_device *pdev = wl->pdev;
6355 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6356 	struct resource *res;
6357 
6358 	int ret;
6359 	irq_handler_t hardirq_fn = NULL;
6360 
6361 	if (fw) {
6362 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6363 		if (!wl->nvs) {
6364 			wl1271_error("Could not allocate nvs data");
6365 			goto out;
6366 		}
6367 		wl->nvs_len = fw->size;
6368 	} else {
6369 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6370 			     WL12XX_NVS_NAME);
6371 		wl->nvs = NULL;
6372 		wl->nvs_len = 0;
6373 	}
6374 
6375 	ret = wl->ops->setup(wl);
6376 	if (ret < 0)
6377 		goto out_free_nvs;
6378 
6379 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6380 
6381 	/* adjust some runtime configuration parameters */
6382 	wlcore_adjust_conf(wl);
6383 
6384 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6385 	if (!res) {
6386 		wl1271_error("Could not get IRQ resource");
6387 		goto out_free_nvs;
6388 	}
6389 
6390 	wl->irq = res->start;
6391 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6392 	wl->if_ops = pdev_data->if_ops;
6393 
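	/*
	 * Edge-triggered interrupts get the minimal primary handler that
	 * just wakes the IRQ thread; level-triggered interrupts use
	 * IRQF_ONESHOT so the line stays masked until the threaded
	 * handler has finished.
	 */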
6394 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6395 		hardirq_fn = wlcore_hardirq;
6396 	else
6397 		wl->irq_flags |= IRQF_ONESHOT;
6398 
6399 	ret = wl12xx_set_power_on(wl);
6400 	if (ret < 0)
6401 		goto out_free_nvs;
6402 
6403 	ret = wl12xx_get_hw_info(wl);
6404 	if (ret < 0) {
6405 		wl1271_error("couldn't get hw info");
6406 		wl1271_power_off(wl);
6407 		goto out_free_nvs;
6408 	}
6409 
6410 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6411 				   wl->irq_flags, pdev->name, wl);
6412 	if (ret < 0) {
6413 		wl1271_error("interrupt configuration failed");
6414 		wl1271_power_off(wl);
6415 		goto out_free_nvs;
6416 	}
6417 
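	/*
	 * If the IRQ can be armed as a wakeup source, advertise WoWLAN
	 * support, provided the platform keeps the chip powered in
	 * suspend.
	 */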
6418 #ifdef CONFIG_PM
6419 	ret = enable_irq_wake(wl->irq);
6420 	if (!ret) {
6421 		wl->irq_wake_enabled = true;
6422 		device_init_wakeup(wl->dev, 1);
6423 		if (pdev_data->pwr_in_suspend)
6424 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6425 	}
6426 #endif
6427 	disable_irq(wl->irq);
6428 	wl1271_power_off(wl);
6429 
6430 	ret = wl->ops->identify_chip(wl);
6431 	if (ret < 0)
6432 		goto out_irq;
6433 
6434 	ret = wl1271_init_ieee80211(wl);
6435 	if (ret)
6436 		goto out_irq;
6437 
6438 	ret = wl1271_register_hw(wl);
6439 	if (ret)
6440 		goto out_irq;
6441 
6442 	ret = wlcore_sysfs_init(wl);
6443 	if (ret)
6444 		goto out_unreg;
6445 
6446 	wl->initialized = true;
6447 	goto out;
6448 
6449 out_unreg:
6450 	wl1271_unregister_hw(wl);
6451 
6452 out_irq:
6453 	free_irq(wl->irq, wl);
6454 
6455 out_free_nvs:
6456 	kfree(wl->nvs);
6457 
6458 out:
6459 	release_firmware(fw);
6460 	complete_all(&wl->nvs_loading_complete);
6461 }
6462 
6463 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6464 {
6465 	int ret;
6466 
6467 	if (!wl->ops || !wl->ptable)
6468 		return -EINVAL;
6469 
6470 	wl->dev = &pdev->dev;
6471 	wl->pdev = pdev;
6472 	platform_set_drvdata(pdev, wl);
6473 
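	/*
	 * The NVS file is requested asynchronously; initialization
	 * continues in wlcore_nvs_cb() once the request completes
	 * (successfully or not).
	 */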
6474 	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6475 				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6476 				      wl, wlcore_nvs_cb);
6477 	if (ret < 0) {
6478 		wl1271_error("request_firmware_nowait failed: %d", ret);
6479 		complete_all(&wl->nvs_loading_complete);
6480 	}
6481 
6482 	return ret;
6483 }
6484 EXPORT_SYMBOL_GPL(wlcore_probe);
6485 
6486 int wlcore_remove(struct platform_device *pdev)
6487 {
6488 	struct wl1271 *wl = platform_get_drvdata(pdev);
6489 
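	/* make sure the asynchronous NVS callback has finished before tearing down */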
6490 	wait_for_completion(&wl->nvs_loading_complete);
6491 	if (!wl->initialized)
6492 		return 0;
6493 
6494 	if (wl->irq_wake_enabled) {
6495 		device_init_wakeup(wl->dev, 0);
6496 		disable_irq_wake(wl->irq);
6497 	}
6498 	wl1271_unregister_hw(wl);
6499 	free_irq(wl->irq, wl);
6500 	wlcore_free_hw(wl);
6501 
6502 	return 0;
6503 }
6504 EXPORT_SYMBOL_GPL(wlcore_remove);
6505 
6506 u32 wl12xx_debug_level = DEBUG_NONE;
6507 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6508 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6509 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6510 
6511 module_param_named(fwlog, fwlog_param, charp, 0);
6512 MODULE_PARM_DESC(fwlog,
6513 		 "FW logger options: continuous, dbgpins or disable");
6514 
6515 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6516 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6517 
6518 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6519 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6520 
6521 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6522 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6523 
6524 MODULE_LICENSE("GPL");
6525 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6526 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6527 MODULE_FIRMWARE(WL12XX_NVS_NAME);
6528