xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/main.c (revision 12eb4683)
1 
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * version 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20  * 02110-1301 USA
21  *
22  */
23 
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
30 
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43 
44 #define WL1271_BOOT_RETRIES 3
45 
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery     = -1;
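/*
 * Editorial note: -1 (and NULL for fwlog_param) acts as a "not set on the
 * module command line" sentinel; wlcore_adjust_conf() below only overrides
 * the defaults in wl->conf when a value was actually supplied.
 */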
50 
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 					 struct ieee80211_vif *vif,
53 					 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
56 
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
58 {
59 	int ret;
60 
61 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
62 		return -EINVAL;
63 
64 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
65 		return 0;
66 
67 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
68 		return 0;
69 
70 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
71 	if (ret < 0)
72 		return ret;
73 
74 	wl1271_info("Association completed.");
75 	return 0;
76 }
77 
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 			      struct regulatory_request *request)
80 {
81 	struct ieee80211_supported_band *band;
82 	struct ieee80211_channel *ch;
83 	int i;
84 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 	struct wl1271 *wl = hw->priv;
86 
87 	band = wiphy->bands[IEEE80211_BAND_5GHZ];
88 	for (i = 0; i < band->n_channels; i++) {
89 		ch = &band->channels[i];
90 		if (ch->flags & IEEE80211_CHAN_DISABLED)
91 			continue;
92 
93 		if (ch->flags & IEEE80211_CHAN_RADAR)
94 			ch->flags |= IEEE80211_CHAN_NO_IBSS |
95 				     IEEE80211_CHAN_PASSIVE_SCAN;
96 
97 	}
98 
99 	wlcore_regdomain_config(wl);
100 }
101 
102 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
103 				   bool enable)
104 {
105 	int ret = 0;
106 
107 	/* we should hold wl->mutex */
108 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
109 	if (ret < 0)
110 		goto out;
111 
112 	if (enable)
113 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
114 	else
115 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
116 out:
117 	return ret;
118 }
119 
120 /*
121  * This function is called when the rx_streaming interval
122  * has been changed or rx_streaming should be disabled.
123  */
124 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
125 {
126 	int ret = 0;
127 	int period = wl->conf.rx_streaming.interval;
128 
129 	/* don't reconfigure if rx_streaming is disabled */
130 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
131 		goto out;
132 
133 	/* reconfigure/disable according to new streaming_period */
134 	if (period &&
135 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
136 	    (wl->conf.rx_streaming.always ||
137 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
138 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
139 	else {
140 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
141 		/* don't cancel_work_sync since we might deadlock */
142 		del_timer_sync(&wlvif->rx_streaming_timer);
143 	}
144 out:
145 	return ret;
146 }
147 
148 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
149 {
150 	int ret;
151 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
152 						rx_streaming_enable_work);
153 	struct wl1271 *wl = wlvif->wl;
154 
155 	mutex_lock(&wl->mutex);
156 
157 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
158 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
159 	    (!wl->conf.rx_streaming.always &&
160 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
161 		goto out;
162 
163 	if (!wl->conf.rx_streaming.interval)
164 		goto out;
165 
166 	ret = wl1271_ps_elp_wakeup(wl);
167 	if (ret < 0)
168 		goto out;
169 
170 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
171 	if (ret < 0)
172 		goto out_sleep;
173 
174 	/* stop it after some time of inactivity */
175 	mod_timer(&wlvif->rx_streaming_timer,
176 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
177 
178 out_sleep:
179 	wl1271_ps_elp_sleep(wl);
180 out:
181 	mutex_unlock(&wl->mutex);
182 }
183 
184 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
185 {
186 	int ret;
187 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
188 						rx_streaming_disable_work);
189 	struct wl1271 *wl = wlvif->wl;
190 
191 	mutex_lock(&wl->mutex);
192 
193 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
194 		goto out;
195 
196 	ret = wl1271_ps_elp_wakeup(wl);
197 	if (ret < 0)
198 		goto out;
199 
200 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
201 	if (ret)
202 		goto out_sleep;
203 
204 out_sleep:
205 	wl1271_ps_elp_sleep(wl);
206 out:
207 	mutex_unlock(&wl->mutex);
208 }
209 
210 static void wl1271_rx_streaming_timer(unsigned long data)
211 {
212 	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
213 	struct wl1271 *wl = wlvif->wl;
214 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
215 }
216 
217 /* wl->mutex must be taken */
218 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
219 {
220 	/* if the watchdog is not armed, don't do anything */
221 	if (wl->tx_allocated_blocks == 0)
222 		return;
223 
224 	cancel_delayed_work(&wl->tx_watchdog_work);
225 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
226 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
227 }
228 
229 static void wl12xx_tx_watchdog_work(struct work_struct *work)
230 {
231 	struct delayed_work *dwork;
232 	struct wl1271 *wl;
233 
234 	dwork = container_of(work, struct delayed_work, work);
235 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
236 
237 	mutex_lock(&wl->mutex);
238 
239 	if (unlikely(wl->state != WLCORE_STATE_ON))
240 		goto out;
241 
242 	/* Tx went out in the meantime - everything is ok */
243 	if (unlikely(wl->tx_allocated_blocks == 0))
244 		goto out;
245 
246 	/*
247 	 * if a ROC is in progress, we might not have any Tx for a long
248 	 * time (e.g. pending Tx on the non-ROC channels)
249 	 */
250 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
251 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
252 			     wl->conf.tx.tx_watchdog_timeout);
253 		wl12xx_rearm_tx_watchdog_locked(wl);
254 		goto out;
255 	}
256 
257 	/*
258 	 * if a scan is in progress, we might not have any Tx for a long
259 	 * time
260 	 */
261 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
262 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
263 			     wl->conf.tx.tx_watchdog_timeout);
264 		wl12xx_rearm_tx_watchdog_locked(wl);
265 		goto out;
266 	}
267 
268 	/*
269 	 * An AP might cache a frame for a long time for a sleeping station,
270 	 * so rearm the timer if there's an AP interface with stations. If
271 	 * Tx is genuinely stuck we will hopefully discover it when all
272 	 * stations are removed due to inactivity.
273 	 */
274 	if (wl->active_sta_count) {
275 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has"
276 			     " %d stations",
277 			      wl->conf.tx.tx_watchdog_timeout,
278 			      wl->active_sta_count);
279 		wl12xx_rearm_tx_watchdog_locked(wl);
280 		goto out;
281 	}
282 
283 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
284 		     wl->conf.tx.tx_watchdog_timeout);
285 	wl12xx_queue_recovery_work(wl);
286 
287 out:
288 	mutex_unlock(&wl->mutex);
289 }
290 
291 static void wlcore_adjust_conf(struct wl1271 *wl)
292 {
293 	/* Adjust settings according to optional module parameters */
294 
295 	/* Firmware Logger params */
296 	if (fwlog_mem_blocks != -1) {
297 		if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
298 		    fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
299 			wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
300 		} else {
301 			wl1271_error(
302 				"Illegal fwlog_mem_blocks=%d using default %d",
303 				fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
304 		}
305 	}
306 
307 	if (fwlog_param) {
308 		if (!strcmp(fwlog_param, "continuous")) {
309 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
310 		} else if (!strcmp(fwlog_param, "ondemand")) {
311 			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
312 		} else if (!strcmp(fwlog_param, "dbgpins")) {
313 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
314 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
315 		} else if (!strcmp(fwlog_param, "disable")) {
316 			wl->conf.fwlog.mem_blocks = 0;
317 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 		} else {
319 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
320 		}
321 	}
322 
323 	if (bug_on_recovery != -1)
324 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
325 
326 	if (no_recovery != -1)
327 		wl->conf.recovery.no_recovery = (u8) no_recovery;
328 }
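/*
 * Editorial note: a hypothetical way to exercise the overrides above at
 * module load time. The exact parameter names depend on the module_param()
 * declarations elsewhere in this file and are assumed here, not verified:
 *
 *   modprobe wlcore fwlog=continuous fwlog_mem_blocks=2 no_recovery=1
 *
 * With such values wlcore_adjust_conf() would select WL12XX_FWLOG_CONTINUOUS,
 * request 2 firmware-log memory blocks (if within the CONF_FWLOG_*_MEM_BLOCKS
 * range) and disable automatic recovery.
 */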
329 
330 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
331 					struct wl12xx_vif *wlvif,
332 					u8 hlid, u8 tx_pkts)
333 {
334 	bool fw_ps;
335 
336 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
337 
338 	/*
339 	 * Wake up from high-level PS if the STA is asleep with too few
340 	 * packets in FW or if the STA is awake.
341 	 */
342 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
343 		wl12xx_ps_link_end(wl, wlvif, hlid);
344 
345 	/*
346 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
347 	 * Make an exception if this is the only connected link. In this
348 	 * case FW-memory congestion is less of a problem.
349 	 * Note that a single connected STA means 3 active links, since we must
350 	 * account for the global and broadcast AP links. The "fw_ps" check
351 	 * assures us the third link is a STA connected to the AP. Otherwise
352 	 * the FW would not set the PSM bit.
353 	 */
354 	else if (wl->active_link_count > 3 && fw_ps &&
355 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
356 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
357 }
358 
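/*
 * Editorial note, worked example for the threshold above: with a single
 * connected STA, active_link_count is 3 (global link + broadcast AP link +
 * the STA), so the "> 3" test fails and high-level PS is never started for
 * the only data link. With two or more connected stations the count reaches
 * 4 or more, and a congested, sleeping link may be put into high-level PS.
 */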
359 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
360 					   struct wl12xx_vif *wlvif,
361 					   struct wl_fw_status_2 *status)
362 {
363 	u32 cur_fw_ps_map;
364 	u8 hlid;
365 
366 	cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
367 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
368 		wl1271_debug(DEBUG_PSM,
369 			     "link ps prev 0x%x cur 0x%x changed 0x%x",
370 			     wl->ap_fw_ps_map, cur_fw_ps_map,
371 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
372 
373 		wl->ap_fw_ps_map = cur_fw_ps_map;
374 	}
375 
376 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
377 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
378 					    wl->links[hlid].allocated_pkts);
379 }
380 
381 static int wlcore_fw_status(struct wl1271 *wl,
382 			    struct wl_fw_status_1 *status_1,
383 			    struct wl_fw_status_2 *status_2)
384 {
385 	struct wl12xx_vif *wlvif;
386 	struct timespec ts;
387 	u32 old_tx_blk_count = wl->tx_blocks_available;
388 	int avail, freed_blocks;
389 	int i;
390 	size_t status_len;
391 	int ret;
392 	struct wl1271_link *lnk;
393 
394 	status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
395 		sizeof(*status_2) + wl->fw_status_priv_len;
396 
397 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
398 				   status_len, false);
399 	if (ret < 0)
400 		return ret;
401 
402 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
403 		     "drv_rx_counter = %d, tx_results_counter = %d)",
404 		     status_1->intr,
405 		     status_1->fw_rx_counter,
406 		     status_1->drv_rx_counter,
407 		     status_1->tx_results_counter);
408 
409 	for (i = 0; i < NUM_TX_QUEUES; i++) {
410 		/* prevent wrap-around in freed-packets counter */
411 		wl->tx_allocated_pkts[i] -=
412 				(status_2->counters.tx_released_pkts[i] -
413 				wl->tx_pkts_freed[i]) & 0xff;
414 
415 		wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
416 	}
417 
418 
419 	for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
420 		u8 diff;
421 		lnk = &wl->links[i];
422 
423 		/* prevent wrap-around in freed-packets counter */
424 		diff = (status_2->counters.tx_lnk_free_pkts[i] -
425 		       lnk->prev_freed_pkts) & 0xff;
426 
427 		if (diff == 0)
428 			continue;
429 
430 		lnk->allocated_pkts -= diff;
431 		lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
432 
433 		/* accumulate the prev_freed_pkts counter */
434 		lnk->total_freed_pkts += diff;
435 	}
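	/*
	 * Editorial note, worked example of the 8-bit wrap-around handling
	 * above: if prev_freed_pkts was 250 and the FW counter now reads 4,
	 * diff = (4 - 250) & 0xff = 10, i.e. ten packets were freed across
	 * the u8 counter wrap.
	 */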
436 
437 	/* prevent wrap-around in total blocks counter */
438 	if (likely(wl->tx_blocks_freed <=
439 		   le32_to_cpu(status_2->total_released_blks)))
440 		freed_blocks = le32_to_cpu(status_2->total_released_blks) -
441 			       wl->tx_blocks_freed;
442 	else
443 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
444 			       le32_to_cpu(status_2->total_released_blks);
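	/*
	 * Editorial note, worked example of the 32-bit wrap case above: if
	 * tx_blocks_freed was 0xfffffff0 and the FW now reports
	 * total_released_blks = 0x10, then
	 * freed_blocks = 0x100000000 - 0xfffffff0 + 0x10 = 0x20 (32 blocks).
	 */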
445 
446 	wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
447 
448 	wl->tx_allocated_blocks -= freed_blocks;
449 
450 	/*
451 	 * If the FW freed some blocks:
452 	 * If we still have allocated blocks - re-arm the timer, Tx is
453 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
454 	 */
455 	if (freed_blocks) {
456 		if (wl->tx_allocated_blocks)
457 			wl12xx_rearm_tx_watchdog_locked(wl);
458 		else
459 			cancel_delayed_work(&wl->tx_watchdog_work);
460 	}
461 
462 	avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
463 
464 	/*
465 	 * The FW might change the total number of TX memblocks before
466 	 * we get a notification about blocks being released. Thus, the
467 	 * available blocks calculation might yield a temporary result
468 	 * which is lower than the actual available blocks. Keeping in
469 	 * mind that only blocks that were allocated can be moved from
470 	 * TX to RX, tx_blocks_available should never decrease here.
471 	 */
472 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
473 				      avail);
474 
475 	/* if more blocks are available now, tx work can be scheduled */
476 	if (wl->tx_blocks_available > old_tx_blk_count)
477 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
478 
479 	/* for AP update num of allocated TX blocks per link and ps status */
480 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
481 		wl12xx_irq_update_links_status(wl, wlvif, status_2);
482 	}
483 
484 	/* update the host-chipset time offset */
485 	getnstimeofday(&ts);
486 	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
487 		(s64)le32_to_cpu(status_2->fw_localtime);
488 
489 	wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
490 
491 	return 0;
492 }
493 
494 static void wl1271_flush_deferred_work(struct wl1271 *wl)
495 {
496 	struct sk_buff *skb;
497 
498 	/* Pass all received frames to the network stack */
499 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
500 		ieee80211_rx_ni(wl->hw, skb);
501 
502 	/* Return sent skbs to the network stack */
503 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
504 		ieee80211_tx_status_ni(wl->hw, skb);
505 }
506 
507 static void wl1271_netstack_work(struct work_struct *work)
508 {
509 	struct wl1271 *wl =
510 		container_of(work, struct wl1271, netstack_work);
511 
512 	do {
513 		wl1271_flush_deferred_work(wl);
514 	} while (skb_queue_len(&wl->deferred_rx_queue));
515 }
516 
517 #define WL1271_IRQ_MAX_LOOPS 256
518 
519 static int wlcore_irq_locked(struct wl1271 *wl)
520 {
521 	int ret = 0;
522 	u32 intr;
523 	int loopcount = WL1271_IRQ_MAX_LOOPS;
524 	bool done = false;
525 	unsigned int defer_count;
526 	unsigned long flags;
527 
528 	/*
529 	 * If an edge-triggered interrupt must be used, we cannot iterate
530 	 * more than once without introducing race conditions with the hardirq.
531 	 */
532 	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
533 		loopcount = 1;
534 
535 	wl1271_debug(DEBUG_IRQ, "IRQ work");
536 
537 	if (unlikely(wl->state != WLCORE_STATE_ON))
538 		goto out;
539 
540 	ret = wl1271_ps_elp_wakeup(wl);
541 	if (ret < 0)
542 		goto out;
543 
544 	while (!done && loopcount--) {
545 		/*
546 		 * In order to avoid a race with the hardirq, clear the flag
547 		 * before acknowledging the chip. Since the mutex is held,
548 		 * wl1271_ps_elp_wakeup cannot be called concurrently.
549 		 */
550 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
551 		smp_mb__after_clear_bit();
552 
553 		ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
554 		if (ret < 0)
555 			goto out;
556 
557 		wlcore_hw_tx_immediate_compl(wl);
558 
559 		intr = le32_to_cpu(wl->fw_status_1->intr);
560 		intr &= WLCORE_ALL_INTR_MASK;
561 		if (!intr) {
562 			done = true;
563 			continue;
564 		}
565 
566 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
567 			wl1271_error("HW watchdog interrupt received! starting recovery.");
568 			wl->watchdog_recovery = true;
569 			ret = -EIO;
570 
571 			/* restarting the chip. ignore any other interrupt. */
572 			goto out;
573 		}
574 
575 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
576 			wl1271_error("SW watchdog interrupt received! "
577 				     "starting recovery.");
578 			wl->watchdog_recovery = true;
579 			ret = -EIO;
580 
581 			/* restarting the chip. ignore any other interrupt. */
582 			goto out;
583 		}
584 
585 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
586 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
587 
588 			ret = wlcore_rx(wl, wl->fw_status_1);
589 			if (ret < 0)
590 				goto out;
591 
592 			/* Check if any tx blocks were freed */
593 			spin_lock_irqsave(&wl->wl_lock, flags);
594 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
595 			    wl1271_tx_total_queue_count(wl) > 0) {
596 				spin_unlock_irqrestore(&wl->wl_lock, flags);
597 				/*
598 				 * In order to avoid starvation of the TX path,
599 				 * call the work function directly.
600 				 */
601 				ret = wlcore_tx_work_locked(wl);
602 				if (ret < 0)
603 					goto out;
604 			} else {
605 				spin_unlock_irqrestore(&wl->wl_lock, flags);
606 			}
607 
608 			/* check for tx results */
609 			ret = wlcore_hw_tx_delayed_compl(wl);
610 			if (ret < 0)
611 				goto out;
612 
613 			/* Make sure the deferred queues don't get too long */
614 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
615 				      skb_queue_len(&wl->deferred_rx_queue);
616 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
617 				wl1271_flush_deferred_work(wl);
618 		}
619 
620 		if (intr & WL1271_ACX_INTR_EVENT_A) {
621 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
622 			ret = wl1271_event_handle(wl, 0);
623 			if (ret < 0)
624 				goto out;
625 		}
626 
627 		if (intr & WL1271_ACX_INTR_EVENT_B) {
628 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
629 			ret = wl1271_event_handle(wl, 1);
630 			if (ret < 0)
631 				goto out;
632 		}
633 
634 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
635 			wl1271_debug(DEBUG_IRQ,
636 				     "WL1271_ACX_INTR_INIT_COMPLETE");
637 
638 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
639 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
640 	}
641 
642 	wl1271_ps_elp_sleep(wl);
643 
644 out:
645 	return ret;
646 }
647 
648 static irqreturn_t wlcore_irq(int irq, void *cookie)
649 {
650 	int ret;
651 	unsigned long flags;
652 	struct wl1271 *wl = cookie;
653 
654 	/* complete the ELP completion */
655 	spin_lock_irqsave(&wl->wl_lock, flags);
656 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
657 	if (wl->elp_compl) {
658 		complete(wl->elp_compl);
659 		wl->elp_compl = NULL;
660 	}
661 
662 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
663 		/* don't enqueue work right now; mark it as pending */
664 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
665 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
666 		disable_irq_nosync(wl->irq);
667 		pm_wakeup_event(wl->dev, 0);
668 		spin_unlock_irqrestore(&wl->wl_lock, flags);
669 		return IRQ_HANDLED;
670 	}
671 	spin_unlock_irqrestore(&wl->wl_lock, flags);
672 
673 	/* TX might be handled here, avoid redundant work */
674 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
675 	cancel_work_sync(&wl->tx_work);
676 
677 	mutex_lock(&wl->mutex);
678 
679 	ret = wlcore_irq_locked(wl);
680 	if (ret)
681 		wl12xx_queue_recovery_work(wl);
682 
683 	spin_lock_irqsave(&wl->wl_lock, flags);
684 	/* In case TX was not handled here, queue TX work */
685 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
686 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
687 	    wl1271_tx_total_queue_count(wl) > 0)
688 		ieee80211_queue_work(wl->hw, &wl->tx_work);
689 	spin_unlock_irqrestore(&wl->wl_lock, flags);
690 
691 	mutex_unlock(&wl->mutex);
692 
693 	return IRQ_HANDLED;
694 }
695 
696 struct vif_counter_data {
697 	u8 counter;
698 
699 	struct ieee80211_vif *cur_vif;
700 	bool cur_vif_running;
701 };
702 
703 static void wl12xx_vif_count_iter(void *data, u8 *mac,
704 				  struct ieee80211_vif *vif)
705 {
706 	struct vif_counter_data *counter = data;
707 
708 	counter->counter++;
709 	if (counter->cur_vif == vif)
710 		counter->cur_vif_running = true;
711 }
712 
713 /* caller must not hold wl->mutex, as it might deadlock */
714 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
715 			       struct ieee80211_vif *cur_vif,
716 			       struct vif_counter_data *data)
717 {
718 	memset(data, 0, sizeof(*data));
719 	data->cur_vif = cur_vif;
720 
721 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
722 					    wl12xx_vif_count_iter, data);
723 }
724 
725 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
726 {
727 	const struct firmware *fw;
728 	const char *fw_name;
729 	enum wl12xx_fw_type fw_type;
730 	int ret;
731 
732 	if (plt) {
733 		fw_type = WL12XX_FW_TYPE_PLT;
734 		fw_name = wl->plt_fw_name;
735 	} else {
736 		/*
737 		 * we can't call wl12xx_get_vif_count() here because
738 		 * wl->mutex is taken, so use the cached last_vif_count value
739 		 */
740 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
741 			fw_type = WL12XX_FW_TYPE_MULTI;
742 			fw_name = wl->mr_fw_name;
743 		} else {
744 			fw_type = WL12XX_FW_TYPE_NORMAL;
745 			fw_name = wl->sr_fw_name;
746 		}
747 	}
748 
749 	if (wl->fw_type == fw_type)
750 		return 0;
751 
752 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
753 
754 	ret = request_firmware(&fw, fw_name, wl->dev);
755 
756 	if (ret < 0) {
757 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
758 		return ret;
759 	}
760 
761 	if (fw->size % 4) {
762 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
763 			     fw->size);
764 		ret = -EILSEQ;
765 		goto out;
766 	}
767 
768 	vfree(wl->fw);
769 	wl->fw_type = WL12XX_FW_TYPE_NONE;
770 	wl->fw_len = fw->size;
771 	wl->fw = vmalloc(wl->fw_len);
772 
773 	if (!wl->fw) {
774 		wl1271_error("could not allocate memory for the firmware");
775 		ret = -ENOMEM;
776 		goto out;
777 	}
778 
779 	memcpy(wl->fw, fw->data, wl->fw_len);
780 	ret = 0;
781 	wl->fw_type = fw_type;
782 out:
783 	release_firmware(fw);
784 
785 	return ret;
786 }
787 
788 void wl12xx_queue_recovery_work(struct wl1271 *wl)
789 {
790 	WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
791 
792 	/* Avoid a recursive recovery */
793 	if (wl->state == WLCORE_STATE_ON) {
794 		wl->state = WLCORE_STATE_RESTARTING;
795 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
796 		wl1271_ps_elp_wakeup(wl);
797 		wlcore_disable_interrupts_nosync(wl);
798 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
799 	}
800 }
801 
802 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
803 {
804 	size_t len;
805 
806 	/* Make sure we have enough room */
807 	len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
808 
809 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
810 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
811 	wl->fwlog_size += len;
812 
813 	return len;
814 }
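/*
 * Editorial note: the fwlog buffer is a single page, so with a typical
 * 4096-byte PAGE_SIZE (architecture-dependent, assumed here) and
 * wl->fwlog_size already at 4000, at most 96 bytes are copied regardless
 * of maxlen. Once the page is full the function returns 0, which is what
 * terminates the copy loop in wl12xx_read_fwlog_panic() below.
 */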
815 
816 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
817 {
818 	struct wlcore_partition_set part, old_part;
819 	u32 addr;
820 	u32 offset;
821 	u32 end_of_log;
822 	u8 *block;
823 	int ret;
824 
825 	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
826 	    (wl->conf.fwlog.mem_blocks == 0))
827 		return;
828 
829 	wl1271_info("Reading FW panic log");
830 
831 	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
832 	if (!block)
833 		return;
834 
835 	/*
836 	 * Make sure the chip is awake and the logger isn't active.
837 	 * Do not send a stop fwlog command if the fw has hung or if
838 	 * dbgpins are used (due to some fw bug).
839 	 */
840 	if (wl1271_ps_elp_wakeup(wl))
841 		goto out;
842 	if (!wl->watchdog_recovery &&
843 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
844 		wl12xx_cmd_stop_fwlog(wl);
845 
846 	/* Read the first memory block address */
847 	ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
848 	if (ret < 0)
849 		goto out;
850 
851 	addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
852 	if (!addr)
853 		goto out;
854 
855 	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
856 		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
857 		end_of_log = wl->fwlog_end;
858 	} else {
859 		offset = sizeof(addr);
860 		end_of_log = addr;
861 	}
862 
863 	old_part = wl->curr_part;
864 	memset(&part, 0, sizeof(part));
865 
866 	/* Traverse the memory blocks linked list */
867 	do {
868 		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
869 		part.mem.size  = PAGE_SIZE;
870 
871 		ret = wlcore_set_partition(wl, &part);
872 		if (ret < 0) {
873 			wl1271_error("%s: set_partition start=0x%X size=%d",
874 				__func__, part.mem.start, part.mem.size);
875 			goto out;
876 		}
877 
878 		memset(block, 0, wl->fw_mem_block_size);
879 		ret = wlcore_read_hwaddr(wl, addr, block,
880 					wl->fw_mem_block_size, false);
881 
882 		if (ret < 0)
883 			goto out;
884 
885 		/*
886 		 * Memory blocks are linked to one another. The first 4 bytes
887 		 * of each memory block hold the hardware address of the next
888 		 * one. The last memory block points back to the first one in
889 		 * on-demand mode, and is 0x2000000 in continuous mode.
890 		 */
891 		addr = le32_to_cpup((__le32 *)block);
892 
893 		if (!wl12xx_copy_fwlog(wl, block + offset,
894 					wl->fw_mem_block_size - offset))
895 			break;
896 	} while (addr && (addr != end_of_log));
897 
898 	wake_up_interruptible(&wl->fwlog_waitq);
899 
900 out:
901 	kfree(block);
902 	wlcore_set_partition(wl, &old_part);
903 }
904 
905 static void wlcore_print_recovery(struct wl1271 *wl)
906 {
907 	u32 pc = 0;
908 	u32 hint_sts = 0;
909 	int ret;
910 
911 	wl1271_info("Hardware recovery in progress. FW ver: %s",
912 		    wl->chip.fw_ver_str);
913 
914 	/* change partitions momentarily so we can read the FW pc */
915 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
916 	if (ret < 0)
917 		return;
918 
919 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
920 	if (ret < 0)
921 		return;
922 
923 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
924 	if (ret < 0)
925 		return;
926 
927 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
928 				pc, hint_sts, ++wl->recovery_count);
929 
930 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
931 }
932 
933 
934 static void wl1271_recovery_work(struct work_struct *work)
935 {
936 	struct wl1271 *wl =
937 		container_of(work, struct wl1271, recovery_work);
938 	struct wl12xx_vif *wlvif;
939 	struct ieee80211_vif *vif;
940 
941 	mutex_lock(&wl->mutex);
942 
943 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
944 		goto out_unlock;
945 
946 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
947 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
948 			wl12xx_read_fwlog_panic(wl);
949 		wlcore_print_recovery(wl);
950 	}
951 
952 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
953 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
954 
955 	if (wl->conf.recovery.no_recovery) {
956 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
957 		goto out_unlock;
958 	}
959 
960 	/* Prevent spurious TX during FW restart */
961 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
962 
963 	/* reboot the chipset */
964 	while (!list_empty(&wl->wlvif_list)) {
965 		wlvif = list_first_entry(&wl->wlvif_list,
966 				       struct wl12xx_vif, list);
967 		vif = wl12xx_wlvif_to_vif(wlvif);
968 		__wl1271_op_remove_interface(wl, vif, false);
969 	}
970 
971 	wlcore_op_stop_locked(wl);
972 
973 	ieee80211_restart_hw(wl->hw);
974 
975 	/*
976 	 * It's safe to enable TX now - the queues are stopped after a request
977 	 * to restart the HW.
978 	 */
979 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
980 
981 out_unlock:
982 	wl->watchdog_recovery = false;
983 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
984 	mutex_unlock(&wl->mutex);
985 }
986 
987 static int wlcore_fw_wakeup(struct wl1271 *wl)
988 {
989 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
990 }
991 
992 static int wl1271_setup(struct wl1271 *wl)
993 {
994 	wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
995 				  sizeof(*wl->fw_status_2) +
996 				  wl->fw_status_priv_len, GFP_KERNEL);
997 	if (!wl->fw_status_1)
998 		return -ENOMEM;
999 
1000 	wl->fw_status_2 = (struct wl_fw_status_2 *)
1001 				(((u8 *) wl->fw_status_1) +
1002 				WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
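	/*
	 * Editorial note: fw_status_1 and fw_status_2 live in the single
	 * allocation made above. The buffer layout is:
	 *   [ fw_status_1, variable length (depends on num_rx_desc) ]
	 *   [ fw_status_2 ][ fw_status_priv_len bytes of private data ]
	 * so fw_status_2 is just a pointer into that same buffer.
	 */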
1003 
1004 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1005 	if (!wl->tx_res_if) {
1006 		kfree(wl->fw_status_1);
1007 		return -ENOMEM;
1008 	}
1009 
1010 	return 0;
1011 }
1012 
1013 static int wl12xx_set_power_on(struct wl1271 *wl)
1014 {
1015 	int ret;
1016 
1017 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1018 	ret = wl1271_power_on(wl);
1019 	if (ret < 0)
1020 		goto out;
1021 	msleep(WL1271_POWER_ON_SLEEP);
1022 	wl1271_io_reset(wl);
1023 	wl1271_io_init(wl);
1024 
1025 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1026 	if (ret < 0)
1027 		goto fail;
1028 
1029 	/* ELP module wake up */
1030 	ret = wlcore_fw_wakeup(wl);
1031 	if (ret < 0)
1032 		goto fail;
1033 
1034 out:
1035 	return ret;
1036 
1037 fail:
1038 	wl1271_power_off(wl);
1039 	return ret;
1040 }
1041 
1042 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1043 {
1044 	int ret = 0;
1045 
1046 	ret = wl12xx_set_power_on(wl);
1047 	if (ret < 0)
1048 		goto out;
1049 
1050 	/*
1051 	 * For wl127x based devices we could use the default block
1052 	 * size (512 bytes), but due to a bug in the sdio driver, we
1053 	 * need to set it explicitly after the chip is powered on.  To
1054 	 * simplify the code and since the performance impact is
1055 	 * negligible, we use the same block size for all different
1056 	 * chip types.
1057 	 *
1058 	 * Check if the bus supports blocksize alignment and, if it
1059 	 * doesn't, make sure we don't have the quirk.
1060 	 */
1061 	if (!wl1271_set_block_size(wl))
1062 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1063 
1064 	/* TODO: make sure the lower driver has set things up correctly */
1065 
1066 	ret = wl1271_setup(wl);
1067 	if (ret < 0)
1068 		goto out;
1069 
1070 	ret = wl12xx_fetch_firmware(wl, plt);
1071 	if (ret < 0)
1072 		goto out;
1073 
1074 out:
1075 	return ret;
1076 }
1077 
1078 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1079 {
1080 	int retries = WL1271_BOOT_RETRIES;
1081 	struct wiphy *wiphy = wl->hw->wiphy;
1082 
1083 	static const char* const PLT_MODE[] = {
1084 		"PLT_OFF",
1085 		"PLT_ON",
1086 		"PLT_FEM_DETECT",
1087 		"PLT_CHIP_AWAKE"
1088 	};
1089 
1090 	int ret;
1091 
1092 	mutex_lock(&wl->mutex);
1093 
1094 	wl1271_notice("power up");
1095 
1096 	if (wl->state != WLCORE_STATE_OFF) {
1097 		wl1271_error("cannot go into PLT state because not "
1098 			     "in off state: %d", wl->state);
1099 		ret = -EBUSY;
1100 		goto out;
1101 	}
1102 
1103 	/* Indicate to lower levels that we are now in PLT mode */
1104 	wl->plt = true;
1105 	wl->plt_mode = plt_mode;
1106 
1107 	while (retries) {
1108 		retries--;
1109 		ret = wl12xx_chip_wakeup(wl, true);
1110 		if (ret < 0)
1111 			goto power_off;
1112 
1113 		if (plt_mode != PLT_CHIP_AWAKE) {
1114 			ret = wl->ops->plt_init(wl);
1115 			if (ret < 0)
1116 				goto power_off;
1117 		}
1118 
1119 		wl->state = WLCORE_STATE_ON;
1120 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1121 			      PLT_MODE[plt_mode],
1122 			      wl->chip.fw_ver_str);
1123 
1124 		/* update hw/fw version info in wiphy struct */
1125 		wiphy->hw_version = wl->chip.id;
1126 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1127 			sizeof(wiphy->fw_version));
1128 
1129 		goto out;
1130 
1131 power_off:
1132 		wl1271_power_off(wl);
1133 	}
1134 
1135 	wl->plt = false;
1136 	wl->plt_mode = PLT_OFF;
1137 
1138 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1139 		     WL1271_BOOT_RETRIES);
1140 out:
1141 	mutex_unlock(&wl->mutex);
1142 
1143 	return ret;
1144 }
1145 
1146 int wl1271_plt_stop(struct wl1271 *wl)
1147 {
1148 	int ret = 0;
1149 
1150 	wl1271_notice("power down");
1151 
1152 	/*
1153 	 * Interrupts must be disabled before setting the state to OFF.
1154 	 * Otherwise, the interrupt handler might be called and exit without
1155 	 * reading the interrupt status.
1156 	 */
1157 	wlcore_disable_interrupts(wl);
1158 	mutex_lock(&wl->mutex);
1159 	if (!wl->plt) {
1160 		mutex_unlock(&wl->mutex);
1161 
1162 		/*
1163 		 * This will not necessarily enable interrupts as interrupts
1164 		 * may have been disabled when op_stop was called. It will,
1165 		 * however, balance the above call to disable_interrupts().
1166 		 */
1167 		wlcore_enable_interrupts(wl);
1168 
1169 		wl1271_error("cannot power down because not in PLT "
1170 			     "state: %d", wl->state);
1171 		ret = -EBUSY;
1172 		goto out;
1173 	}
1174 
1175 	mutex_unlock(&wl->mutex);
1176 
1177 	wl1271_flush_deferred_work(wl);
1178 	cancel_work_sync(&wl->netstack_work);
1179 	cancel_work_sync(&wl->recovery_work);
1180 	cancel_delayed_work_sync(&wl->elp_work);
1181 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1182 
1183 	mutex_lock(&wl->mutex);
1184 	wl1271_power_off(wl);
1185 	wl->flags = 0;
1186 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1187 	wl->state = WLCORE_STATE_OFF;
1188 	wl->plt = false;
1189 	wl->plt_mode = PLT_OFF;
1190 	wl->rx_counter = 0;
1191 	mutex_unlock(&wl->mutex);
1192 
1193 out:
1194 	return ret;
1195 }
1196 
1197 static void wl1271_op_tx(struct ieee80211_hw *hw,
1198 			 struct ieee80211_tx_control *control,
1199 			 struct sk_buff *skb)
1200 {
1201 	struct wl1271 *wl = hw->priv;
1202 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1203 	struct ieee80211_vif *vif = info->control.vif;
1204 	struct wl12xx_vif *wlvif = NULL;
1205 	unsigned long flags;
1206 	int q, mapping;
1207 	u8 hlid;
1208 
1209 	if (!vif) {
1210 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1211 		ieee80211_free_txskb(hw, skb);
1212 		return;
1213 	}
1214 
1215 	wlvif = wl12xx_vif_to_data(vif);
1216 	mapping = skb_get_queue_mapping(skb);
1217 	q = wl1271_tx_get_queue(mapping);
1218 
1219 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1220 
1221 	spin_lock_irqsave(&wl->wl_lock, flags);
1222 
1223 	/*
1224 	 * drop the packet if the link is invalid or the queue is stopped
1225 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1226 	 * allow these packets through.
1227 	 */
1228 	if (hlid == WL12XX_INVALID_LINK_ID ||
1229 	    (!test_bit(hlid, wlvif->links_map)) ||
1230 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1231 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1232 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1233 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1234 		ieee80211_free_txskb(hw, skb);
1235 		goto out;
1236 	}
1237 
1238 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1239 		     hlid, q, skb->len);
1240 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1241 
1242 	wl->tx_queue_count[q]++;
1243 	wlvif->tx_queue_count[q]++;
1244 
1245 	/*
1246 	 * The workqueue is slow to process the tx_queue and we need to stop
1247 	 * the queue here, otherwise the queue will get too long.
1248 	 */
1249 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1250 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1251 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1252 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1253 		wlcore_stop_queue_locked(wl, wlvif, q,
1254 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1255 	}
1256 
1257 	/*
1258 	 * The chip-specific setup must run before the first TX packet -
1259 	 * before that, the tx_work will not be initialized!
1260 	 */
1261 
1262 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1263 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1264 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1265 
1266 out:
1267 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1268 }
1269 
1270 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1271 {
1272 	unsigned long flags;
1273 	int q;
1274 
1275 	/* no need to queue a new dummy packet if one is already pending */
1276 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1277 		return 0;
1278 
1279 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1280 
1281 	spin_lock_irqsave(&wl->wl_lock, flags);
1282 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1283 	wl->tx_queue_count[q]++;
1284 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1285 
1286 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1287 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1288 		return wlcore_tx_work_locked(wl);
1289 
1290 	/*
1291 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1292 	 * interrupt handler function
1293 	 */
1294 	return 0;
1295 }
1296 
1297 /*
1298  * The size of the dummy packet should be at least 1400 bytes. However, in
1299  * order to minimize the number of bus transactions, aligning it to 512-byte
1300  * boundaries could be beneficial, performance-wise.
1301  */
1302 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
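/*
 * Editorial note: ALIGN(1400, 512) rounds up to the next multiple of 512,
 * so TOTAL_TX_DUMMY_PACKET_SIZE evaluates to 1536 bytes.
 */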
1303 
1304 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1305 {
1306 	struct sk_buff *skb;
1307 	struct ieee80211_hdr_3addr *hdr;
1308 	unsigned int dummy_packet_size;
1309 
1310 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1311 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1312 
1313 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1314 	if (!skb) {
1315 		wl1271_warning("Failed to allocate a dummy packet skb");
1316 		return NULL;
1317 	}
1318 
1319 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1320 
1321 	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1322 	memset(hdr, 0, sizeof(*hdr));
1323 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1324 					 IEEE80211_STYPE_NULLFUNC |
1325 					 IEEE80211_FCTL_TODS);
1326 
1327 	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1328 
1329 	/* Dummy packets require the TID to be management */
1330 	skb->priority = WL1271_TID_MGMT;
1331 
1332 	/* Initialize all fields that might be used */
1333 	skb_set_queue_mapping(skb, 0);
1334 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1335 
1336 	return skb;
1337 }
1338 
1339 
1340 #ifdef CONFIG_PM
1341 static int
1342 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1343 {
1344 	int num_fields = 0, in_field = 0, fields_size = 0;
1345 	int i, pattern_len = 0;
1346 
1347 	if (!p->mask) {
1348 		wl1271_warning("No mask in WoWLAN pattern");
1349 		return -EINVAL;
1350 	}
1351 
1352 	/*
1353 	 * The pattern is broken up into segments of bytes at different offsets
1354 	 * that need to be checked by the FW filter. Each segment is called
1355 	 * a field in the FW API. We verify that the total number of fields
1356 	 * required for this pattern won't exceed the FW limit (8),
1357 	 * and that the total fields buffer won't exceed the FW limit either.
1358 	 * Note that if a pattern crosses the Ethernet/IP header
1359 	 * boundary, a new field is required.
1360 	 */
1361 	for (i = 0; i < p->pattern_len; i++) {
1362 		if (test_bit(i, (unsigned long *)p->mask)) {
1363 			if (!in_field) {
1364 				in_field = 1;
1365 				pattern_len = 1;
1366 			} else {
1367 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1368 					num_fields++;
1369 					fields_size += pattern_len +
1370 						RX_FILTER_FIELD_OVERHEAD;
1371 					pattern_len = 1;
1372 				} else
1373 					pattern_len++;
1374 			}
1375 		} else {
1376 			if (in_field) {
1377 				in_field = 0;
1378 				fields_size += pattern_len +
1379 					RX_FILTER_FIELD_OVERHEAD;
1380 				num_fields++;
1381 			}
1382 		}
1383 	}
1384 
1385 	if (in_field) {
1386 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1387 		num_fields++;
1388 	}
1389 
1390 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1391 		wl1271_warning("RX Filter too complex. Too many segments");
1392 		return -EINVAL;
1393 	}
1394 
1395 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1396 		wl1271_warning("RX filter pattern is too big");
1397 		return -E2BIG;
1398 	}
1399 
1400 	return 0;
1401 }
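/*
 * Editorial note, worked example (assuming WL1271_RX_FILTER_ETH_HEADER_SIZE
 * is 14, the Ethernet header size; not verified here): a mask that selects
 * pattern bytes 12-15 yields two fields, bytes 12-13 matched against the
 * Ethernet header and bytes 14-15 matched against the start of the IP
 * header, and fields_size grows by each field's length plus
 * RX_FILTER_FIELD_OVERHEAD.
 */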
1402 
1403 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1404 {
1405 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1406 }
1407 
1408 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1409 {
1410 	int i;
1411 
1412 	if (filter == NULL)
1413 		return;
1414 
1415 	for (i = 0; i < filter->num_fields; i++)
1416 		kfree(filter->fields[i].pattern);
1417 
1418 	kfree(filter);
1419 }
1420 
1421 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1422 				 u16 offset, u8 flags,
1423 				 u8 *pattern, u8 len)
1424 {
1425 	struct wl12xx_rx_filter_field *field;
1426 
1427 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1428 		wl1271_warning("Max fields per RX filter. can't alloc another");
1429 		return -EINVAL;
1430 	}
1431 
1432 	field = &filter->fields[filter->num_fields];
1433 
1434 	field->pattern = kzalloc(len, GFP_KERNEL);
1435 	if (!field->pattern) {
1436 		wl1271_warning("Failed to allocate RX filter pattern");
1437 		return -ENOMEM;
1438 	}
1439 
1440 	filter->num_fields++;
1441 
1442 	field->offset = cpu_to_le16(offset);
1443 	field->flags = flags;
1444 	field->len = len;
1445 	memcpy(field->pattern, pattern, len);
1446 
1447 	return 0;
1448 }
1449 
1450 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1451 {
1452 	int i, fields_size = 0;
1453 
1454 	for (i = 0; i < filter->num_fields; i++)
1455 		fields_size += filter->fields[i].len +
1456 			sizeof(struct wl12xx_rx_filter_field) -
1457 			sizeof(u8 *);
1458 
1459 	return fields_size;
1460 }
1461 
1462 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1463 				    u8 *buf)
1464 {
1465 	int i;
1466 	struct wl12xx_rx_filter_field *field;
1467 
1468 	for (i = 0; i < filter->num_fields; i++) {
1469 		field = (struct wl12xx_rx_filter_field *)buf;
1470 
1471 		field->offset = filter->fields[i].offset;
1472 		field->flags = filter->fields[i].flags;
1473 		field->len = filter->fields[i].len;
1474 
1475 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1476 		buf += sizeof(struct wl12xx_rx_filter_field) -
1477 			sizeof(u8 *) + field->len;
1478 	}
1479 }
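/*
 * Editorial note: each field is serialized as its struct minus the trailing
 * pattern pointer, immediately followed by the pattern bytes themselves,
 * which is why both this function and wl1271_rx_filter_get_fields_size()
 * use the stride sizeof(struct wl12xx_rx_filter_field) - sizeof(u8 *) + len.
 */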
1480 
1481 /*
1482  * Allocates an RX filter, returned through f,
1483  * which needs to be freed using wl1271_rx_filter_free().
1484  */
1485 static int
1486 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1487 					   struct wl12xx_rx_filter **f)
1488 {
1489 	int i, j, ret = 0;
1490 	struct wl12xx_rx_filter *filter;
1491 	u16 offset;
1492 	u8 flags, len;
1493 
1494 	filter = wl1271_rx_filter_alloc();
1495 	if (!filter) {
1496 		wl1271_warning("Failed to alloc rx filter");
1497 		ret = -ENOMEM;
1498 		goto err;
1499 	}
1500 
1501 	i = 0;
1502 	while (i < p->pattern_len) {
1503 		if (!test_bit(i, (unsigned long *)p->mask)) {
1504 			i++;
1505 			continue;
1506 		}
1507 
1508 		for (j = i; j < p->pattern_len; j++) {
1509 			if (!test_bit(j, (unsigned long *)p->mask))
1510 				break;
1511 
1512 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1513 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1514 				break;
1515 		}
1516 
1517 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1518 			offset = i;
1519 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1520 		} else {
1521 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1522 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1523 		}
1524 
1525 		len = j - i;
1526 
1527 		ret = wl1271_rx_filter_alloc_field(filter,
1528 						   offset,
1529 						   flags,
1530 						   &p->pattern[i], len);
1531 		if (ret)
1532 			goto err;
1533 
1534 		i = j;
1535 	}
1536 
1537 	filter->action = FILTER_SIGNAL;
1538 
1539 	*f = filter;
1540 	return 0;
1541 
1542 err:
1543 	wl1271_rx_filter_free(filter);
1544 	*f = NULL;
1545 
1546 	return ret;
1547 }
1548 
1549 static int wl1271_configure_wowlan(struct wl1271 *wl,
1550 				   struct cfg80211_wowlan *wow)
1551 {
1552 	int i, ret;
1553 
1554 	if (!wow || wow->any || !wow->n_patterns) {
1555 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1556 							  FILTER_SIGNAL);
1557 		if (ret)
1558 			goto out;
1559 
1560 		ret = wl1271_rx_filter_clear_all(wl);
1561 		if (ret)
1562 			goto out;
1563 
1564 		return 0;
1565 	}
1566 
1567 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1568 		return -EINVAL;
1569 
1570 	/* Validate all incoming patterns before clearing current FW state */
1571 	for (i = 0; i < wow->n_patterns; i++) {
1572 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1573 		if (ret) {
1574 			wl1271_warning("Bad wowlan pattern %d", i);
1575 			return ret;
1576 		}
1577 	}
1578 
1579 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1580 	if (ret)
1581 		goto out;
1582 
1583 	ret = wl1271_rx_filter_clear_all(wl);
1584 	if (ret)
1585 		goto out;
1586 
1587 	/* Translate WoWLAN patterns into filters */
1588 	for (i = 0; i < wow->n_patterns; i++) {
1589 		struct cfg80211_pkt_pattern *p;
1590 		struct wl12xx_rx_filter *filter = NULL;
1591 
1592 		p = &wow->patterns[i];
1593 
1594 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1595 		if (ret) {
1596 			wl1271_warning("Failed to create an RX filter from "
1597 				       "wowlan pattern %d", i);
1598 			goto out;
1599 		}
1600 
1601 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1602 
1603 		wl1271_rx_filter_free(filter);
1604 		if (ret)
1605 			goto out;
1606 	}
1607 
1608 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1609 
1610 out:
1611 	return ret;
1612 }
1613 
1614 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1615 					struct wl12xx_vif *wlvif,
1616 					struct cfg80211_wowlan *wow)
1617 {
1618 	int ret = 0;
1619 
1620 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1621 		goto out;
1622 
1623 	ret = wl1271_ps_elp_wakeup(wl);
1624 	if (ret < 0)
1625 		goto out;
1626 
1627 	ret = wl1271_configure_wowlan(wl, wow);
1628 	if (ret < 0)
1629 		goto out_sleep;
1630 
1631 	if ((wl->conf.conn.suspend_wake_up_event ==
1632 	     wl->conf.conn.wake_up_event) &&
1633 	    (wl->conf.conn.suspend_listen_interval ==
1634 	     wl->conf.conn.listen_interval))
1635 		goto out_sleep;
1636 
1637 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1638 				    wl->conf.conn.suspend_wake_up_event,
1639 				    wl->conf.conn.suspend_listen_interval);
1640 
1641 	if (ret < 0)
1642 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1643 
1644 out_sleep:
1645 	wl1271_ps_elp_sleep(wl);
1646 out:
1647 	return ret;
1648 
1649 }
1650 
1651 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1652 				       struct wl12xx_vif *wlvif)
1653 {
1654 	int ret = 0;
1655 
1656 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1657 		goto out;
1658 
1659 	ret = wl1271_ps_elp_wakeup(wl);
1660 	if (ret < 0)
1661 		goto out;
1662 
1663 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1664 
1665 	wl1271_ps_elp_sleep(wl);
1666 out:
1667 	return ret;
1668 
1669 }
1670 
1671 static int wl1271_configure_suspend(struct wl1271 *wl,
1672 				    struct wl12xx_vif *wlvif,
1673 				    struct cfg80211_wowlan *wow)
1674 {
1675 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1676 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1677 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1678 		return wl1271_configure_suspend_ap(wl, wlvif);
1679 	return 0;
1680 }
1681 
1682 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1683 {
1684 	int ret = 0;
1685 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1686 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1687 
1688 	if ((!is_ap) && (!is_sta))
1689 		return;
1690 
1691 	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1692 		return;
1693 
1694 	ret = wl1271_ps_elp_wakeup(wl);
1695 	if (ret < 0)
1696 		return;
1697 
1698 	if (is_sta) {
1699 		wl1271_configure_wowlan(wl, NULL);
1700 
1701 		if ((wl->conf.conn.suspend_wake_up_event ==
1702 		     wl->conf.conn.wake_up_event) &&
1703 		    (wl->conf.conn.suspend_listen_interval ==
1704 		     wl->conf.conn.listen_interval))
1705 			goto out_sleep;
1706 
1707 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1708 				    wl->conf.conn.wake_up_event,
1709 				    wl->conf.conn.listen_interval);
1710 
1711 		if (ret < 0)
1712 			wl1271_error("resume: wake up conditions failed: %d",
1713 				     ret);
1714 
1715 	} else if (is_ap) {
1716 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1717 	}
1718 
1719 out_sleep:
1720 	wl1271_ps_elp_sleep(wl);
1721 }
1722 
1723 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1724 			    struct cfg80211_wowlan *wow)
1725 {
1726 	struct wl1271 *wl = hw->priv;
1727 	struct wl12xx_vif *wlvif;
1728 	int ret;
1729 
1730 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1731 	WARN_ON(!wow);
1732 
1733 	/* we want to perform the recovery before suspending */
1734 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1735 		wl1271_warning("postponing suspend to perform recovery");
1736 		return -EBUSY;
1737 	}
1738 
1739 	wl1271_tx_flush(wl);
1740 
1741 	mutex_lock(&wl->mutex);
1742 	wl->wow_enabled = true;
1743 	wl12xx_for_each_wlvif(wl, wlvif) {
1744 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1745 		if (ret < 0) {
1746 			mutex_unlock(&wl->mutex);
1747 			wl1271_warning("couldn't prepare device to suspend");
1748 			return ret;
1749 		}
1750 	}
1751 	mutex_unlock(&wl->mutex);
1752 	/* flush any remaining work */
1753 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1754 
1755 	/*
1756 	 * disable and re-enable interrupts in order to flush
1757 	 * the threaded_irq
1758 	 */
1759 	wlcore_disable_interrupts(wl);
1760 
1761 	/*
1762 	 * set suspended flag to avoid triggering a new threaded_irq
1763 	 * work. No need for a spinlock as interrupts are disabled.
1764 	 */
1765 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1766 
1767 	wlcore_enable_interrupts(wl);
1768 	flush_work(&wl->tx_work);
1769 	flush_delayed_work(&wl->elp_work);
1770 
1771 	return 0;
1772 }
1773 
1774 static int wl1271_op_resume(struct ieee80211_hw *hw)
1775 {
1776 	struct wl1271 *wl = hw->priv;
1777 	struct wl12xx_vif *wlvif;
1778 	unsigned long flags;
1779 	bool run_irq_work = false, pending_recovery;
1780 	int ret;
1781 
1782 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1783 		     wl->wow_enabled);
1784 	WARN_ON(!wl->wow_enabled);
1785 
1786 	/*
1787 	 * re-enable irq_work enqueuing, and call irq_work directly if
1788 	 * there is pending work.
1789 	 */
1790 	spin_lock_irqsave(&wl->wl_lock, flags);
1791 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1792 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1793 		run_irq_work = true;
1794 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1795 
1796 	mutex_lock(&wl->mutex);
1797 
1798 	/* test the recovery flag before calling any SDIO functions */
1799 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1800 				    &wl->flags);
1801 
1802 	if (run_irq_work) {
1803 		wl1271_debug(DEBUG_MAC80211,
1804 			     "run postponed irq_work directly");
1805 
1806 		/* don't talk to the HW if recovery is pending */
1807 		if (!pending_recovery) {
1808 			ret = wlcore_irq_locked(wl);
1809 			if (ret)
1810 				wl12xx_queue_recovery_work(wl);
1811 		}
1812 
1813 		wlcore_enable_interrupts(wl);
1814 	}
1815 
1816 	if (pending_recovery) {
1817 		wl1271_warning("queuing forgotten recovery on resume");
1818 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1819 		goto out;
1820 	}
1821 
1822 	wl12xx_for_each_wlvif(wl, wlvif) {
1823 		wl1271_configure_resume(wl, wlvif);
1824 	}
1825 
1826 out:
1827 	wl->wow_enabled = false;
1828 	mutex_unlock(&wl->mutex);
1829 
1830 	return 0;
1831 }
1832 #endif
1833 
1834 static int wl1271_op_start(struct ieee80211_hw *hw)
1835 {
1836 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1837 
1838 	/*
1839 	 * We have to delay the booting of the hardware because
1840 	 * we need to know the local MAC address before downloading and
1841 	 * initializing the firmware. The MAC address cannot be changed
1842 	 * after boot, and without the proper MAC address, the firmware
1843 	 * will not function properly.
1844 	 *
1845 	 * The MAC address is first known when the corresponding interface
1846 	 * is added. That is where we will initialize the hardware.
1847 	 */
1848 
1849 	return 0;
1850 }
1851 
1852 static void wlcore_op_stop_locked(struct wl1271 *wl)
1853 {
1854 	int i;
1855 
1856 	if (wl->state == WLCORE_STATE_OFF) {
1857 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1858 					&wl->flags))
1859 			wlcore_enable_interrupts(wl);
1860 
1861 		return;
1862 	}
1863 
1864 	/*
1865 	 * this must be before the cancel_work calls below, so that the work
1866 	 * functions don't perform further work.
1867 	 */
1868 	wl->state = WLCORE_STATE_OFF;
1869 
1870 	/*
1871 	 * Use the nosync variant to disable interrupts, so the mutex could be
1872 	 * Use the nosync variant to disable interrupts, so the mutex can be
1873 	 */
1874 	wlcore_disable_interrupts_nosync(wl);
1875 
1876 	mutex_unlock(&wl->mutex);
1877 
1878 	wlcore_synchronize_interrupts(wl);
1879 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1880 		cancel_work_sync(&wl->recovery_work);
1881 	wl1271_flush_deferred_work(wl);
1882 	cancel_delayed_work_sync(&wl->scan_complete_work);
1883 	cancel_work_sync(&wl->netstack_work);
1884 	cancel_work_sync(&wl->tx_work);
1885 	cancel_delayed_work_sync(&wl->elp_work);
1886 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1887 
1888 	/* let's notify MAC80211 about the remaining pending TX frames */
1889 	mutex_lock(&wl->mutex);
1890 	wl12xx_tx_reset(wl);
1891 
1892 	wl1271_power_off(wl);
1893 	/*
1894 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1895 	 * an interrupt storm. Now that the power is down, it is safe to
1896 	 * re-enable interrupts to balance the disable depth
1897 	 * re-enable interrupts to balance the disable depth.
1898 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1899 		wlcore_enable_interrupts(wl);
1900 
1901 	wl->band = IEEE80211_BAND_2GHZ;
1902 
1903 	wl->rx_counter = 0;
1904 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1905 	wl->channel_type = NL80211_CHAN_NO_HT;
1906 	wl->tx_blocks_available = 0;
1907 	wl->tx_allocated_blocks = 0;
1908 	wl->tx_results_count = 0;
1909 	wl->tx_packets_count = 0;
1910 	wl->time_offset = 0;
1911 	wl->ap_fw_ps_map = 0;
1912 	wl->ap_ps_map = 0;
1913 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1914 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1915 	memset(wl->links_map, 0, sizeof(wl->links_map));
1916 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1917 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1918 	wl->active_sta_count = 0;
1919 	wl->active_link_count = 0;
1920 
1921 	/* The system link is always allocated */
1922 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1923 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1924 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1925 
1926 	/*
1927 	 * this is performed after the cancel_work calls and the associated
1928 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1929 	 * get executed before all these vars have been reset.
1930 	 */
1931 	wl->flags = 0;
1932 
1933 	wl->tx_blocks_freed = 0;
1934 
1935 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1936 		wl->tx_pkts_freed[i] = 0;
1937 		wl->tx_allocated_pkts[i] = 0;
1938 	}
1939 
1940 	wl1271_debugfs_reset(wl);
1941 
1942 	kfree(wl->fw_status_1);
1943 	wl->fw_status_1 = NULL;
1944 	wl->fw_status_2 = NULL;
1945 	kfree(wl->tx_res_if);
1946 	wl->tx_res_if = NULL;
1947 	kfree(wl->target_mem_map);
1948 	wl->target_mem_map = NULL;
1949 
1950 	/*
1951 	 * FW channels must be re-calibrated after recovery; save the current
1952 	 * Reg-Domain channel configuration and clear it.
1953 	 */
1954 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
1955 	       sizeof(wl->reg_ch_conf_pending));
1956 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1957 }
1958 
1959 static void wlcore_op_stop(struct ieee80211_hw *hw)
1960 {
1961 	struct wl1271 *wl = hw->priv;
1962 
1963 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1964 
1965 	mutex_lock(&wl->mutex);
1966 
1967 	wlcore_op_stop_locked(wl);
1968 
1969 	mutex_unlock(&wl->mutex);
1970 }
1971 
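/*
 * Delayed work that fires if a channel switch does not complete in time:
 * report the failed switch to mac80211 and ask the fw to stop it.
 */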
1972 static void wlcore_channel_switch_work(struct work_struct *work)
1973 {
1974 	struct delayed_work *dwork;
1975 	struct wl1271 *wl;
1976 	struct ieee80211_vif *vif;
1977 	struct wl12xx_vif *wlvif;
1978 	int ret;
1979 
1980 	dwork = container_of(work, struct delayed_work, work);
1981 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1982 	wl = wlvif->wl;
1983 
1984 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1985 
1986 	mutex_lock(&wl->mutex);
1987 
1988 	if (unlikely(wl->state != WLCORE_STATE_ON))
1989 		goto out;
1990 
1991 	/* check the channel switch is still ongoing */
1992 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1993 		goto out;
1994 
1995 	vif = wl12xx_wlvif_to_vif(wlvif);
1996 	ieee80211_chswitch_done(vif, false);
1997 
1998 	ret = wl1271_ps_elp_wakeup(wl);
1999 	if (ret < 0)
2000 		goto out;
2001 
2002 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2003 
2004 	wl1271_ps_elp_sleep(wl);
2005 out:
2006 	mutex_unlock(&wl->mutex);
2007 }
2008 
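/*
 * Delayed work that reports a connection loss to mac80211 if the
 * interface is still marked as associated when it fires.
 */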
2009 static void wlcore_connection_loss_work(struct work_struct *work)
2010 {
2011 	struct delayed_work *dwork;
2012 	struct wl1271 *wl;
2013 	struct ieee80211_vif *vif;
2014 	struct wl12xx_vif *wlvif;
2015 
2016 	dwork = container_of(work, struct delayed_work, work);
2017 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2018 	wl = wlvif->wl;
2019 
2020 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2021 
2022 	mutex_lock(&wl->mutex);
2023 
2024 	if (unlikely(wl->state != WLCORE_STATE_ON))
2025 		goto out;
2026 
2027 	/* Call mac80211 connection loss */
2028 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2029 		goto out;
2030 
2031 	vif = wl12xx_wlvif_to_vif(wlvif);
2032 	ieee80211_connection_loss(vif);
2033 out:
2034 	mutex_unlock(&wl->mutex);
2035 }
2036 
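/*
 * Delayed work that cancels the ROC kept for a pending authentication
 * once the WLCORE_PEND_AUTH_ROC_TIMEOUT period has really elapsed.
 */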
2037 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2038 {
2039 	struct delayed_work *dwork;
2040 	struct wl1271 *wl;
2041 	struct wl12xx_vif *wlvif;
2042 	unsigned long time_spare;
2043 	int ret;
2044 
2045 	dwork = container_of(work, struct delayed_work, work);
2046 	wlvif = container_of(dwork, struct wl12xx_vif,
2047 			     pending_auth_complete_work);
2048 	wl = wlvif->wl;
2049 
2050 	mutex_lock(&wl->mutex);
2051 
2052 	if (unlikely(wl->state != WLCORE_STATE_ON))
2053 		goto out;
2054 
2055 	/*
2056 	 * Make sure a second really passed since the last auth reply. Maybe
2057 	 * a second auth reply arrived while we were stuck on the mutex.
2058 	 * Check for a little less than the timeout to protect from scheduler
2059 	 * irregularities.
2060 	 */
2061 	time_spare = jiffies +
2062 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2063 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2064 		goto out;
2065 
2066 	ret = wl1271_ps_elp_wakeup(wl);
2067 	if (ret < 0)
2068 		goto out;
2069 
2070 	/* cancel the ROC if active */
2071 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2072 
2073 	wl1271_ps_elp_sleep(wl);
2074 out:
2075 	mutex_unlock(&wl->mutex);
2076 }
2077 
2078 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2079 {
2080 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2081 					WL12XX_MAX_RATE_POLICIES);
2082 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2083 		return -EBUSY;
2084 
2085 	__set_bit(policy, wl->rate_policies_map);
2086 	*idx = policy;
2087 	return 0;
2088 }
2089 
2090 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2091 {
2092 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2093 		return;
2094 
2095 	__clear_bit(*idx, wl->rate_policies_map);
2096 	*idx = WL12XX_MAX_RATE_POLICIES;
2097 }
2098 
2099 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2100 {
2101 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2102 					WLCORE_MAX_KLV_TEMPLATES);
2103 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2104 		return -EBUSY;
2105 
2106 	__set_bit(policy, wl->klv_templates_map);
2107 	*idx = policy;
2108 	return 0;
2109 }
2110 
2111 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2112 {
2113 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2114 		return;
2115 
2116 	__clear_bit(*idx, wl->klv_templates_map);
2117 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2118 }
2119 
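/*
 * Map the interface's BSS type (and p2p flag) to the firmware role type,
 * or WL12XX_INVALID_ROLE_TYPE if the combination is not supported.
 */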
2120 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2121 {
2122 	switch (wlvif->bss_type) {
2123 	case BSS_TYPE_AP_BSS:
2124 		if (wlvif->p2p)
2125 			return WL1271_ROLE_P2P_GO;
2126 		else
2127 			return WL1271_ROLE_AP;
2128 
2129 	case BSS_TYPE_STA_BSS:
2130 		if (wlvif->p2p)
2131 			return WL1271_ROLE_P2P_CL;
2132 		else
2133 			return WL1271_ROLE_STA;
2134 
2135 	case BSS_TYPE_IBSS:
2136 		return WL1271_ROLE_IBSS;
2137 
2138 	default:
2139 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2140 	}
2141 	return WL12XX_INVALID_ROLE_TYPE;
2142 }
2143 
2144 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2145 {
2146 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2147 	int i;
2148 
2149 	/* clear everything but the persistent data */
2150 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2151 
2152 	switch (ieee80211_vif_type_p2p(vif)) {
2153 	case NL80211_IFTYPE_P2P_CLIENT:
2154 		wlvif->p2p = 1;
2155 		/* fall-through */
2156 	case NL80211_IFTYPE_STATION:
2157 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2158 		break;
2159 	case NL80211_IFTYPE_ADHOC:
2160 		wlvif->bss_type = BSS_TYPE_IBSS;
2161 		break;
2162 	case NL80211_IFTYPE_P2P_GO:
2163 		wlvif->p2p = 1;
2164 		/* fall-through */
2165 	case NL80211_IFTYPE_AP:
2166 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2167 		break;
2168 	default:
2169 		wlvif->bss_type = MAX_BSS_TYPE;
2170 		return -EOPNOTSUPP;
2171 	}
2172 
2173 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2174 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2175 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2176 
2177 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2178 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2179 		/* init sta/ibss data */
2180 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2181 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2182 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2183 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2184 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2185 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2186 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2187 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2188 	} else {
2189 		/* init ap data */
2190 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2191 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2192 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2193 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2194 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2195 			wl12xx_allocate_rate_policy(wl,
2196 						&wlvif->ap.ucast_rate_idx[i]);
2197 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2198 		/*
2199 		 * TODO: check if basic_rate shouldn't be
2200 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2201 		 * instead (the same thing for STA above).
2202 		 */
2203 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2204 		/* TODO: this seems to be used only for STA, check it */
2205 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2206 	}
2207 
2208 	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2209 	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2210 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2211 
2212 	/*
2213 	 * mac80211 configures some values globally, while we treat them
2214 	 * per-interface. Thus, on init, we have to copy them from wl.
2215 	 */
2216 	wlvif->band = wl->band;
2217 	wlvif->channel = wl->channel;
2218 	wlvif->power_level = wl->power_level;
2219 	wlvif->channel_type = wl->channel_type;
2220 
2221 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2222 		  wl1271_rx_streaming_enable_work);
2223 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2224 		  wl1271_rx_streaming_disable_work);
2225 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2226 			  wlcore_channel_switch_work);
2227 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2228 			  wlcore_connection_loss_work);
2229 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2230 			  wlcore_pending_auth_complete_work);
2231 	INIT_LIST_HEAD(&wlvif->list);
2232 
2233 	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2234 		    (unsigned long) wlvif);
2235 	return 0;
2236 }
2237 
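/*
 * Power up and boot the firmware, retrying up to WL1271_BOOT_RETRIES
 * times, then publish the hw/fw versions and move the core to
 * WLCORE_STATE_ON.
 */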
2238 static int wl12xx_init_fw(struct wl1271 *wl)
2239 {
2240 	int retries = WL1271_BOOT_RETRIES;
2241 	bool booted = false;
2242 	struct wiphy *wiphy = wl->hw->wiphy;
2243 	int ret;
2244 
2245 	while (retries) {
2246 		retries--;
2247 		ret = wl12xx_chip_wakeup(wl, false);
2248 		if (ret < 0)
2249 			goto power_off;
2250 
2251 		ret = wl->ops->boot(wl);
2252 		if (ret < 0)
2253 			goto power_off;
2254 
2255 		ret = wl1271_hw_init(wl);
2256 		if (ret < 0)
2257 			goto irq_disable;
2258 
2259 		booted = true;
2260 		break;
2261 
2262 irq_disable:
2263 		mutex_unlock(&wl->mutex);
2264 		/* Unlocking the mutex in the middle of handling is
2265 		   inherently unsafe. In this case we deem it safe to do so,
2266 		   because we need to let any possibly pending IRQ out of
2267 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2268 		   work function will not do anything). Also, any other
2269 		   possible concurrent operations will fail due to the
2270 		   current state, hence the wl1271 struct should be safe. */
2271 		wlcore_disable_interrupts(wl);
2272 		wl1271_flush_deferred_work(wl);
2273 		cancel_work_sync(&wl->netstack_work);
2274 		mutex_lock(&wl->mutex);
2275 power_off:
2276 		wl1271_power_off(wl);
2277 	}
2278 
2279 	if (!booted) {
2280 		wl1271_error("firmware boot failed despite %d retries",
2281 			     WL1271_BOOT_RETRIES);
2282 		goto out;
2283 	}
2284 
2285 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2286 
2287 	/* update hw/fw version info in wiphy struct */
2288 	wiphy->hw_version = wl->chip.id;
2289 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2290 		sizeof(wiphy->fw_version));
2291 
2292 	/*
2293 	 * Now we know if 11a is supported (info from the NVS), so disable
2294 	 * 11a channels if not supported
2295 	 */
2296 	if (!wl->enable_11a)
2297 		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2298 
2299 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2300 		     wl->enable_11a ? "" : "not ");
2301 
2302 	wl->state = WLCORE_STATE_ON;
2303 out:
2304 	return ret;
2305 }
2306 
2307 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2308 {
2309 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2310 }
2311 
2312 /*
2313  * Check whether a fw switch (i.e. moving from one loaded
2314  * fw to another) is needed. This function is also responsible
2315  * for updating wl->last_vif_count, so it must be called before
2316  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2317  * will be used).
2318  */
2319 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2320 				  struct vif_counter_data vif_counter_data,
2321 				  bool add)
2322 {
2323 	enum wl12xx_fw_type current_fw = wl->fw_type;
2324 	u8 vif_count = vif_counter_data.counter;
2325 
2326 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2327 		return false;
2328 
2329 	/* increase the vif count if this is a new vif */
2330 	if (add && !vif_counter_data.cur_vif_running)
2331 		vif_count++;
2332 
2333 	wl->last_vif_count = vif_count;
2334 
2335 	/* no need for fw change if the device is OFF */
2336 	if (wl->state == WLCORE_STATE_OFF)
2337 		return false;
2338 
2339 	/* no need for fw change if a single fw is used */
2340 	if (!wl->mr_fw_name)
2341 		return false;
2342 
2343 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2344 		return true;
2345 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2346 		return true;
2347 
2348 	return false;
2349 }
2350 
2351 /*
2352  * Enter "forced psm". Make sure the sta is in psm against the ap,
2353  * to make the fw switch a bit more disconnection-persistent.
2354  */
2355 static void wl12xx_force_active_psm(struct wl1271 *wl)
2356 {
2357 	struct wl12xx_vif *wlvif;
2358 
2359 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2360 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2361 	}
2362 }
2363 
2364 struct wlcore_hw_queue_iter_data {
2365 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2366 	/* current vif */
2367 	struct ieee80211_vif *vif;
2368 	/* is the current vif among those iterated */
2369 	bool cur_running;
2370 };
2371 
2372 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2373 				 struct ieee80211_vif *vif)
2374 {
2375 	struct wlcore_hw_queue_iter_data *iter_data = data;
2376 
2377 	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2378 		return;
2379 
2380 	if (iter_data->cur_running || vif == iter_data->vif) {
2381 		iter_data->cur_running = true;
2382 		return;
2383 	}
2384 
2385 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2386 }
2387 
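/*
 * Allocate a block of NUM_TX_QUEUES mac80211 hw queues for the vif, or
 * reuse the pre-allocated base on resume/recovery; AP vifs additionally
 * get a dedicated cab (content-after-beacon) queue.
 */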
2388 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2389 					 struct wl12xx_vif *wlvif)
2390 {
2391 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2392 	struct wlcore_hw_queue_iter_data iter_data = {};
2393 	int i, q_base;
2394 
2395 	iter_data.vif = vif;
2396 
2397 	/* mark all bits taken by active interfaces */
2398 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2399 					IEEE80211_IFACE_ITER_RESUME_ALL,
2400 					wlcore_hw_queue_iter, &iter_data);
2401 
2402 	/* the current vif is already running in mac80211 (resume/recovery) */
2403 	if (iter_data.cur_running) {
2404 		wlvif->hw_queue_base = vif->hw_queue[0];
2405 		wl1271_debug(DEBUG_MAC80211,
2406 			     "using pre-allocated hw queue base %d",
2407 			     wlvif->hw_queue_base);
2408 
2409 		/* the interface might have changed type */
2410 		goto adjust_cab_queue;
2411 	}
2412 
2413 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2414 				     WLCORE_NUM_MAC_ADDRESSES);
2415 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2416 		return -EBUSY;
2417 
2418 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2419 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2420 		     wlvif->hw_queue_base);
2421 
2422 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2423 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2424 		/* register hw queues in mac80211 */
2425 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2426 	}
2427 
2428 adjust_cab_queue:
2429 	/* the last places are reserved for cab queues per interface */
2430 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2431 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2432 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2433 	else
2434 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2435 
2436 	return 0;
2437 }
2438 
2439 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2440 				   struct ieee80211_vif *vif)
2441 {
2442 	struct wl1271 *wl = hw->priv;
2443 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2444 	struct vif_counter_data vif_count;
2445 	int ret = 0;
2446 	u8 role_type;
2447 
2448 	if (wl->plt) {
2449 		wl1271_error("Adding Interface not allowed while in PLT mode");
2450 		return -EBUSY;
2451 	}
2452 
2453 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2454 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2455 
2456 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2457 		     ieee80211_vif_type_p2p(vif), vif->addr);
2458 
2459 	wl12xx_get_vif_count(hw, vif, &vif_count);
2460 
2461 	mutex_lock(&wl->mutex);
2462 	ret = wl1271_ps_elp_wakeup(wl);
2463 	if (ret < 0)
2464 		goto out_unlock;
2465 
2466 	/*
2467 	 * In some corner-case HW recovery scenarios it's possible to
2468 	 * get here before __wl1271_op_remove_interface is complete, so
2469 	 * opt out if that is the case.
2470 	 */
2471 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2472 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2473 		ret = -EBUSY;
2474 		goto out;
2475 	}
2476 
2477 
2478 	ret = wl12xx_init_vif_data(wl, vif);
2479 	if (ret < 0)
2480 		goto out;
2481 
2482 	wlvif->wl = wl;
2483 	role_type = wl12xx_get_role_type(wl, wlvif);
2484 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2485 		ret = -EINVAL;
2486 		goto out;
2487 	}
2488 
2489 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2490 	if (ret < 0)
2491 		goto out;
2492 
2493 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2494 		wl12xx_force_active_psm(wl);
2495 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2496 		mutex_unlock(&wl->mutex);
2497 		wl1271_recovery_work(&wl->recovery_work);
2498 		return 0;
2499 	}
2500 
2501 	/*
2502 	 * TODO: once the nvs issue is solved, move this block
2503 	 * to start(), and make sure the driver is ON here.
2504 	 */
2505 	if (wl->state == WLCORE_STATE_OFF) {
2506 		/*
2507 		 * we still need this in order to configure the fw
2508 		 * while uploading the nvs
2509 		 */
2510 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2511 
2512 		ret = wl12xx_init_fw(wl);
2513 		if (ret < 0)
2514 			goto out;
2515 	}
2516 
2517 	ret = wl12xx_cmd_role_enable(wl, vif->addr,
2518 				     role_type, &wlvif->role_id);
2519 	if (ret < 0)
2520 		goto out;
2521 
2522 	ret = wl1271_init_vif_specific(wl, vif);
2523 	if (ret < 0)
2524 		goto out;
2525 
2526 	list_add(&wlvif->list, &wl->wlvif_list);
2527 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2528 
2529 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2530 		wl->ap_count++;
2531 	else
2532 		wl->sta_count++;
2533 out:
2534 	wl1271_ps_elp_sleep(wl);
2535 out_unlock:
2536 	mutex_unlock(&wl->mutex);
2537 
2538 	return ret;
2539 }
2540 
2541 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2542 					 struct ieee80211_vif *vif,
2543 					 bool reset_tx_queues)
2544 {
2545 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2546 	int i, ret;
2547 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2548 
2549 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2550 
2551 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2552 		return;
2553 
2554 	/* because of hardware recovery, we may get here twice */
2555 	if (wl->state == WLCORE_STATE_OFF)
2556 		return;
2557 
2558 	wl1271_info("down");
2559 
2560 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2561 	    wl->scan_wlvif == wlvif) {
2562 		/*
2563 		 * Rearm the tx watchdog just before idling scan. This
2564 		 * prevents just-finished scans from triggering the watchdog
2565 		 */
2566 		wl12xx_rearm_tx_watchdog_locked(wl);
2567 
2568 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2569 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2570 		wl->scan_wlvif = NULL;
2571 		wl->scan.req = NULL;
2572 		ieee80211_scan_completed(wl->hw, true);
2573 	}
2574 
2575 	if (wl->sched_vif == wlvif) {
2576 		ieee80211_sched_scan_stopped(wl->hw);
2577 		wl->sched_vif = NULL;
2578 	}
2579 
2580 	if (wl->roc_vif == vif) {
2581 		wl->roc_vif = NULL;
2582 		ieee80211_remain_on_channel_expired(wl->hw);
2583 	}
2584 
2585 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2586 		/* disable active roles */
2587 		ret = wl1271_ps_elp_wakeup(wl);
2588 		if (ret < 0)
2589 			goto deinit;
2590 
2591 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2592 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2593 			if (wl12xx_dev_role_started(wlvif))
2594 				wl12xx_stop_dev(wl, wlvif);
2595 		}
2596 
2597 		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2598 		if (ret < 0)
2599 			goto deinit;
2600 
2601 		wl1271_ps_elp_sleep(wl);
2602 	}
2603 deinit:
2604 	wl12xx_tx_reset_wlvif(wl, wlvif);
2605 
2606 	/* clear all hlids (except system_hlid) */
2607 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2608 
2609 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2610 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2611 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2612 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2613 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2614 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2615 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2616 	} else {
2617 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2618 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2619 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2620 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2621 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2622 			wl12xx_free_rate_policy(wl,
2623 						&wlvif->ap.ucast_rate_idx[i]);
2624 		wl1271_free_ap_keys(wl, wlvif);
2625 	}
2626 
2627 	dev_kfree_skb(wlvif->probereq);
2628 	wlvif->probereq = NULL;
2629 	if (wl->last_wlvif == wlvif)
2630 		wl->last_wlvif = NULL;
2631 	list_del(&wlvif->list);
2632 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2633 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2634 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2635 
2636 	if (is_ap)
2637 		wl->ap_count--;
2638 	else
2639 		wl->sta_count--;
2640 
2641 	/*
2642 	 * If this was the last AP and stations remain, configure sleep auth
2643 	 * according to the STA. Don't do this on unintended recovery.
2644 	 */
2645 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2646 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2647 		goto unlock;
2648 
2649 	if (wl->ap_count == 0 && is_ap) {
2650 		/* mask ap events */
2651 		wl->event_mask &= ~wl->ap_event_mask;
2652 		wl1271_event_unmask(wl);
2653 	}
2654 
2655 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2656 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2657 		/* Configure for power according to debugfs */
2658 		if (sta_auth != WL1271_PSM_ILLEGAL)
2659 			wl1271_acx_sleep_auth(wl, sta_auth);
2660 		/* Configure for ELP power saving */
2661 		else
2662 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2663 	}
2664 
2665 unlock:
2666 	mutex_unlock(&wl->mutex);
2667 
2668 	del_timer_sync(&wlvif->rx_streaming_timer);
2669 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2670 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2671 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2672 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2673 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2674 
2675 	mutex_lock(&wl->mutex);
2676 }
2677 
2678 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2679 				       struct ieee80211_vif *vif)
2680 {
2681 	struct wl1271 *wl = hw->priv;
2682 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2683 	struct wl12xx_vif *iter;
2684 	struct vif_counter_data vif_count;
2685 
2686 	wl12xx_get_vif_count(hw, vif, &vif_count);
2687 	mutex_lock(&wl->mutex);
2688 
2689 	if (wl->state == WLCORE_STATE_OFF ||
2690 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2691 		goto out;
2692 
2693 	/*
2694 	 * wl->vif can be null here if someone shuts down the interface
2695 	 * just when hardware recovery has been started.
2696 	 */
2697 	wl12xx_for_each_wlvif(wl, iter) {
2698 		if (iter != wlvif)
2699 			continue;
2700 
2701 		__wl1271_op_remove_interface(wl, vif, true);
2702 		break;
2703 	}
2704 	WARN_ON(iter != wlvif);
2705 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2706 		wl12xx_force_active_psm(wl);
2707 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2708 		wl12xx_queue_recovery_work(wl);
2709 	}
2710 out:
2711 	mutex_unlock(&wl->mutex);
2712 }
2713 
2714 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2715 				      struct ieee80211_vif *vif,
2716 				      enum nl80211_iftype new_type, bool p2p)
2717 {
2718 	struct wl1271 *wl = hw->priv;
2719 	int ret;
2720 
2721 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2722 	wl1271_op_remove_interface(hw, vif);
2723 
2724 	vif->type = new_type;
2725 	vif->p2p = p2p;
2726 	ret = wl1271_op_add_interface(hw, vif);
2727 
2728 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2729 	return ret;
2730 }
2731 
2732 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2733 {
2734 	int ret;
2735 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2736 
2737 	/*
2738 	 * One of the side effects of the JOIN command is that it clears
2739 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2740 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2741 	 * Currently the only valid scenario for JOIN during association
2742 	 * is on roaming, in which case we will also be given new keys.
2743 	 * Keep the below message for now, unless it starts bothering
2744 	 * users who really like to roam a lot :)
2745 	 */
2746 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2747 		wl1271_info("JOIN while associated.");
2748 
2749 	/* clear encryption type */
2750 	wlvif->encryption_type = KEY_NONE;
2751 
2752 	if (is_ibss)
2753 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2754 	else {
2755 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2756 			/*
2757 			 * TODO: this is an ugly workaround for a wl12xx fw
2758 			 * bug - we are not able to tx/rx after the first
2759 			 * start_sta, so make dummy start+stop calls,
2760 			 * and then call start_sta again.
2761 			 * This should be fixed in the fw.
2762 			 */
2763 			wl12xx_cmd_role_start_sta(wl, wlvif);
2764 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2765 		}
2766 
2767 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2768 	}
2769 
2770 	return ret;
2771 }
2772 
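/*
 * Find the SSID element in the given frame's IEs and cache it into
 * wlvif->ssid/ssid_len.
 */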
2773 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2774 			    int offset)
2775 {
2776 	u8 ssid_len;
2777 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2778 					 skb->len - offset);
2779 
2780 	if (!ptr) {
2781 		wl1271_error("No SSID in IEs!");
2782 		return -ENOENT;
2783 	}
2784 
2785 	ssid_len = ptr[1];
2786 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2787 		wl1271_error("SSID is too long!");
2788 		return -EINVAL;
2789 	}
2790 
2791 	wlvif->ssid_len = ssid_len;
2792 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2793 	return 0;
2794 }
2795 
2796 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2797 {
2798 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2799 	struct sk_buff *skb;
2800 	int ieoffset;
2801 
2802 	/* we currently only support setting the ssid from the ap probe req */
2803 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2804 		return -EINVAL;
2805 
2806 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2807 	if (!skb)
2808 		return -EINVAL;
2809 
2810 	ieoffset = offsetof(struct ieee80211_mgmt,
2811 			    u.probe_req.variable);
2812 	wl1271_ssid_set(wlvif, skb, ieoffset);
2813 	dev_kfree_skb(skb);
2814 
2815 	return 0;
2816 }
2817 
2818 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2819 			    struct ieee80211_bss_conf *bss_conf,
2820 			    u32 sta_rate_set)
2821 {
2822 	int ieoffset;
2823 	int ret;
2824 
2825 	wlvif->aid = bss_conf->aid;
2826 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2827 	wlvif->beacon_int = bss_conf->beacon_int;
2828 	wlvif->wmm_enabled = bss_conf->qos;
2829 
2830 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2831 
2832 	/*
2833 	 * with wl1271, we don't need to update the
2834 	 * beacon_int and dtim_period, because the firmware
2835 	 * updates them by itself when the first beacon is
2836 	 * received after a join.
2837 	 */
2838 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2839 	if (ret < 0)
2840 		return ret;
2841 
2842 	/*
2843 	 * Get a template for hardware connection maintenance
2844 	 */
2845 	dev_kfree_skb(wlvif->probereq);
2846 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2847 							wlvif,
2848 							NULL);
2849 	ieoffset = offsetof(struct ieee80211_mgmt,
2850 			    u.probe_req.variable);
2851 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2852 
2853 	/* enable the connection monitoring feature */
2854 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2855 	if (ret < 0)
2856 		return ret;
2857 
2858 	/*
2859 	 * The join command disables the keep-alive mode, shuts down its process,
2860 	 * and also clears the template config, so we need to reset it all after
2861 	 * the join. The acx_aid starts the keep-alive process, and the order
2862 	 * of the commands below is relevant.
2863 	 */
2864 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2865 	if (ret < 0)
2866 		return ret;
2867 
2868 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2869 	if (ret < 0)
2870 		return ret;
2871 
2872 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2873 	if (ret < 0)
2874 		return ret;
2875 
2876 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2877 					   wlvif->sta.klv_template_id,
2878 					   ACX_KEEP_ALIVE_TPL_VALID);
2879 	if (ret < 0)
2880 		return ret;
2881 
2882 	/*
2883 	 * The default fw psm configuration is AUTO, while mac80211 default
2884 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2885 	 */
2886 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2887 	if (ret < 0)
2888 		return ret;
2889 
2890 	if (sta_rate_set) {
2891 		wlvif->rate_set =
2892 			wl1271_tx_enabled_rates_get(wl,
2893 						    sta_rate_set,
2894 						    wlvif->band);
2895 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2896 		if (ret < 0)
2897 			return ret;
2898 	}
2899 
2900 	return ret;
2901 }
2902 
2903 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2904 {
2905 	int ret;
2906 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2907 
2908 	/* make sure we are associated (sta) */
2909 	if (sta &&
2910 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2911 		return false;
2912 
2913 	/* make sure we are joined (ibss) */
2914 	if (!sta &&
2915 	    !test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2916 		return false;
2917 
2918 	if (sta) {
2919 		/* use defaults when not associated */
2920 		wlvif->aid = 0;
2921 
2922 		/* free probe-request template */
2923 		dev_kfree_skb(wlvif->probereq);
2924 		wlvif->probereq = NULL;
2925 
2926 		/* disable connection monitor features */
2927 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2928 		if (ret < 0)
2929 			return ret;
2930 
2931 		/* Disable the keep-alive feature */
2932 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2933 		if (ret < 0)
2934 			return ret;
2935 	}
2936 
2937 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2938 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2939 
2940 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
2941 		ieee80211_chswitch_done(vif, false);
2942 		cancel_delayed_work(&wlvif->channel_switch_work);
2943 	}
2944 
2945 	/* invalidate keep-alive template */
2946 	wl1271_acx_keep_alive_config(wl, wlvif,
2947 				     wlvif->sta.klv_template_id,
2948 				     ACX_KEEP_ALIVE_TPL_INVALID);
2949 
2950 	return 0;
2951 }
2952 
2953 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2954 {
2955 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2956 	wlvif->rate_set = wlvif->basic_rate_set;
2957 }
2958 
2959 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2960 				   bool idle)
2961 {
2962 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2963 
2964 	if (idle == cur_idle)
2965 		return;
2966 
2967 	if (idle) {
2968 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2969 	} else {
2970 		/* The current firmware only supports sched_scan in idle */
2971 		if (wl->sched_vif == wlvif)
2972 			wl->ops->sched_scan_stop(wl, wlvif);
2973 
2974 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2975 	}
2976 }
2977 
2978 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2979 			     struct ieee80211_conf *conf, u32 changed)
2980 {
2981 	int ret;
2982 
2983 	if (conf->power_level != wlvif->power_level) {
2984 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2985 		if (ret < 0)
2986 			return ret;
2987 
2988 		wlvif->power_level = conf->power_level;
2989 	}
2990 
2991 	return 0;
2992 }
2993 
2994 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2995 {
2996 	struct wl1271 *wl = hw->priv;
2997 	struct wl12xx_vif *wlvif;
2998 	struct ieee80211_conf *conf = &hw->conf;
2999 	int ret = 0;
3000 
3001 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3002 		     " changed 0x%x",
3003 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3004 		     conf->power_level,
3005 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3006 			 changed);
3007 
3008 	mutex_lock(&wl->mutex);
3009 
3010 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3011 		wl->power_level = conf->power_level;
3012 
3013 	if (unlikely(wl->state != WLCORE_STATE_ON))
3014 		goto out;
3015 
3016 	ret = wl1271_ps_elp_wakeup(wl);
3017 	if (ret < 0)
3018 		goto out;
3019 
3020 	/* configure each interface */
3021 	wl12xx_for_each_wlvif(wl, wlvif) {
3022 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3023 		if (ret < 0)
3024 			goto out_sleep;
3025 	}
3026 
3027 out_sleep:
3028 	wl1271_ps_elp_sleep(wl);
3029 
3030 out:
3031 	mutex_unlock(&wl->mutex);
3032 
3033 	return ret;
3034 }
3035 
3036 struct wl1271_filter_params {
3037 	bool enabled;
3038 	int mc_list_length;
3039 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3040 };
3041 
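/*
 * Called in atomic context: snapshot the multicast list into a
 * wl1271_filter_params struct and hand it back to mac80211 packed in a
 * u64, to be consumed (and freed) by wl1271_op_configure_filter().
 */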
3042 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3043 				       struct netdev_hw_addr_list *mc_list)
3044 {
3045 	struct wl1271_filter_params *fp;
3046 	struct netdev_hw_addr *ha;
3047 
3048 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3049 	if (!fp) {
3050 		wl1271_error("Out of memory setting filters.");
3051 		return 0;
3052 	}
3053 
3054 	/* update multicast filtering parameters */
3055 	fp->mc_list_length = 0;
3056 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3057 		fp->enabled = false;
3058 	} else {
3059 		fp->enabled = true;
3060 		netdev_hw_addr_list_for_each(ha, mc_list) {
3061 			memcpy(fp->mc_list[fp->mc_list_length],
3062 					ha->addr, ETH_ALEN);
3063 			fp->mc_list_length++;
3064 		}
3065 	}
3066 
3067 	return (u64)(unsigned long)fp;
3068 }
3069 
3070 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3071 				  FIF_ALLMULTI | \
3072 				  FIF_FCSFAIL | \
3073 				  FIF_BCN_PRBRESP_PROMISC | \
3074 				  FIF_CONTROL | \
3075 				  FIF_OTHER_BSS)
3076 
3077 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3078 				       unsigned int changed,
3079 				       unsigned int *total, u64 multicast)
3080 {
3081 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3082 	struct wl1271 *wl = hw->priv;
3083 	struct wl12xx_vif *wlvif;
3084 
3085 	int ret;
3086 
3087 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3088 		     " total %x", changed, *total);
3089 
3090 	mutex_lock(&wl->mutex);
3091 
3092 	*total &= WL1271_SUPPORTED_FILTERS;
3093 	changed &= WL1271_SUPPORTED_FILTERS;
3094 
3095 	if (unlikely(wl->state != WLCORE_STATE_ON))
3096 		goto out;
3097 
3098 	ret = wl1271_ps_elp_wakeup(wl);
3099 	if (ret < 0)
3100 		goto out;
3101 
3102 	wl12xx_for_each_wlvif(wl, wlvif) {
3103 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3104 			if (*total & FIF_ALLMULTI)
3105 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3106 								   false,
3107 								   NULL, 0);
3108 			else if (fp)
3109 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3110 							fp->enabled,
3111 							fp->mc_list,
3112 							fp->mc_list_length);
3113 			if (ret < 0)
3114 				goto out_sleep;
3115 		}
3116 	}
3117 
3118 	/*
3119 	 * The fw doesn't provide an API to configure the filters. Instead,
3120 	 * the filter configuration is based on the active roles / ROC
3121 	 * state.
3122 	 */
3123 
3124 out_sleep:
3125 	wl1271_ps_elp_sleep(wl);
3126 
3127 out:
3128 	mutex_unlock(&wl->mutex);
3129 	kfree(fp);
3130 }
3131 
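/*
 * Keys set before the AP role is started cannot be programmed into the
 * fw yet; record them here so wl1271_ap_init_hwenc() can install them
 * once the AP is up.
 */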
3132 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3133 				u8 id, u8 key_type, u8 key_size,
3134 				const u8 *key, u8 hlid, u32 tx_seq_32,
3135 				u16 tx_seq_16)
3136 {
3137 	struct wl1271_ap_key *ap_key;
3138 	int i;
3139 
3140 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3141 
3142 	if (key_size > MAX_KEY_SIZE)
3143 		return -EINVAL;
3144 
3145 	/*
3146 	 * Find next free entry in ap_keys. Also check we are not replacing
3147 	 * an existing key.
3148 	 */
3149 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3150 		if (wlvif->ap.recorded_keys[i] == NULL)
3151 			break;
3152 
3153 		if (wlvif->ap.recorded_keys[i]->id == id) {
3154 			wl1271_warning("trying to record key replacement");
3155 			return -EINVAL;
3156 		}
3157 	}
3158 
3159 	if (i == MAX_NUM_KEYS)
3160 		return -EBUSY;
3161 
3162 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3163 	if (!ap_key)
3164 		return -ENOMEM;
3165 
3166 	ap_key->id = id;
3167 	ap_key->key_type = key_type;
3168 	ap_key->key_size = key_size;
3169 	memcpy(ap_key->key, key, key_size);
3170 	ap_key->hlid = hlid;
3171 	ap_key->tx_seq_32 = tx_seq_32;
3172 	ap_key->tx_seq_16 = tx_seq_16;
3173 
3174 	wlvif->ap.recorded_keys[i] = ap_key;
3175 	return 0;
3176 }
3177 
3178 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3179 {
3180 	int i;
3181 
3182 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3183 		kfree(wlvif->ap.recorded_keys[i]);
3184 		wlvif->ap.recorded_keys[i] = NULL;
3185 	}
3186 }
3187 
3188 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3189 {
3190 	int i, ret = 0;
3191 	struct wl1271_ap_key *key;
3192 	bool wep_key_added = false;
3193 
3194 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3195 		u8 hlid;
3196 		if (wlvif->ap.recorded_keys[i] == NULL)
3197 			break;
3198 
3199 		key = wlvif->ap.recorded_keys[i];
3200 		hlid = key->hlid;
3201 		if (hlid == WL12XX_INVALID_LINK_ID)
3202 			hlid = wlvif->ap.bcast_hlid;
3203 
3204 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3205 					    key->id, key->key_type,
3206 					    key->key_size, key->key,
3207 					    hlid, key->tx_seq_32,
3208 					    key->tx_seq_16);
3209 		if (ret < 0)
3210 			goto out;
3211 
3212 		if (key->key_type == KEY_WEP)
3213 			wep_key_added = true;
3214 	}
3215 
3216 	if (wep_key_added) {
3217 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3218 						     wlvif->ap.bcast_hlid);
3219 		if (ret < 0)
3220 			goto out;
3221 	}
3222 
3223 out:
3224 	wl1271_free_ap_keys(wl, wlvif);
3225 	return ret;
3226 }
3227 
3228 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3229 		       u16 action, u8 id, u8 key_type,
3230 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3231 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3232 {
3233 	int ret;
3234 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3235 
3236 	if (is_ap) {
3237 		struct wl1271_station *wl_sta;
3238 		u8 hlid;
3239 
3240 		if (sta) {
3241 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3242 			hlid = wl_sta->hlid;
3243 		} else {
3244 			hlid = wlvif->ap.bcast_hlid;
3245 		}
3246 
3247 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3248 			/*
3249 			 * We do not support removing keys after AP shutdown.
3250 			 * Pretend we do to make mac80211 happy.
3251 			 */
3252 			if (action != KEY_ADD_OR_REPLACE)
3253 				return 0;
3254 
3255 			ret = wl1271_record_ap_key(wl, wlvif, id,
3256 					     key_type, key_size,
3257 					     key, hlid, tx_seq_32,
3258 					     tx_seq_16);
3259 		} else {
3260 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3261 					     id, key_type, key_size,
3262 					     key, hlid, tx_seq_32,
3263 					     tx_seq_16);
3264 		}
3265 
3266 		if (ret < 0)
3267 			return ret;
3268 	} else {
3269 		const u8 *addr;
3270 		static const u8 bcast_addr[ETH_ALEN] = {
3271 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3272 		};
3273 
3274 		addr = sta ? sta->addr : bcast_addr;
3275 
3276 		if (is_zero_ether_addr(addr)) {
3277 			/* We don't support TX-only encryption */
3278 			return -EOPNOTSUPP;
3279 		}
3280 
3281 		/* The wl1271 does not allow removing unicast keys - they
3282 		   will be cleared automatically on the next CMD_JOIN. Ignore
3283 		   the request silently, as we don't want mac80211 to emit
3284 		   an error message. */
3285 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3286 			return 0;
3287 
3288 		/* don't remove key if hlid was already deleted */
3289 		if (action == KEY_REMOVE &&
3290 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3291 			return 0;
3292 
3293 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3294 					     id, key_type, key_size,
3295 					     key, addr, tx_seq_32,
3296 					     tx_seq_16);
3297 		if (ret < 0)
3298 			return ret;
3299 
3300 	}
3301 
3302 	return 0;
3303 }
3304 
3305 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3306 			     struct ieee80211_vif *vif,
3307 			     struct ieee80211_sta *sta,
3308 			     struct ieee80211_key_conf *key_conf)
3309 {
3310 	struct wl1271 *wl = hw->priv;
3311 	int ret;
3312 	bool might_change_spare =
3313 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3314 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3315 
3316 	if (might_change_spare) {
3317 		/*
3318 		 * stop the queues and flush to ensure the next packets are
3319 		 * in sync with FW spare block accounting
3320 		 */
3321 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3322 		wl1271_tx_flush(wl);
3323 	}
3324 
3325 	mutex_lock(&wl->mutex);
3326 
3327 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3328 		ret = -EAGAIN;
3329 		goto out_wake_queues;
3330 	}
3331 
3332 	ret = wl1271_ps_elp_wakeup(wl);
3333 	if (ret < 0)
3334 		goto out_wake_queues;
3335 
3336 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3337 
3338 	wl1271_ps_elp_sleep(wl);
3339 
3340 out_wake_queues:
3341 	if (might_change_spare)
3342 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3343 
3344 	mutex_unlock(&wl->mutex);
3345 
3346 	return ret;
3347 }
3348 
3349 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3350 		   struct ieee80211_vif *vif,
3351 		   struct ieee80211_sta *sta,
3352 		   struct ieee80211_key_conf *key_conf)
3353 {
3354 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3355 	int ret;
3356 	u32 tx_seq_32 = 0;
3357 	u16 tx_seq_16 = 0;
3358 	u8 key_type;
3359 	u8 hlid;
3360 
3361 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3362 
3363 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3364 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3365 		     key_conf->cipher, key_conf->keyidx,
3366 		     key_conf->keylen, key_conf->flags);
3367 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3368 
3369 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3370 		if (sta) {
3371 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3372 			hlid = wl_sta->hlid;
3373 		} else {
3374 			hlid = wlvif->ap.bcast_hlid;
3375 		}
3376 	else
3377 		hlid = wlvif->sta.hlid;
3378 
3379 	if (hlid != WL12XX_INVALID_LINK_ID) {
3380 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3381 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3382 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3383 	}
3384 
3385 	switch (key_conf->cipher) {
3386 	case WLAN_CIPHER_SUITE_WEP40:
3387 	case WLAN_CIPHER_SUITE_WEP104:
3388 		key_type = KEY_WEP;
3389 
3390 		key_conf->hw_key_idx = key_conf->keyidx;
3391 		break;
3392 	case WLAN_CIPHER_SUITE_TKIP:
3393 		key_type = KEY_TKIP;
3394 		key_conf->hw_key_idx = key_conf->keyidx;
3395 		break;
3396 	case WLAN_CIPHER_SUITE_CCMP:
3397 		key_type = KEY_AES;
3398 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3399 		break;
3400 	case WL1271_CIPHER_SUITE_GEM:
3401 		key_type = KEY_GEM;
3402 		break;
3403 	default:
3404 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3405 
3406 		return -EOPNOTSUPP;
3407 	}
3408 
3409 	switch (cmd) {
3410 	case SET_KEY:
3411 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3412 				 key_conf->keyidx, key_type,
3413 				 key_conf->keylen, key_conf->key,
3414 				 tx_seq_32, tx_seq_16, sta);
3415 		if (ret < 0) {
3416 			wl1271_error("Could not add or replace key");
3417 			return ret;
3418 		}
3419 
3420 		/*
3421 		 * Reconfigure the ARP response if the unicast (or common)
3422 		 * encryption key type was changed.
3423 		 */
3424 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3425 		    (sta || key_type == KEY_WEP) &&
3426 		    wlvif->encryption_type != key_type) {
3427 			wlvif->encryption_type = key_type;
3428 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3429 			if (ret < 0) {
3430 				wl1271_warning("build arp rsp failed: %d", ret);
3431 				return ret;
3432 			}
3433 		}
3434 		break;
3435 
3436 	case DISABLE_KEY:
3437 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3438 				     key_conf->keyidx, key_type,
3439 				     key_conf->keylen, key_conf->key,
3440 				     0, 0, sta);
3441 		if (ret < 0) {
3442 			wl1271_error("Could not remove key");
3443 			return ret;
3444 		}
3445 		break;
3446 
3447 	default:
3448 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3449 		return -EOPNOTSUPP;
3450 	}
3451 
3452 	return ret;
3453 }
3454 EXPORT_SYMBOL_GPL(wlcore_set_key);
3455 
3456 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3457 					  struct ieee80211_vif *vif,
3458 					  int key_idx)
3459 {
3460 	struct wl1271 *wl = hw->priv;
3461 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3462 	int ret;
3463 
3464 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3465 		     key_idx);
3466 
3467 	mutex_lock(&wl->mutex);
3468 
3469 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3470 		ret = -EAGAIN;
3471 		goto out_unlock;
3472 	}
3473 
3474 	ret = wl1271_ps_elp_wakeup(wl);
3475 	if (ret < 0)
3476 		goto out_unlock;
3477 
3478 	wlvif->default_key = key_idx;
3479 
3480 	/* the default WEP key needs to be configured at least once */
3481 	if (wlvif->encryption_type == KEY_WEP) {
3482 		ret = wl12xx_cmd_set_default_wep_key(wl,
3483 				key_idx,
3484 				wlvif->sta.hlid);
3485 		if (ret < 0)
3486 			goto out_sleep;
3487 	}
3488 
3489 out_sleep:
3490 	wl1271_ps_elp_sleep(wl);
3491 
3492 out_unlock:
3493 	mutex_unlock(&wl->mutex);
3494 }
3495 
3496 void wlcore_regdomain_config(struct wl1271 *wl)
3497 {
3498 	int ret;
3499 
3500 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3501 		return;
3502 
3503 	mutex_lock(&wl->mutex);
3504 
3505 	if (unlikely(wl->state != WLCORE_STATE_ON))
3506 		goto out;
3507 
3508 	ret = wl1271_ps_elp_wakeup(wl);
3509 	if (ret < 0)
3510 		goto out;
3511 
3512 	ret = wlcore_cmd_regdomain_config_locked(wl);
3513 	if (ret < 0) {
3514 		wl12xx_queue_recovery_work(wl);
3515 		goto out;
3516 	}
3517 
3518 	wl1271_ps_elp_sleep(wl);
3519 out:
3520 	mutex_unlock(&wl->mutex);
3521 }
3522 
3523 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3524 			     struct ieee80211_vif *vif,
3525 			     struct cfg80211_scan_request *req)
3526 {
3527 	struct wl1271 *wl = hw->priv;
3528 	int ret;
3529 	u8 *ssid = NULL;
3530 	size_t len = 0;
3531 
3532 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3533 
3534 	if (req->n_ssids) {
3535 		ssid = req->ssids[0].ssid;
3536 		len = req->ssids[0].ssid_len;
3537 	}
3538 
3539 	mutex_lock(&wl->mutex);
3540 
3541 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3542 		/*
3543 		 * We cannot return -EBUSY here because cfg80211 will expect
3544 		 * a call to ieee80211_scan_completed if we do - in this case
3545 		 * there won't be any call.
3546 		 */
3547 		ret = -EAGAIN;
3548 		goto out;
3549 	}
3550 
3551 	ret = wl1271_ps_elp_wakeup(wl);
3552 	if (ret < 0)
3553 		goto out;
3554 
3555 	/* fail if there is any role in ROC */
3556 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3557 		/* don't allow scanning right now */
3558 		ret = -EBUSY;
3559 		goto out_sleep;
3560 	}
3561 
3562 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3563 out_sleep:
3564 	wl1271_ps_elp_sleep(wl);
3565 out:
3566 	mutex_unlock(&wl->mutex);
3567 
3568 	return ret;
3569 }
3570 
3571 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3572 				     struct ieee80211_vif *vif)
3573 {
3574 	struct wl1271 *wl = hw->priv;
3575 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3576 	int ret;
3577 
3578 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3579 
3580 	mutex_lock(&wl->mutex);
3581 
3582 	if (unlikely(wl->state != WLCORE_STATE_ON))
3583 		goto out;
3584 
3585 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3586 		goto out;
3587 
3588 	ret = wl1271_ps_elp_wakeup(wl);
3589 	if (ret < 0)
3590 		goto out;
3591 
3592 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3593 		ret = wl->ops->scan_stop(wl, wlvif);
3594 		if (ret < 0)
3595 			goto out_sleep;
3596 	}
3597 
3598 	/*
3599 	 * Rearm the tx watchdog just before idling scan. This
3600 	 * prevents just-finished scans from triggering the watchdog
3601 	 */
3602 	wl12xx_rearm_tx_watchdog_locked(wl);
3603 
3604 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3605 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3606 	wl->scan_wlvif = NULL;
3607 	wl->scan.req = NULL;
3608 	ieee80211_scan_completed(wl->hw, true);
3609 
3610 out_sleep:
3611 	wl1271_ps_elp_sleep(wl);
3612 out:
3613 	mutex_unlock(&wl->mutex);
3614 
3615 	cancel_delayed_work_sync(&wl->scan_complete_work);
3616 }
3617 
3618 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3619 				      struct ieee80211_vif *vif,
3620 				      struct cfg80211_sched_scan_request *req,
3621 				      struct ieee80211_sched_scan_ies *ies)
3622 {
3623 	struct wl1271 *wl = hw->priv;
3624 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3625 	int ret;
3626 
3627 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3628 
3629 	mutex_lock(&wl->mutex);
3630 
3631 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3632 		ret = -EAGAIN;
3633 		goto out;
3634 	}
3635 
3636 	ret = wl1271_ps_elp_wakeup(wl);
3637 	if (ret < 0)
3638 		goto out;
3639 
3640 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3641 	if (ret < 0)
3642 		goto out_sleep;
3643 
3644 	wl->sched_vif = wlvif;
3645 
3646 out_sleep:
3647 	wl1271_ps_elp_sleep(wl);
3648 out:
3649 	mutex_unlock(&wl->mutex);
3650 	return ret;
3651 }
3652 
3653 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3654 				      struct ieee80211_vif *vif)
3655 {
3656 	struct wl1271 *wl = hw->priv;
3657 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3658 	int ret;
3659 
3660 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3661 
3662 	mutex_lock(&wl->mutex);
3663 
3664 	if (unlikely(wl->state != WLCORE_STATE_ON))
3665 		goto out;
3666 
3667 	ret = wl1271_ps_elp_wakeup(wl);
3668 	if (ret < 0)
3669 		goto out;
3670 
3671 	wl->ops->sched_scan_stop(wl, wlvif);
3672 
3673 	wl1271_ps_elp_sleep(wl);
3674 out:
3675 	mutex_unlock(&wl->mutex);
3676 }
3677 
3678 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3679 {
3680 	struct wl1271 *wl = hw->priv;
3681 	int ret = 0;
3682 
3683 	mutex_lock(&wl->mutex);
3684 
3685 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3686 		ret = -EAGAIN;
3687 		goto out;
3688 	}
3689 
3690 	ret = wl1271_ps_elp_wakeup(wl);
3691 	if (ret < 0)
3692 		goto out;
3693 
3694 	ret = wl1271_acx_frag_threshold(wl, value);
3695 	if (ret < 0)
3696 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3697 
3698 	wl1271_ps_elp_sleep(wl);
3699 
3700 out:
3701 	mutex_unlock(&wl->mutex);
3702 
3703 	return ret;
3704 }
3705 
3706 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3707 {
3708 	struct wl1271 *wl = hw->priv;
3709 	struct wl12xx_vif *wlvif;
3710 	int ret = 0;
3711 
3712 	mutex_lock(&wl->mutex);
3713 
3714 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3715 		ret = -EAGAIN;
3716 		goto out;
3717 	}
3718 
3719 	ret = wl1271_ps_elp_wakeup(wl);
3720 	if (ret < 0)
3721 		goto out;
3722 
3723 	wl12xx_for_each_wlvif(wl, wlvif) {
3724 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3725 		if (ret < 0)
3726 			wl1271_warning("set rts threshold failed: %d", ret);
3727 	}
3728 	wl1271_ps_elp_sleep(wl);
3729 
3730 out:
3731 	mutex_unlock(&wl->mutex);
3732 
3733 	return ret;
3734 }
3735 
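/* Strip a single IE (matched by element id) from a template skb, in place. */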
3736 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3737 {
3738 	int len;
3739 	const u8 *next, *end = skb->data + skb->len;
3740 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3741 					skb->len - ieoffset);
3742 	if (!ie)
3743 		return;
3744 	len = ie[1] + 2;
3745 	next = ie + len;
3746 	memmove(ie, next, end - next);
3747 	skb_trim(skb, skb->len - len);
3748 }
3749 
3750 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3751 					    unsigned int oui, u8 oui_type,
3752 					    int ieoffset)
3753 {
3754 	int len;
3755 	const u8 *next, *end = skb->data + skb->len;
3756 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3757 					       skb->data + ieoffset,
3758 					       skb->len - ieoffset);
3759 	if (!ie)
3760 		return;
3761 	len = ie[1] + 2;
3762 	next = ie + len;
3763 	memmove(ie, next, end - next);
3764 	skb_trim(skb, skb->len - len);
3765 }
3766 
3767 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3768 					 struct ieee80211_vif *vif)
3769 {
3770 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3771 	struct sk_buff *skb;
3772 	int ret;
3773 
3774 	skb = ieee80211_proberesp_get(wl->hw, vif);
3775 	if (!skb)
3776 		return -EOPNOTSUPP;
3777 
3778 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3779 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3780 				      skb->data,
3781 				      skb->len, 0,
3782 				      rates);
3783 	dev_kfree_skb(skb);
3784 
3785 	if (ret < 0)
3786 		goto out;
3787 
3788 	wl1271_debug(DEBUG_AP, "probe response updated");
3789 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3790 
3791 out:
3792 	return ret;
3793 }
3794 
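/*
 * Set the AP probe-response template from the supplied frame data; when
 * the vif has no SSID cached (e.g. hidden SSID), splice the SSID from
 * bss_conf into the frame before programming the template.
 */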
3795 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3796 					     struct ieee80211_vif *vif,
3797 					     u8 *probe_rsp_data,
3798 					     size_t probe_rsp_len,
3799 					     u32 rates)
3800 {
3801 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3802 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3803 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3804 	int ssid_ie_offset, ie_offset, templ_len;
3805 	const u8 *ptr;
3806 
3807 	/* no need to change probe response if the SSID is set correctly */
3808 	if (wlvif->ssid_len > 0)
3809 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3810 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3811 					       probe_rsp_data,
3812 					       probe_rsp_len, 0,
3813 					       rates);
3814 
3815 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3816 		wl1271_error("probe_rsp template too big");
3817 		return -EINVAL;
3818 	}
3819 
3820 	/* start searching from IE offset */
3821 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3822 
3823 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3824 			       probe_rsp_len - ie_offset);
3825 	if (!ptr) {
3826 		wl1271_error("No SSID in beacon!");
3827 		return -EINVAL;
3828 	}
3829 
3830 	ssid_ie_offset = ptr - probe_rsp_data;
3831 	ptr += (ptr[1] + 2);
3832 
3833 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3834 
3835 	/* insert SSID from bss_conf */
3836 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3837 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3838 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3839 	       bss_conf->ssid, bss_conf->ssid_len);
3840 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3841 
3842 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3843 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3844 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3845 
3846 	return wl1271_cmd_template_set(wl, wlvif->role_id,
3847 				       CMD_TEMPL_AP_PROBE_RESPONSE,
3848 				       probe_rsp_templ,
3849 				       templ_len, 0,
3850 				       rates);
3851 }
3852 
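/*
 * Apply ERP related changes (slot time, preamble length and CTS
 * protection) to the firmware whenever mac80211 signals the
 * corresponding BSS_CHANGED_ERP_* bits.
 */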
3853 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3854 				       struct ieee80211_vif *vif,
3855 				       struct ieee80211_bss_conf *bss_conf,
3856 				       u32 changed)
3857 {
3858 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3859 	int ret = 0;
3860 
3861 	if (changed & BSS_CHANGED_ERP_SLOT) {
3862 		if (bss_conf->use_short_slot)
3863 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3864 		else
3865 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3866 		if (ret < 0) {
3867 			wl1271_warning("Set slot time failed %d", ret);
3868 			goto out;
3869 		}
3870 	}
3871 
3872 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3873 		if (bss_conf->use_short_preamble)
3874 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3875 		else
3876 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3877 	}
3878 
3879 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3880 		if (bss_conf->use_cts_prot)
3881 			ret = wl1271_acx_cts_protect(wl, wlvif,
3882 						     CTSPROTECT_ENABLE);
3883 		else
3884 			ret = wl1271_acx_cts_protect(wl, wlvif,
3885 						     CTSPROTECT_DISABLE);
3886 		if (ret < 0) {
3887 			wl1271_warning("Set ctsprotect failed %d", ret);
3888 			goto out;
3889 		}
3890 	}
3891 
3892 out:
3893 	return ret;
3894 }
3895 
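/*
 * Upload the beacon template and, unless userspace already set an
 * explicit probe response, derive one from the same beacon: strip the
 * TIM and P2P vendor IEs, rewrite the frame control to PROBE_RESP and
 * upload the result as the probe response template as well.
 */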
3896 static int wlcore_set_beacon_template(struct wl1271 *wl,
3897 				      struct ieee80211_vif *vif,
3898 				      bool is_ap)
3899 {
3900 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3901 	struct ieee80211_hdr *hdr;
3902 	u32 min_rate;
3903 	int ret;
3904 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3905 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3906 	u16 tmpl_id;
3907 
3908 	if (!beacon) {
3909 		ret = -EINVAL;
3910 		goto out;
3911 	}
3912 
3913 	wl1271_debug(DEBUG_MASTER, "beacon updated");
3914 
3915 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3916 	if (ret < 0) {
3917 		dev_kfree_skb(beacon);
3918 		goto out;
3919 	}
3920 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3921 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3922 		CMD_TEMPL_BEACON;
3923 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3924 				      beacon->data,
3925 				      beacon->len, 0,
3926 				      min_rate);
3927 	if (ret < 0) {
3928 		dev_kfree_skb(beacon);
3929 		goto out;
3930 	}
3931 
3932 	wlvif->wmm_enabled =
3933 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3934 					WLAN_OUI_TYPE_MICROSOFT_WMM,
3935 					beacon->data + ieoffset,
3936 					beacon->len - ieoffset);
3937 
3938 	/*
3939 	 * In case a probe-resp template was already set explicitly by
3940 	 * userspace, don't derive one from the beacon data.
3941 	 */
3942 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3943 		goto end_bcn;
3944 
3945 	/* remove TIM ie from probe response */
3946 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3947 
3948 	/*
3949 	 * remove p2p ie from probe response.
3950 	 * the fw responds to probe requests that don't include
3951 	 * the p2p ie. probe requests with a p2p ie will be passed up,
3952 	 * and will be answered by the supplicant (the spec
3953 	 * forbids including the p2p ie when responding to probe
3954 	 * requests that didn't include it).
3955 	 */
3956 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3957 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3958 
3959 	hdr = (struct ieee80211_hdr *) beacon->data;
3960 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3961 					 IEEE80211_STYPE_PROBE_RESP);
3962 	if (is_ap)
3963 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3964 							   beacon->data,
3965 							   beacon->len,
3966 							   min_rate);
3967 	else
3968 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3969 					      CMD_TEMPL_PROBE_RESPONSE,
3970 					      beacon->data,
3971 					      beacon->len, 0,
3972 					      min_rate);
3973 end_bcn:
3974 	dev_kfree_skb(beacon);
3975 	if (ret < 0)
3976 		goto out;
3977 
3978 out:
3979 	return ret;
3980 }
3981 
3982 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3983 					  struct ieee80211_vif *vif,
3984 					  struct ieee80211_bss_conf *bss_conf,
3985 					  u32 changed)
3986 {
3987 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3988 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3989 	int ret = 0;
3990 
3991 	if (changed & BSS_CHANGED_BEACON_INT) {
3992 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3993 			bss_conf->beacon_int);
3994 
3995 		wlvif->beacon_int = bss_conf->beacon_int;
3996 	}
3997 
3998 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3999 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4000 
4001 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4002 	}
4003 
4004 	if (changed & BSS_CHANGED_BEACON) {
4005 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4006 		if (ret < 0)
4007 			goto out;
4008 	}
4009 
4010 out:
4011 	if (ret != 0)
4012 		wl1271_error("beacon info change failed: %d", ret);
4013 	return ret;
4014 }
4015 
4016 /* AP mode changes */
4017 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4018 				       struct ieee80211_vif *vif,
4019 				       struct ieee80211_bss_conf *bss_conf,
4020 				       u32 changed)
4021 {
4022 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4023 	int ret = 0;
4024 
4025 	if (changed & BSS_CHANGED_BASIC_RATES) {
4026 		u32 rates = bss_conf->basic_rates;
4027 
4028 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4029 								 wlvif->band);
4030 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4031 							wlvif->basic_rate_set);
4032 
4033 		ret = wl1271_init_ap_rates(wl, wlvif);
4034 		if (ret < 0) {
4035 			wl1271_error("AP rate policy change failed %d", ret);
4036 			goto out;
4037 		}
4038 
4039 		ret = wl1271_ap_init_templates(wl, vif);
4040 		if (ret < 0)
4041 			goto out;
4042 
4043 		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4044 		if (ret < 0)
4045 			goto out;
4046 
4047 		ret = wlcore_set_beacon_template(wl, vif, true);
4048 		if (ret < 0)
4049 			goto out;
4050 	}
4051 
4052 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4053 	if (ret < 0)
4054 		goto out;
4055 
4056 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4057 		if (bss_conf->enable_beacon) {
4058 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4059 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4060 				if (ret < 0)
4061 					goto out;
4062 
4063 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4064 				if (ret < 0)
4065 					goto out;
4066 
4067 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4068 				wl1271_debug(DEBUG_AP, "started AP");
4069 			}
4070 		} else {
4071 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4072 				/*
4073 				 * The AP might be in ROC in case we have just
4074 				 * sent an auth reply; handle it.
4075 				 */
4076 				if (test_bit(wlvif->role_id, wl->roc_map))
4077 					wl12xx_croc(wl, wlvif->role_id);
4078 
4079 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4080 				if (ret < 0)
4081 					goto out;
4082 
4083 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4084 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4085 					  &wlvif->flags);
4086 				wl1271_debug(DEBUG_AP, "stopped AP");
4087 			}
4088 		}
4089 	}
4090 
4091 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4092 	if (ret < 0)
4093 		goto out;
4094 
4095 	/* Handle HT information change */
4096 	if ((changed & BSS_CHANGED_HT) &&
4097 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4098 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4099 					bss_conf->ht_operation_mode);
4100 		if (ret < 0) {
4101 			wl1271_warning("Set ht information failed %d", ret);
4102 			goto out;
4103 		}
4104 	}
4105 
4106 out:
4107 	return;
4108 }
4109 
4110 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4111 			    struct ieee80211_bss_conf *bss_conf,
4112 			    u32 sta_rate_set)
4113 {
4114 	u32 rates;
4115 	int ret;
4116 
4117 	wl1271_debug(DEBUG_MAC80211,
4118 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4119 	     bss_conf->bssid, bss_conf->aid,
4120 	     bss_conf->beacon_int,
4121 	     bss_conf->basic_rates, sta_rate_set);
4122 
4123 	wlvif->beacon_int = bss_conf->beacon_int;
4124 	rates = bss_conf->basic_rates;
4125 	wlvif->basic_rate_set =
4126 		wl1271_tx_enabled_rates_get(wl, rates,
4127 					    wlvif->band);
4128 	wlvif->basic_rate =
4129 		wl1271_tx_min_rate_get(wl,
4130 				       wlvif->basic_rate_set);
4131 
4132 	if (sta_rate_set)
4133 		wlvif->rate_set =
4134 			wl1271_tx_enabled_rates_get(wl,
4135 						sta_rate_set,
4136 						wlvif->band);
4137 
4138 	/* we only support sched_scan while not connected */
4139 	if (wl->sched_vif == wlvif)
4140 		wl->ops->sched_scan_stop(wl, wlvif);
4141 
4142 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4143 	if (ret < 0)
4144 		return ret;
4145 
4146 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4147 	if (ret < 0)
4148 		return ret;
4149 
4150 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4151 	if (ret < 0)
4152 		return ret;
4153 
4154 	wlcore_set_ssid(wl, wlvif);
4155 
4156 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4157 
4158 	return 0;
4159 }
4160 
4161 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4162 {
4163 	int ret;
4164 
4165 	/* revert back to minimum rates for the current band */
4166 	wl1271_set_band_rate(wl, wlvif);
4167 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4168 
4169 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4170 	if (ret < 0)
4171 		return ret;
4172 
4173 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4174 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4175 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4176 		if (ret < 0)
4177 			return ret;
4178 	}
4179 
4180 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4181 	return 0;
4182 }
4183 /* STA/IBSS mode changes */
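/*
 * Several of the checks below only mark do_join; the actual
 * wlcore_join() is issued once, after the BSS parameters (rates,
 * BSSID, beacon interval) have been programmed.
 */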
4184 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4185 					struct ieee80211_vif *vif,
4186 					struct ieee80211_bss_conf *bss_conf,
4187 					u32 changed)
4188 {
4189 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4190 	bool do_join = false;
4191 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4192 	bool ibss_joined = false;
4193 	u32 sta_rate_set = 0;
4194 	int ret;
4195 	struct ieee80211_sta *sta;
4196 	bool sta_exists = false;
4197 	struct ieee80211_sta_ht_cap sta_ht_cap;
4198 
4199 	if (is_ibss) {
4200 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4201 						     changed);
4202 		if (ret < 0)
4203 			goto out;
4204 	}
4205 
4206 	if (changed & BSS_CHANGED_IBSS) {
4207 		if (bss_conf->ibss_joined) {
4208 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4209 			ibss_joined = true;
4210 		} else {
4211 			wlcore_unset_assoc(wl, wlvif);
4212 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4213 		}
4214 	}
4215 
4216 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4217 		do_join = true;
4218 
4219 	/* Need to update the SSID (for filtering etc) */
4220 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4221 		do_join = true;
4222 
4223 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4224 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4225 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4226 
4227 		do_join = true;
4228 	}
4229 
4230 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4231 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4232 
4233 	if (changed & BSS_CHANGED_CQM) {
4234 		bool enable = false;
4235 		if (bss_conf->cqm_rssi_thold)
4236 			enable = true;
4237 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4238 						  bss_conf->cqm_rssi_thold,
4239 						  bss_conf->cqm_rssi_hyst);
4240 		if (ret < 0)
4241 			goto out;
4242 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4243 	}
4244 
4245 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4246 		       BSS_CHANGED_ASSOC)) {
4247 		rcu_read_lock();
4248 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4249 		if (sta) {
4250 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4251 
4252 			/* save the supp_rates of the ap */
4253 			sta_rate_set = sta->supp_rates[wlvif->band];
4254 			if (sta->ht_cap.ht_supported)
4255 				sta_rate_set |=
4256 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4257 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4258 			sta_ht_cap = sta->ht_cap;
4259 			sta_exists = true;
4260 		}
4261 
4262 		rcu_read_unlock();
4263 	}
4264 
4265 	if (changed & BSS_CHANGED_BSSID) {
4266 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4267 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4268 					       sta_rate_set);
4269 			if (ret < 0)
4270 				goto out;
4271 
4272 			/* Need to update the BSSID (for filtering etc) */
4273 			do_join = true;
4274 		} else {
4275 			ret = wlcore_clear_bssid(wl, wlvif);
4276 			if (ret < 0)
4277 				goto out;
4278 		}
4279 	}
4280 
4281 	if (changed & BSS_CHANGED_IBSS) {
4282 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4283 			     bss_conf->ibss_joined);
4284 
4285 		if (bss_conf->ibss_joined) {
4286 			u32 rates = bss_conf->basic_rates;
4287 			wlvif->basic_rate_set =
4288 				wl1271_tx_enabled_rates_get(wl, rates,
4289 							    wlvif->band);
4290 			wlvif->basic_rate =
4291 				wl1271_tx_min_rate_get(wl,
4292 						       wlvif->basic_rate_set);
4293 
4294 			/* by default, use 11b + OFDM rates */
4295 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4296 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4297 			if (ret < 0)
4298 				goto out;
4299 		}
4300 	}
4301 
4302 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4303 	if (ret < 0)
4304 		goto out;
4305 
4306 	if (do_join) {
4307 		ret = wlcore_join(wl, wlvif);
4308 		if (ret < 0) {
4309 			wl1271_warning("cmd join failed %d", ret);
4310 			goto out;
4311 		}
4312 	}
4313 
4314 	if (changed & BSS_CHANGED_ASSOC) {
4315 		if (bss_conf->assoc) {
4316 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4317 					       sta_rate_set);
4318 			if (ret < 0)
4319 				goto out;
4320 
4321 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4322 				wl12xx_set_authorized(wl, wlvif);
4323 		} else {
4324 			wlcore_unset_assoc(wl, wlvif);
4325 		}
4326 	}
4327 
4328 	if (changed & BSS_CHANGED_PS) {
4329 		if ((bss_conf->ps) &&
4330 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4331 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4332 			int ps_mode;
4333 			char *ps_mode_str;
4334 
4335 			if (wl->conf.conn.forced_ps) {
4336 				ps_mode = STATION_POWER_SAVE_MODE;
4337 				ps_mode_str = "forced";
4338 			} else {
4339 				ps_mode = STATION_AUTO_PS_MODE;
4340 				ps_mode_str = "auto";
4341 			}
4342 
4343 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4344 
4345 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4346 			if (ret < 0)
4347 				wl1271_warning("enter %s ps failed %d",
4348 					       ps_mode_str, ret);
4349 		} else if (!bss_conf->ps &&
4350 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4351 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4352 
4353 			ret = wl1271_ps_set_mode(wl, wlvif,
4354 						 STATION_ACTIVE_MODE);
4355 			if (ret < 0)
4356 				wl1271_warning("exit auto ps failed %d", ret);
4357 		}
4358 	}
4359 
4360 	/* Handle new association with HT. Do this after join. */
4361 	if (sta_exists) {
4362 		bool enabled =
4363 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4364 
4365 		ret = wlcore_hw_set_peer_cap(wl,
4366 					     &sta_ht_cap,
4367 					     enabled,
4368 					     wlvif->rate_set,
4369 					     wlvif->sta.hlid);
4370 		if (ret < 0) {
4371 			wl1271_warning("Set ht cap failed %d", ret);
4372 			goto out;
4373 
4374 		}
4375 
4376 		if (enabled) {
4377 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4378 						bss_conf->ht_operation_mode);
4379 			if (ret < 0) {
4380 				wl1271_warning("Set ht information failed %d",
4381 					       ret);
4382 				goto out;
4383 			}
4384 		}
4385 	}
4386 
4387 	/* Handle ARP filtering. Done after join. */
4388 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4389 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4390 		__be32 addr = bss_conf->arp_addr_list[0];
4391 		wlvif->sta.qos = bss_conf->qos;
4392 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4393 
4394 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4395 			wlvif->ip_addr = addr;
4396 			/*
4397 			 * The template should have been configured only upon
4398 			 * association. However, it seems that the correct IP
4399 			 * isn't being set (when sending), so we have to
4400 			 * reconfigure the template upon every IP change.
4401 			 */
4402 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4403 			if (ret < 0) {
4404 				wl1271_warning("build arp rsp failed: %d", ret);
4405 				goto out;
4406 			}
4407 
4408 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4409 				(ACX_ARP_FILTER_ARP_FILTERING |
4410 				 ACX_ARP_FILTER_AUTO_ARP),
4411 				addr);
4412 		} else {
4413 			wlvif->ip_addr = 0;
4414 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4415 		}
4416 
4417 		if (ret < 0)
4418 			goto out;
4419 	}
4420 
4421 out:
4422 	return;
4423 }
4424 
4425 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4426 				       struct ieee80211_vif *vif,
4427 				       struct ieee80211_bss_conf *bss_conf,
4428 				       u32 changed)
4429 {
4430 	struct wl1271 *wl = hw->priv;
4431 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4432 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4433 	int ret;
4434 
4435 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4436 		     wlvif->role_id, (int)changed);
4437 
4438 	/*
4439 	 * make sure to cancel pending disconnections if our association
4440 	 * state changed
4441 	 */
4442 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4443 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4444 
4445 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4446 	    !bss_conf->enable_beacon)
4447 		wl1271_tx_flush(wl);
4448 
4449 	mutex_lock(&wl->mutex);
4450 
4451 	if (unlikely(wl->state != WLCORE_STATE_ON))
4452 		goto out;
4453 
4454 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4455 		goto out;
4456 
4457 	ret = wl1271_ps_elp_wakeup(wl);
4458 	if (ret < 0)
4459 		goto out;
4460 
4461 	if (is_ap)
4462 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4463 	else
4464 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4465 
4466 	wl1271_ps_elp_sleep(wl);
4467 
4468 out:
4469 	mutex_unlock(&wl->mutex);
4470 }
4471 
4472 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4473 				 struct ieee80211_chanctx_conf *ctx)
4474 {
4475 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4476 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4477 		     cfg80211_get_chandef_type(&ctx->def));
4478 	return 0;
4479 }
4480 
4481 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4482 				     struct ieee80211_chanctx_conf *ctx)
4483 {
4484 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4485 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4486 		     cfg80211_get_chandef_type(&ctx->def));
4487 }
4488 
4489 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4490 				     struct ieee80211_chanctx_conf *ctx,
4491 				     u32 changed)
4492 {
4493 	wl1271_debug(DEBUG_MAC80211,
4494 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4495 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4496 		     cfg80211_get_chandef_type(&ctx->def), changed);
4497 }
4498 
4499 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4500 					struct ieee80211_vif *vif,
4501 					struct ieee80211_chanctx_conf *ctx)
4502 {
4503 	struct wl1271 *wl = hw->priv;
4504 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4505 	int channel = ieee80211_frequency_to_channel(
4506 		ctx->def.chan->center_freq);
4507 
4508 	wl1271_debug(DEBUG_MAC80211,
4509 		     "mac80211 assign chanctx (role %d) %d (type %d)",
4510 		     wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4511 
4512 	mutex_lock(&wl->mutex);
4513 
4514 	wlvif->band = ctx->def.chan->band;
4515 	wlvif->channel = channel;
4516 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4517 
4518 	/* update default rates according to the band */
4519 	wl1271_set_band_rate(wl, wlvif);
4520 
4521 	mutex_unlock(&wl->mutex);
4522 
4523 	return 0;
4524 }
4525 
4526 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4527 					   struct ieee80211_vif *vif,
4528 					   struct ieee80211_chanctx_conf *ctx)
4529 {
4530 	struct wl1271 *wl = hw->priv;
4531 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4532 
4533 	wl1271_debug(DEBUG_MAC80211,
4534 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4535 		     wlvif->role_id,
4536 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4537 		     cfg80211_get_chandef_type(&ctx->def));
4538 
4539 	wl1271_tx_flush(wl);
4540 }
4541 
4542 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4543 			     struct ieee80211_vif *vif, u16 queue,
4544 			     const struct ieee80211_tx_queue_params *params)
4545 {
4546 	struct wl1271 *wl = hw->priv;
4547 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4548 	u8 ps_scheme;
4549 	int ret = 0;
4550 
4551 	mutex_lock(&wl->mutex);
4552 
4553 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4554 
4555 	if (params->uapsd)
4556 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4557 	else
4558 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4559 
4560 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4561 		goto out;
4562 
4563 	ret = wl1271_ps_elp_wakeup(wl);
4564 	if (ret < 0)
4565 		goto out;
4566 
4567 	/*
4568 	 * mac80211 passes the txop in units of 32us, while the firmware
4569 	 * expects microseconds, hence the << 5 (i.e. * 32) below
4570 	 */
4571 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4572 				params->cw_min, params->cw_max,
4573 				params->aifs, params->txop << 5);
4574 	if (ret < 0)
4575 		goto out_sleep;
4576 
4577 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4578 				 CONF_CHANNEL_TYPE_EDCF,
4579 				 wl1271_tx_get_queue(queue),
4580 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4581 				 0, 0);
4582 
4583 out_sleep:
4584 	wl1271_ps_elp_sleep(wl);
4585 
4586 out:
4587 	mutex_unlock(&wl->mutex);
4588 
4589 	return ret;
4590 }
4591 
4592 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4593 			     struct ieee80211_vif *vif)
4594 {
4595 
4596 	struct wl1271 *wl = hw->priv;
4597 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4598 	u64 mactime = ULLONG_MAX;
4599 	int ret;
4600 
4601 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4602 
4603 	mutex_lock(&wl->mutex);
4604 
4605 	if (unlikely(wl->state != WLCORE_STATE_ON))
4606 		goto out;
4607 
4608 	ret = wl1271_ps_elp_wakeup(wl);
4609 	if (ret < 0)
4610 		goto out;
4611 
4612 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4613 	if (ret < 0)
4614 		goto out_sleep;
4615 
4616 out_sleep:
4617 	wl1271_ps_elp_sleep(wl);
4618 
4619 out:
4620 	mutex_unlock(&wl->mutex);
4621 	return mactime;
4622 }
4623 
4624 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4625 				struct survey_info *survey)
4626 {
4627 	struct ieee80211_conf *conf = &hw->conf;
4628 
4629 	if (idx != 0)
4630 		return -ENOENT;
4631 
4632 	survey->channel = conf->chandef.chan;
4633 	survey->filled = 0;
4634 	return 0;
4635 }
4636 
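/*
 * Reserve a host link id (HLID) for a new station in AP mode.  The
 * HLID is kept in the station's drv_priv area (struct wl1271_station)
 * so that later calls can map the ieee80211_sta back to its firmware
 * link.
 */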
4637 static int wl1271_allocate_sta(struct wl1271 *wl,
4638 			     struct wl12xx_vif *wlvif,
4639 			     struct ieee80211_sta *sta)
4640 {
4641 	struct wl1271_station *wl_sta;
4642 	int ret;
4643 
4644 
4645 	if (wl->active_sta_count >= AP_MAX_STATIONS) {
4646 		wl1271_warning("could not allocate HLID - too many stations");
4647 		return -EBUSY;
4648 	}
4649 
4650 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4651 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4652 	if (ret < 0) {
4653 		wl1271_warning("could not allocate HLID - too many links");
4654 		return -EBUSY;
4655 	}
4656 
4657 	/* use the previous security seq, if this is a recovery/resume */
4658 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4659 
4660 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4661 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4662 	wl->active_sta_count++;
4663 	return 0;
4664 }
4665 
4666 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4667 {
4668 	struct wl1271_station *wl_sta;
4669 	struct ieee80211_sta *sta;
4670 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4671 
4672 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4673 		return;
4674 
4675 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4676 	__clear_bit(hlid, &wl->ap_ps_map);
4677 	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4678 
4679 	/*
4680 	 * save the last used PN in the private part of ieee80211_sta,
4681 	 * in case of recovery/suspend
4682 	 */
4683 	rcu_read_lock();
4684 	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
4685 	if (sta) {
4686 		wl_sta = (void *)sta->drv_priv;
4687 		wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
4688 
4689 		/*
4690 		 * increment the initial seq number on recovery to account for
4691 		 * transmitted packets that we haven't yet got in the FW status
4692 		 */
4693 		if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
4694 			wl_sta->total_freed_pkts +=
4695 					WL1271_TX_SQN_POST_RECOVERY_PADDING;
4696 	}
4697 	rcu_read_unlock();
4698 
4699 	wl12xx_free_link(wl, wlvif, &hlid);
4700 	wl->active_sta_count--;
4701 
4702 	/*
4703 	 * rearm the tx watchdog when the last STA is freed - give the FW a
4704 	 * chance to return STA-buffered packets before complaining.
4705 	 */
4706 	if (wl->active_sta_count == 0)
4707 		wl12xx_rearm_tx_watchdog_locked(wl);
4708 }
4709 
4710 static int wl12xx_sta_add(struct wl1271 *wl,
4711 			  struct wl12xx_vif *wlvif,
4712 			  struct ieee80211_sta *sta)
4713 {
4714 	struct wl1271_station *wl_sta;
4715 	int ret = 0;
4716 	u8 hlid;
4717 
4718 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4719 
4720 	ret = wl1271_allocate_sta(wl, wlvif, sta);
4721 	if (ret < 0)
4722 		return ret;
4723 
4724 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4725 	hlid = wl_sta->hlid;
4726 
4727 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4728 	if (ret < 0)
4729 		wl1271_free_sta(wl, wlvif, hlid);
4730 
4731 	return ret;
4732 }
4733 
4734 static int wl12xx_sta_remove(struct wl1271 *wl,
4735 			     struct wl12xx_vif *wlvif,
4736 			     struct ieee80211_sta *sta)
4737 {
4738 	struct wl1271_station *wl_sta;
4739 	int ret = 0, id;
4740 
4741 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4742 
4743 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4744 	id = wl_sta->hlid;
4745 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4746 		return -EINVAL;
4747 
4748 	ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4749 	if (ret < 0)
4750 		return ret;
4751 
4752 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4753 	return ret;
4754 }
4755 
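/*
 * Start a ROC (remain on channel) on our role while a station is
 * connecting, unless some ROC is already active or the role id is
 * invalid.
 */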
4756 static void wlcore_roc_if_possible(struct wl1271 *wl,
4757 				   struct wl12xx_vif *wlvif)
4758 {
4759 	if (find_first_bit(wl->roc_map,
4760 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4761 		return;
4762 
4763 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4764 		return;
4765 
4766 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4767 }
4768 
4769 /*
4770  * when wl_sta is NULL, we treat this call as if coming from a
4771  * pending auth reply.
4772  * wl->mutex must be taken and the FW must be awake when the call
4773  * takes place.
4774  */
4775 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4776 			      struct wl1271_station *wl_sta, bool in_conn)
4777 {
4778 	if (in_conn) {
4779 		if (WARN_ON(wl_sta && wl_sta->in_connection))
4780 			return;
4781 
4782 		if (!wlvif->ap_pending_auth_reply &&
4783 		    !wlvif->inconn_count)
4784 			wlcore_roc_if_possible(wl, wlvif);
4785 
4786 		if (wl_sta) {
4787 			wl_sta->in_connection = true;
4788 			wlvif->inconn_count++;
4789 		} else {
4790 			wlvif->ap_pending_auth_reply = true;
4791 		}
4792 	} else {
4793 		if (wl_sta && !wl_sta->in_connection)
4794 			return;
4795 
4796 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
4797 			return;
4798 
4799 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
4800 			return;
4801 
4802 		if (wl_sta) {
4803 			wl_sta->in_connection = false;
4804 			wlvif->inconn_count--;
4805 		} else {
4806 			wlvif->ap_pending_auth_reply = false;
4807 		}
4808 
4809 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4810 		    test_bit(wlvif->role_id, wl->roc_map))
4811 			wl12xx_croc(wl, wlvif->role_id);
4812 	}
4813 }
4814 
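/*
 * Translate mac80211 station state transitions into firmware actions:
 * in AP mode, NOTEXIST->NONE adds the peer, NONE->NOTEXIST removes it
 * and ->AUTHORIZED sends the peer state and HT capabilities; in STA
 * mode, ->AUTHORIZED authorizes the link, AUTHORIZED->ASSOC clears the
 * authorization flags, and ROCs are started/stopped around the
 * connection attempt.
 */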
4815 static int wl12xx_update_sta_state(struct wl1271 *wl,
4816 				   struct wl12xx_vif *wlvif,
4817 				   struct ieee80211_sta *sta,
4818 				   enum ieee80211_sta_state old_state,
4819 				   enum ieee80211_sta_state new_state)
4820 {
4821 	struct wl1271_station *wl_sta;
4822 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4823 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4824 	int ret;
4825 
4826 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4827 
4828 	/* Add station (AP mode) */
4829 	if (is_ap &&
4830 	    old_state == IEEE80211_STA_NOTEXIST &&
4831 	    new_state == IEEE80211_STA_NONE) {
4832 		ret = wl12xx_sta_add(wl, wlvif, sta);
4833 		if (ret)
4834 			return ret;
4835 
4836 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4837 	}
4838 
4839 	/* Remove station (AP mode) */
4840 	if (is_ap &&
4841 	    old_state == IEEE80211_STA_NONE &&
4842 	    new_state == IEEE80211_STA_NOTEXIST) {
4843 		/* must not fail */
4844 		wl12xx_sta_remove(wl, wlvif, sta);
4845 
4846 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4847 	}
4848 
4849 	/* Authorize station (AP mode) */
4850 	if (is_ap &&
4851 	    new_state == IEEE80211_STA_AUTHORIZED) {
4852 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4853 		if (ret < 0)
4854 			return ret;
4855 
4856 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4857 						     wl_sta->hlid);
4858 		if (ret)
4859 			return ret;
4860 
4861 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4862 	}
4863 
4864 	/* Authorize station */
4865 	if (is_sta &&
4866 	    new_state == IEEE80211_STA_AUTHORIZED) {
4867 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4868 		ret = wl12xx_set_authorized(wl, wlvif);
4869 		if (ret)
4870 			return ret;
4871 	}
4872 
4873 	if (is_sta &&
4874 	    old_state == IEEE80211_STA_AUTHORIZED &&
4875 	    new_state == IEEE80211_STA_ASSOC) {
4876 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4877 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4878 	}
4879 
4880 	/* clear ROCs on failure or authorization */
4881 	if (is_sta &&
4882 	    (new_state == IEEE80211_STA_AUTHORIZED ||
4883 	     new_state == IEEE80211_STA_NOTEXIST)) {
4884 		if (test_bit(wlvif->role_id, wl->roc_map))
4885 			wl12xx_croc(wl, wlvif->role_id);
4886 	}
4887 
4888 	if (is_sta &&
4889 	    old_state == IEEE80211_STA_NOTEXIST &&
4890 	    new_state == IEEE80211_STA_NONE) {
4891 		if (find_first_bit(wl->roc_map,
4892 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4893 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4894 			wl12xx_roc(wl, wlvif, wlvif->role_id,
4895 				   wlvif->band, wlvif->channel);
4896 		}
4897 	}
4898 	return 0;
4899 }
4900 
4901 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4902 			       struct ieee80211_vif *vif,
4903 			       struct ieee80211_sta *sta,
4904 			       enum ieee80211_sta_state old_state,
4905 			       enum ieee80211_sta_state new_state)
4906 {
4907 	struct wl1271 *wl = hw->priv;
4908 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4909 	int ret;
4910 
4911 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4912 		     sta->aid, old_state, new_state);
4913 
4914 	mutex_lock(&wl->mutex);
4915 
4916 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
4917 		ret = -EBUSY;
4918 		goto out;
4919 	}
4920 
4921 	ret = wl1271_ps_elp_wakeup(wl);
4922 	if (ret < 0)
4923 		goto out;
4924 
4925 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4926 
4927 	wl1271_ps_elp_sleep(wl);
4928 out:
4929 	mutex_unlock(&wl->mutex);
4930 	if (new_state < old_state)
4931 		return 0;
4932 	return ret;
4933 }
4934 
4935 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4936 				  struct ieee80211_vif *vif,
4937 				  enum ieee80211_ampdu_mlme_action action,
4938 				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4939 				  u8 buf_size)
4940 {
4941 	struct wl1271 *wl = hw->priv;
4942 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4943 	int ret;
4944 	u8 hlid, *ba_bitmap;
4945 
4946 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4947 		     tid);
4948 
4949 	/* sanity check - the fields in FW are only 8 bits wide */
4950 	if (WARN_ON(tid > 0xFF))
4951 		return -ENOTSUPP;
4952 
4953 	mutex_lock(&wl->mutex);
4954 
4955 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
4956 		ret = -EAGAIN;
4957 		goto out;
4958 	}
4959 
4960 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4961 		hlid = wlvif->sta.hlid;
4962 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4963 		struct wl1271_station *wl_sta;
4964 
4965 		wl_sta = (struct wl1271_station *)sta->drv_priv;
4966 		hlid = wl_sta->hlid;
4967 	} else {
4968 		ret = -EINVAL;
4969 		goto out;
4970 	}
4971 
4972 	ba_bitmap = &wl->links[hlid].ba_bitmap;
4973 
4974 	ret = wl1271_ps_elp_wakeup(wl);
4975 	if (ret < 0)
4976 		goto out;
4977 
4978 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4979 		     tid, action);
4980 
4981 	switch (action) {
4982 	case IEEE80211_AMPDU_RX_START:
4983 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
4984 			ret = -ENOTSUPP;
4985 			break;
4986 		}
4987 
4988 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
4989 			ret = -EBUSY;
4990 			wl1271_error("exceeded max RX BA sessions");
4991 			break;
4992 		}
4993 
4994 		if (*ba_bitmap & BIT(tid)) {
4995 			ret = -EINVAL;
4996 			wl1271_error("cannot enable RX BA session on active "
4997 				     "tid: %d", tid);
4998 			break;
4999 		}
5000 
5001 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5002 							 hlid);
5003 		if (!ret) {
5004 			*ba_bitmap |= BIT(tid);
5005 			wl->ba_rx_session_count++;
5006 		}
5007 		break;
5008 
5009 	case IEEE80211_AMPDU_RX_STOP:
5010 		if (!(*ba_bitmap & BIT(tid))) {
5011 			/*
5012 			 * this happens on reconfig - so only output a debug
5013 			 * message for now, and don't fail the function.
5014 			 */
5015 			wl1271_debug(DEBUG_MAC80211,
5016 				     "no active RX BA session on tid: %d",
5017 				     tid);
5018 			ret = 0;
5019 			break;
5020 		}
5021 
5022 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5023 							 hlid);
5024 		if (!ret) {
5025 			*ba_bitmap &= ~BIT(tid);
5026 			wl->ba_rx_session_count--;
5027 		}
5028 		break;
5029 
5030 	/*
5031 	 * BA initiator (TX) sessions are managed independently by the FW;
5032 	 * deliberately reject all TX AMPDU actions here.
5033 	 */
5034 	case IEEE80211_AMPDU_TX_START:
5035 	case IEEE80211_AMPDU_TX_STOP_CONT:
5036 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5037 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5038 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5039 		ret = -EINVAL;
5040 		break;
5041 
5042 	default:
5043 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5044 		ret = -EINVAL;
5045 	}
5046 
5047 	wl1271_ps_elp_sleep(wl);
5048 
5049 out:
5050 	mutex_unlock(&wl->mutex);
5051 
5052 	return ret;
5053 }
5054 
5055 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5056 				   struct ieee80211_vif *vif,
5057 				   const struct cfg80211_bitrate_mask *mask)
5058 {
5059 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5060 	struct wl1271 *wl = hw->priv;
5061 	int i, ret = 0;
5062 
5063 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5064 		mask->control[NL80211_BAND_2GHZ].legacy,
5065 		mask->control[NL80211_BAND_5GHZ].legacy);
5066 
5067 	mutex_lock(&wl->mutex);
5068 
5069 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5070 		wlvif->bitrate_masks[i] =
5071 			wl1271_tx_enabled_rates_get(wl,
5072 						    mask->control[i].legacy,
5073 						    i);
5074 
5075 	if (unlikely(wl->state != WLCORE_STATE_ON))
5076 		goto out;
5077 
5078 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5079 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5080 
5081 		ret = wl1271_ps_elp_wakeup(wl);
5082 		if (ret < 0)
5083 			goto out;
5084 
5085 		wl1271_set_band_rate(wl, wlvif);
5086 		wlvif->basic_rate =
5087 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5088 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5089 
5090 		wl1271_ps_elp_sleep(wl);
5091 	}
5092 out:
5093 	mutex_unlock(&wl->mutex);
5094 
5095 	return ret;
5096 }
5097 
5098 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5099 				     struct ieee80211_channel_switch *ch_switch)
5100 {
5101 	struct wl1271 *wl = hw->priv;
5102 	struct wl12xx_vif *wlvif;
5103 	int ret;
5104 
5105 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5106 
5107 	wl1271_tx_flush(wl);
5108 
5109 	mutex_lock(&wl->mutex);
5110 
5111 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5112 		wl12xx_for_each_wlvif_sta(wl, wlvif) {
5113 			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
5114 			ieee80211_chswitch_done(vif, false);
5115 		}
5116 		goto out;
5117 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5118 		goto out;
5119 	}
5120 
5121 	ret = wl1271_ps_elp_wakeup(wl);
5122 	if (ret < 0)
5123 		goto out;
5124 
5125 	/* TODO: change mac80211 to pass vif as param */
5126 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
5127 		unsigned long delay_usec;
5128 
5129 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5130 		if (ret)
5131 			goto out_sleep;
5132 
5133 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5134 
5135 		/* indicate failure 5 seconds after channel switch time */
5136 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5137 			     ch_switch->count;
5138 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5139 				usecs_to_jiffies(delay_usec) +
5140 				msecs_to_jiffies(5000));
5141 	}
5142 
5143 out_sleep:
5144 	wl1271_ps_elp_sleep(wl);
5145 
5146 out:
5147 	mutex_unlock(&wl->mutex);
5148 }
5149 
5150 static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
5151 {
5152 	struct wl1271 *wl = hw->priv;
5153 
5154 	wl1271_tx_flush(wl);
5155 }
5156 
5157 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5158 				       struct ieee80211_vif *vif,
5159 				       struct ieee80211_channel *chan,
5160 				       int duration,
5161 				       enum ieee80211_roc_type type)
5162 {
5163 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5164 	struct wl1271 *wl = hw->priv;
5165 	int channel, ret = 0;
5166 
5167 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5168 
5169 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5170 		     channel, wlvif->role_id);
5171 
5172 	mutex_lock(&wl->mutex);
5173 
5174 	if (unlikely(wl->state != WLCORE_STATE_ON))
5175 		goto out;
5176 
5177 	/* return EBUSY if we can't ROC right now */
5178 	if (WARN_ON(wl->roc_vif ||
5179 		    find_first_bit(wl->roc_map,
5180 				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5181 		ret = -EBUSY;
5182 		goto out;
5183 	}
5184 
5185 	ret = wl1271_ps_elp_wakeup(wl);
5186 	if (ret < 0)
5187 		goto out;
5188 
5189 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5190 	if (ret < 0)
5191 		goto out_sleep;
5192 
5193 	wl->roc_vif = vif;
5194 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5195 				     msecs_to_jiffies(duration));
5196 out_sleep:
5197 	wl1271_ps_elp_sleep(wl);
5198 out:
5199 	mutex_unlock(&wl->mutex);
5200 	return ret;
5201 }
5202 
5203 static int __wlcore_roc_completed(struct wl1271 *wl)
5204 {
5205 	struct wl12xx_vif *wlvif;
5206 	int ret;
5207 
5208 	/* already completed */
5209 	if (unlikely(!wl->roc_vif))
5210 		return 0;
5211 
5212 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5213 
5214 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5215 		return -EBUSY;
5216 
5217 	ret = wl12xx_stop_dev(wl, wlvif);
5218 	if (ret < 0)
5219 		return ret;
5220 
5221 	wl->roc_vif = NULL;
5222 
5223 	return 0;
5224 }
5225 
5226 static int wlcore_roc_completed(struct wl1271 *wl)
5227 {
5228 	int ret;
5229 
5230 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5231 
5232 	mutex_lock(&wl->mutex);
5233 
5234 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5235 		ret = -EBUSY;
5236 		goto out;
5237 	}
5238 
5239 	ret = wl1271_ps_elp_wakeup(wl);
5240 	if (ret < 0)
5241 		goto out;
5242 
5243 	ret = __wlcore_roc_completed(wl);
5244 
5245 	wl1271_ps_elp_sleep(wl);
5246 out:
5247 	mutex_unlock(&wl->mutex);
5248 
5249 	return ret;
5250 }
5251 
5252 static void wlcore_roc_complete_work(struct work_struct *work)
5253 {
5254 	struct delayed_work *dwork;
5255 	struct wl1271 *wl;
5256 	int ret;
5257 
5258 	dwork = container_of(work, struct delayed_work, work);
5259 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5260 
5261 	ret = wlcore_roc_completed(wl);
5262 	if (!ret)
5263 		ieee80211_remain_on_channel_expired(wl->hw);
5264 }
5265 
5266 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5267 {
5268 	struct wl1271 *wl = hw->priv;
5269 
5270 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5271 
5272 	/* TODO: per-vif */
5273 	wl1271_tx_flush(wl);
5274 
5275 	/*
5276 	 * we can't just flush_work here, because it might deadlock
5277 	 * (as we might get called from the same workqueue)
5278 	 */
5279 	cancel_delayed_work_sync(&wl->roc_complete_work);
5280 	wlcore_roc_completed(wl);
5281 
5282 	return 0;
5283 }
5284 
5285 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5286 				    struct ieee80211_vif *vif,
5287 				    struct ieee80211_sta *sta,
5288 				    u32 changed)
5289 {
5290 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5291 	struct wl1271 *wl = hw->priv;
5292 
5293 	wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5294 }
5295 
5296 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5297 			       struct ieee80211_vif *vif,
5298 			       struct ieee80211_sta *sta,
5299 			       s8 *rssi_dbm)
5300 {
5301 	struct wl1271 *wl = hw->priv;
5302 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5303 	int ret = 0;
5304 
5305 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5306 
5307 	mutex_lock(&wl->mutex);
5308 
5309 	if (unlikely(wl->state != WLCORE_STATE_ON))
5310 		goto out;
5311 
5312 	ret = wl1271_ps_elp_wakeup(wl);
5313 	if (ret < 0)
5314 		goto out_sleep;
5315 
5316 	ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5317 	if (ret < 0)
5318 		goto out_sleep;
5319 
5320 out_sleep:
5321 	wl1271_ps_elp_sleep(wl);
5322 
5323 out:
5324 	mutex_unlock(&wl->mutex);
5325 
5326 	return ret;
5327 }
5328 
5329 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5330 {
5331 	struct wl1271 *wl = hw->priv;
5332 	bool ret = false;
5333 
5334 	mutex_lock(&wl->mutex);
5335 
5336 	if (unlikely(wl->state != WLCORE_STATE_ON))
5337 		goto out;
5338 
5339 	/* packets are considered pending if in the TX queue or the FW */
5340 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5341 out:
5342 	mutex_unlock(&wl->mutex);
5343 
5344 	return ret;
5345 }
5346 
5347 /* can't be const, mac80211 writes to this */
5348 static struct ieee80211_rate wl1271_rates[] = {
5349 	{ .bitrate = 10,
5350 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5351 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5352 	{ .bitrate = 20,
5353 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5354 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5355 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5356 	{ .bitrate = 55,
5357 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5358 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5359 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5360 	{ .bitrate = 110,
5361 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5362 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5363 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5364 	{ .bitrate = 60,
5365 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5366 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5367 	{ .bitrate = 90,
5368 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5369 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5370 	{ .bitrate = 120,
5371 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5372 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5373 	{ .bitrate = 180,
5374 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5375 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5376 	{ .bitrate = 240,
5377 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5378 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5379 	{ .bitrate = 360,
5380 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5381 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5382 	{ .bitrate = 480,
5383 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5384 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5385 	{ .bitrate = 540,
5386 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5387 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5388 };
5389 
5390 /* can't be const, mac80211 writes to this */
5391 static struct ieee80211_channel wl1271_channels[] = {
5392 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5393 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5394 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5395 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5396 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5397 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5398 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5399 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5400 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5401 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5402 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5403 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5404 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5405 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5406 };
5407 
5408 /* can't be const, mac80211 writes to this */
5409 static struct ieee80211_supported_band wl1271_band_2ghz = {
5410 	.channels = wl1271_channels,
5411 	.n_channels = ARRAY_SIZE(wl1271_channels),
5412 	.bitrates = wl1271_rates,
5413 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5414 };
5415 
5416 /* 5 GHz data rates for WL1273 */
5417 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5418 	{ .bitrate = 60,
5419 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5420 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5421 	{ .bitrate = 90,
5422 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5423 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5424 	{ .bitrate = 120,
5425 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5426 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5427 	{ .bitrate = 180,
5428 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5429 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5430 	{ .bitrate = 240,
5431 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5432 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5433 	{ .bitrate = 360,
5434 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5435 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5436 	{ .bitrate = 480,
5437 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5438 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5439 	{ .bitrate = 540,
5440 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5441 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5442 };
5443 
5444 /* 5 GHz band channels for WL1273 */
5445 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5446 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5447 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5448 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5449 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5450 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5451 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5452 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5453 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5454 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5455 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5456 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5457 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5458 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5459 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5460 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5461 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5462 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5463 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5464 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5465 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5466 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5467 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5468 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5469 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5470 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5471 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5472 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5473 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5474 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5475 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5476 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5477 };
5478 
5479 static struct ieee80211_supported_band wl1271_band_5ghz = {
5480 	.channels = wl1271_channels_5ghz,
5481 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5482 	.bitrates = wl1271_rates_5ghz,
5483 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5484 };
5485 
5486 static const struct ieee80211_ops wl1271_ops = {
5487 	.start = wl1271_op_start,
5488 	.stop = wlcore_op_stop,
5489 	.add_interface = wl1271_op_add_interface,
5490 	.remove_interface = wl1271_op_remove_interface,
5491 	.change_interface = wl12xx_op_change_interface,
5492 #ifdef CONFIG_PM
5493 	.suspend = wl1271_op_suspend,
5494 	.resume = wl1271_op_resume,
5495 #endif
5496 	.config = wl1271_op_config,
5497 	.prepare_multicast = wl1271_op_prepare_multicast,
5498 	.configure_filter = wl1271_op_configure_filter,
5499 	.tx = wl1271_op_tx,
5500 	.set_key = wlcore_op_set_key,
5501 	.hw_scan = wl1271_op_hw_scan,
5502 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5503 	.sched_scan_start = wl1271_op_sched_scan_start,
5504 	.sched_scan_stop = wl1271_op_sched_scan_stop,
5505 	.bss_info_changed = wl1271_op_bss_info_changed,
5506 	.set_frag_threshold = wl1271_op_set_frag_threshold,
5507 	.set_rts_threshold = wl1271_op_set_rts_threshold,
5508 	.conf_tx = wl1271_op_conf_tx,
5509 	.get_tsf = wl1271_op_get_tsf,
5510 	.get_survey = wl1271_op_get_survey,
5511 	.sta_state = wl12xx_op_sta_state,
5512 	.ampdu_action = wl1271_op_ampdu_action,
5513 	.tx_frames_pending = wl1271_tx_frames_pending,
5514 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5515 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
5516 	.channel_switch = wl12xx_op_channel_switch,
5517 	.flush = wlcore_op_flush,
5518 	.remain_on_channel = wlcore_op_remain_on_channel,
5519 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5520 	.add_chanctx = wlcore_op_add_chanctx,
5521 	.remove_chanctx = wlcore_op_remove_chanctx,
5522 	.change_chanctx = wlcore_op_change_chanctx,
5523 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5524 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5525 	.sta_rc_update = wlcore_op_sta_rc_update,
5526 	.get_rssi = wlcore_op_get_rssi,
5527 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5528 };
5529 
5530 
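/*
 * Translate a HW rate index reported by the firmware into the
 * corresponding index in the mac80211 rate table, using the per-band
 * wl->band_rate_to_idx lookup table.
 */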
5531 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5532 {
5533 	u8 idx;
5534 
5535 	BUG_ON(band >= 2);
5536 
5537 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5538 		wl1271_error("Illegal RX rate from HW: %d", rate);
5539 		return 0;
5540 	}
5541 
5542 	idx = wl->band_rate_to_idx[band][rate];
5543 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5544 		wl1271_error("Unsupported RX rate from HW: %d", rate);
5545 		return 0;
5546 	}
5547 
5548 	return idx;
5549 }
5550 
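/*
 * Derive the interface MAC addresses from a base OUI/NIC pair by
 * incrementing the NIC part for each address, e.g. oui 0x080028,
 * nic 0x000001 yields 08:00:28:00:00:01, 08:00:28:00:00:02, ...
 * If fewer than WLCORE_NUM_MAC_ADDRESSES addresses are available
 * (wl->num_mac_addr), the last slot reuses the first address with the
 * locally administered (LAA) bit set.
 */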
5551 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5552 {
5553 	int i;
5554 
5555 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5556 		     oui, nic);
5557 
5558 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5559 		wl1271_warning("NIC part of the MAC address wraps around!");
5560 
5561 	for (i = 0; i < wl->num_mac_addr; i++) {
5562 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
5563 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
5564 		wl->addresses[i].addr[2] = (u8) oui;
5565 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
5566 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
5567 		wl->addresses[i].addr[5] = (u8) nic;
5568 		nic++;
5569 	}
5570 
5571 	/* we may be one address short at the most */
5572 	/* we may be at most one address short */
5573 
5574 	/*
5575 	 * turn on the LAA bit in the first address and use it as
5576 	 * the last address.
5577 	 */
5578 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5579 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5580 		memcpy(&wl->addresses[idx], &wl->addresses[0],
5581 		       sizeof(wl->addresses[0]));
5582 		/* set the locally administered (LAA) bit in the first octet */
5583 		wl->addresses[idx].addr[0] |= BIT(1);
5584 	}
5585 
5586 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5587 	wl->hw->wiphy->addresses = wl->addresses;
5588 }
5589 
5590 static int wl12xx_get_hw_info(struct wl1271 *wl)
5591 {
5592 	int ret;
5593 
5594 	ret = wl12xx_set_power_on(wl);
5595 	if (ret < 0)
5596 		return ret;
5597 
5598 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5599 	if (ret < 0)
5600 		goto out;
5601 
5602 	wl->fuse_oui_addr = 0;
5603 	wl->fuse_nic_addr = 0;
5604 
5605 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5606 	if (ret < 0)
5607 		goto out;
5608 
5609 	if (wl->ops->get_mac)
5610 		ret = wl->ops->get_mac(wl);
5611 
5612 out:
5613 	wl1271_power_off(wl);
5614 	return ret;
5615 }
5616 
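/*
 * Register the hw with mac80211.  The base MAC address is taken from
 * the NVS file when present and non-zero; otherwise it is derived
 * from the fuse registers (BD_ADDR + 1, since the WLAN addresses
 * follow the Bluetooth one).
 */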
5617 static int wl1271_register_hw(struct wl1271 *wl)
5618 {
5619 	int ret;
5620 	u32 oui_addr = 0, nic_addr = 0;
5621 
5622 	if (wl->mac80211_registered)
5623 		return 0;
5624 
5625 	if (wl->nvs_len >= 12) {
5626 		/* NOTE: to simplify the casting, we rely on the wl->nvs->nvs
5627 		 * element being first, i.e. at the beginning of the wl->nvs
5628 		 * structure.
5629 		 */
5630 		u8 *nvs_ptr = (u8 *)wl->nvs;
5631 
5632 		oui_addr =
5633 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5634 		nic_addr =
5635 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5636 	}
5637 
5638 	/* if the MAC address is zeroed in the NVS derive from fuse */
5639 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
5640 		oui_addr = wl->fuse_oui_addr;
5641 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
5642 		nic_addr = wl->fuse_nic_addr + 1;
5643 	}
5644 
5645 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5646 
5647 	ret = ieee80211_register_hw(wl->hw);
5648 	if (ret < 0) {
5649 		wl1271_error("unable to register mac80211 hw: %d", ret);
5650 		goto out;
5651 	}
5652 
5653 	wl->mac80211_registered = true;
5654 
5655 	wl1271_debugfs_init(wl);
5656 
5657 	wl1271_notice("loaded");
5658 
5659 out:
5660 	return ret;
5661 }
5662 
5663 static void wl1271_unregister_hw(struct wl1271 *wl)
5664 {
5665 	if (wl->plt)
5666 		wl1271_plt_stop(wl);
5667 
5668 	ieee80211_unregister_hw(wl->hw);
5669 	wl->mac80211_registered = false;
5671 }
5672 
5673 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5674 	{
5675 		.max = 3,
5676 		.types = BIT(NL80211_IFTYPE_STATION),
5677 	},
5678 	{
5679 		.max = 1,
5680 		.types = BIT(NL80211_IFTYPE_AP) |
5681 			 BIT(NL80211_IFTYPE_P2P_GO) |
5682 			 BIT(NL80211_IFTYPE_P2P_CLIENT),
5683 	},
5684 };
5685 
5686 static struct ieee80211_iface_combination
5687 wlcore_iface_combinations[] = {
5688 	{
5689 	  .max_interfaces = 3,
5690 	  .limits = wlcore_iface_limits,
5691 	  .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5692 	},
5693 };
5694 
5695 static int wl1271_init_ieee80211(struct wl1271 *wl)
5696 {
5697 	int i;
5698 	static const u32 cipher_suites[] = {
5699 		WLAN_CIPHER_SUITE_WEP40,
5700 		WLAN_CIPHER_SUITE_WEP104,
5701 		WLAN_CIPHER_SUITE_TKIP,
5702 		WLAN_CIPHER_SUITE_CCMP,
5703 		WL1271_CIPHER_SUITE_GEM,
5704 	};
5705 
5706 	/* The tx descriptor buffer */
5707 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5708 
5709 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5710 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5711 
5712 	/* channel_change_time is given in microseconds */
5713 	/* FIXME: find a proper value */
5714 	wl->hw->channel_change_time = 10000;
5715 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5716 
5717 	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5718 		IEEE80211_HW_SUPPORTS_PS |
5719 		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5720 		IEEE80211_HW_SUPPORTS_UAPSD |
5721 		IEEE80211_HW_HAS_RATE_CONTROL |
5722 		IEEE80211_HW_CONNECTION_MONITOR |
5723 		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5724 		IEEE80211_HW_SPECTRUM_MGMT |
5725 		IEEE80211_HW_AP_LINK_PS |
5726 		IEEE80211_HW_AMPDU_AGGREGATION |
5727 		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5728 		IEEE80211_HW_QUEUE_CONTROL;
5729 
5730 	wl->hw->wiphy->cipher_suites = cipher_suites;
5731 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5732 
5733 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5734 		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5735 		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5736 	wl->hw->wiphy->max_scan_ssids = 1;
5737 	wl->hw->wiphy->max_sched_scan_ssids = 16;
5738 	wl->hw->wiphy->max_match_sets = 16;
5739 	/*
5740 	 * The maximum IE length we can accept for scan probe request
5741 	 * templates is the maximum template size minus the 802.11 header
5742 	 * that the template itself already carries.
5743 	 */
5744 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5745 			sizeof(struct ieee80211_header);
5746 
5747 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5748 		sizeof(struct ieee80211_header);
5749 
5750 	wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5751 
5752 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5753 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5754 				WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
5755 
5756 	/* make sure all our channels fit in the scanned_ch bitmask */
5757 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5758 		     ARRAY_SIZE(wl1271_channels_5ghz) >
5759 		     WL1271_MAX_CHANNELS);
5760 	/*
5761 	 * clear channel flags from the previous usage
5762 	 * and restore max_power & max_antenna_gain values.
5763 	 */
5764 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5765 		wl1271_band_2ghz.channels[i].flags = 0;
5766 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5767 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5768 	}
5769 
5770 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5771 		wl1271_band_5ghz.channels[i].flags = 0;
5772 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5773 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5774 	}
5775 
5776 	/*
5777 	 * We keep local copies of the band structs because we need to
5778 	 * modify them on a per-device basis.
5779 	 */
5780 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5781 	       sizeof(wl1271_band_2ghz));
5782 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5783 	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
5784 	       sizeof(*wl->ht_cap));
5785 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5786 	       sizeof(wl1271_band_5ghz));
5787 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5788 	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
5789 	       sizeof(*wl->ht_cap));
5790 
5791 	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5792 		&wl->bands[IEEE80211_BAND_2GHZ];
5793 	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5794 		&wl->bands[IEEE80211_BAND_5GHZ];
5795 
5796 	/*
5797 	 * allow 4 queues per mac address we support +
5798 	 * 1 cab queue per mac + one global offchannel Tx queue
5799 	 */
5800 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5801 
5802 	/* the last queue is the offchannel queue */
5803 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5804 	wl->hw->max_rates = 1;
5805 
5806 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5807 
5808 	/* the FW answers probe-requests in AP-mode */
5809 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5810 	wl->hw->wiphy->probe_resp_offload =
5811 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5812 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5813 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5814 
5815 	/* allowed interface combinations */
5816 	wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5817 	wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5818 	wl->hw->wiphy->n_iface_combinations =
5819 		ARRAY_SIZE(wlcore_iface_combinations);
5820 
5821 	SET_IEEE80211_DEV(wl->hw, wl->dev);
5822 
5823 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
5824 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5825 
5826 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5827 
5828 	return 0;
5829 }
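
/*
 * Worked example for the queue budget above, using the values these macros
 * have elsewhere in wlcore (NUM_TX_QUEUES == 4, WLCORE_NUM_MAC_ADDRESSES == 3,
 * stated here as an assumption): hw->queues = (4 + 1) * 3 + 1 = 16, i.e. four
 * AC queues plus one CAB queue per supported MAC address, with queue 15 as
 * the single shared offchannel TX queue.
 */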
5830 
5831 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5832 				     u32 mbox_size)
5833 {
5834 	struct ieee80211_hw *hw;
5835 	struct wl1271 *wl;
5836 	int i, j, ret;
5837 	unsigned int order;
5838 
5839 	BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5840 
5841 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5842 	if (!hw) {
5843 		wl1271_error("could not alloc ieee80211_hw");
5844 		ret = -ENOMEM;
5845 		goto err_hw_alloc;
5846 	}
5847 
5848 	wl = hw->priv;
5849 	memset(wl, 0, sizeof(*wl));
5850 
5851 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
5852 	if (!wl->priv) {
5853 		wl1271_error("could not alloc wl priv");
5854 		ret = -ENOMEM;
5855 		goto err_priv_alloc;
5856 	}
5857 
5858 	INIT_LIST_HEAD(&wl->wlvif_list);
5859 
5860 	wl->hw = hw;
5861 
5862 	for (i = 0; i < NUM_TX_QUEUES; i++)
5863 		for (j = 0; j < WL12XX_MAX_LINKS; j++)
5864 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
5865 
5866 	skb_queue_head_init(&wl->deferred_rx_queue);
5867 	skb_queue_head_init(&wl->deferred_tx_queue);
5868 
5869 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5870 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5871 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
5872 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5873 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5874 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5875 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5876 
5877 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5878 	if (!wl->freezable_wq) {
5879 		ret = -ENOMEM;
5880 		goto err_hw;
5881 	}
5882 
5883 	wl->channel = 0;
5884 	wl->rx_counter = 0;
5885 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5886 	wl->band = IEEE80211_BAND_2GHZ;
5887 	wl->channel_type = NL80211_CHAN_NO_HT;
5888 	wl->flags = 0;
5889 	wl->sg_enabled = true;
5890 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
5891 	wl->recovery_count = 0;
5892 	wl->hw_pg_ver = -1;
5893 	wl->ap_ps_map = 0;
5894 	wl->ap_fw_ps_map = 0;
5895 	wl->quirks = 0;
5896 	wl->platform_quirks = 0;
5897 	wl->system_hlid = WL12XX_SYSTEM_HLID;
5898 	wl->active_sta_count = 0;
5899 	wl->active_link_count = 0;
5900 	wl->fwlog_size = 0;
5901 	init_waitqueue_head(&wl->fwlog_waitq);
5902 
5903 	/* The system link is always allocated */
5904 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5905 
5906 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5907 	for (i = 0; i < wl->num_tx_desc; i++)
5908 		wl->tx_frames[i] = NULL;
5909 
5910 	spin_lock_init(&wl->wl_lock);
5911 
5912 	wl->state = WLCORE_STATE_OFF;
5913 	wl->fw_type = WL12XX_FW_TYPE_NONE;
5914 	mutex_init(&wl->mutex);
5915 	mutex_init(&wl->flush_mutex);
5916 	init_completion(&wl->nvs_loading_complete);
5917 
5918 	order = get_order(aggr_buf_size);
5919 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5920 	if (!wl->aggr_buf) {
5921 		ret = -ENOMEM;
5922 		goto err_wq;
5923 	}
5924 	wl->aggr_buf_size = aggr_buf_size;
5925 
5926 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5927 	if (!wl->dummy_packet) {
5928 		ret = -ENOMEM;
5929 		goto err_aggr;
5930 	}
5931 
5932 	/* Allocate one page for the FW log */
5933 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5934 	if (!wl->fwlog) {
5935 		ret = -ENOMEM;
5936 		goto err_dummy_packet;
5937 	}
5938 
5939 	wl->mbox_size = mbox_size;
5940 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5941 	if (!wl->mbox) {
5942 		ret = -ENOMEM;
5943 		goto err_fwlog;
5944 	}
5945 
5946 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5947 	if (!wl->buffer_32) {
5948 		ret = -ENOMEM;
5949 		goto err_mbox;
5950 	}
5951 
5952 	return hw;
5953 
5954 err_mbox:
5955 	kfree(wl->mbox);
5956 
5957 err_fwlog:
5958 	free_page((unsigned long)wl->fwlog);
5959 
5960 err_dummy_packet:
5961 	dev_kfree_skb(wl->dummy_packet);
5962 
5963 err_aggr:
5964 	free_pages((unsigned long)wl->aggr_buf, order);
5965 
5966 err_wq:
5967 	destroy_workqueue(wl->freezable_wq);
5968 
5969 err_hw:
5970 	wl1271_debugfs_exit(wl);
5971 	kfree(wl->priv);
5972 
5973 err_priv_alloc:
5974 	ieee80211_free_hw(hw);
5975 
5976 err_hw_alloc:
5977 
5978 	return ERR_PTR(ret);
5979 }
5980 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5981 
5982 int wlcore_free_hw(struct wl1271 *wl)
5983 {
5984 	/* Unblock any fwlog readers */
5985 	mutex_lock(&wl->mutex);
5986 	wl->fwlog_size = -1;
5987 	wake_up_interruptible_all(&wl->fwlog_waitq);
5988 	mutex_unlock(&wl->mutex);
5989 
5990 	wlcore_sysfs_free(wl);
5991 
5992 	kfree(wl->buffer_32);
5993 	kfree(wl->mbox);
5994 	free_page((unsigned long)wl->fwlog);
5995 	dev_kfree_skb(wl->dummy_packet);
5996 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5997 
5998 	wl1271_debugfs_exit(wl);
5999 
6000 	vfree(wl->fw);
6001 	wl->fw = NULL;
6002 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6003 	kfree(wl->nvs);
6004 	wl->nvs = NULL;
6005 
6006 	kfree(wl->fw_status_1);
6007 	kfree(wl->tx_res_if);
6008 	destroy_workqueue(wl->freezable_wq);
6009 
6010 	kfree(wl->priv);
6011 	ieee80211_free_hw(wl->hw);
6012 
6013 	return 0;
6014 }
6015 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6016 
6017 #ifdef CONFIG_PM
6018 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6019 	.flags = WIPHY_WOWLAN_ANY,
6020 	.n_patterns = WL1271_MAX_RX_FILTERS,
6021 	.pattern_min_len = 1,
6022 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6023 };
6024 #endif
6025 
6026 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6027 {
6028 	return IRQ_WAKE_THREAD;
6029 }
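
/*
 * This trivial primary handler exists because the genirq core rejects a
 * threaded IRQ with a NULL primary handler unless IRQF_ONESHOT is set. On
 * platforms with the WL12XX_PLATFORM_QUIRK_EDGE_IRQ quirk the interrupt is
 * requested as a rising-edge trigger without IRQF_ONESHOT (see
 * wlcore_nvs_cb() below), so all it has to do is wake the threaded handler.
 * Sketch of the resulting registration, mirroring the call below:
 *
 *	ret = request_threaded_irq(wl->irq, wlcore_hardirq, wlcore_irq,
 *				   IRQF_TRIGGER_RISING, pdev->name, wl);
 */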
6030 
6031 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6032 {
6033 	struct wl1271 *wl = context;
6034 	struct platform_device *pdev = wl->pdev;
6035 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6036 	struct wl12xx_platform_data *pdata = pdev_data->pdata;
6037 	unsigned long irqflags;
6038 	int ret;
6039 	irq_handler_t hardirq_fn = NULL;
6040 
6041 	if (fw) {
6042 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6043 		if (!wl->nvs) {
6044 			wl1271_error("Could not allocate nvs data");
6045 			goto out;
6046 		}
6047 		wl->nvs_len = fw->size;
6048 	} else {
6049 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6050 			     WL12XX_NVS_NAME);
6051 		wl->nvs = NULL;
6052 		wl->nvs_len = 0;
6053 	}
6054 
6055 	ret = wl->ops->setup(wl);
6056 	if (ret < 0)
6057 		goto out_free_nvs;
6058 
6059 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6060 
6061 	/* adjust some runtime configuration parameters */
6062 	wlcore_adjust_conf(wl);
6063 
6064 	wl->irq = platform_get_irq(pdev, 0);
6065 	wl->platform_quirks = pdata->platform_quirks;
6066 	wl->if_ops = pdev_data->if_ops;
6067 
6068 	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6069 		irqflags = IRQF_TRIGGER_RISING;
6070 		hardirq_fn = wlcore_hardirq;
6071 	} else {
6072 		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6073 	}
6074 
6075 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6076 				   irqflags, pdev->name, wl);
6077 	if (ret < 0) {
6078 		wl1271_error("request_irq() failed: %d", ret);
6079 		goto out_free_nvs;
6080 	}
6081 
6082 #ifdef CONFIG_PM
6083 	ret = enable_irq_wake(wl->irq);
6084 	if (!ret) {
6085 		wl->irq_wake_enabled = true;
6086 		device_init_wakeup(wl->dev, 1);
6087 		if (pdata->pwr_in_suspend)
6088 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6089 	}
6090 #endif
6091 	disable_irq(wl->irq);
6092 
6093 	ret = wl12xx_get_hw_info(wl);
6094 	if (ret < 0) {
6095 		wl1271_error("couldn't get hw info");
6096 		goto out_irq;
6097 	}
6098 
6099 	ret = wl->ops->identify_chip(wl);
6100 	if (ret < 0)
6101 		goto out_irq;
6102 
6103 	ret = wl1271_init_ieee80211(wl);
6104 	if (ret)
6105 		goto out_irq;
6106 
6107 	ret = wl1271_register_hw(wl);
6108 	if (ret)
6109 		goto out_irq;
6110 
6111 	ret = wlcore_sysfs_init(wl);
6112 	if (ret)
6113 		goto out_unreg;
6114 
6115 	wl->initialized = true;
6116 	goto out;
6117 
6118 out_unreg:
6119 	wl1271_unregister_hw(wl);
6120 
6121 out_irq:
6122 	free_irq(wl->irq, wl);
6123 
6124 out_free_nvs:
6125 	kfree(wl->nvs);
6126 
6127 out:
6128 	release_firmware(fw);
6129 	complete_all(&wl->nvs_loading_complete);
6130 }
6131 
6132 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6133 {
6134 	int ret;
6135 
6136 	if (!wl->ops || !wl->ptable)
6137 		return -EINVAL;
6138 
6139 	wl->dev = &pdev->dev;
6140 	wl->pdev = pdev;
6141 	platform_set_drvdata(pdev, wl);
6142 
6143 	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6144 				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6145 				      wl, wlcore_nvs_cb);
6146 	if (ret < 0) {
6147 		wl1271_error("request_firmware_nowait failed: %d", ret);
6148 		complete_all(&wl->nvs_loading_complete);
6149 	}
6150 
6151 	return ret;
6152 }
6153 EXPORT_SYMBOL_GPL(wlcore_probe);
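
/*
 * Minimal usage sketch of the exported pair wlcore_alloc_hw()/wlcore_probe(),
 * loosely modelled on the wl12xx/wl18xx glue drivers. The chip_* names and
 * sizes below are placeholders, not real symbols; note that wlcore_probe()
 * refuses to run unless wl->ops and wl->ptable have been set:
 *
 *	static int chip_probe(struct platform_device *pdev)
 *	{
 *		struct ieee80211_hw *hw;
 *		struct wl1271 *wl;
 *
 *		hw = wlcore_alloc_hw(sizeof(struct chip_priv),
 *				     CHIP_AGGR_BUF_SIZE,
 *				     sizeof(struct chip_event_mailbox));
 *		if (IS_ERR(hw))
 *			return PTR_ERR(hw);
 *
 *		wl = hw->priv;
 *		wl->ops = &chip_ops;
 *		wl->ptable = chip_ptable;
 *		// plus the remaining chip-specific fields (num_tx_desc etc.)
 *
 *		return wlcore_probe(wl, pdev);
 *	}
 */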
6154 
6155 int wlcore_remove(struct platform_device *pdev)
6156 {
6157 	struct wl1271 *wl = platform_get_drvdata(pdev);
6158 
6159 	wait_for_completion(&wl->nvs_loading_complete);
6160 	if (!wl->initialized)
6161 		return 0;
6162 
6163 	if (wl->irq_wake_enabled) {
6164 		device_init_wakeup(wl->dev, 0);
6165 		disable_irq_wake(wl->irq);
6166 	}
6167 	wl1271_unregister_hw(wl);
6168 	free_irq(wl->irq, wl);
6169 	wlcore_free_hw(wl);
6170 
6171 	return 0;
6172 }
6173 EXPORT_SYMBOL_GPL(wlcore_remove);
6174 
6175 u32 wl12xx_debug_level = DEBUG_NONE;
6176 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6177 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6178 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6179 
6180 module_param_named(fwlog, fwlog_param, charp, 0);
6181 MODULE_PARM_DESC(fwlog,
6182 		 "FW logger options: continuous, ondemand, dbgpins or disable");
6183 
6184 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6185 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6186 
6187 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6188 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6189 
6190 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6191 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
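
/*
 * Example usage of the parameters above (the exact values are arbitrary and
 * shown only for illustration):
 *
 *	modprobe wlcore debug_level=0x4 fwlog=ondemand no_recovery=1
 *
 * The parameters declared with S_IRUSR | S_IWUSR can also be changed at
 * runtime through /sys/module/wlcore/parameters/.
 */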
6192 
6193 MODULE_LICENSE("GPL");
6194 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6195 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6196 MODULE_FIRMWARE(WL12XX_NVS_NAME);
6197