xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/main.c (revision 4f6cce39)
1 /*
2  * This file is part of wlcore
3  *
4  * Copyright (C) 2008-2010 Nokia Corporation
5  * Copyright (C) 2011-2013 Texas Instruments Inc.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * version 2 as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19  * 02110-1301 USA
20  *
21  */
22 
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 
30 #include "wlcore.h"
31 #include "debug.h"
32 #include "wl12xx_80211.h"
33 #include "io.h"
34 #include "tx.h"
35 #include "ps.h"
36 #include "init.h"
37 #include "debugfs.h"
38 #include "testmode.h"
39 #include "vendor_cmd.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43 
44 #define WL1271_BOOT_RETRIES 3
45 
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery     = -1;
50 
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 					 struct ieee80211_vif *vif,
53 					 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
56 
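/*
 * Report the post-association peer state of the STA link to the FW. This
 * is a no-op if the vif is not an associated STA or if the state has
 * already been sent (tracked via WLVIF_FLAG_STA_STATE_SENT).
 */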
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
58 {
59 	int ret;
60 
61 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
62 		return -EINVAL;
63 
64 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
65 		return 0;
66 
67 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
68 		return 0;
69 
70 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
71 	if (ret < 0)
72 		return ret;
73 
74 	wl1271_info("Association completed.");
75 	return 0;
76 }
77 
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 			      struct regulatory_request *request)
80 {
81 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
82 	struct wl1271 *wl = hw->priv;
83 
84 	/* copy the current dfs region */
85 	if (request)
86 		wl->dfs_region = request->dfs_region;
87 
88 	wlcore_regdomain_config(wl);
89 }
90 
91 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
92 				   bool enable)
93 {
94 	int ret = 0;
95 
96 	/* we should hold wl->mutex */
97 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
98 	if (ret < 0)
99 		goto out;
100 
101 	if (enable)
102 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
103 	else
104 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
105 out:
106 	return ret;
107 }
108 
109 /*
110  * this function is called when the rx_streaming interval
111  * has been changed or rx_streaming should be disabled
112  */
113 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
114 {
115 	int ret = 0;
116 	int period = wl->conf.rx_streaming.interval;
117 
118 	/* don't reconfigure if rx_streaming is disabled */
119 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
120 		goto out;
121 
122 	/* reconfigure/disable according to new streaming_period */
123 	if (period &&
124 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
125 	    (wl->conf.rx_streaming.always ||
126 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
127 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
128 	else {
129 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
130 		/* don't cancel_work_sync since we might deadlock */
131 		del_timer_sync(&wlvif->rx_streaming_timer);
132 	}
133 out:
134 	return ret;
135 }
136 
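/*
 * Work item that enables RX streaming for an associated STA vif (when
 * allowed by the configuration or by the Soft Gemini state) and arms a
 * timer to disable it again after the configured inactivity duration.
 */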
137 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
138 {
139 	int ret;
140 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
141 						rx_streaming_enable_work);
142 	struct wl1271 *wl = wlvif->wl;
143 
144 	mutex_lock(&wl->mutex);
145 
146 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
147 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
148 	    (!wl->conf.rx_streaming.always &&
149 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 		goto out;
151 
152 	if (!wl->conf.rx_streaming.interval)
153 		goto out;
154 
155 	ret = wl1271_ps_elp_wakeup(wl);
156 	if (ret < 0)
157 		goto out;
158 
159 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
160 	if (ret < 0)
161 		goto out_sleep;
162 
163 	/* stop it after some time of inactivity */
164 	mod_timer(&wlvif->rx_streaming_timer,
165 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
166 
167 out_sleep:
168 	wl1271_ps_elp_sleep(wl);
169 out:
170 	mutex_unlock(&wl->mutex);
171 }
172 
173 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
174 {
175 	int ret;
176 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
177 						rx_streaming_disable_work);
178 	struct wl1271 *wl = wlvif->wl;
179 
180 	mutex_lock(&wl->mutex);
181 
182 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
183 		goto out;
184 
185 	ret = wl1271_ps_elp_wakeup(wl);
186 	if (ret < 0)
187 		goto out;
188 
189 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
190 	if (ret)
191 		goto out_sleep;
192 
193 out_sleep:
194 	wl1271_ps_elp_sleep(wl);
195 out:
196 	mutex_unlock(&wl->mutex);
197 }
198 
199 static void wl1271_rx_streaming_timer(unsigned long data)
200 {
201 	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
202 	struct wl1271 *wl = wlvif->wl;
203 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
204 }
205 
206 /* wl->mutex must be taken */
207 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
208 {
209 	/* if the watchdog is not armed, don't do anything */
210 	if (wl->tx_allocated_blocks == 0)
211 		return;
212 
213 	cancel_delayed_work(&wl->tx_watchdog_work);
214 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
215 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
216 }
217 
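/*
 * Work item that pushes an updated rate-control/HT configuration to the
 * FW: mesh interfaces get their HT capabilities set directly, all others
 * go through the chip-specific sta_rc_update hook.
 */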
218 static void wlcore_rc_update_work(struct work_struct *work)
219 {
220 	int ret;
221 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
222 						rc_update_work);
223 	struct wl1271 *wl = wlvif->wl;
224 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
225 
226 	mutex_lock(&wl->mutex);
227 
228 	if (unlikely(wl->state != WLCORE_STATE_ON))
229 		goto out;
230 
231 	ret = wl1271_ps_elp_wakeup(wl);
232 	if (ret < 0)
233 		goto out;
234 
235 	if (ieee80211_vif_is_mesh(vif)) {
236 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
237 						     true, wlvif->sta.hlid);
238 		if (ret < 0)
239 			goto out_sleep;
240 	} else {
241 		wlcore_hw_sta_rc_update(wl, wlvif);
242 	}
243 
244 out_sleep:
245 	wl1271_ps_elp_sleep(wl);
246 out:
247 	mutex_unlock(&wl->mutex);
248 }
249 
250 static void wl12xx_tx_watchdog_work(struct work_struct *work)
251 {
252 	struct delayed_work *dwork;
253 	struct wl1271 *wl;
254 
255 	dwork = to_delayed_work(work);
256 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
257 
258 	mutex_lock(&wl->mutex);
259 
260 	if (unlikely(wl->state != WLCORE_STATE_ON))
261 		goto out;
262 
263 	/* Tx went out in the meantime - everything is ok */
264 	if (unlikely(wl->tx_allocated_blocks == 0))
265 		goto out;
266 
267 	/*
268 	 * if a ROC is in progress, we might not have any Tx for a long
269 	 * time (e.g. pending Tx on the non-ROC channels)
270 	 */
271 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
272 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
273 			     wl->conf.tx.tx_watchdog_timeout);
274 		wl12xx_rearm_tx_watchdog_locked(wl);
275 		goto out;
276 	}
277 
278 	/*
279 	 * if a scan is in progress, we might not have any Tx for a long
280 	 * time
281 	 */
282 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
283 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
284 			     wl->conf.tx.tx_watchdog_timeout);
285 		wl12xx_rearm_tx_watchdog_locked(wl);
286 		goto out;
287 	}
288 
289 	/*
290 	 * AP might cache a frame for a long time for a sleeping station,
291 	 * so rearm the timer if there's an AP interface with stations. If
292 	 * Tx is genuinely stuck we will hopefully discover it when all
293 	 * stations are removed due to inactivity.
294 	 */
295 	if (wl->active_sta_count) {
296 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
297 			     "%d stations",
298 			      wl->conf.tx.tx_watchdog_timeout,
299 			      wl->active_sta_count);
300 		wl12xx_rearm_tx_watchdog_locked(wl);
301 		goto out;
302 	}
303 
304 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
305 		     wl->conf.tx.tx_watchdog_timeout);
306 	wl12xx_queue_recovery_work(wl);
307 
308 out:
309 	mutex_unlock(&wl->mutex);
310 }
311 
312 static void wlcore_adjust_conf(struct wl1271 *wl)
313 {
314 
315 	if (fwlog_param) {
316 		if (!strcmp(fwlog_param, "continuous")) {
317 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
318 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
319 		} else if (!strcmp(fwlog_param, "dbgpins")) {
320 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
321 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
322 		} else if (!strcmp(fwlog_param, "disable")) {
323 			wl->conf.fwlog.mem_blocks = 0;
324 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
325 		} else {
326 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
327 		}
328 	}
329 
330 	if (bug_on_recovery != -1)
331 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
332 
333 	if (no_recovery != -1)
334 		wl->conf.recovery.no_recovery = (u8) no_recovery;
335 }
336 
337 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
338 					struct wl12xx_vif *wlvif,
339 					u8 hlid, u8 tx_pkts)
340 {
341 	bool fw_ps;
342 
343 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
344 
345 	/*
346 	 * Wake up from high level PS if the STA is asleep with too few
347 	 * packets in FW or if the STA is awake.
348 	 */
349 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
350 		wl12xx_ps_link_end(wl, wlvif, hlid);
351 
352 	/*
353 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
354 	 * Make an exception if this is the only connected link. In this
355 	 * case FW-memory congestion is less of a problem.
356 	 * Note that a single connected STA means 2*ap_count + 1 active links,
357 	 * since we must account for the global and broadcast AP links
358 	 * for each AP. The "fw_ps" check assures us the other link is a STA
359 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
360 	 */
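	/* e.g. one AP with a single connected STA: 2*1 + 1 = 3 active links */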
361 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
362 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
363 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
364 }
365 
366 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
367 					   struct wl12xx_vif *wlvif,
368 					   struct wl_fw_status *status)
369 {
370 	unsigned long cur_fw_ps_map;
371 	u8 hlid;
372 
373 	cur_fw_ps_map = status->link_ps_bitmap;
374 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
375 		wl1271_debug(DEBUG_PSM,
376 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
377 			     wl->ap_fw_ps_map, cur_fw_ps_map,
378 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
379 
380 		wl->ap_fw_ps_map = cur_fw_ps_map;
381 	}
382 
383 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
384 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
385 					    wl->links[hlid].allocated_pkts);
386 }
387 
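/*
 * Read and convert the raw FW status block, then update the host-side TX
 * accounting: per-queue and per-link freed-packet counters, total freed
 * and available block counts, the TX watchdog, the PS state of AP links
 * and the host-chipset time offset.
 */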
388 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
389 {
390 	struct wl12xx_vif *wlvif;
391 	struct timespec ts;
392 	u32 old_tx_blk_count = wl->tx_blocks_available;
393 	int avail, freed_blocks;
394 	int i;
395 	int ret;
396 	struct wl1271_link *lnk;
397 
398 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
399 				   wl->raw_fw_status,
400 				   wl->fw_status_len, false);
401 	if (ret < 0)
402 		return ret;
403 
404 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
405 
406 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
407 		     "drv_rx_counter = %d, tx_results_counter = %d)",
408 		     status->intr,
409 		     status->fw_rx_counter,
410 		     status->drv_rx_counter,
411 		     status->tx_results_counter);
412 
413 	for (i = 0; i < NUM_TX_QUEUES; i++) {
414 		/* prevent wrap-around in freed-packets counter */
415 		wl->tx_allocated_pkts[i] -=
416 				(status->counters.tx_released_pkts[i] -
417 				wl->tx_pkts_freed[i]) & 0xff;
418 
419 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
420 	}
421 
422 
423 	for_each_set_bit(i, wl->links_map, wl->num_links) {
424 		u8 diff;
425 		lnk = &wl->links[i];
426 
427 		/* prevent wrap-around in freed-packets counter */
428 		diff = (status->counters.tx_lnk_free_pkts[i] -
429 		       lnk->prev_freed_pkts) & 0xff;
430 
431 		if (diff == 0)
432 			continue;
433 
434 		lnk->allocated_pkts -= diff;
435 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
436 
437 		/* accumulate the prev_freed_pkts counter */
438 		lnk->total_freed_pkts += diff;
439 	}
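	/*
	 * The "& 0xff" masking above makes the freed-packet deltas modulo
	 * 256, e.g. prev_freed 0xfd and a current FW value of 0x05 gives
	 * (0x05 - 0xfd) & 0xff = 8 newly freed packets despite the wrap.
	 */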
440 
441 	/* prevent wrap-around in total blocks counter */
442 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
443 		freed_blocks = status->total_released_blks -
444 			       wl->tx_blocks_freed;
445 	else
446 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
447 			       status->total_released_blks;
448 
449 	wl->tx_blocks_freed = status->total_released_blks;
450 
451 	wl->tx_allocated_blocks -= freed_blocks;
452 
453 	/*
454 	 * If the FW freed some blocks:
455 	 * If we still have allocated blocks - re-arm the timer, Tx is
456 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
457 	 */
458 	if (freed_blocks) {
459 		if (wl->tx_allocated_blocks)
460 			wl12xx_rearm_tx_watchdog_locked(wl);
461 		else
462 			cancel_delayed_work(&wl->tx_watchdog_work);
463 	}
464 
465 	avail = status->tx_total - wl->tx_allocated_blocks;
466 
467 	/*
468 	 * The FW might change the total number of TX memblocks before
469 	 * we get a notification about blocks being released. Thus, the
470 	 * available blocks calculation might yield a temporary result
471 	 * which is lower than the actual available blocks. Keeping in
472 	 * mind that only blocks that were allocated can be moved from
473 	 * TX to RX, tx_blocks_available should never decrease here.
474 	 */
475 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
476 				      avail);
477 
478 	/* if more blocks are available now, tx work can be scheduled */
479 	if (wl->tx_blocks_available > old_tx_blk_count)
480 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
481 
482 	/* for AP update num of allocated TX blocks per link and ps status */
483 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
484 		wl12xx_irq_update_links_status(wl, wlvif, status);
485 	}
486 
487 	/* update the host-chipset time offset */
488 	getnstimeofday(&ts);
489 	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
490 		(s64)(status->fw_localtime);
491 
492 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
493 
494 	return 0;
495 }
496 
497 static void wl1271_flush_deferred_work(struct wl1271 *wl)
498 {
499 	struct sk_buff *skb;
500 
501 	/* Pass all received frames to the network stack */
502 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
503 		ieee80211_rx_ni(wl->hw, skb);
504 
505 	/* Return sent skbs to the network stack */
506 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
507 		ieee80211_tx_status_ni(wl->hw, skb);
508 }
509 
510 static void wl1271_netstack_work(struct work_struct *work)
511 {
512 	struct wl1271 *wl =
513 		container_of(work, struct wl1271, netstack_work);
514 
515 	do {
516 		wl1271_flush_deferred_work(wl);
517 	} while (skb_queue_len(&wl->deferred_rx_queue));
518 }
519 
520 #define WL1271_IRQ_MAX_LOOPS 256
521 
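/*
 * Core interrupt processing, called with wl->mutex held: loop over the FW
 * status (up to WL1271_IRQ_MAX_LOOPS times, or only once for
 * edge-triggered IRQs) handling watchdog, data and event interrupts.
 * A negative return value requests a recovery.
 */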
522 static int wlcore_irq_locked(struct wl1271 *wl)
523 {
524 	int ret = 0;
525 	u32 intr;
526 	int loopcount = WL1271_IRQ_MAX_LOOPS;
527 	bool done = false;
528 	unsigned int defer_count;
529 	unsigned long flags;
530 
531 	/*
532 	 * If an edge-triggered interrupt must be used, we cannot iterate
533 	 * more than once without introducing race conditions with the hardirq.
534 	 */
535 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
536 		loopcount = 1;
537 
538 	wl1271_debug(DEBUG_IRQ, "IRQ work");
539 
540 	if (unlikely(wl->state != WLCORE_STATE_ON))
541 		goto out;
542 
543 	ret = wl1271_ps_elp_wakeup(wl);
544 	if (ret < 0)
545 		goto out;
546 
547 	while (!done && loopcount--) {
548 		/*
549 		 * In order to avoid a race with the hardirq, clear the flag
550 		 * before acknowledging the chip. Since the mutex is held,
551 		 * wl1271_ps_elp_wakeup cannot be called concurrently.
552 		 */
553 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
554 		smp_mb__after_atomic();
555 
556 		ret = wlcore_fw_status(wl, wl->fw_status);
557 		if (ret < 0)
558 			goto out;
559 
560 		wlcore_hw_tx_immediate_compl(wl);
561 
562 		intr = wl->fw_status->intr;
563 		intr &= WLCORE_ALL_INTR_MASK;
564 		if (!intr) {
565 			done = true;
566 			continue;
567 		}
568 
569 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
570 			wl1271_error("HW watchdog interrupt received! starting recovery.");
571 			wl->watchdog_recovery = true;
572 			ret = -EIO;
573 
574 			/* restarting the chip. ignore any other interrupt. */
575 			goto out;
576 		}
577 
578 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
579 			wl1271_error("SW watchdog interrupt received! "
580 				     "starting recovery.");
581 			wl->watchdog_recovery = true;
582 			ret = -EIO;
583 
584 			/* restarting the chip. ignore any other interrupt. */
585 			goto out;
586 		}
587 
588 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
589 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
590 
591 			ret = wlcore_rx(wl, wl->fw_status);
592 			if (ret < 0)
593 				goto out;
594 
595 			/* Check if any tx blocks were freed */
596 			spin_lock_irqsave(&wl->wl_lock, flags);
597 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
598 			    wl1271_tx_total_queue_count(wl) > 0) {
599 				spin_unlock_irqrestore(&wl->wl_lock, flags);
600 				/*
601 				 * In order to avoid starvation of the TX path,
602 				 * call the work function directly.
603 				 */
604 				ret = wlcore_tx_work_locked(wl);
605 				if (ret < 0)
606 					goto out;
607 			} else {
608 				spin_unlock_irqrestore(&wl->wl_lock, flags);
609 			}
610 
611 			/* check for tx results */
612 			ret = wlcore_hw_tx_delayed_compl(wl);
613 			if (ret < 0)
614 				goto out;
615 
616 			/* Make sure the deferred queues don't get too long */
617 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
618 				      skb_queue_len(&wl->deferred_rx_queue);
619 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
620 				wl1271_flush_deferred_work(wl);
621 		}
622 
623 		if (intr & WL1271_ACX_INTR_EVENT_A) {
624 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
625 			ret = wl1271_event_handle(wl, 0);
626 			if (ret < 0)
627 				goto out;
628 		}
629 
630 		if (intr & WL1271_ACX_INTR_EVENT_B) {
631 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
632 			ret = wl1271_event_handle(wl, 1);
633 			if (ret < 0)
634 				goto out;
635 		}
636 
637 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
638 			wl1271_debug(DEBUG_IRQ,
639 				     "WL1271_ACX_INTR_INIT_COMPLETE");
640 
641 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
642 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
643 	}
644 
645 	wl1271_ps_elp_sleep(wl);
646 
647 out:
648 	return ret;
649 }
650 
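/*
 * Threaded IRQ handler: complete a pending ELP wakeup, defer the work if
 * we are suspended, otherwise run wlcore_irq_locked() under wl->mutex and
 * queue recovery or TX work as needed.
 */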
651 static irqreturn_t wlcore_irq(int irq, void *cookie)
652 {
653 	int ret;
654 	unsigned long flags;
655 	struct wl1271 *wl = cookie;
656 
657 	/* complete the ELP completion */
658 	spin_lock_irqsave(&wl->wl_lock, flags);
659 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
660 	if (wl->elp_compl) {
661 		complete(wl->elp_compl);
662 		wl->elp_compl = NULL;
663 	}
664 
665 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
666 		/* don't enqueue a work right now. mark it as pending */
667 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
668 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
669 		disable_irq_nosync(wl->irq);
670 		pm_wakeup_event(wl->dev, 0);
671 		spin_unlock_irqrestore(&wl->wl_lock, flags);
672 		return IRQ_HANDLED;
673 	}
674 	spin_unlock_irqrestore(&wl->wl_lock, flags);
675 
676 	/* TX might be handled here, avoid redundant work */
677 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
678 	cancel_work_sync(&wl->tx_work);
679 
680 	mutex_lock(&wl->mutex);
681 
682 	ret = wlcore_irq_locked(wl);
683 	if (ret)
684 		wl12xx_queue_recovery_work(wl);
685 
686 	spin_lock_irqsave(&wl->wl_lock, flags);
687 	/* In case TX was not handled here, queue TX work */
688 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
689 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
690 	    wl1271_tx_total_queue_count(wl) > 0)
691 		ieee80211_queue_work(wl->hw, &wl->tx_work);
692 	spin_unlock_irqrestore(&wl->wl_lock, flags);
693 
694 	mutex_unlock(&wl->mutex);
695 
696 	return IRQ_HANDLED;
697 }
698 
699 struct vif_counter_data {
700 	u8 counter;
701 
702 	struct ieee80211_vif *cur_vif;
703 	bool cur_vif_running;
704 };
705 
706 static void wl12xx_vif_count_iter(void *data, u8 *mac,
707 				  struct ieee80211_vif *vif)
708 {
709 	struct vif_counter_data *counter = data;
710 
711 	counter->counter++;
712 	if (counter->cur_vif == vif)
713 		counter->cur_vif_running = true;
714 }
715 
716 /* caller must not hold wl->mutex, as it might deadlock */
717 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
718 			       struct ieee80211_vif *cur_vif,
719 			       struct vif_counter_data *data)
720 {
721 	memset(data, 0, sizeof(*data));
722 	data->cur_vif = cur_vif;
723 
724 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
725 					    wl12xx_vif_count_iter, data);
726 }
727 
728 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
729 {
730 	const struct firmware *fw;
731 	const char *fw_name;
732 	enum wl12xx_fw_type fw_type;
733 	int ret;
734 
735 	if (plt) {
736 		fw_type = WL12XX_FW_TYPE_PLT;
737 		fw_name = wl->plt_fw_name;
738 	} else {
739 		/*
740 		 * we can't call wl12xx_get_vif_count() here because
741 		 * wl->mutex is taken, so use the cached last_vif_count value
742 		 */
743 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
744 			fw_type = WL12XX_FW_TYPE_MULTI;
745 			fw_name = wl->mr_fw_name;
746 		} else {
747 			fw_type = WL12XX_FW_TYPE_NORMAL;
748 			fw_name = wl->sr_fw_name;
749 		}
750 	}
751 
752 	if (wl->fw_type == fw_type)
753 		return 0;
754 
755 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
756 
757 	ret = request_firmware(&fw, fw_name, wl->dev);
758 
759 	if (ret < 0) {
760 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
761 		return ret;
762 	}
763 
764 	if (fw->size % 4) {
765 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
766 			     fw->size);
767 		ret = -EILSEQ;
768 		goto out;
769 	}
770 
771 	vfree(wl->fw);
772 	wl->fw_type = WL12XX_FW_TYPE_NONE;
773 	wl->fw_len = fw->size;
774 	wl->fw = vmalloc(wl->fw_len);
775 
776 	if (!wl->fw) {
777 		wl1271_error("could not allocate memory for the firmware");
778 		ret = -ENOMEM;
779 		goto out;
780 	}
781 
782 	memcpy(wl->fw, fw->data, wl->fw_len);
783 	ret = 0;
784 	wl->fw_type = fw_type;
785 out:
786 	release_firmware(fw);
787 
788 	return ret;
789 }
790 
791 void wl12xx_queue_recovery_work(struct wl1271 *wl)
792 {
793 	/* Avoid a recursive recovery */
794 	if (wl->state == WLCORE_STATE_ON) {
795 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
796 				  &wl->flags));
797 
798 		wl->state = WLCORE_STATE_RESTARTING;
799 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
800 		wl1271_ps_elp_wakeup(wl);
801 		wlcore_disable_interrupts_nosync(wl);
802 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
803 	}
804 }
805 
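/*
 * Append a chunk of FW log data to the host-side fwlog buffer (capped at
 * PAGE_SIZE), which is exposed through the sysfs fwlog entry. Returns the
 * number of bytes actually copied.
 */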
806 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
807 {
808 	size_t len;
809 
810 	/* Make sure we have enough room */
811 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
812 
813 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
814 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
815 	wl->fwlog_size += len;
816 
817 	return len;
818 }
819 
820 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
821 {
822 	u32 end_of_log = 0;
823 
824 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
825 		return;
826 
827 	wl1271_info("Reading FW panic log");
828 
829 	/*
830 	 * Make sure the chip is awake and the logger isn't active.
831 	 * Do not send a stop fwlog command if the fw is hung or if
832 	 * dbgpins are used (due to some fw bug).
833 	 */
834 	if (wl1271_ps_elp_wakeup(wl))
835 		return;
836 	if (!wl->watchdog_recovery &&
837 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
838 		wl12xx_cmd_stop_fwlog(wl);
839 
840 	/* Traverse the memory blocks linked list */
841 	do {
842 		end_of_log = wlcore_event_fw_logger(wl);
843 		if (end_of_log == 0) {
844 			msleep(100);
845 			end_of_log = wlcore_event_fw_logger(wl);
846 		}
847 	} while (end_of_log != 0);
848 }
849 
850 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
851 				   u8 hlid, struct ieee80211_sta *sta)
852 {
853 	struct wl1271_station *wl_sta;
854 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
855 
856 	wl_sta = (void *)sta->drv_priv;
857 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
858 
859 	/*
860 	 * increment the initial seq number on recovery to account for
861 	 * transmitted packets that we haven't yet got in the FW status
862 	 */
863 	if (wlvif->encryption_type == KEY_GEM)
864 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
865 
866 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
867 		wl_sta->total_freed_pkts += sqn_recovery_padding;
868 }
869 
870 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
871 					struct wl12xx_vif *wlvif,
872 					u8 hlid, const u8 *addr)
873 {
874 	struct ieee80211_sta *sta;
875 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
876 
877 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
878 		    is_zero_ether_addr(addr)))
879 		return;
880 
881 	rcu_read_lock();
882 	sta = ieee80211_find_sta(vif, addr);
883 	if (sta)
884 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
885 	rcu_read_unlock();
886 }
887 
888 static void wlcore_print_recovery(struct wl1271 *wl)
889 {
890 	u32 pc = 0;
891 	u32 hint_sts = 0;
892 	int ret;
893 
894 	wl1271_info("Hardware recovery in progress. FW ver: %s",
895 		    wl->chip.fw_ver_str);
896 
897 	/* change partitions momentarily so we can read the FW pc */
898 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
899 	if (ret < 0)
900 		return;
901 
902 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
903 	if (ret < 0)
904 		return;
905 
906 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
907 	if (ret < 0)
908 		return;
909 
910 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
911 				pc, hint_sts, ++wl->recovery_count);
912 
913 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
914 }
915 
916 
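/*
 * FW recovery work: optionally dump the FW panic log and recovery info,
 * save per-link sequence numbers, remove all interfaces, stop the device
 * and ask mac80211 to restart the HW (unless no_recovery was requested).
 */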
917 static void wl1271_recovery_work(struct work_struct *work)
918 {
919 	struct wl1271 *wl =
920 		container_of(work, struct wl1271, recovery_work);
921 	struct wl12xx_vif *wlvif;
922 	struct ieee80211_vif *vif;
923 
924 	mutex_lock(&wl->mutex);
925 
926 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
927 		goto out_unlock;
928 
929 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
930 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
931 			wl12xx_read_fwlog_panic(wl);
932 		wlcore_print_recovery(wl);
933 	}
934 
935 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
936 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
937 
938 	if (wl->conf.recovery.no_recovery) {
939 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
940 		goto out_unlock;
941 	}
942 
943 	/* Prevent spurious TX during FW restart */
944 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
945 
946 	/* reboot the chipset */
947 	while (!list_empty(&wl->wlvif_list)) {
948 		wlvif = list_first_entry(&wl->wlvif_list,
949 				       struct wl12xx_vif, list);
950 		vif = wl12xx_wlvif_to_vif(wlvif);
951 
952 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
953 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
954 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
955 						    vif->bss_conf.bssid);
956 		}
957 
958 		__wl1271_op_remove_interface(wl, vif, false);
959 	}
960 
961 	wlcore_op_stop_locked(wl);
962 
963 	ieee80211_restart_hw(wl->hw);
964 
965 	/*
966 	 * It's safe to enable TX now - the queues are stopped after a request
967 	 * to restart the HW.
968 	 */
969 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
970 
971 out_unlock:
972 	wl->watchdog_recovery = false;
973 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
974 	mutex_unlock(&wl->mutex);
975 }
976 
977 static int wlcore_fw_wakeup(struct wl1271 *wl)
978 {
979 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
980 }
981 
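/* Allocate the FW status and TX result buffers used at runtime. */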
982 static int wl1271_setup(struct wl1271 *wl)
983 {
984 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
985 	if (!wl->raw_fw_status)
986 		goto err;
987 
988 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
989 	if (!wl->fw_status)
990 		goto err;
991 
992 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
993 	if (!wl->tx_res_if)
994 		goto err;
995 
996 	return 0;
997 err:
998 	kfree(wl->fw_status);
999 	kfree(wl->raw_fw_status);
1000 	return -ENOMEM;
1001 }
1002 
1003 static int wl12xx_set_power_on(struct wl1271 *wl)
1004 {
1005 	int ret;
1006 
1007 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1008 	ret = wl1271_power_on(wl);
1009 	if (ret < 0)
1010 		goto out;
1011 	msleep(WL1271_POWER_ON_SLEEP);
1012 	wl1271_io_reset(wl);
1013 	wl1271_io_init(wl);
1014 
1015 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1016 	if (ret < 0)
1017 		goto fail;
1018 
1019 	/* ELP module wake up */
1020 	ret = wlcore_fw_wakeup(wl);
1021 	if (ret < 0)
1022 		goto fail;
1023 
1024 out:
1025 	return ret;
1026 
1027 fail:
1028 	wl1271_power_off(wl);
1029 	return ret;
1030 }
1031 
1032 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1033 {
1034 	int ret = 0;
1035 
1036 	ret = wl12xx_set_power_on(wl);
1037 	if (ret < 0)
1038 		goto out;
1039 
1040 	/*
1041 	 * For wl127x based devices we could use the default block
1042 	 * size (512 bytes), but due to a bug in the sdio driver, we
1043 	 * need to set it explicitly after the chip is powered on.  To
1044 	 * simplify the code and since the performance impact is
1045 	 * negligible, we use the same block size for all different
1046 	 * chip types.
1047 	 *
1048 	 * Check if the bus supports blocksize alignment and, if it
1049 	 * doesn't, make sure we don't have the quirk.
1050 	 */
1051 	if (!wl1271_set_block_size(wl))
1052 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1053 
1054 	/* TODO: make sure the lower driver has set things up correctly */
1055 
1056 	ret = wl1271_setup(wl);
1057 	if (ret < 0)
1058 		goto out;
1059 
1060 	ret = wl12xx_fetch_firmware(wl, plt);
1061 	if (ret < 0)
1062 		goto out;
1063 
1064 out:
1065 	return ret;
1066 }
1067 
1068 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1069 {
1070 	int retries = WL1271_BOOT_RETRIES;
1071 	struct wiphy *wiphy = wl->hw->wiphy;
1072 
1073 	static const char* const PLT_MODE[] = {
1074 		"PLT_OFF",
1075 		"PLT_ON",
1076 		"PLT_FEM_DETECT",
1077 		"PLT_CHIP_AWAKE"
1078 	};
1079 
1080 	int ret;
1081 
1082 	mutex_lock(&wl->mutex);
1083 
1084 	wl1271_notice("power up");
1085 
1086 	if (wl->state != WLCORE_STATE_OFF) {
1087 		wl1271_error("cannot go into PLT state because not "
1088 			     "in off state: %d", wl->state);
1089 		ret = -EBUSY;
1090 		goto out;
1091 	}
1092 
1093 	/* Indicate to lower levels that we are now in PLT mode */
1094 	wl->plt = true;
1095 	wl->plt_mode = plt_mode;
1096 
1097 	while (retries) {
1098 		retries--;
1099 		ret = wl12xx_chip_wakeup(wl, true);
1100 		if (ret < 0)
1101 			goto power_off;
1102 
1103 		if (plt_mode != PLT_CHIP_AWAKE) {
1104 			ret = wl->ops->plt_init(wl);
1105 			if (ret < 0)
1106 				goto power_off;
1107 		}
1108 
1109 		wl->state = WLCORE_STATE_ON;
1110 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1111 			      PLT_MODE[plt_mode],
1112 			      wl->chip.fw_ver_str);
1113 
1114 		/* update hw/fw version info in wiphy struct */
1115 		wiphy->hw_version = wl->chip.id;
1116 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1117 			sizeof(wiphy->fw_version));
1118 
1119 		goto out;
1120 
1121 power_off:
1122 		wl1271_power_off(wl);
1123 	}
1124 
1125 	wl->plt = false;
1126 	wl->plt_mode = PLT_OFF;
1127 
1128 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1129 		     WL1271_BOOT_RETRIES);
1130 out:
1131 	mutex_unlock(&wl->mutex);
1132 
1133 	return ret;
1134 }
1135 
1136 int wl1271_plt_stop(struct wl1271 *wl)
1137 {
1138 	int ret = 0;
1139 
1140 	wl1271_notice("power down");
1141 
1142 	/*
1143 	 * Interrupts must be disabled before setting the state to OFF.
1144 	 * Otherwise, the interrupt handler might be called and exit without
1145 	 * reading the interrupt status.
1146 	 */
1147 	wlcore_disable_interrupts(wl);
1148 	mutex_lock(&wl->mutex);
1149 	if (!wl->plt) {
1150 		mutex_unlock(&wl->mutex);
1151 
1152 		/*
1153 		 * This will not necessarily enable interrupts as interrupts
1154 		 * may have been disabled when op_stop was called. It will,
1155 		 * however, balance the above call to disable_interrupts().
1156 		 */
1157 		wlcore_enable_interrupts(wl);
1158 
1159 		wl1271_error("cannot power down because not in PLT "
1160 			     "state: %d", wl->state);
1161 		ret = -EBUSY;
1162 		goto out;
1163 	}
1164 
1165 	mutex_unlock(&wl->mutex);
1166 
1167 	wl1271_flush_deferred_work(wl);
1168 	cancel_work_sync(&wl->netstack_work);
1169 	cancel_work_sync(&wl->recovery_work);
1170 	cancel_delayed_work_sync(&wl->elp_work);
1171 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1172 
1173 	mutex_lock(&wl->mutex);
1174 	wl1271_power_off(wl);
1175 	wl->flags = 0;
1176 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1177 	wl->state = WLCORE_STATE_OFF;
1178 	wl->plt = false;
1179 	wl->plt_mode = PLT_OFF;
1180 	wl->rx_counter = 0;
1181 	mutex_unlock(&wl->mutex);
1182 
1183 out:
1184 	return ret;
1185 }
1186 
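/*
 * mac80211 TX entry point: map the skb to a link (hlid) and AC queue,
 * drop it if the link is invalid or the queue is hard-stopped, otherwise
 * queue it and kick tx_work. Queues are soft-stopped at the high
 * watermark so they cannot grow without bound.
 */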
1187 static void wl1271_op_tx(struct ieee80211_hw *hw,
1188 			 struct ieee80211_tx_control *control,
1189 			 struct sk_buff *skb)
1190 {
1191 	struct wl1271 *wl = hw->priv;
1192 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1193 	struct ieee80211_vif *vif = info->control.vif;
1194 	struct wl12xx_vif *wlvif = NULL;
1195 	unsigned long flags;
1196 	int q, mapping;
1197 	u8 hlid;
1198 
1199 	if (!vif) {
1200 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1201 		ieee80211_free_txskb(hw, skb);
1202 		return;
1203 	}
1204 
1205 	wlvif = wl12xx_vif_to_data(vif);
1206 	mapping = skb_get_queue_mapping(skb);
1207 	q = wl1271_tx_get_queue(mapping);
1208 
1209 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1210 
1211 	spin_lock_irqsave(&wl->wl_lock, flags);
1212 
1213 	/*
1214 	 * drop the packet if the link is invalid or the queue is stopped
1215 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1216 	 * allow these packets through.
1217 	 */
1218 	if (hlid == WL12XX_INVALID_LINK_ID ||
1219 	    (!test_bit(hlid, wlvif->links_map)) ||
1220 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1221 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1222 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1223 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1224 		ieee80211_free_txskb(hw, skb);
1225 		goto out;
1226 	}
1227 
1228 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1229 		     hlid, q, skb->len);
1230 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1231 
1232 	wl->tx_queue_count[q]++;
1233 	wlvif->tx_queue_count[q]++;
1234 
1235 	/*
1236 	 * The workqueue is slow to process the tx_queue and we need to stop
1237 	 * the queue here, otherwise the queue will get too long.
1238 	 */
1239 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1240 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1241 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1242 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1243 		wlcore_stop_queue_locked(wl, wlvif, q,
1244 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1245 	}
1246 
1247 	/*
1248 	 * The chip specific setup must run before the first TX packet -
1249 	 * before that, the tx_work will not be initialized!
1250 	 */
1251 
1252 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1253 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1254 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1255 
1256 out:
1257 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1258 }
1259 
1260 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1261 {
1262 	unsigned long flags;
1263 	int q;
1264 
1265 	/* no need to queue a new dummy packet if one is already pending */
1266 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1267 		return 0;
1268 
1269 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1270 
1271 	spin_lock_irqsave(&wl->wl_lock, flags);
1272 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1273 	wl->tx_queue_count[q]++;
1274 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1275 
1276 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1277 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1278 		return wlcore_tx_work_locked(wl);
1279 
1280 	/*
1281 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1282 	 * interrupt handler function
1283 	 */
1284 	return 0;
1285 }
1286 
1287 /*
1288  * The size of the dummy packet should be at least 1400 bytes. However, in
1289  * order to minimize the number of bus transactions, aligning it to 512-byte
1290  * boundaries could be beneficial, performance-wise.
1291  */
1292 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1293 
1294 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1295 {
1296 	struct sk_buff *skb;
1297 	struct ieee80211_hdr_3addr *hdr;
1298 	unsigned int dummy_packet_size;
1299 
1300 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1301 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1302 
1303 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1304 	if (!skb) {
1305 		wl1271_warning("Failed to allocate a dummy packet skb");
1306 		return NULL;
1307 	}
1308 
1309 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1310 
1311 	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1312 	memset(hdr, 0, sizeof(*hdr));
1313 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1314 					 IEEE80211_STYPE_NULLFUNC |
1315 					 IEEE80211_FCTL_TODS);
1316 
1317 	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1318 
1319 	/* Dummy packets require the TID to be management */
1320 	skb->priority = WL1271_TID_MGMT;
1321 
1322 	/* Initialize all fields that might be used */
1323 	skb_set_queue_mapping(skb, 0);
1324 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1325 
1326 	return skb;
1327 }
1328 
1329 
1330 #ifdef CONFIG_PM
1331 static int
1332 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1333 {
1334 	int num_fields = 0, in_field = 0, fields_size = 0;
1335 	int i, pattern_len = 0;
1336 
1337 	if (!p->mask) {
1338 		wl1271_warning("No mask in WoWLAN pattern");
1339 		return -EINVAL;
1340 	}
1341 
1342 	/*
1343 	 * The pattern is broken up into segments of bytes at different offsets
1344 	 * that need to be checked by the FW filter. Each segment is called
1345 	 * a field in the FW API. We verify that the total number of fields
1346 	 * required for this pattern won't exceed FW limits (8)
1347  * and that the total fields buffer won't exceed the FW limit.
1348  * Note that if there's a pattern which crosses the Ethernet/IP header
1349 	 * boundary a new field is required.
1350 	 */
1351 	for (i = 0; i < p->pattern_len; i++) {
1352 		if (test_bit(i, (unsigned long *)p->mask)) {
1353 			if (!in_field) {
1354 				in_field = 1;
1355 				pattern_len = 1;
1356 			} else {
1357 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1358 					num_fields++;
1359 					fields_size += pattern_len +
1360 						RX_FILTER_FIELD_OVERHEAD;
1361 					pattern_len = 1;
1362 				} else
1363 					pattern_len++;
1364 			}
1365 		} else {
1366 			if (in_field) {
1367 				in_field = 0;
1368 				fields_size += pattern_len +
1369 					RX_FILTER_FIELD_OVERHEAD;
1370 				num_fields++;
1371 			}
1372 		}
1373 	}
1374 
1375 	if (in_field) {
1376 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1377 		num_fields++;
1378 	}
1379 
1380 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1381 		wl1271_warning("RX Filter too complex. Too many segments");
1382 		return -EINVAL;
1383 	}
1384 
1385 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1386 		wl1271_warning("RX filter pattern is too big");
1387 		return -E2BIG;
1388 	}
1389 
1390 	return 0;
1391 }
1392 
1393 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1394 {
1395 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1396 }
1397 
1398 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1399 {
1400 	int i;
1401 
1402 	if (filter == NULL)
1403 		return;
1404 
1405 	for (i = 0; i < filter->num_fields; i++)
1406 		kfree(filter->fields[i].pattern);
1407 
1408 	kfree(filter);
1409 }
1410 
1411 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1412 				 u16 offset, u8 flags,
1413 				 const u8 *pattern, u8 len)
1414 {
1415 	struct wl12xx_rx_filter_field *field;
1416 
1417 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1418 		wl1271_warning("Max fields per RX filter. can't alloc another");
1419 		return -EINVAL;
1420 	}
1421 
1422 	field = &filter->fields[filter->num_fields];
1423 
1424 	field->pattern = kzalloc(len, GFP_KERNEL);
1425 	if (!field->pattern) {
1426 		wl1271_warning("Failed to allocate RX filter pattern");
1427 		return -ENOMEM;
1428 	}
1429 
1430 	filter->num_fields++;
1431 
1432 	field->offset = cpu_to_le16(offset);
1433 	field->flags = flags;
1434 	field->len = len;
1435 	memcpy(field->pattern, pattern, len);
1436 
1437 	return 0;
1438 }
1439 
1440 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1441 {
1442 	int i, fields_size = 0;
1443 
1444 	for (i = 0; i < filter->num_fields; i++)
1445 		fields_size += filter->fields[i].len +
1446 			sizeof(struct wl12xx_rx_filter_field) -
1447 			sizeof(u8 *);
1448 
1449 	return fields_size;
1450 }
1451 
1452 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1453 				    u8 *buf)
1454 {
1455 	int i;
1456 	struct wl12xx_rx_filter_field *field;
1457 
1458 	for (i = 0; i < filter->num_fields; i++) {
1459 		field = (struct wl12xx_rx_filter_field *)buf;
1460 
1461 		field->offset = filter->fields[i].offset;
1462 		field->flags = filter->fields[i].flags;
1463 		field->len = filter->fields[i].len;
1464 
1465 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1466 		buf += sizeof(struct wl12xx_rx_filter_field) -
1467 			sizeof(u8 *) + field->len;
1468 	}
1469 }
1470 
1471 /*
1472  * Allocates an RX filter, returned through f,
1473  * which needs to be freed using rx_filter_free()
1474  */
1475 static int
1476 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1477 					   struct wl12xx_rx_filter **f)
1478 {
1479 	int i, j, ret = 0;
1480 	struct wl12xx_rx_filter *filter;
1481 	u16 offset;
1482 	u8 flags, len;
1483 
1484 	filter = wl1271_rx_filter_alloc();
1485 	if (!filter) {
1486 		wl1271_warning("Failed to alloc rx filter");
1487 		ret = -ENOMEM;
1488 		goto err;
1489 	}
1490 
1491 	i = 0;
1492 	while (i < p->pattern_len) {
1493 		if (!test_bit(i, (unsigned long *)p->mask)) {
1494 			i++;
1495 			continue;
1496 		}
1497 
1498 		for (j = i; j < p->pattern_len; j++) {
1499 			if (!test_bit(j, (unsigned long *)p->mask))
1500 				break;
1501 
1502 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1503 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1504 				break;
1505 		}
1506 
1507 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1508 			offset = i;
1509 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1510 		} else {
1511 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1512 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1513 		}
1514 
1515 		len = j - i;
1516 
1517 		ret = wl1271_rx_filter_alloc_field(filter,
1518 						   offset,
1519 						   flags,
1520 						   &p->pattern[i], len);
1521 		if (ret)
1522 			goto err;
1523 
1524 		i = j;
1525 	}
1526 
1527 	filter->action = FILTER_SIGNAL;
1528 
1529 	*f = filter;
1530 	return 0;
1531 
1532 err:
1533 	wl1271_rx_filter_free(filter);
1534 	*f = NULL;
1535 
1536 	return ret;
1537 }
1538 
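/*
 * Program the FW RX filters from the WoWLAN configuration: with no
 * patterns everything is signalled to the host, otherwise each pattern is
 * translated into an RX filter and the default action becomes DROP.
 */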
1539 static int wl1271_configure_wowlan(struct wl1271 *wl,
1540 				   struct cfg80211_wowlan *wow)
1541 {
1542 	int i, ret;
1543 
1544 	if (!wow || wow->any || !wow->n_patterns) {
1545 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1546 							  FILTER_SIGNAL);
1547 		if (ret)
1548 			goto out;
1549 
1550 		ret = wl1271_rx_filter_clear_all(wl);
1551 		if (ret)
1552 			goto out;
1553 
1554 		return 0;
1555 	}
1556 
1557 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1558 		return -EINVAL;
1559 
1560 	/* Validate all incoming patterns before clearing current FW state */
1561 	for (i = 0; i < wow->n_patterns; i++) {
1562 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1563 		if (ret) {
1564 			wl1271_warning("Bad wowlan pattern %d", i);
1565 			return ret;
1566 		}
1567 	}
1568 
1569 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1570 	if (ret)
1571 		goto out;
1572 
1573 	ret = wl1271_rx_filter_clear_all(wl);
1574 	if (ret)
1575 		goto out;
1576 
1577 	/* Translate WoWLAN patterns into filters */
1578 	for (i = 0; i < wow->n_patterns; i++) {
1579 		struct cfg80211_pkt_pattern *p;
1580 		struct wl12xx_rx_filter *filter = NULL;
1581 
1582 		p = &wow->patterns[i];
1583 
1584 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1585 		if (ret) {
1586 			wl1271_warning("Failed to create an RX filter from "
1587 				       "wowlan pattern %d", i);
1588 			goto out;
1589 		}
1590 
1591 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1592 
1593 		wl1271_rx_filter_free(filter);
1594 		if (ret)
1595 			goto out;
1596 	}
1597 
1598 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1599 
1600 out:
1601 	return ret;
1602 }
1603 
1604 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1605 					struct wl12xx_vif *wlvif,
1606 					struct cfg80211_wowlan *wow)
1607 {
1608 	int ret = 0;
1609 
1610 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1611 		goto out;
1612 
1613 	ret = wl1271_configure_wowlan(wl, wow);
1614 	if (ret < 0)
1615 		goto out;
1616 
1617 	if ((wl->conf.conn.suspend_wake_up_event ==
1618 	     wl->conf.conn.wake_up_event) &&
1619 	    (wl->conf.conn.suspend_listen_interval ==
1620 	     wl->conf.conn.listen_interval))
1621 		goto out;
1622 
1623 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1624 				    wl->conf.conn.suspend_wake_up_event,
1625 				    wl->conf.conn.suspend_listen_interval);
1626 
1627 	if (ret < 0)
1628 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1629 out:
1630 	return ret;
1631 
1632 }
1633 
1634 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1635 					struct wl12xx_vif *wlvif,
1636 					struct cfg80211_wowlan *wow)
1637 {
1638 	int ret = 0;
1639 
1640 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1641 		goto out;
1642 
1643 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1644 	if (ret < 0)
1645 		goto out;
1646 
1647 	ret = wl1271_configure_wowlan(wl, wow);
1648 	if (ret < 0)
1649 		goto out;
1650 
1651 out:
1652 	return ret;
1653 
1654 }
1655 
1656 static int wl1271_configure_suspend(struct wl1271 *wl,
1657 				    struct wl12xx_vif *wlvif,
1658 				    struct cfg80211_wowlan *wow)
1659 {
1660 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1661 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1662 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1663 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1664 	return 0;
1665 }
1666 
1667 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1668 {
1669 	int ret = 0;
1670 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1671 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1672 
1673 	if ((!is_ap) && (!is_sta))
1674 		return;
1675 
1676 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1677 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1678 		return;
1679 
1680 	wl1271_configure_wowlan(wl, NULL);
1681 
1682 	if (is_sta) {
1683 		if ((wl->conf.conn.suspend_wake_up_event ==
1684 		     wl->conf.conn.wake_up_event) &&
1685 		    (wl->conf.conn.suspend_listen_interval ==
1686 		     wl->conf.conn.listen_interval))
1687 			return;
1688 
1689 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1690 				    wl->conf.conn.wake_up_event,
1691 				    wl->conf.conn.listen_interval);
1692 
1693 		if (ret < 0)
1694 			wl1271_error("resume: wake up conditions failed: %d",
1695 				     ret);
1696 
1697 	} else if (is_ap) {
1698 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1699 	}
1700 }
1701 
1702 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1703 			    struct cfg80211_wowlan *wow)
1704 {
1705 	struct wl1271 *wl = hw->priv;
1706 	struct wl12xx_vif *wlvif;
1707 	int ret;
1708 
1709 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1710 	WARN_ON(!wow);
1711 
1712 	/* we want to perform the recovery before suspending */
1713 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1714 		wl1271_warning("postponing suspend to perform recovery");
1715 		return -EBUSY;
1716 	}
1717 
1718 	wl1271_tx_flush(wl);
1719 
1720 	mutex_lock(&wl->mutex);
1721 
1722 	ret = wl1271_ps_elp_wakeup(wl);
1723 	if (ret < 0) {
1724 		mutex_unlock(&wl->mutex);
1725 		return ret;
1726 	}
1727 
1728 	wl->wow_enabled = true;
1729 	wl12xx_for_each_wlvif(wl, wlvif) {
1730 		if (wlcore_is_p2p_mgmt(wlvif))
1731 			continue;
1732 
1733 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1734 		if (ret < 0) {
1735 			mutex_unlock(&wl->mutex);
1736 			wl1271_warning("couldn't prepare device to suspend");
1737 			return ret;
1738 		}
1739 	}
1740 
1741 	/* disable fast link flow control notifications from FW */
1742 	ret = wlcore_hw_interrupt_notify(wl, false);
1743 	if (ret < 0)
1744 		goto out_sleep;
1745 
1746 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1747 	ret = wlcore_hw_rx_ba_filter(wl,
1748 				     !!wl->conf.conn.suspend_rx_ba_activity);
1749 	if (ret < 0)
1750 		goto out_sleep;
1751 
1752 out_sleep:
1753 	wl1271_ps_elp_sleep(wl);
1754 	mutex_unlock(&wl->mutex);
1755 
1756 	if (ret < 0) {
1757 		wl1271_warning("couldn't prepare device to suspend");
1758 		return ret;
1759 	}
1760 
1761 	/* flush any remaining work */
1762 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1763 
1764 	/*
1765 	 * disable and re-enable interrupts in order to flush
1766 	 * the threaded_irq
1767 	 */
1768 	wlcore_disable_interrupts(wl);
1769 
1770 	/*
1771 	 * set suspended flag to avoid triggering a new threaded_irq
1772 	 * work. no need for spinlock as interrupts are disabled.
1773 	 */
1774 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1775 
1776 	wlcore_enable_interrupts(wl);
1777 	flush_work(&wl->tx_work);
1778 	flush_delayed_work(&wl->elp_work);
1779 
1780 	/*
1781 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1782 	 * it on resume anyway.
1783 	 */
1784 	cancel_delayed_work(&wl->tx_watchdog_work);
1785 
1786 	return 0;
1787 }
1788 
1789 static int wl1271_op_resume(struct ieee80211_hw *hw)
1790 {
1791 	struct wl1271 *wl = hw->priv;
1792 	struct wl12xx_vif *wlvif;
1793 	unsigned long flags;
1794 	bool run_irq_work = false, pending_recovery;
1795 	int ret;
1796 
1797 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1798 		     wl->wow_enabled);
1799 	WARN_ON(!wl->wow_enabled);
1800 
1801 	/*
1802 	 * re-enable irq_work enqueuing, and call irq_work directly if
1803 	 * there is a pending work.
1804 	 */
1805 	spin_lock_irqsave(&wl->wl_lock, flags);
1806 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1807 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1808 		run_irq_work = true;
1809 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1810 
1811 	mutex_lock(&wl->mutex);
1812 
1813 	/* test the recovery flag before calling any SDIO functions */
1814 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1815 				    &wl->flags);
1816 
1817 	if (run_irq_work) {
1818 		wl1271_debug(DEBUG_MAC80211,
1819 			     "run postponed irq_work directly");
1820 
1821 		/* don't talk to the HW if recovery is pending */
1822 		if (!pending_recovery) {
1823 			ret = wlcore_irq_locked(wl);
1824 			if (ret)
1825 				wl12xx_queue_recovery_work(wl);
1826 		}
1827 
1828 		wlcore_enable_interrupts(wl);
1829 	}
1830 
1831 	if (pending_recovery) {
1832 		wl1271_warning("queuing forgotten recovery on resume");
1833 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1834 		goto out_sleep;
1835 	}
1836 
1837 	ret = wl1271_ps_elp_wakeup(wl);
1838 	if (ret < 0)
1839 		goto out;
1840 
1841 	wl12xx_for_each_wlvif(wl, wlvif) {
1842 		if (wlcore_is_p2p_mgmt(wlvif))
1843 			continue;
1844 
1845 		wl1271_configure_resume(wl, wlvif);
1846 	}
1847 
1848 	ret = wlcore_hw_interrupt_notify(wl, true);
1849 	if (ret < 0)
1850 		goto out_sleep;
1851 
1852 	/* if RX BA filtering was enabled for suspend, turn it back off */
1853 	ret = wlcore_hw_rx_ba_filter(wl, false);
1854 	if (ret < 0)
1855 		goto out_sleep;
1856 
1857 out_sleep:
1858 	wl1271_ps_elp_sleep(wl);
1859 
1860 out:
1861 	wl->wow_enabled = false;
1862 
1863 	/*
1864 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1865 	 * That way we avoid possible conditions where Tx-complete interrupts
1866 	 * fail to arrive and we perform a spurious recovery.
1867 	 */
1868 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1869 	mutex_unlock(&wl->mutex);
1870 
1871 	return 0;
1872 }
1873 #endif
1874 
1875 static int wl1271_op_start(struct ieee80211_hw *hw)
1876 {
1877 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1878 
1879 	/*
1880 	 * We have to delay the booting of the hardware because
1881 	 * we need to know the local MAC address before downloading and
1882 	 * initializing the firmware. The MAC address cannot be changed
1883 	 * after boot, and without the proper MAC address, the firmware
1884 	 * will not function properly.
1885 	 *
1886 	 * The MAC address is first known when the corresponding interface
1887 	 * is added. That is where we will initialize the hardware.
1888 	 */
1889 
1890 	return 0;
1891 }
1892 
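/*
 * Stop the device with wl->mutex held: mark the state OFF, quiesce
 * interrupts and work items, reset the TX path, power off and reset all
 * per-device bookkeeping so a later add_interface starts from scratch.
 */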
1893 static void wlcore_op_stop_locked(struct wl1271 *wl)
1894 {
1895 	int i;
1896 
1897 	if (wl->state == WLCORE_STATE_OFF) {
1898 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1899 					&wl->flags))
1900 			wlcore_enable_interrupts(wl);
1901 
1902 		return;
1903 	}
1904 
1905 	/*
1906 	 * this must be before the cancel_work calls below, so that the work
1907 	 * functions don't perform further work.
1908 	 */
1909 	wl->state = WLCORE_STATE_OFF;
1910 
1911 	/*
1912 	 * Use the nosync variant to disable interrupts, so the mutex could be
1913 	 * held while doing so without deadlocking.
1914 	 */
1915 	wlcore_disable_interrupts_nosync(wl);
1916 
1917 	mutex_unlock(&wl->mutex);
1918 
1919 	wlcore_synchronize_interrupts(wl);
1920 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1921 		cancel_work_sync(&wl->recovery_work);
1922 	wl1271_flush_deferred_work(wl);
1923 	cancel_delayed_work_sync(&wl->scan_complete_work);
1924 	cancel_work_sync(&wl->netstack_work);
1925 	cancel_work_sync(&wl->tx_work);
1926 	cancel_delayed_work_sync(&wl->elp_work);
1927 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1928 
1929 	/* let's notify MAC80211 about the remaining pending TX frames */
1930 	mutex_lock(&wl->mutex);
1931 	wl12xx_tx_reset(wl);
1932 
1933 	wl1271_power_off(wl);
1934 	/*
1935 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1936 	 * an interrupt storm. Now that the power is down, it is safe to
1937 	 * re-enable interrupts to balance the disable depth
1938 	 */
1939 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1940 		wlcore_enable_interrupts(wl);
1941 
1942 	wl->band = NL80211_BAND_2GHZ;
1943 
1944 	wl->rx_counter = 0;
1945 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1946 	wl->channel_type = NL80211_CHAN_NO_HT;
1947 	wl->tx_blocks_available = 0;
1948 	wl->tx_allocated_blocks = 0;
1949 	wl->tx_results_count = 0;
1950 	wl->tx_packets_count = 0;
1951 	wl->time_offset = 0;
1952 	wl->ap_fw_ps_map = 0;
1953 	wl->ap_ps_map = 0;
1954 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1955 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1956 	memset(wl->links_map, 0, sizeof(wl->links_map));
1957 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1958 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1959 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1960 	wl->active_sta_count = 0;
1961 	wl->active_link_count = 0;
1962 
1963 	/* The system link is always allocated */
1964 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1965 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1966 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1967 
1968 	/*
1969 	 * this is performed after the cancel_work calls and the associated
1970 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1971 	 * get executed before all these vars have been reset.
1972 	 */
1973 	wl->flags = 0;
1974 
1975 	wl->tx_blocks_freed = 0;
1976 
1977 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1978 		wl->tx_pkts_freed[i] = 0;
1979 		wl->tx_allocated_pkts[i] = 0;
1980 	}
1981 
1982 	wl1271_debugfs_reset(wl);
1983 
1984 	kfree(wl->raw_fw_status);
1985 	wl->raw_fw_status = NULL;
1986 	kfree(wl->fw_status);
1987 	wl->fw_status = NULL;
1988 	kfree(wl->tx_res_if);
1989 	wl->tx_res_if = NULL;
1990 	kfree(wl->target_mem_map);
1991 	wl->target_mem_map = NULL;
1992 
1993 	/*
1994 	 * FW channels must be re-calibrated after recovery;
1995 	 * save the current Reg-Domain channel configuration and clear it.
1996 	 */
1997 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
1998 	       sizeof(wl->reg_ch_conf_pending));
1999 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2000 }
2001 
2002 static void wlcore_op_stop(struct ieee80211_hw *hw)
2003 {
2004 	struct wl1271 *wl = hw->priv;
2005 
2006 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2007 
2008 	mutex_lock(&wl->mutex);
2009 
2010 	wlcore_op_stop_locked(wl);
2011 
2012 	mutex_unlock(&wl->mutex);
2013 }
2014 
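/*
 * Delayed work used as a channel-switch timeout: if the switch is still
 * marked as in progress, report the failure to mac80211 and ask the
 * firmware to stop the channel switch.
 */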
2015 static void wlcore_channel_switch_work(struct work_struct *work)
2016 {
2017 	struct delayed_work *dwork;
2018 	struct wl1271 *wl;
2019 	struct ieee80211_vif *vif;
2020 	struct wl12xx_vif *wlvif;
2021 	int ret;
2022 
2023 	dwork = to_delayed_work(work);
2024 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2025 	wl = wlvif->wl;
2026 
2027 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2028 
2029 	mutex_lock(&wl->mutex);
2030 
2031 	if (unlikely(wl->state != WLCORE_STATE_ON))
2032 		goto out;
2033 
2034 	/* check the channel switch is still ongoing */
2035 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2036 		goto out;
2037 
2038 	vif = wl12xx_wlvif_to_vif(wlvif);
2039 	ieee80211_chswitch_done(vif, false);
2040 
2041 	ret = wl1271_ps_elp_wakeup(wl);
2042 	if (ret < 0)
2043 		goto out;
2044 
2045 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2046 
2047 	wl1271_ps_elp_sleep(wl);
2048 out:
2049 	mutex_unlock(&wl->mutex);
2050 }
2051 
2052 static void wlcore_connection_loss_work(struct work_struct *work)
2053 {
2054 	struct delayed_work *dwork;
2055 	struct wl1271 *wl;
2056 	struct ieee80211_vif *vif;
2057 	struct wl12xx_vif *wlvif;
2058 
2059 	dwork = to_delayed_work(work);
2060 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2061 	wl = wlvif->wl;
2062 
2063 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2064 
2065 	mutex_lock(&wl->mutex);
2066 
2067 	if (unlikely(wl->state != WLCORE_STATE_ON))
2068 		goto out;
2069 
2070 	/* Call mac80211 connection loss */
2071 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2072 		goto out;
2073 
2074 	vif = wl12xx_wlvif_to_vif(wlvif);
2075 	ieee80211_connection_loss(vif);
2076 out:
2077 	mutex_unlock(&wl->mutex);
2078 }
2079 
2080 static void wlcore_pending_auth_complete_work(struct work_struct *work)
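/*
 * Delayed work that releases the ROC held for a pending station
 * authentication once the full WLCORE_PEND_AUTH_ROC_TIMEOUT has elapsed
 * since the last auth reply.
 */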
2081 {
2082 	struct delayed_work *dwork;
2083 	struct wl1271 *wl;
2084 	struct wl12xx_vif *wlvif;
2085 	unsigned long time_spare;
2086 	int ret;
2087 
2088 	dwork = to_delayed_work(work);
2089 	wlvif = container_of(dwork, struct wl12xx_vif,
2090 			     pending_auth_complete_work);
2091 	wl = wlvif->wl;
2092 
2093 	mutex_lock(&wl->mutex);
2094 
2095 	if (unlikely(wl->state != WLCORE_STATE_ON))
2096 		goto out;
2097 
2098 	/*
2099 	 * Make sure a second really passed since the last auth reply. Maybe
2100 	 * a second auth reply arrived while we were stuck on the mutex.
2101 	 * Check for a little less than the timeout to protect from scheduler
2102 	 * irregularities.
2103 	 */
2104 	time_spare = jiffies +
2105 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2106 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2107 		goto out;
2108 
2109 	ret = wl1271_ps_elp_wakeup(wl);
2110 	if (ret < 0)
2111 		goto out;
2112 
2113 	/* cancel the ROC if active */
2114 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2115 
2116 	wl1271_ps_elp_sleep(wl);
2117 out:
2118 	mutex_unlock(&wl->mutex);
2119 }
2120 
2121 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
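/*
 * Rate policies and keep-alive (KLV) templates are firmware resources
 * tracked in simple bitmaps; the helpers below hand out the first free
 * index and return it to the pool when the vif releases it.
 */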
2122 {
2123 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2124 					WL12XX_MAX_RATE_POLICIES);
2125 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2126 		return -EBUSY;
2127 
2128 	__set_bit(policy, wl->rate_policies_map);
2129 	*idx = policy;
2130 	return 0;
2131 }
2132 
2133 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2134 {
2135 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2136 		return;
2137 
2138 	__clear_bit(*idx, wl->rate_policies_map);
2139 	*idx = WL12XX_MAX_RATE_POLICIES;
2140 }
2141 
2142 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2143 {
2144 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2145 					WLCORE_MAX_KLV_TEMPLATES);
2146 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2147 		return -EBUSY;
2148 
2149 	__set_bit(policy, wl->klv_templates_map);
2150 	*idx = policy;
2151 	return 0;
2152 }
2153 
2154 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2155 {
2156 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2157 		return;
2158 
2159 	__clear_bit(*idx, wl->klv_templates_map);
2160 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2161 }
2162 
2163 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
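/* map the interface type (and p2p flag) of a vif to a firmware role type */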
2164 {
2165 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2166 
2167 	switch (wlvif->bss_type) {
2168 	case BSS_TYPE_AP_BSS:
2169 		if (wlvif->p2p)
2170 			return WL1271_ROLE_P2P_GO;
2171 		else if (ieee80211_vif_is_mesh(vif))
2172 			return WL1271_ROLE_MESH_POINT;
2173 		else
2174 			return WL1271_ROLE_AP;
2175 
2176 	case BSS_TYPE_STA_BSS:
2177 		if (wlvif->p2p)
2178 			return WL1271_ROLE_P2P_CL;
2179 		else
2180 			return WL1271_ROLE_STA;
2181 
2182 	case BSS_TYPE_IBSS:
2183 		return WL1271_ROLE_IBSS;
2184 
2185 	default:
2186 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2187 	}
2188 	return WL12XX_INVALID_ROLE_TYPE;
2189 }
2190 
2191 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
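/*
 * Reset the per-vif driver state and pre-allocate the firmware resources
 * (rate policies and, for sta/ibss, a keep-alive template) the role needs.
 */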
2192 {
2193 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2194 	int i;
2195 
2196 	/* clear everything but the persistent data */
2197 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2198 
2199 	switch (ieee80211_vif_type_p2p(vif)) {
2200 	case NL80211_IFTYPE_P2P_CLIENT:
2201 		wlvif->p2p = 1;
2202 		/* fall-through */
2203 	case NL80211_IFTYPE_STATION:
2204 	case NL80211_IFTYPE_P2P_DEVICE:
2205 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2206 		break;
2207 	case NL80211_IFTYPE_ADHOC:
2208 		wlvif->bss_type = BSS_TYPE_IBSS;
2209 		break;
2210 	case NL80211_IFTYPE_P2P_GO:
2211 		wlvif->p2p = 1;
2212 		/* fall-through */
2213 	case NL80211_IFTYPE_AP:
2214 	case NL80211_IFTYPE_MESH_POINT:
2215 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2216 		break;
2217 	default:
2218 		wlvif->bss_type = MAX_BSS_TYPE;
2219 		return -EOPNOTSUPP;
2220 	}
2221 
2222 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2223 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2224 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2225 
2226 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2227 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2228 		/* init sta/ibss data */
2229 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2230 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2231 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2232 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2233 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2234 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2235 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2236 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2237 	} else {
2238 		/* init ap data */
2239 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2240 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2241 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2242 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2243 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2244 			wl12xx_allocate_rate_policy(wl,
2245 						&wlvif->ap.ucast_rate_idx[i]);
2246 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2247 		/*
2248 		 * TODO: check if basic_rate shouldn't be
2249 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2250 		 * instead (the same thing for STA above).
2251 		 */
2252 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2253 		/* TODO: this seems to be used only for STA, check it */
2254 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2255 	}
2256 
2257 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2258 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2259 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2260 
2261 	/*
2262 	 * mac80211 configures some values globally, while we treat them
2263 	 * per-interface. Thus, on init, we have to copy them from wl.
2264 	 */
2265 	wlvif->band = wl->band;
2266 	wlvif->channel = wl->channel;
2267 	wlvif->power_level = wl->power_level;
2268 	wlvif->channel_type = wl->channel_type;
2269 
2270 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2271 		  wl1271_rx_streaming_enable_work);
2272 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2273 		  wl1271_rx_streaming_disable_work);
2274 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2275 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2276 			  wlcore_channel_switch_work);
2277 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2278 			  wlcore_connection_loss_work);
2279 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2280 			  wlcore_pending_auth_complete_work);
2281 	INIT_LIST_HEAD(&wlvif->list);
2282 
2283 	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2284 		    (unsigned long) wlvif);
2285 	return 0;
2286 }
2287 
2288 static int wl12xx_init_fw(struct wl1271 *wl)
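/*
 * Power the chip on and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times before giving up.
 */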
2289 {
2290 	int retries = WL1271_BOOT_RETRIES;
2291 	bool booted = false;
2292 	struct wiphy *wiphy = wl->hw->wiphy;
2293 	int ret;
2294 
2295 	while (retries) {
2296 		retries--;
2297 		ret = wl12xx_chip_wakeup(wl, false);
2298 		if (ret < 0)
2299 			goto power_off;
2300 
2301 		ret = wl->ops->boot(wl);
2302 		if (ret < 0)
2303 			goto power_off;
2304 
2305 		ret = wl1271_hw_init(wl);
2306 		if (ret < 0)
2307 			goto irq_disable;
2308 
2309 		booted = true;
2310 		break;
2311 
2312 irq_disable:
2313 		mutex_unlock(&wl->mutex);
2314 		/* Unlocking the mutex in the middle of handling is
2315 		   inherently unsafe. In this case we deem it safe to do,
2316 		   because we need to let any possibly pending IRQ out of
2317 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2318 		   work function will not do anything.) Also, any other
2319 		   possible concurrent operations will fail due to the
2320 		   current state, hence the wl1271 struct should be safe. */
2321 		wlcore_disable_interrupts(wl);
2322 		wl1271_flush_deferred_work(wl);
2323 		cancel_work_sync(&wl->netstack_work);
2324 		mutex_lock(&wl->mutex);
2325 power_off:
2326 		wl1271_power_off(wl);
2327 	}
2328 
2329 	if (!booted) {
2330 		wl1271_error("firmware boot failed despite %d retries",
2331 			     WL1271_BOOT_RETRIES);
2332 		goto out;
2333 	}
2334 
2335 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2336 
2337 	/* update hw/fw version info in wiphy struct */
2338 	wiphy->hw_version = wl->chip.id;
2339 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2340 		sizeof(wiphy->fw_version));
2341 
2342 	/*
2343 	 * Now we know if 11a is supported (info from the NVS), so disable
2344 	 * 11a channels if not supported
2345 	 */
2346 	if (!wl->enable_11a)
2347 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2348 
2349 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2350 		     wl->enable_11a ? "" : "not ");
2351 
2352 	wl->state = WLCORE_STATE_ON;
2353 out:
2354 	return ret;
2355 }
2356 
2357 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2358 {
2359 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2360 }
2361 
2362 /*
2363  * Check whether a fw switch (i.e. moving from one loaded
2364  * fw to another) is needed. This function is also responsible
2365  * for updating wl->last_vif_count, so it must be called before
2366  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2367  * will be used).
2368  */
2369 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2370 				  struct vif_counter_data vif_counter_data,
2371 				  bool add)
2372 {
2373 	enum wl12xx_fw_type current_fw = wl->fw_type;
2374 	u8 vif_count = vif_counter_data.counter;
2375 
2376 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2377 		return false;
2378 
2379 	/* increase the vif count if this is a new vif */
2380 	if (add && !vif_counter_data.cur_vif_running)
2381 		vif_count++;
2382 
2383 	wl->last_vif_count = vif_count;
2384 
2385 	/* no need for fw change if the device is OFF */
2386 	if (wl->state == WLCORE_STATE_OFF)
2387 		return false;
2388 
2389 	/* no need for fw change if a single fw is used */
2390 	if (!wl->mr_fw_name)
2391 		return false;
2392 
2393 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2394 		return true;
2395 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2396 		return true;
2397 
2398 	return false;
2399 }
2400 
2401 /*
2402  * Enter "forced psm". Make sure the sta is in psm against the ap,
2403  * to make the fw switch a bit more disconnection-persistent.
2404  */
2405 static void wl12xx_force_active_psm(struct wl1271 *wl)
2406 {
2407 	struct wl12xx_vif *wlvif;
2408 
2409 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2410 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2411 	}
2412 }
2413 
2414 struct wlcore_hw_queue_iter_data {
2415 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2416 	/* current vif */
2417 	struct ieee80211_vif *vif;
2418 	/* is the current vif among those iterated */
2419 	bool cur_running;
2420 };
2421 
2422 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2423 				 struct ieee80211_vif *vif)
2424 {
2425 	struct wlcore_hw_queue_iter_data *iter_data = data;
2426 
2427 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2428 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2429 		return;
2430 
2431 	if (iter_data->cur_running || vif == iter_data->vif) {
2432 		iter_data->cur_running = true;
2433 		return;
2434 	}
2435 
2436 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2437 }
2438 
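/*
 * Each vif owns a contiguous block of NUM_TX_QUEUES mac80211 hw queues.
 * Pick a free block by marking the blocks used by already-running
 * interfaces, unless the vif is being re-added (resume/recovery), in which
 * case its previous base is reused.
 */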
2439 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2440 					 struct wl12xx_vif *wlvif)
2441 {
2442 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2443 	struct wlcore_hw_queue_iter_data iter_data = {};
2444 	int i, q_base;
2445 
2446 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2447 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2448 		return 0;
2449 	}
2450 
2451 	iter_data.vif = vif;
2452 
2453 	/* mark all bits taken by active interfaces */
2454 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2455 					IEEE80211_IFACE_ITER_RESUME_ALL,
2456 					wlcore_hw_queue_iter, &iter_data);
2457 
2458 	/* the current vif is already running in mac80211 (resume/recovery) */
2459 	if (iter_data.cur_running) {
2460 		wlvif->hw_queue_base = vif->hw_queue[0];
2461 		wl1271_debug(DEBUG_MAC80211,
2462 			     "using pre-allocated hw queue base %d",
2463 			     wlvif->hw_queue_base);
2464 
2465 		/* the interface type might have changed */
2466 		goto adjust_cab_queue;
2467 	}
2468 
2469 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2470 				     WLCORE_NUM_MAC_ADDRESSES);
2471 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2472 		return -EBUSY;
2473 
2474 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2475 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2476 		     wlvif->hw_queue_base);
2477 
2478 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2479 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2480 		/* register hw queues in mac80211 */
2481 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2482 	}
2483 
2484 adjust_cab_queue:
2485 	/* the last places are reserved for cab queues per interface */
2486 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2487 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2488 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2489 	else
2490 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2491 
2492 	return 0;
2493 }
2494 
2495 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2496 				   struct ieee80211_vif *vif)
2497 {
2498 	struct wl1271 *wl = hw->priv;
2499 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2500 	struct vif_counter_data vif_count;
2501 	int ret = 0;
2502 	u8 role_type;
2503 
2504 	if (wl->plt) {
2505 		wl1271_error("Adding Interface not allowed while in PLT mode");
2506 		return -EBUSY;
2507 	}
2508 
2509 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2510 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2511 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2512 
2513 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2514 		     ieee80211_vif_type_p2p(vif), vif->addr);
2515 
2516 	wl12xx_get_vif_count(hw, vif, &vif_count);
2517 
2518 	mutex_lock(&wl->mutex);
2519 	ret = wl1271_ps_elp_wakeup(wl);
2520 	if (ret < 0)
2521 		goto out_unlock;
2522 
2523 	/*
2524 	 * In some corner-case HW recovery scenarios it's possible to
2525 	 * get here before __wl1271_op_remove_interface has completed, so
2526 	 * opt out if that is the case.
2527 	 */
2528 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2529 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2530 		ret = -EBUSY;
2531 		goto out;
2532 	}
2533 
2534 
2535 	ret = wl12xx_init_vif_data(wl, vif);
2536 	if (ret < 0)
2537 		goto out;
2538 
2539 	wlvif->wl = wl;
2540 	role_type = wl12xx_get_role_type(wl, wlvif);
2541 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2542 		ret = -EINVAL;
2543 		goto out;
2544 	}
2545 
2546 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2547 	if (ret < 0)
2548 		goto out;
2549 
2550 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2551 		wl12xx_force_active_psm(wl);
2552 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2553 		mutex_unlock(&wl->mutex);
2554 		wl1271_recovery_work(&wl->recovery_work);
2555 		return 0;
2556 	}
2557 
2558 	/*
2559 	 * TODO: after the nvs issue is solved, move this block
2560 	 * to start(), and make sure here the driver is ON.
2561 	 */
2562 	if (wl->state == WLCORE_STATE_OFF) {
2563 		/*
2564 		 * we still need this in order to configure the fw
2565 		 * while uploading the nvs
2566 		 */
2567 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2568 
2569 		ret = wl12xx_init_fw(wl);
2570 		if (ret < 0)
2571 			goto out;
2572 	}
2573 
2574 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2575 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2576 					     role_type, &wlvif->role_id);
2577 		if (ret < 0)
2578 			goto out;
2579 
2580 		ret = wl1271_init_vif_specific(wl, vif);
2581 		if (ret < 0)
2582 			goto out;
2583 
2584 	} else {
2585 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2586 					     &wlvif->dev_role_id);
2587 		if (ret < 0)
2588 			goto out;
2589 
2590 		/* needed mainly for configuring rate policies */
2591 		ret = wl1271_sta_hw_init(wl, wlvif);
2592 		if (ret < 0)
2593 			goto out;
2594 	}
2595 
2596 	list_add(&wlvif->list, &wl->wlvif_list);
2597 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2598 
2599 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2600 		wl->ap_count++;
2601 	else
2602 		wl->sta_count++;
2603 out:
2604 	wl1271_ps_elp_sleep(wl);
2605 out_unlock:
2606 	mutex_unlock(&wl->mutex);
2607 
2608 	return ret;
2609 }
2610 
2611 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2612 					 struct ieee80211_vif *vif,
2613 					 bool reset_tx_queues)
2614 {
2615 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2616 	int i, ret;
2617 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2618 
2619 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2620 
2621 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2622 		return;
2623 
2624 	/* because of hardware recovery, we may get here twice */
2625 	if (wl->state == WLCORE_STATE_OFF)
2626 		return;
2627 
2628 	wl1271_info("down");
2629 
2630 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2631 	    wl->scan_wlvif == wlvif) {
2632 		struct cfg80211_scan_info info = {
2633 			.aborted = true,
2634 		};
2635 
2636 		/*
2637 		 * Rearm the tx watchdog just before idling scan. This
2638 		 * prevents just-finished scans from triggering the watchdog
2639 		 */
2640 		wl12xx_rearm_tx_watchdog_locked(wl);
2641 
2642 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2643 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2644 		wl->scan_wlvif = NULL;
2645 		wl->scan.req = NULL;
2646 		ieee80211_scan_completed(wl->hw, &info);
2647 	}
2648 
2649 	if (wl->sched_vif == wlvif)
2650 		wl->sched_vif = NULL;
2651 
2652 	if (wl->roc_vif == vif) {
2653 		wl->roc_vif = NULL;
2654 		ieee80211_remain_on_channel_expired(wl->hw);
2655 	}
2656 
2657 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2658 		/* disable active roles */
2659 		ret = wl1271_ps_elp_wakeup(wl);
2660 		if (ret < 0)
2661 			goto deinit;
2662 
2663 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2664 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2665 			if (wl12xx_dev_role_started(wlvif))
2666 				wl12xx_stop_dev(wl, wlvif);
2667 		}
2668 
2669 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2670 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2671 			if (ret < 0)
2672 				goto deinit;
2673 		} else {
2674 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2675 			if (ret < 0)
2676 				goto deinit;
2677 		}
2678 
2679 		wl1271_ps_elp_sleep(wl);
2680 	}
2681 deinit:
2682 	wl12xx_tx_reset_wlvif(wl, wlvif);
2683 
2684 	/* clear all hlids (except system_hlid) */
2685 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2686 
2687 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2688 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2689 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2690 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2691 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2692 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2693 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2694 	} else {
2695 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2696 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2697 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2698 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2699 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2700 			wl12xx_free_rate_policy(wl,
2701 						&wlvif->ap.ucast_rate_idx[i]);
2702 		wl1271_free_ap_keys(wl, wlvif);
2703 	}
2704 
2705 	dev_kfree_skb(wlvif->probereq);
2706 	wlvif->probereq = NULL;
2707 	if (wl->last_wlvif == wlvif)
2708 		wl->last_wlvif = NULL;
2709 	list_del(&wlvif->list);
2710 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2711 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2712 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2713 
2714 	if (is_ap)
2715 		wl->ap_count--;
2716 	else
2717 		wl->sta_count--;
2718 
2719 	/*
2720 	 * Last AP removed, but stations remain: configure sleep auth according
2721 	 * to the STA setting. Don't do this on unintended recovery.
2722 	 */
2723 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2724 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2725 		goto unlock;
2726 
2727 	if (wl->ap_count == 0 && is_ap) {
2728 		/* mask ap events */
2729 		wl->event_mask &= ~wl->ap_event_mask;
2730 		wl1271_event_unmask(wl);
2731 	}
2732 
2733 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2734 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2735 		/* Configure power according to the debugfs setting */
2736 		if (sta_auth != WL1271_PSM_ILLEGAL)
2737 			wl1271_acx_sleep_auth(wl, sta_auth);
2738 		/* Configure for ELP power saving */
2739 		else
2740 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2741 	}
2742 
2743 unlock:
2744 	mutex_unlock(&wl->mutex);
2745 
2746 	del_timer_sync(&wlvif->rx_streaming_timer);
2747 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2748 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2749 	cancel_work_sync(&wlvif->rc_update_work);
2750 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2751 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2752 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2753 
2754 	mutex_lock(&wl->mutex);
2755 }
2756 
2757 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2758 				       struct ieee80211_vif *vif)
2759 {
2760 	struct wl1271 *wl = hw->priv;
2761 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2762 	struct wl12xx_vif *iter;
2763 	struct vif_counter_data vif_count;
2764 
2765 	wl12xx_get_vif_count(hw, vif, &vif_count);
2766 	mutex_lock(&wl->mutex);
2767 
2768 	if (wl->state == WLCORE_STATE_OFF ||
2769 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2770 		goto out;
2771 
2772 	/*
2773 	 * wl->vif can be null here if someone shuts down the interface
2774 	 * just when hardware recovery has been started.
2775 	 */
2776 	wl12xx_for_each_wlvif(wl, iter) {
2777 		if (iter != wlvif)
2778 			continue;
2779 
2780 		__wl1271_op_remove_interface(wl, vif, true);
2781 		break;
2782 	}
2783 	WARN_ON(iter != wlvif);
2784 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2785 		wl12xx_force_active_psm(wl);
2786 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2787 		wl12xx_queue_recovery_work(wl);
2788 	}
2789 out:
2790 	mutex_unlock(&wl->mutex);
2791 }
2792 
2793 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2794 				      struct ieee80211_vif *vif,
2795 				      enum nl80211_iftype new_type, bool p2p)
2796 {
2797 	struct wl1271 *wl = hw->priv;
2798 	int ret;
2799 
2800 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2801 	wl1271_op_remove_interface(hw, vif);
2802 
2803 	vif->type = new_type;
2804 	vif->p2p = p2p;
2805 	ret = wl1271_op_add_interface(hw, vif);
2806 
2807 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2808 	return ret;
2809 }
2810 
2811 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2812 {
2813 	int ret;
2814 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2815 
2816 	/*
2817 	 * One of the side effects of the JOIN command is that is clears
2818 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2819 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2820 	 * Currently the only valid scenario for JOIN during association
2821 	 * is on roaming, in which case we will also be given new keys.
2822 	 * Keep the below message for now, unless it starts bothering
2823 	 * users who really like to roam a lot :)
2824 	 */
2825 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2826 		wl1271_info("JOIN while associated.");
2827 
2828 	/* clear encryption type */
2829 	wlvif->encryption_type = KEY_NONE;
2830 
2831 	if (is_ibss)
2832 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2833 	else {
2834 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2835 			/*
2836 			 * TODO: this is an ugly workaround for wl12xx fw
2837 			 * bug - we are not able to tx/rx after the first
2838 			 * start_sta, so make dummy start+stop calls,
2839 			 * and then call start_sta again.
2840 			 * this should be fixed in the fw.
2841 			 */
2842 			wl12xx_cmd_role_start_sta(wl, wlvif);
2843 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2844 		}
2845 
2846 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2847 	}
2848 
2849 	return ret;
2850 }
2851 
2852 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
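/* copy the SSID element found at the given IE offset into the vif data */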
2853 			    int offset)
2854 {
2855 	u8 ssid_len;
2856 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2857 					 skb->len - offset);
2858 
2859 	if (!ptr) {
2860 		wl1271_error("No SSID in IEs!");
2861 		return -ENOENT;
2862 	}
2863 
2864 	ssid_len = ptr[1];
2865 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2866 		wl1271_error("SSID is too long!");
2867 		return -EINVAL;
2868 	}
2869 
2870 	wlvif->ssid_len = ssid_len;
2871 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2872 	return 0;
2873 }
2874 
2875 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2876 {
2877 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2878 	struct sk_buff *skb;
2879 	int ieoffset;
2880 
2881 	/* we currently only support setting the ssid from the ap probe req */
2882 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2883 		return -EINVAL;
2884 
2885 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2886 	if (!skb)
2887 		return -EINVAL;
2888 
2889 	ieoffset = offsetof(struct ieee80211_mgmt,
2890 			    u.probe_req.variable);
2891 	wl1271_ssid_set(wlvif, skb, ieoffset);
2892 	dev_kfree_skb(skb);
2893 
2894 	return 0;
2895 }
2896 
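/*
 * Apply the association state reported by mac80211: cache the AID and
 * beacon parameters, rebuild the connection-maintenance templates and
 * (re)start the firmware keep-alive machinery.
 */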
2897 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2898 			    struct ieee80211_bss_conf *bss_conf,
2899 			    u32 sta_rate_set)
2900 {
2901 	int ieoffset;
2902 	int ret;
2903 
2904 	wlvif->aid = bss_conf->aid;
2905 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2906 	wlvif->beacon_int = bss_conf->beacon_int;
2907 	wlvif->wmm_enabled = bss_conf->qos;
2908 
2909 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2910 
2911 	/*
2912 	 * with wl1271, we don't need to update the
2913 	 * beacon_int and dtim_period, because the firmware
2914 	 * updates them by itself when the first beacon is
2915 	 * received after a join.
2916 	 */
2917 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2918 	if (ret < 0)
2919 		return ret;
2920 
2921 	/*
2922 	 * Get a template for hardware connection maintenance
2923 	 */
2924 	dev_kfree_skb(wlvif->probereq);
2925 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2926 							wlvif,
2927 							NULL);
2928 	ieoffset = offsetof(struct ieee80211_mgmt,
2929 			    u.probe_req.variable);
2930 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2931 
2932 	/* enable the connection monitoring feature */
2933 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2934 	if (ret < 0)
2935 		return ret;
2936 
2937 	/*
2938 	 * The join command disables the keep-alive mode, shuts down its process,
2939 	 * and also clears the template config, so we need to reset it all after
2940 	 * the join. The acx_aid starts the keep-alive process, and the order
2941 	 * of the commands below is relevant.
2942 	 */
2943 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2944 	if (ret < 0)
2945 		return ret;
2946 
2947 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2948 	if (ret < 0)
2949 		return ret;
2950 
2951 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2952 	if (ret < 0)
2953 		return ret;
2954 
2955 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2956 					   wlvif->sta.klv_template_id,
2957 					   ACX_KEEP_ALIVE_TPL_VALID);
2958 	if (ret < 0)
2959 		return ret;
2960 
2961 	/*
2962 	 * The default fw psm configuration is AUTO, while mac80211 default
2963 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2964 	 */
2965 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2966 	if (ret < 0)
2967 		return ret;
2968 
2969 	if (sta_rate_set) {
2970 		wlvif->rate_set =
2971 			wl1271_tx_enabled_rates_get(wl,
2972 						    sta_rate_set,
2973 						    wlvif->band);
2974 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2975 		if (ret < 0)
2976 			return ret;
2977 	}
2978 
2979 	return ret;
2980 }
2981 
2982 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2983 {
2984 	int ret;
2985 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2986 
2987 	/* make sure we are associated (sta) */
2988 	if (sta &&
2989 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2990 		return false;
2991 
2992 	/* make sure we are joined (ibss) */
2993 	if (!sta &&
2994 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2995 		return false;
2996 
2997 	if (sta) {
2998 		/* use defaults when not associated */
2999 		wlvif->aid = 0;
3000 
3001 		/* free probe-request template */
3002 		dev_kfree_skb(wlvif->probereq);
3003 		wlvif->probereq = NULL;
3004 
3005 		/* disable connection monitor features */
3006 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3007 		if (ret < 0)
3008 			return ret;
3009 
3010 		/* Disable the keep-alive feature */
3011 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3012 		if (ret < 0)
3013 			return ret;
3014 
3015 		/* disable beacon filtering */
3016 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3017 		if (ret < 0)
3018 			return ret;
3019 	}
3020 
3021 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3022 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3023 
3024 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3025 		ieee80211_chswitch_done(vif, false);
3026 		cancel_delayed_work(&wlvif->channel_switch_work);
3027 	}
3028 
3029 	/* invalidate keep-alive template */
3030 	wl1271_acx_keep_alive_config(wl, wlvif,
3031 				     wlvif->sta.klv_template_id,
3032 				     ACX_KEEP_ALIVE_TPL_INVALID);
3033 
3034 	return 0;
3035 }
3036 
3037 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3038 {
3039 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3040 	wlvif->rate_set = wlvif->basic_rate_set;
3041 }
3042 
3043 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3044 				   bool idle)
3045 {
3046 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3047 
3048 	if (idle == cur_idle)
3049 		return;
3050 
3051 	if (idle) {
3052 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3053 	} else {
3054 		/* The current firmware only supports sched_scan in idle */
3055 		if (wl->sched_vif == wlvif)
3056 			wl->ops->sched_scan_stop(wl, wlvif);
3057 
3058 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3059 	}
3060 }
3061 
3062 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3063 			     struct ieee80211_conf *conf, u32 changed)
3064 {
3065 	int ret;
3066 
3067 	if (wlcore_is_p2p_mgmt(wlvif))
3068 		return 0;
3069 
3070 	if (conf->power_level != wlvif->power_level) {
3071 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3072 		if (ret < 0)
3073 			return ret;
3074 
3075 		wlvif->power_level = conf->power_level;
3076 	}
3077 
3078 	return 0;
3079 }
3080 
3081 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3082 {
3083 	struct wl1271 *wl = hw->priv;
3084 	struct wl12xx_vif *wlvif;
3085 	struct ieee80211_conf *conf = &hw->conf;
3086 	int ret = 0;
3087 
3088 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3089 		     " changed 0x%x",
3090 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3091 		     conf->power_level,
3092 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3093 			 changed);
3094 
3095 	mutex_lock(&wl->mutex);
3096 
3097 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3098 		wl->power_level = conf->power_level;
3099 
3100 	if (unlikely(wl->state != WLCORE_STATE_ON))
3101 		goto out;
3102 
3103 	ret = wl1271_ps_elp_wakeup(wl);
3104 	if (ret < 0)
3105 		goto out;
3106 
3107 	/* configure each interface */
3108 	wl12xx_for_each_wlvif(wl, wlvif) {
3109 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3110 		if (ret < 0)
3111 			goto out_sleep;
3112 	}
3113 
3114 out_sleep:
3115 	wl1271_ps_elp_sleep(wl);
3116 
3117 out:
3118 	mutex_unlock(&wl->mutex);
3119 
3120 	return ret;
3121 }
3122 
3123 struct wl1271_filter_params {
3124 	bool enabled;
3125 	int mc_list_length;
3126 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3127 };
3128 
3129 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
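/*
 * Snapshot the multicast list into a driver-private structure; mac80211
 * passes the returned cookie back to us in the multicast argument of
 * configure_filter() below.
 */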
3130 				       struct netdev_hw_addr_list *mc_list)
3131 {
3132 	struct wl1271_filter_params *fp;
3133 	struct netdev_hw_addr *ha;
3134 
3135 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3136 	if (!fp) {
3137 		wl1271_error("Out of memory setting filters.");
3138 		return 0;
3139 	}
3140 
3141 	/* update multicast filtering parameters */
3142 	fp->mc_list_length = 0;
3143 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3144 		fp->enabled = false;
3145 	} else {
3146 		fp->enabled = true;
3147 		netdev_hw_addr_list_for_each(ha, mc_list) {
3148 			memcpy(fp->mc_list[fp->mc_list_length],
3149 					ha->addr, ETH_ALEN);
3150 			fp->mc_list_length++;
3151 		}
3152 	}
3153 
3154 	return (u64)(unsigned long)fp;
3155 }
3156 
3157 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3158 				  FIF_FCSFAIL | \
3159 				  FIF_BCN_PRBRESP_PROMISC | \
3160 				  FIF_CONTROL | \
3161 				  FIF_OTHER_BSS)
3162 
3163 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3164 				       unsigned int changed,
3165 				       unsigned int *total, u64 multicast)
3166 {
3167 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3168 	struct wl1271 *wl = hw->priv;
3169 	struct wl12xx_vif *wlvif;
3170 
3171 	int ret;
3172 
3173 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3174 		     " total %x", changed, *total);
3175 
3176 	mutex_lock(&wl->mutex);
3177 
3178 	*total &= WL1271_SUPPORTED_FILTERS;
3179 	changed &= WL1271_SUPPORTED_FILTERS;
3180 
3181 	if (unlikely(wl->state != WLCORE_STATE_ON))
3182 		goto out;
3183 
3184 	ret = wl1271_ps_elp_wakeup(wl);
3185 	if (ret < 0)
3186 		goto out;
3187 
3188 	wl12xx_for_each_wlvif(wl, wlvif) {
3189 		if (wlcore_is_p2p_mgmt(wlvif))
3190 			continue;
3191 
3192 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3193 			if (*total & FIF_ALLMULTI)
3194 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3195 								   false,
3196 								   NULL, 0);
3197 			else if (fp)
3198 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3199 							fp->enabled,
3200 							fp->mc_list,
3201 							fp->mc_list_length);
3202 			if (ret < 0)
3203 				goto out_sleep;
3204 		}
3205 
3206 		/*
3207 		 * If the interface is in AP mode and allmulticast was requested,
3208 		 * disable the firmware filters so that all multicast packets are
3209 		 * passed. This is mandatory for mDNS-based discovery protocols.
3210 		 */
3211 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3212 			if (*total & FIF_ALLMULTI) {
3213 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3214 							false,
3215 							NULL, 0);
3216 				if (ret < 0)
3217 					goto out_sleep;
3218 			}
3219 		}
3220 	}
3221 
3222 	/*
3223 	 * The fw doesn't provide an API to configure the filters. Instead,
3224 	 * the filter configuration is based on the active roles / ROC
3225 	 * state.
3226 	 */
3227 
3228 out_sleep:
3229 	wl1271_ps_elp_sleep(wl);
3230 
3231 out:
3232 	mutex_unlock(&wl->mutex);
3233 	kfree(fp);
3234 }
3235 
3236 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
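/*
 * Keys configured by mac80211 before the AP role is started cannot be
 * programmed into the firmware yet, so they are recorded here and applied
 * later by wl1271_ap_init_hwenc().
 */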
3237 				u8 id, u8 key_type, u8 key_size,
3238 				const u8 *key, u8 hlid, u32 tx_seq_32,
3239 				u16 tx_seq_16)
3240 {
3241 	struct wl1271_ap_key *ap_key;
3242 	int i;
3243 
3244 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3245 
3246 	if (key_size > MAX_KEY_SIZE)
3247 		return -EINVAL;
3248 
3249 	/*
3250 	 * Find next free entry in ap_keys. Also check we are not replacing
3251 	 * an existing key.
3252 	 */
3253 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3254 		if (wlvif->ap.recorded_keys[i] == NULL)
3255 			break;
3256 
3257 		if (wlvif->ap.recorded_keys[i]->id == id) {
3258 			wl1271_warning("trying to record key replacement");
3259 			return -EINVAL;
3260 		}
3261 	}
3262 
3263 	if (i == MAX_NUM_KEYS)
3264 		return -EBUSY;
3265 
3266 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3267 	if (!ap_key)
3268 		return -ENOMEM;
3269 
3270 	ap_key->id = id;
3271 	ap_key->key_type = key_type;
3272 	ap_key->key_size = key_size;
3273 	memcpy(ap_key->key, key, key_size);
3274 	ap_key->hlid = hlid;
3275 	ap_key->tx_seq_32 = tx_seq_32;
3276 	ap_key->tx_seq_16 = tx_seq_16;
3277 
3278 	wlvif->ap.recorded_keys[i] = ap_key;
3279 	return 0;
3280 }
3281 
3282 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3283 {
3284 	int i;
3285 
3286 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3287 		kfree(wlvif->ap.recorded_keys[i]);
3288 		wlvif->ap.recorded_keys[i] = NULL;
3289 	}
3290 }
3291 
3292 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3293 {
3294 	int i, ret = 0;
3295 	struct wl1271_ap_key *key;
3296 	bool wep_key_added = false;
3297 
3298 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3299 		u8 hlid;
3300 		if (wlvif->ap.recorded_keys[i] == NULL)
3301 			break;
3302 
3303 		key = wlvif->ap.recorded_keys[i];
3304 		hlid = key->hlid;
3305 		if (hlid == WL12XX_INVALID_LINK_ID)
3306 			hlid = wlvif->ap.bcast_hlid;
3307 
3308 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3309 					    key->id, key->key_type,
3310 					    key->key_size, key->key,
3311 					    hlid, key->tx_seq_32,
3312 					    key->tx_seq_16);
3313 		if (ret < 0)
3314 			goto out;
3315 
3316 		if (key->key_type == KEY_WEP)
3317 			wep_key_added = true;
3318 	}
3319 
3320 	if (wep_key_added) {
3321 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3322 						     wlvif->ap.bcast_hlid);
3323 		if (ret < 0)
3324 			goto out;
3325 	}
3326 
3327 out:
3328 	wl1271_free_ap_keys(wl, wlvif);
3329 	return ret;
3330 }
3331 
3332 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3333 		       u16 action, u8 id, u8 key_type,
3334 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3335 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3336 {
3337 	int ret;
3338 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3339 
3340 	if (is_ap) {
3341 		struct wl1271_station *wl_sta;
3342 		u8 hlid;
3343 
3344 		if (sta) {
3345 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3346 			hlid = wl_sta->hlid;
3347 		} else {
3348 			hlid = wlvif->ap.bcast_hlid;
3349 		}
3350 
3351 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3352 			/*
3353 			 * We do not support removing keys after AP shutdown.
3354 			 * Pretend we do to make mac80211 happy.
3355 			 */
3356 			if (action != KEY_ADD_OR_REPLACE)
3357 				return 0;
3358 
3359 			ret = wl1271_record_ap_key(wl, wlvif, id,
3360 					     key_type, key_size,
3361 					     key, hlid, tx_seq_32,
3362 					     tx_seq_16);
3363 		} else {
3364 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3365 					     id, key_type, key_size,
3366 					     key, hlid, tx_seq_32,
3367 					     tx_seq_16);
3368 		}
3369 
3370 		if (ret < 0)
3371 			return ret;
3372 	} else {
3373 		const u8 *addr;
3374 		static const u8 bcast_addr[ETH_ALEN] = {
3375 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3376 		};
3377 
3378 		addr = sta ? sta->addr : bcast_addr;
3379 
3380 		if (is_zero_ether_addr(addr)) {
3381 			/* We dont support TX only encryption */
3382 			return -EOPNOTSUPP;
3383 		}
3384 
3385 		/* The wl1271 does not allow removing unicast keys - they
3386 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3387 		   request silently, as we don't want mac80211 to emit
3388 		   an error message. */
3389 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3390 			return 0;
3391 
3392 		/* don't remove key if hlid was already deleted */
3393 		if (action == KEY_REMOVE &&
3394 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3395 			return 0;
3396 
3397 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3398 					     id, key_type, key_size,
3399 					     key, addr, tx_seq_32,
3400 					     tx_seq_16);
3401 		if (ret < 0)
3402 			return ret;
3403 
3404 	}
3405 
3406 	return 0;
3407 }
3408 
3409 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3410 			     struct ieee80211_vif *vif,
3411 			     struct ieee80211_sta *sta,
3412 			     struct ieee80211_key_conf *key_conf)
3413 {
3414 	struct wl1271 *wl = hw->priv;
3415 	int ret;
3416 	bool might_change_spare =
3417 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3418 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3419 
3420 	if (might_change_spare) {
3421 		/*
3422 		 * stop the queues and flush to ensure the next packets are
3423 		 * in sync with FW spare block accounting
3424 		 */
3425 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3426 		wl1271_tx_flush(wl);
3427 	}
3428 
3429 	mutex_lock(&wl->mutex);
3430 
3431 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3432 		ret = -EAGAIN;
3433 		goto out_wake_queues;
3434 	}
3435 
3436 	ret = wl1271_ps_elp_wakeup(wl);
3437 	if (ret < 0)
3438 		goto out_wake_queues;
3439 
3440 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3441 
3442 	wl1271_ps_elp_sleep(wl);
3443 
3444 out_wake_queues:
3445 	if (might_change_spare)
3446 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3447 
3448 	mutex_unlock(&wl->mutex);
3449 
3450 	return ret;
3451 }
3452 
3453 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3454 		   struct ieee80211_vif *vif,
3455 		   struct ieee80211_sta *sta,
3456 		   struct ieee80211_key_conf *key_conf)
3457 {
3458 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3459 	int ret;
3460 	u32 tx_seq_32 = 0;
3461 	u16 tx_seq_16 = 0;
3462 	u8 key_type;
3463 	u8 hlid;
3464 
3465 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3466 
3467 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3468 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3469 		     key_conf->cipher, key_conf->keyidx,
3470 		     key_conf->keylen, key_conf->flags);
3471 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3472 
3473 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3474 		if (sta) {
3475 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3476 			hlid = wl_sta->hlid;
3477 		} else {
3478 			hlid = wlvif->ap.bcast_hlid;
3479 		}
3480 	else
3481 		hlid = wlvif->sta.hlid;
3482 
3483 	if (hlid != WL12XX_INVALID_LINK_ID) {
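	/*
	 * Seed the key's TX security sequence number from the link's
	 * total_freed_pkts counter, split into its high 32 and low 16 bits.
	 */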
3484 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3485 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3486 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3487 	}
3488 
3489 	switch (key_conf->cipher) {
3490 	case WLAN_CIPHER_SUITE_WEP40:
3491 	case WLAN_CIPHER_SUITE_WEP104:
3492 		key_type = KEY_WEP;
3493 
3494 		key_conf->hw_key_idx = key_conf->keyidx;
3495 		break;
3496 	case WLAN_CIPHER_SUITE_TKIP:
3497 		key_type = KEY_TKIP;
3498 		key_conf->hw_key_idx = key_conf->keyidx;
3499 		break;
3500 	case WLAN_CIPHER_SUITE_CCMP:
3501 		key_type = KEY_AES;
3502 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3503 		break;
3504 	case WL1271_CIPHER_SUITE_GEM:
3505 		key_type = KEY_GEM;
3506 		break;
3507 	default:
3508 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3509 
3510 		return -EOPNOTSUPP;
3511 	}
3512 
3513 	switch (cmd) {
3514 	case SET_KEY:
3515 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3516 				 key_conf->keyidx, key_type,
3517 				 key_conf->keylen, key_conf->key,
3518 				 tx_seq_32, tx_seq_16, sta);
3519 		if (ret < 0) {
3520 			wl1271_error("Could not add or replace key");
3521 			return ret;
3522 		}
3523 
3524 		/*
3525 		 * reconfigure the arp response if the unicast (or common)
3526 		 * encryption key type was changed
3527 		 */
3528 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3529 		    (sta || key_type == KEY_WEP) &&
3530 		    wlvif->encryption_type != key_type) {
3531 			wlvif->encryption_type = key_type;
3532 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3533 			if (ret < 0) {
3534 				wl1271_warning("build arp rsp failed: %d", ret);
3535 				return ret;
3536 			}
3537 		}
3538 		break;
3539 
3540 	case DISABLE_KEY:
3541 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3542 				     key_conf->keyidx, key_type,
3543 				     key_conf->keylen, key_conf->key,
3544 				     0, 0, sta);
3545 		if (ret < 0) {
3546 			wl1271_error("Could not remove key");
3547 			return ret;
3548 		}
3549 		break;
3550 
3551 	default:
3552 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3553 		return -EOPNOTSUPP;
3554 	}
3555 
3556 	return ret;
3557 }
3558 EXPORT_SYMBOL_GPL(wlcore_set_key);
3559 
3560 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3561 					  struct ieee80211_vif *vif,
3562 					  int key_idx)
3563 {
3564 	struct wl1271 *wl = hw->priv;
3565 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3566 	int ret;
3567 
3568 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3569 		     key_idx);
3570 
3571 	/* we don't handle unsetting of default key */
3572 	if (key_idx == -1)
3573 		return;
3574 
3575 	mutex_lock(&wl->mutex);
3576 
3577 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3578 		ret = -EAGAIN;
3579 		goto out_unlock;
3580 	}
3581 
3582 	ret = wl1271_ps_elp_wakeup(wl);
3583 	if (ret < 0)
3584 		goto out_unlock;
3585 
3586 	wlvif->default_key = key_idx;
3587 
3588 	/* the default WEP key needs to be configured at least once */
3589 	if (wlvif->encryption_type == KEY_WEP) {
3590 		ret = wl12xx_cmd_set_default_wep_key(wl,
3591 				key_idx,
3592 				wlvif->sta.hlid);
3593 		if (ret < 0)
3594 			goto out_sleep;
3595 	}
3596 
3597 out_sleep:
3598 	wl1271_ps_elp_sleep(wl);
3599 
3600 out_unlock:
3601 	mutex_unlock(&wl->mutex);
3602 }
3603 
3604 void wlcore_regdomain_config(struct wl1271 *wl)
3605 {
3606 	int ret;
3607 
3608 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3609 		return;
3610 
3611 	mutex_lock(&wl->mutex);
3612 
3613 	if (unlikely(wl->state != WLCORE_STATE_ON))
3614 		goto out;
3615 
3616 	ret = wl1271_ps_elp_wakeup(wl);
3617 	if (ret < 0)
3618 		goto out;
3619 
3620 	ret = wlcore_cmd_regdomain_config_locked(wl);
3621 	if (ret < 0) {
3622 		wl12xx_queue_recovery_work(wl);
3623 		goto out;
3624 	}
3625 
3626 	wl1271_ps_elp_sleep(wl);
3627 out:
3628 	mutex_unlock(&wl->mutex);
3629 }
3630 
3631 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3632 			     struct ieee80211_vif *vif,
3633 			     struct ieee80211_scan_request *hw_req)
3634 {
3635 	struct cfg80211_scan_request *req = &hw_req->req;
3636 	struct wl1271 *wl = hw->priv;
3637 	int ret;
3638 	u8 *ssid = NULL;
3639 	size_t len = 0;
3640 
3641 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3642 
3643 	if (req->n_ssids) {
3644 		ssid = req->ssids[0].ssid;
3645 		len = req->ssids[0].ssid_len;
3646 	}
3647 
3648 	mutex_lock(&wl->mutex);
3649 
3650 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3651 		/*
3652 		 * We cannot return -EBUSY here because cfg80211 will expect
3653 		 * a call to ieee80211_scan_completed if we do - in this case
3654 		 * there won't be any call.
3655 		 */
3656 		ret = -EAGAIN;
3657 		goto out;
3658 	}
3659 
3660 	ret = wl1271_ps_elp_wakeup(wl);
3661 	if (ret < 0)
3662 		goto out;
3663 
3664 	/* fail if there is any role in ROC */
3665 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3666 		/* don't allow scanning right now */
3667 		ret = -EBUSY;
3668 		goto out_sleep;
3669 	}
3670 
3671 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3672 out_sleep:
3673 	wl1271_ps_elp_sleep(wl);
3674 out:
3675 	mutex_unlock(&wl->mutex);
3676 
3677 	return ret;
3678 }
3679 
3680 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3681 				     struct ieee80211_vif *vif)
3682 {
3683 	struct wl1271 *wl = hw->priv;
3684 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3685 	struct cfg80211_scan_info info = {
3686 		.aborted = true,
3687 	};
3688 	int ret;
3689 
3690 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3691 
3692 	mutex_lock(&wl->mutex);
3693 
3694 	if (unlikely(wl->state != WLCORE_STATE_ON))
3695 		goto out;
3696 
3697 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3698 		goto out;
3699 
3700 	ret = wl1271_ps_elp_wakeup(wl);
3701 	if (ret < 0)
3702 		goto out;
3703 
3704 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3705 		ret = wl->ops->scan_stop(wl, wlvif);
3706 		if (ret < 0)
3707 			goto out_sleep;
3708 	}
3709 
3710 	/*
3711 	 * Rearm the tx watchdog just before idling scan. This
3712 	 * prevents just-finished scans from triggering the watchdog
3713 	 */
3714 	wl12xx_rearm_tx_watchdog_locked(wl);
3715 
3716 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3717 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3718 	wl->scan_wlvif = NULL;
3719 	wl->scan.req = NULL;
3720 	ieee80211_scan_completed(wl->hw, &info);
3721 
3722 out_sleep:
3723 	wl1271_ps_elp_sleep(wl);
3724 out:
3725 	mutex_unlock(&wl->mutex);
3726 
3727 	cancel_delayed_work_sync(&wl->scan_complete_work);
3728 }
3729 
3730 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3731 				      struct ieee80211_vif *vif,
3732 				      struct cfg80211_sched_scan_request *req,
3733 				      struct ieee80211_scan_ies *ies)
3734 {
3735 	struct wl1271 *wl = hw->priv;
3736 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3737 	int ret;
3738 
3739 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3740 
3741 	mutex_lock(&wl->mutex);
3742 
3743 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3744 		ret = -EAGAIN;
3745 		goto out;
3746 	}
3747 
3748 	ret = wl1271_ps_elp_wakeup(wl);
3749 	if (ret < 0)
3750 		goto out;
3751 
3752 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3753 	if (ret < 0)
3754 		goto out_sleep;
3755 
3756 	wl->sched_vif = wlvif;
3757 
3758 out_sleep:
3759 	wl1271_ps_elp_sleep(wl);
3760 out:
3761 	mutex_unlock(&wl->mutex);
3762 	return ret;
3763 }
3764 
3765 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3766 				     struct ieee80211_vif *vif)
3767 {
3768 	struct wl1271 *wl = hw->priv;
3769 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3770 	int ret;
3771 
3772 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3773 
3774 	mutex_lock(&wl->mutex);
3775 
3776 	if (unlikely(wl->state != WLCORE_STATE_ON))
3777 		goto out;
3778 
3779 	ret = wl1271_ps_elp_wakeup(wl);
3780 	if (ret < 0)
3781 		goto out;
3782 
3783 	wl->ops->sched_scan_stop(wl, wlvif);
3784 
3785 	wl1271_ps_elp_sleep(wl);
3786 out:
3787 	mutex_unlock(&wl->mutex);
3788 
3789 	return 0;
3790 }
3791 
3792 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3793 {
3794 	struct wl1271 *wl = hw->priv;
3795 	int ret = 0;
3796 
3797 	mutex_lock(&wl->mutex);
3798 
3799 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3800 		ret = -EAGAIN;
3801 		goto out;
3802 	}
3803 
3804 	ret = wl1271_ps_elp_wakeup(wl);
3805 	if (ret < 0)
3806 		goto out;
3807 
3808 	ret = wl1271_acx_frag_threshold(wl, value);
3809 	if (ret < 0)
3810 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3811 
3812 	wl1271_ps_elp_sleep(wl);
3813 
3814 out:
3815 	mutex_unlock(&wl->mutex);
3816 
3817 	return ret;
3818 }
3819 
3820 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3821 {
3822 	struct wl1271 *wl = hw->priv;
3823 	struct wl12xx_vif *wlvif;
3824 	int ret = 0;
3825 
3826 	mutex_lock(&wl->mutex);
3827 
3828 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3829 		ret = -EAGAIN;
3830 		goto out;
3831 	}
3832 
3833 	ret = wl1271_ps_elp_wakeup(wl);
3834 	if (ret < 0)
3835 		goto out;
3836 
3837 	wl12xx_for_each_wlvif(wl, wlvif) {
3838 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3839 		if (ret < 0)
3840 			wl1271_warning("set rts threshold failed: %d", ret);
3841 	}
3842 	wl1271_ps_elp_sleep(wl);
3843 
3844 out:
3845 	mutex_unlock(&wl->mutex);
3846 
3847 	return ret;
3848 }
3849 
3850 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
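/* strip a single information element (by element ID) out of a template */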
3851 {
3852 	int len;
3853 	const u8 *next, *end = skb->data + skb->len;
3854 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3855 					skb->len - ieoffset);
3856 	if (!ie)
3857 		return;
3858 	len = ie[1] + 2;
3859 	next = ie + len;
3860 	memmove(ie, next, end - next);
3861 	skb_trim(skb, skb->len - len);
3862 }
3863 
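/*
 * Same as wl12xx_remove_ie(), but matches a vendor-specific IE by OUI
 * and OUI type.
 */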
3864 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3865 					    unsigned int oui, u8 oui_type,
3866 					    int ieoffset)
3867 {
3868 	int len;
3869 	const u8 *next, *end = skb->data + skb->len;
3870 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3871 					       skb->data + ieoffset,
3872 					       skb->len - ieoffset);
3873 	if (!ie)
3874 		return;
3875 	len = ie[1] + 2;
3876 	next = ie + len;
3877 	memmove(ie, next, end - next);
3878 	skb_trim(skb, skb->len - len);
3879 }
3880 
3881 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3882 					 struct ieee80211_vif *vif)
3883 {
3884 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3885 	struct sk_buff *skb;
3886 	int ret;
3887 
3888 	skb = ieee80211_proberesp_get(wl->hw, vif);
3889 	if (!skb)
3890 		return -EOPNOTSUPP;
3891 
3892 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3893 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3894 				      skb->data,
3895 				      skb->len, 0,
3896 				      rates);
3897 	dev_kfree_skb(skb);
3898 
3899 	if (ret < 0)
3900 		goto out;
3901 
3902 	wl1271_debug(DEBUG_AP, "probe response updated");
3903 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3904 
3905 out:
3906 	return ret;
3907 }
3908 
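/*
 * Build the AP probe response template for the legacy path. If the vif
 * has no SSID set (hidden SSID in the beacon), the SSID IE from bss_conf
 * is spliced into the template before it is uploaded.
 */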
3909 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3910 					     struct ieee80211_vif *vif,
3911 					     u8 *probe_rsp_data,
3912 					     size_t probe_rsp_len,
3913 					     u32 rates)
3914 {
3915 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3916 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3917 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3918 	int ssid_ie_offset, ie_offset, templ_len;
3919 	const u8 *ptr;
3920 
3921 	/* no need to change probe response if the SSID is set correctly */
3922 	if (wlvif->ssid_len > 0)
3923 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3924 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3925 					       probe_rsp_data,
3926 					       probe_rsp_len, 0,
3927 					       rates);
3928 
3929 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3930 		wl1271_error("probe_rsp template too big");
3931 		return -EINVAL;
3932 	}
3933 
3934 	/* start searching from IE offset */
3935 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3936 
3937 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3938 			       probe_rsp_len - ie_offset);
3939 	if (!ptr) {
3940 		wl1271_error("No SSID in beacon!");
3941 		return -EINVAL;
3942 	}
3943 
3944 	ssid_ie_offset = ptr - probe_rsp_data;
3945 	ptr += (ptr[1] + 2);
3946 
3947 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3948 
3949 	/* insert SSID from bss_conf */
3950 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3951 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3952 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3953 	       bss_conf->ssid, bss_conf->ssid_len);
3954 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3955 
3956 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3957 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3958 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3959 
3960 	return wl1271_cmd_template_set(wl, wlvif->role_id,
3961 				       CMD_TEMPL_AP_PROBE_RESPONSE,
3962 				       probe_rsp_templ,
3963 				       templ_len, 0,
3964 				       rates);
3965 }
3966 
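/* Apply ERP changes (slot time, preamble, CTS protection) to the FW. */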
3967 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3968 				       struct ieee80211_vif *vif,
3969 				       struct ieee80211_bss_conf *bss_conf,
3970 				       u32 changed)
3971 {
3972 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3973 	int ret = 0;
3974 
3975 	if (changed & BSS_CHANGED_ERP_SLOT) {
3976 		if (bss_conf->use_short_slot)
3977 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3978 		else
3979 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3980 		if (ret < 0) {
3981 			wl1271_warning("Set slot time failed %d", ret);
3982 			goto out;
3983 		}
3984 	}
3985 
3986 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3987 		if (bss_conf->use_short_preamble)
3988 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3989 		else
3990 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3991 	}
3992 
3993 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3994 		if (bss_conf->use_cts_prot)
3995 			ret = wl1271_acx_cts_protect(wl, wlvif,
3996 						     CTSPROTECT_ENABLE);
3997 		else
3998 			ret = wl1271_acx_cts_protect(wl, wlvif,
3999 						     CTSPROTECT_DISABLE);
4000 		if (ret < 0) {
4001 			wl1271_warning("Set ctsprotect failed %d", ret);
4002 			goto out;
4003 		}
4004 	}
4005 
4006 out:
4007 	return ret;
4008 }
4009 
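/*
 * Fetch the current beacon from mac80211 and upload it as the beacon
 * template. Unless userspace already set an explicit probe response,
 * a probe-response template is also derived from the same beacon, with
 * the TIM and P2P IEs stripped and the frame control rewritten.
 */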
4010 static int wlcore_set_beacon_template(struct wl1271 *wl,
4011 				      struct ieee80211_vif *vif,
4012 				      bool is_ap)
4013 {
4014 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4015 	struct ieee80211_hdr *hdr;
4016 	u32 min_rate;
4017 	int ret;
4018 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4019 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4020 	u16 tmpl_id;
4021 
4022 	if (!beacon) {
4023 		ret = -EINVAL;
4024 		goto out;
4025 	}
4026 
4027 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4028 
4029 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4030 	if (ret < 0) {
4031 		dev_kfree_skb(beacon);
4032 		goto out;
4033 	}
4034 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4035 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4036 		CMD_TEMPL_BEACON;
4037 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4038 				      beacon->data,
4039 				      beacon->len, 0,
4040 				      min_rate);
4041 	if (ret < 0) {
4042 		dev_kfree_skb(beacon);
4043 		goto out;
4044 	}
4045 
4046 	wlvif->wmm_enabled =
4047 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4048 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4049 					beacon->data + ieoffset,
4050 					beacon->len - ieoffset);
4051 
4052 	/*
4053 	 * In case a probe-response template has already been set explicitly
4054 	 * by userspace, don't derive one from the beacon data.
4055 	 */
4056 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4057 		goto end_bcn;
4058 
4059 	/* remove TIM ie from probe response */
4060 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4061 
4062 	/*
4063 	 * Remove the P2P IE from the probe response.
4064 	 * The FW responds to probe requests that don't include
4065 	 * the P2P IE. Probe requests that do include it are passed up
4066 	 * and answered by the supplicant (the spec forbids
4067 	 * including the P2P IE when responding to probe
4068 	 * requests that didn't include it).
4069 	 */
4070 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4071 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4072 
4073 	hdr = (struct ieee80211_hdr *) beacon->data;
4074 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4075 					 IEEE80211_STYPE_PROBE_RESP);
4076 	if (is_ap)
4077 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4078 							   beacon->data,
4079 							   beacon->len,
4080 							   min_rate);
4081 	else
4082 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4083 					      CMD_TEMPL_PROBE_RESPONSE,
4084 					      beacon->data,
4085 					      beacon->len, 0,
4086 					      min_rate);
4087 end_bcn:
4088 	dev_kfree_skb(beacon);
4089 	if (ret < 0)
4090 		goto out;
4091 
4092 out:
4093 	return ret;
4094 }
4095 
4096 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4097 					  struct ieee80211_vif *vif,
4098 					  struct ieee80211_bss_conf *bss_conf,
4099 					  u32 changed)
4100 {
4101 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4102 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4103 	int ret = 0;
4104 
4105 	if (changed & BSS_CHANGED_BEACON_INT) {
4106 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4107 			bss_conf->beacon_int);
4108 
4109 		wlvif->beacon_int = bss_conf->beacon_int;
4110 	}
4111 
4112 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4113 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4114 
4115 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4116 	}
4117 
4118 	if (changed & BSS_CHANGED_BEACON) {
4119 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4120 		if (ret < 0)
4121 			goto out;
4122 
4123 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4124 				       &wlvif->flags)) {
4125 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4126 			if (ret < 0)
4127 				goto out;
4128 		}
4129 	}
4130 out:
4131 	if (ret != 0)
4132 		wl1271_error("beacon info change failed: %d", ret);
4133 	return ret;
4134 }
4135 
4136 /* AP mode changes */
4137 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4138 				       struct ieee80211_vif *vif,
4139 				       struct ieee80211_bss_conf *bss_conf,
4140 				       u32 changed)
4141 {
4142 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4143 	int ret = 0;
4144 
4145 	if (changed & BSS_CHANGED_BASIC_RATES) {
4146 		u32 rates = bss_conf->basic_rates;
4147 
4148 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4149 								 wlvif->band);
4150 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4151 							wlvif->basic_rate_set);
4152 
4153 		ret = wl1271_init_ap_rates(wl, wlvif);
4154 		if (ret < 0) {
4155 			wl1271_error("AP rate policy change failed %d", ret);
4156 			goto out;
4157 		}
4158 
4159 		ret = wl1271_ap_init_templates(wl, vif);
4160 		if (ret < 0)
4161 			goto out;
4162 
4163 		/* No need to set probe resp template for mesh */
4164 		if (!ieee80211_vif_is_mesh(vif)) {
4165 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4166 							    wlvif->basic_rate,
4167 							    vif);
4168 			if (ret < 0)
4169 				goto out;
4170 		}
4171 
4172 		ret = wlcore_set_beacon_template(wl, vif, true);
4173 		if (ret < 0)
4174 			goto out;
4175 	}
4176 
4177 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4178 	if (ret < 0)
4179 		goto out;
4180 
4181 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4182 		if (bss_conf->enable_beacon) {
4183 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4184 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4185 				if (ret < 0)
4186 					goto out;
4187 
4188 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4189 				if (ret < 0)
4190 					goto out;
4191 
4192 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4193 				wl1271_debug(DEBUG_AP, "started AP");
4194 			}
4195 		} else {
4196 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4197 				/*
4198 				 * AP might be in ROC in case we have just
4199 				 * sent an auth reply; handle it.
4200 				 */
4201 				if (test_bit(wlvif->role_id, wl->roc_map))
4202 					wl12xx_croc(wl, wlvif->role_id);
4203 
4204 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4205 				if (ret < 0)
4206 					goto out;
4207 
4208 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4209 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4210 					  &wlvif->flags);
4211 				wl1271_debug(DEBUG_AP, "stopped AP");
4212 			}
4213 		}
4214 	}
4215 
4216 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4217 	if (ret < 0)
4218 		goto out;
4219 
4220 	/* Handle HT information change */
4221 	if ((changed & BSS_CHANGED_HT) &&
4222 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4223 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4224 					bss_conf->ht_operation_mode);
4225 		if (ret < 0) {
4226 			wl1271_warning("Set ht information failed %d", ret);
4227 			goto out;
4228 		}
4229 	}
4230 
4231 out:
4232 	return;
4233 }
4234 
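/*
 * Apply a new BSSID: refresh the rate policies and the NULL-data
 * templates for the target BSS and mark the vif as in use. Any running
 * sched_scan is stopped first, as it is only supported while
 * disconnected.
 */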
4235 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4236 			    struct ieee80211_bss_conf *bss_conf,
4237 			    u32 sta_rate_set)
4238 {
4239 	u32 rates;
4240 	int ret;
4241 
4242 	wl1271_debug(DEBUG_MAC80211,
4243 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4244 	     bss_conf->bssid, bss_conf->aid,
4245 	     bss_conf->beacon_int,
4246 	     bss_conf->basic_rates, sta_rate_set);
4247 
4248 	wlvif->beacon_int = bss_conf->beacon_int;
4249 	rates = bss_conf->basic_rates;
4250 	wlvif->basic_rate_set =
4251 		wl1271_tx_enabled_rates_get(wl, rates,
4252 					    wlvif->band);
4253 	wlvif->basic_rate =
4254 		wl1271_tx_min_rate_get(wl,
4255 				       wlvif->basic_rate_set);
4256 
4257 	if (sta_rate_set)
4258 		wlvif->rate_set =
4259 			wl1271_tx_enabled_rates_get(wl,
4260 						sta_rate_set,
4261 						wlvif->band);
4262 
4263 	/* we only support sched_scan while not connected */
4264 	if (wl->sched_vif == wlvif)
4265 		wl->ops->sched_scan_stop(wl, wlvif);
4266 
4267 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4268 	if (ret < 0)
4269 		return ret;
4270 
4271 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4272 	if (ret < 0)
4273 		return ret;
4274 
4275 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4276 	if (ret < 0)
4277 		return ret;
4278 
4279 	wlcore_set_ssid(wl, wlvif);
4280 
4281 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4282 
4283 	return 0;
4284 }
4285 
4286 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4287 {
4288 	int ret;
4289 
4290 	/* revert to the minimum rates for the current band */
4291 	wl1271_set_band_rate(wl, wlvif);
4292 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4293 
4294 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4295 	if (ret < 0)
4296 		return ret;
4297 
4298 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4299 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4300 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4301 		if (ret < 0)
4302 			return ret;
4303 	}
4304 
4305 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4306 	return 0;
4307 }
4308 /* STA/IBSS mode changes */
4309 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4310 					struct ieee80211_vif *vif,
4311 					struct ieee80211_bss_conf *bss_conf,
4312 					u32 changed)
4313 {
4314 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4315 	bool do_join = false;
4316 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4317 	bool ibss_joined = false;
4318 	u32 sta_rate_set = 0;
4319 	int ret;
4320 	struct ieee80211_sta *sta;
4321 	bool sta_exists = false;
4322 	struct ieee80211_sta_ht_cap sta_ht_cap;
4323 
4324 	if (is_ibss) {
4325 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4326 						     changed);
4327 		if (ret < 0)
4328 			goto out;
4329 	}
4330 
4331 	if (changed & BSS_CHANGED_IBSS) {
4332 		if (bss_conf->ibss_joined) {
4333 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4334 			ibss_joined = true;
4335 		} else {
4336 			wlcore_unset_assoc(wl, wlvif);
4337 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4338 		}
4339 	}
4340 
4341 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4342 		do_join = true;
4343 
4344 	/* Need to update the SSID (for filtering etc) */
4345 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4346 		do_join = true;
4347 
4348 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4349 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4350 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4351 
4352 		do_join = true;
4353 	}
4354 
4355 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4356 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4357 
4358 	if (changed & BSS_CHANGED_CQM) {
4359 		bool enable = false;
4360 		if (bss_conf->cqm_rssi_thold)
4361 			enable = true;
4362 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4363 						  bss_conf->cqm_rssi_thold,
4364 						  bss_conf->cqm_rssi_hyst);
4365 		if (ret < 0)
4366 			goto out;
4367 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4368 	}
4369 
4370 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4371 		       BSS_CHANGED_ASSOC)) {
4372 		rcu_read_lock();
4373 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4374 		if (sta) {
4375 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4376 
4377 			/* save the supp_rates of the ap */
4378 			sta_rate_set = sta->supp_rates[wlvif->band];
4379 			if (sta->ht_cap.ht_supported)
4380 				sta_rate_set |=
4381 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4382 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4383 			sta_ht_cap = sta->ht_cap;
4384 			sta_exists = true;
4385 		}
4386 
4387 		rcu_read_unlock();
4388 	}
4389 
4390 	if (changed & BSS_CHANGED_BSSID) {
4391 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4392 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4393 					       sta_rate_set);
4394 			if (ret < 0)
4395 				goto out;
4396 
4397 			/* Need to update the BSSID (for filtering etc) */
4398 			do_join = true;
4399 		} else {
4400 			ret = wlcore_clear_bssid(wl, wlvif);
4401 			if (ret < 0)
4402 				goto out;
4403 		}
4404 	}
4405 
4406 	if (changed & BSS_CHANGED_IBSS) {
4407 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4408 			     bss_conf->ibss_joined);
4409 
4410 		if (bss_conf->ibss_joined) {
4411 			u32 rates = bss_conf->basic_rates;
4412 			wlvif->basic_rate_set =
4413 				wl1271_tx_enabled_rates_get(wl, rates,
4414 							    wlvif->band);
4415 			wlvif->basic_rate =
4416 				wl1271_tx_min_rate_get(wl,
4417 						       wlvif->basic_rate_set);
4418 
4419 			/* by default, use 11b + OFDM rates */
4420 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4421 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4422 			if (ret < 0)
4423 				goto out;
4424 		}
4425 	}
4426 
4427 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4428 		/* enable beacon filtering */
4429 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4430 		if (ret < 0)
4431 			goto out;
4432 	}
4433 
4434 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4435 	if (ret < 0)
4436 		goto out;
4437 
4438 	if (do_join) {
4439 		ret = wlcore_join(wl, wlvif);
4440 		if (ret < 0) {
4441 			wl1271_warning("cmd join failed %d", ret);
4442 			goto out;
4443 		}
4444 	}
4445 
4446 	if (changed & BSS_CHANGED_ASSOC) {
4447 		if (bss_conf->assoc) {
4448 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4449 					       sta_rate_set);
4450 			if (ret < 0)
4451 				goto out;
4452 
4453 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4454 				wl12xx_set_authorized(wl, wlvif);
4455 		} else {
4456 			wlcore_unset_assoc(wl, wlvif);
4457 		}
4458 	}
4459 
4460 	if (changed & BSS_CHANGED_PS) {
4461 		if ((bss_conf->ps) &&
4462 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4463 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4464 			int ps_mode;
4465 			char *ps_mode_str;
4466 
4467 			if (wl->conf.conn.forced_ps) {
4468 				ps_mode = STATION_POWER_SAVE_MODE;
4469 				ps_mode_str = "forced";
4470 			} else {
4471 				ps_mode = STATION_AUTO_PS_MODE;
4472 				ps_mode_str = "auto";
4473 			}
4474 
4475 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4476 
4477 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4478 			if (ret < 0)
4479 				wl1271_warning("enter %s ps failed %d",
4480 					       ps_mode_str, ret);
4481 		} else if (!bss_conf->ps &&
4482 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4483 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4484 
4485 			ret = wl1271_ps_set_mode(wl, wlvif,
4486 						 STATION_ACTIVE_MODE);
4487 			if (ret < 0)
4488 				wl1271_warning("exit auto ps failed %d", ret);
4489 		}
4490 	}
4491 
4492 	/* Handle new association with HT. Do this after join. */
4493 	if (sta_exists) {
4494 		bool enabled =
4495 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4496 
4497 		ret = wlcore_hw_set_peer_cap(wl,
4498 					     &sta_ht_cap,
4499 					     enabled,
4500 					     wlvif->rate_set,
4501 					     wlvif->sta.hlid);
4502 		if (ret < 0) {
4503 			wl1271_warning("Set ht cap failed %d", ret);
4504 			goto out;
4505 
4506 		}
4507 
4508 		if (enabled) {
4509 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4510 						bss_conf->ht_operation_mode);
4511 			if (ret < 0) {
4512 				wl1271_warning("Set ht information failed %d",
4513 					       ret);
4514 				goto out;
4515 			}
4516 		}
4517 	}
4518 
4519 	/* Handle arp filtering. Done after join. */
4520 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4521 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4522 		__be32 addr = bss_conf->arp_addr_list[0];
4523 		wlvif->sta.qos = bss_conf->qos;
4524 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4525 
4526 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4527 			wlvif->ip_addr = addr;
4528 			/*
4529 			 * The template should have been configured only upon
4530 			 * association. However, it seems that the correct IP
4531 			 * isn't being set (when sending), so we have to
4532 			 * reconfigure the template upon every IP change.
4533 			 */
4534 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4535 			if (ret < 0) {
4536 				wl1271_warning("build arp rsp failed: %d", ret);
4537 				goto out;
4538 			}
4539 
4540 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4541 				(ACX_ARP_FILTER_ARP_FILTERING |
4542 				 ACX_ARP_FILTER_AUTO_ARP),
4543 				addr);
4544 		} else {
4545 			wlvif->ip_addr = 0;
4546 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4547 		}
4548 
4549 		if (ret < 0)
4550 			goto out;
4551 	}
4552 
4553 out:
4554 	return;
4555 }
4556 
4557 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4558 				       struct ieee80211_vif *vif,
4559 				       struct ieee80211_bss_conf *bss_conf,
4560 				       u32 changed)
4561 {
4562 	struct wl1271 *wl = hw->priv;
4563 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4564 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4565 	int ret;
4566 
4567 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4568 		     wlvif->role_id, (int)changed);
4569 
4570 	/*
4571 	 * make sure to cancel pending disconnections if our association
4572 	 * state changed
4573 	 */
4574 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4575 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4576 
4577 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4578 	    !bss_conf->enable_beacon)
4579 		wl1271_tx_flush(wl);
4580 
4581 	mutex_lock(&wl->mutex);
4582 
4583 	if (unlikely(wl->state != WLCORE_STATE_ON))
4584 		goto out;
4585 
4586 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4587 		goto out;
4588 
4589 	ret = wl1271_ps_elp_wakeup(wl);
4590 	if (ret < 0)
4591 		goto out;
4592 
4593 	if ((changed & BSS_CHANGED_TXPOWER) &&
4594 	    bss_conf->txpower != wlvif->power_level) {
4595 
4596 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4597 		if (ret < 0)
4598 			goto out;
4599 
4600 		wlvif->power_level = bss_conf->txpower;
4601 	}
4602 
4603 	if (is_ap)
4604 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4605 	else
4606 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4607 
4608 	wl1271_ps_elp_sleep(wl);
4609 
4610 out:
4611 	mutex_unlock(&wl->mutex);
4612 }
4613 
4614 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4615 				 struct ieee80211_chanctx_conf *ctx)
4616 {
4617 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4618 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4619 		     cfg80211_get_chandef_type(&ctx->def));
4620 	return 0;
4621 }
4622 
4623 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4624 				     struct ieee80211_chanctx_conf *ctx)
4625 {
4626 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4627 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4628 		     cfg80211_get_chandef_type(&ctx->def));
4629 }
4630 
4631 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4632 				     struct ieee80211_chanctx_conf *ctx,
4633 				     u32 changed)
4634 {
4635 	struct wl1271 *wl = hw->priv;
4636 	struct wl12xx_vif *wlvif;
4637 	int ret;
4638 	int channel = ieee80211_frequency_to_channel(
4639 		ctx->def.chan->center_freq);
4640 
4641 	wl1271_debug(DEBUG_MAC80211,
4642 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4643 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4644 
4645 	mutex_lock(&wl->mutex);
4646 
4647 	ret = wl1271_ps_elp_wakeup(wl);
4648 	if (ret < 0)
4649 		goto out;
4650 
4651 	wl12xx_for_each_wlvif(wl, wlvif) {
4652 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4653 
4654 		rcu_read_lock();
4655 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4656 			rcu_read_unlock();
4657 			continue;
4658 		}
4659 		rcu_read_unlock();
4660 
4661 		/* start radar if needed */
4662 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4663 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4664 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4665 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4666 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4667 			wlcore_hw_set_cac(wl, wlvif, true);
4668 			wlvif->radar_enabled = true;
4669 		}
4670 	}
4671 
4672 	wl1271_ps_elp_sleep(wl);
4673 out:
4674 	mutex_unlock(&wl->mutex);
4675 }
4676 
4677 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4678 					struct ieee80211_vif *vif,
4679 					struct ieee80211_chanctx_conf *ctx)
4680 {
4681 	struct wl1271 *wl = hw->priv;
4682 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4683 	int channel = ieee80211_frequency_to_channel(
4684 		ctx->def.chan->center_freq);
4685 	int ret = -EINVAL;
4686 
4687 	wl1271_debug(DEBUG_MAC80211,
4688 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4689 		     wlvif->role_id, channel,
4690 		     cfg80211_get_chandef_type(&ctx->def),
4691 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4692 
4693 	mutex_lock(&wl->mutex);
4694 
4695 	if (unlikely(wl->state != WLCORE_STATE_ON))
4696 		goto out;
4697 
4698 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4699 		goto out;
4700 
4701 	ret = wl1271_ps_elp_wakeup(wl);
4702 	if (ret < 0)
4703 		goto out;
4704 
4705 	wlvif->band = ctx->def.chan->band;
4706 	wlvif->channel = channel;
4707 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4708 
4709 	/* update default rates according to the band */
4710 	wl1271_set_band_rate(wl, wlvif);
4711 
4712 	if (ctx->radar_enabled &&
4713 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4714 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4715 		wlcore_hw_set_cac(wl, wlvif, true);
4716 		wlvif->radar_enabled = true;
4717 	}
4718 
4719 	wl1271_ps_elp_sleep(wl);
4720 out:
4721 	mutex_unlock(&wl->mutex);
4722 
4723 	return 0;
4724 }
4725 
4726 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4727 					   struct ieee80211_vif *vif,
4728 					   struct ieee80211_chanctx_conf *ctx)
4729 {
4730 	struct wl1271 *wl = hw->priv;
4731 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4732 	int ret;
4733 
4734 	wl1271_debug(DEBUG_MAC80211,
4735 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4736 		     wlvif->role_id,
4737 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4738 		     cfg80211_get_chandef_type(&ctx->def));
4739 
4740 	wl1271_tx_flush(wl);
4741 
4742 	mutex_lock(&wl->mutex);
4743 
4744 	if (unlikely(wl->state != WLCORE_STATE_ON))
4745 		goto out;
4746 
4747 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4748 		goto out;
4749 
4750 	ret = wl1271_ps_elp_wakeup(wl);
4751 	if (ret < 0)
4752 		goto out;
4753 
4754 	if (wlvif->radar_enabled) {
4755 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4756 		wlcore_hw_set_cac(wl, wlvif, false);
4757 		wlvif->radar_enabled = false;
4758 	}
4759 
4760 	wl1271_ps_elp_sleep(wl);
4761 out:
4762 	mutex_unlock(&wl->mutex);
4763 }
4764 
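/*
 * Move an AP role to a new channel context: stop any running CAC on the
 * old channel, update the cached band/channel and restart radar
 * detection if the new context requires it. Beaconing is expected to be
 * disabled while the switch takes place.
 */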
4765 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4766 				    struct wl12xx_vif *wlvif,
4767 				    struct ieee80211_chanctx_conf *new_ctx)
4768 {
4769 	int channel = ieee80211_frequency_to_channel(
4770 		new_ctx->def.chan->center_freq);
4771 
4772 	wl1271_debug(DEBUG_MAC80211,
4773 		     "switch vif (role %d) %d -> %d chan_type: %d",
4774 		     wlvif->role_id, wlvif->channel, channel,
4775 		     cfg80211_get_chandef_type(&new_ctx->def));
4776 
4777 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4778 		return 0;
4779 
4780 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4781 
4782 	if (wlvif->radar_enabled) {
4783 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4784 		wlcore_hw_set_cac(wl, wlvif, false);
4785 		wlvif->radar_enabled = false;
4786 	}
4787 
4788 	wlvif->band = new_ctx->def.chan->band;
4789 	wlvif->channel = channel;
4790 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4791 
4792 	/* start radar if needed */
4793 	if (new_ctx->radar_enabled) {
4794 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4795 		wlcore_hw_set_cac(wl, wlvif, true);
4796 		wlvif->radar_enabled = true;
4797 	}
4798 
4799 	return 0;
4800 }
4801 
4802 static int
4803 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4804 			     struct ieee80211_vif_chanctx_switch *vifs,
4805 			     int n_vifs,
4806 			     enum ieee80211_chanctx_switch_mode mode)
4807 {
4808 	struct wl1271 *wl = hw->priv;
4809 	int i, ret;
4810 
4811 	wl1271_debug(DEBUG_MAC80211,
4812 		     "mac80211 switch chanctx n_vifs %d mode %d",
4813 		     n_vifs, mode);
4814 
4815 	mutex_lock(&wl->mutex);
4816 
4817 	ret = wl1271_ps_elp_wakeup(wl);
4818 	if (ret < 0)
4819 		goto out;
4820 
4821 	for (i = 0; i < n_vifs; i++) {
4822 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4823 
4824 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4825 		if (ret)
4826 			goto out_sleep;
4827 	}
4828 out_sleep:
4829 	wl1271_ps_elp_sleep(wl);
4830 out:
4831 	mutex_unlock(&wl->mutex);
4832 
4833 	return 0;
4834 }
4835 
4836 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4837 			     struct ieee80211_vif *vif, u16 queue,
4838 			     const struct ieee80211_tx_queue_params *params)
4839 {
4840 	struct wl1271 *wl = hw->priv;
4841 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4842 	u8 ps_scheme;
4843 	int ret = 0;
4844 
4845 	if (wlcore_is_p2p_mgmt(wlvif))
4846 		return 0;
4847 
4848 	mutex_lock(&wl->mutex);
4849 
4850 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4851 
4852 	if (params->uapsd)
4853 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4854 	else
4855 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4856 
4857 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4858 		goto out;
4859 
4860 	ret = wl1271_ps_elp_wakeup(wl);
4861 	if (ret < 0)
4862 		goto out;
4863 
4864 	/*
4865 	 * mac80211 configures the txop in units of 32us, but the FW
4866 	 * expects microseconds, hence the << 5 below.
4867 	 */
4868 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4869 				params->cw_min, params->cw_max,
4870 				params->aifs, params->txop << 5);
4871 	if (ret < 0)
4872 		goto out_sleep;
4873 
4874 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4875 				 CONF_CHANNEL_TYPE_EDCF,
4876 				 wl1271_tx_get_queue(queue),
4877 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4878 				 0, 0);
4879 
4880 out_sleep:
4881 	wl1271_ps_elp_sleep(wl);
4882 
4883 out:
4884 	mutex_unlock(&wl->mutex);
4885 
4886 	return ret;
4887 }
4888 
4889 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4890 			     struct ieee80211_vif *vif)
4891 {
4892 
4893 	struct wl1271 *wl = hw->priv;
4894 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4895 	u64 mactime = ULLONG_MAX;
4896 	int ret;
4897 
4898 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4899 
4900 	mutex_lock(&wl->mutex);
4901 
4902 	if (unlikely(wl->state != WLCORE_STATE_ON))
4903 		goto out;
4904 
4905 	ret = wl1271_ps_elp_wakeup(wl);
4906 	if (ret < 0)
4907 		goto out;
4908 
4909 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4910 	if (ret < 0)
4911 		goto out_sleep;
4912 
4913 out_sleep:
4914 	wl1271_ps_elp_sleep(wl);
4915 
4916 out:
4917 	mutex_unlock(&wl->mutex);
4918 	return mactime;
4919 }
4920 
4921 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4922 				struct survey_info *survey)
4923 {
4924 	struct ieee80211_conf *conf = &hw->conf;
4925 
4926 	if (idx != 0)
4927 		return -ENOENT;
4928 
4929 	survey->channel = conf->chandef.chan;
4930 	survey->filled = 0;
4931 	return 0;
4932 }
4933 
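/*
 * Allocate an HLID for a station that was added in AP mode and restore
 * its saved security sequence counter in case this is a recovery/resume.
 */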
4934 static int wl1271_allocate_sta(struct wl1271 *wl,
4935 			     struct wl12xx_vif *wlvif,
4936 			     struct ieee80211_sta *sta)
4937 {
4938 	struct wl1271_station *wl_sta;
4939 	int ret;
4940 
4941 
4942 	if (wl->active_sta_count >= wl->max_ap_stations) {
4943 		wl1271_warning("could not allocate HLID - too many stations");
4944 		return -EBUSY;
4945 	}
4946 
4947 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4948 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4949 	if (ret < 0) {
4950 		wl1271_warning("could not allocate HLID - too many links");
4951 		return -EBUSY;
4952 	}
4953 
4954 	/* use the previous security seq, if this is a recovery/resume */
4955 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4956 
4957 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4958 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4959 	wl->active_sta_count++;
4960 	return 0;
4961 }
4962 
4963 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4964 {
4965 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4966 		return;
4967 
4968 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4969 	__clear_bit(hlid, &wl->ap_ps_map);
4970 	__clear_bit(hlid, &wl->ap_fw_ps_map);
4971 
4972 	/*
4973 	 * save the last used PN in the private part of ieee80211_sta,
4974 	 * in case of recovery/suspend
4975 	 */
4976 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4977 
4978 	wl12xx_free_link(wl, wlvif, &hlid);
4979 	wl->active_sta_count--;
4980 
4981 	/*
4982 	 * rearm the tx watchdog when the last STA is freed - give the FW a
4983 	 * chance to return STA-buffered packets before complaining.
4984 	 */
4985 	if (wl->active_sta_count == 0)
4986 		wl12xx_rearm_tx_watchdog_locked(wl);
4987 }
4988 
4989 static int wl12xx_sta_add(struct wl1271 *wl,
4990 			  struct wl12xx_vif *wlvif,
4991 			  struct ieee80211_sta *sta)
4992 {
4993 	struct wl1271_station *wl_sta;
4994 	int ret = 0;
4995 	u8 hlid;
4996 
4997 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4998 
4999 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5000 	if (ret < 0)
5001 		return ret;
5002 
5003 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5004 	hlid = wl_sta->hlid;
5005 
5006 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5007 	if (ret < 0)
5008 		wl1271_free_sta(wl, wlvif, hlid);
5009 
5010 	return ret;
5011 }
5012 
5013 static int wl12xx_sta_remove(struct wl1271 *wl,
5014 			     struct wl12xx_vif *wlvif,
5015 			     struct ieee80211_sta *sta)
5016 {
5017 	struct wl1271_station *wl_sta;
5018 	int ret = 0, id;
5019 
5020 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5021 
5022 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5023 	id = wl_sta->hlid;
5024 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5025 		return -EINVAL;
5026 
5027 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5028 	if (ret < 0)
5029 		return ret;
5030 
5031 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5032 	return ret;
5033 }
5034 
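/*
 * Start a ROC on the vif's own role, but only if no other role is
 * currently on-channel.
 */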
5035 static void wlcore_roc_if_possible(struct wl1271 *wl,
5036 				   struct wl12xx_vif *wlvif)
5037 {
5038 	if (find_first_bit(wl->roc_map,
5039 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5040 		return;
5041 
5042 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5043 		return;
5044 
5045 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5046 }
5047 
5048 /*
5049  * when wl_sta is NULL, we treat this call as if coming from a
5050  * pending auth reply.
5051  * wl->mutex must be taken and the FW must be awake when the call
5052  * takes place.
5053  */
5054 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5055 			      struct wl1271_station *wl_sta, bool in_conn)
5056 {
5057 	if (in_conn) {
5058 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5059 			return;
5060 
5061 		if (!wlvif->ap_pending_auth_reply &&
5062 		    !wlvif->inconn_count)
5063 			wlcore_roc_if_possible(wl, wlvif);
5064 
5065 		if (wl_sta) {
5066 			wl_sta->in_connection = true;
5067 			wlvif->inconn_count++;
5068 		} else {
5069 			wlvif->ap_pending_auth_reply = true;
5070 		}
5071 	} else {
5072 		if (wl_sta && !wl_sta->in_connection)
5073 			return;
5074 
5075 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5076 			return;
5077 
5078 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5079 			return;
5080 
5081 		if (wl_sta) {
5082 			wl_sta->in_connection = false;
5083 			wlvif->inconn_count--;
5084 		} else {
5085 			wlvif->ap_pending_auth_reply = false;
5086 		}
5087 
5088 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5089 		    test_bit(wlvif->role_id, wl->roc_map))
5090 			wl12xx_croc(wl, wlvif->role_id);
5091 	}
5092 }
5093 
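/*
 * Translate mac80211 station state transitions into FW commands:
 * add/remove/authorize peers in AP mode, track the authorized state and
 * saved sequence numbers in STA mode, and manage the ROCs used while a
 * connection is being established.
 */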
5094 static int wl12xx_update_sta_state(struct wl1271 *wl,
5095 				   struct wl12xx_vif *wlvif,
5096 				   struct ieee80211_sta *sta,
5097 				   enum ieee80211_sta_state old_state,
5098 				   enum ieee80211_sta_state new_state)
5099 {
5100 	struct wl1271_station *wl_sta;
5101 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5102 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5103 	int ret;
5104 
5105 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5106 
5107 	/* Add station (AP mode) */
5108 	if (is_ap &&
5109 	    old_state == IEEE80211_STA_NOTEXIST &&
5110 	    new_state == IEEE80211_STA_NONE) {
5111 		ret = wl12xx_sta_add(wl, wlvif, sta);
5112 		if (ret)
5113 			return ret;
5114 
5115 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5116 	}
5117 
5118 	/* Remove station (AP mode) */
5119 	if (is_ap &&
5120 	    old_state == IEEE80211_STA_NONE &&
5121 	    new_state == IEEE80211_STA_NOTEXIST) {
5122 		/* must not fail */
5123 		wl12xx_sta_remove(wl, wlvif, sta);
5124 
5125 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5126 	}
5127 
5128 	/* Authorize station (AP mode) */
5129 	if (is_ap &&
5130 	    new_state == IEEE80211_STA_AUTHORIZED) {
5131 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5132 		if (ret < 0)
5133 			return ret;
5134 
5135 		/* reconfigure rates */
5136 		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5137 		if (ret < 0)
5138 			return ret;
5139 
5140 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5141 						     wl_sta->hlid);
5142 		if (ret)
5143 			return ret;
5144 
5145 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5146 	}
5147 
5148 	/* Authorize station */
5149 	if (is_sta &&
5150 	    new_state == IEEE80211_STA_AUTHORIZED) {
5151 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5152 		ret = wl12xx_set_authorized(wl, wlvif);
5153 		if (ret)
5154 			return ret;
5155 	}
5156 
5157 	if (is_sta &&
5158 	    old_state == IEEE80211_STA_AUTHORIZED &&
5159 	    new_state == IEEE80211_STA_ASSOC) {
5160 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5161 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5162 	}
5163 
5164 	/* save seq number on disassoc (suspend) */
5165 	if (is_sta &&
5166 	    old_state == IEEE80211_STA_ASSOC &&
5167 	    new_state == IEEE80211_STA_AUTH) {
5168 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5169 		wlvif->total_freed_pkts = 0;
5170 	}
5171 
5172 	/* restore seq number on assoc (resume) */
5173 	if (is_sta &&
5174 	    old_state == IEEE80211_STA_AUTH &&
5175 	    new_state == IEEE80211_STA_ASSOC) {
5176 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5177 	}
5178 
5179 	/* clear ROCs on failure or authorization */
5180 	if (is_sta &&
5181 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5182 	     new_state == IEEE80211_STA_NOTEXIST)) {
5183 		if (test_bit(wlvif->role_id, wl->roc_map))
5184 			wl12xx_croc(wl, wlvif->role_id);
5185 	}
5186 
5187 	if (is_sta &&
5188 	    old_state == IEEE80211_STA_NOTEXIST &&
5189 	    new_state == IEEE80211_STA_NONE) {
5190 		if (find_first_bit(wl->roc_map,
5191 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5192 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5193 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5194 				   wlvif->band, wlvif->channel);
5195 		}
5196 	}
5197 	return 0;
5198 }
5199 
5200 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5201 			       struct ieee80211_vif *vif,
5202 			       struct ieee80211_sta *sta,
5203 			       enum ieee80211_sta_state old_state,
5204 			       enum ieee80211_sta_state new_state)
5205 {
5206 	struct wl1271 *wl = hw->priv;
5207 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5208 	int ret;
5209 
5210 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5211 		     sta->aid, old_state, new_state);
5212 
5213 	mutex_lock(&wl->mutex);
5214 
5215 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5216 		ret = -EBUSY;
5217 		goto out;
5218 	}
5219 
5220 	ret = wl1271_ps_elp_wakeup(wl);
5221 	if (ret < 0)
5222 		goto out;
5223 
5224 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5225 
5226 	wl1271_ps_elp_sleep(wl);
5227 out:
5228 	mutex_unlock(&wl->mutex);
5229 	if (new_state < old_state)
5230 		return 0;
5231 	return ret;
5232 }
5233 
5234 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5235 				  struct ieee80211_vif *vif,
5236 				  struct ieee80211_ampdu_params *params)
5237 {
5238 	struct wl1271 *wl = hw->priv;
5239 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5240 	int ret;
5241 	u8 hlid, *ba_bitmap;
5242 	struct ieee80211_sta *sta = params->sta;
5243 	enum ieee80211_ampdu_mlme_action action = params->action;
5244 	u16 tid = params->tid;
5245 	u16 *ssn = &params->ssn;
5246 
5247 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5248 		     tid);
5249 
5250 	/* sanity check - the fields in FW are only 8 bits wide */
5251 	if (WARN_ON(tid > 0xFF))
5252 		return -ENOTSUPP;
5253 
5254 	mutex_lock(&wl->mutex);
5255 
5256 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5257 		ret = -EAGAIN;
5258 		goto out;
5259 	}
5260 
5261 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5262 		hlid = wlvif->sta.hlid;
5263 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5264 		struct wl1271_station *wl_sta;
5265 
5266 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5267 		hlid = wl_sta->hlid;
5268 	} else {
5269 		ret = -EINVAL;
5270 		goto out;
5271 	}
5272 
5273 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5274 
5275 	ret = wl1271_ps_elp_wakeup(wl);
5276 	if (ret < 0)
5277 		goto out;
5278 
5279 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5280 		     tid, action);
5281 
5282 	switch (action) {
5283 	case IEEE80211_AMPDU_RX_START:
5284 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5285 			ret = -ENOTSUPP;
5286 			break;
5287 		}
5288 
5289 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5290 			ret = -EBUSY;
5291 			wl1271_error("exceeded max RX BA sessions");
5292 			break;
5293 		}
5294 
5295 		if (*ba_bitmap & BIT(tid)) {
5296 			ret = -EINVAL;
5297 			wl1271_error("cannot enable RX BA session on active "
5298 				     "tid: %d", tid);
5299 			break;
5300 		}
5301 
5302 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5303 				hlid,
5304 				params->buf_size);
5305 
5306 		if (!ret) {
5307 			*ba_bitmap |= BIT(tid);
5308 			wl->ba_rx_session_count++;
5309 		}
5310 		break;
5311 
5312 	case IEEE80211_AMPDU_RX_STOP:
5313 		if (!(*ba_bitmap & BIT(tid))) {
5314 			/*
5315 			 * this happens on reconfig - so only output a debug
5316 			 * message for now, and don't fail the function.
5317 			 */
5318 			wl1271_debug(DEBUG_MAC80211,
5319 				     "no active RX BA session on tid: %d",
5320 				     tid);
5321 			ret = 0;
5322 			break;
5323 		}
5324 
5325 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5326 							 hlid, 0);
5327 		if (!ret) {
5328 			*ba_bitmap &= ~BIT(tid);
5329 			wl->ba_rx_session_count--;
5330 		}
5331 		break;
5332 
5333 	/*
5334 	 * BA initiator (TX) sessions are managed by the FW independently.
5335 	 * All TX AMPDU actions intentionally fall through and return -EINVAL.
5336 	 */
5337 	case IEEE80211_AMPDU_TX_START:
5338 	case IEEE80211_AMPDU_TX_STOP_CONT:
5339 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5340 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5341 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5342 		ret = -EINVAL;
5343 		break;
5344 
5345 	default:
5346 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5347 		ret = -EINVAL;
5348 	}
5349 
5350 	wl1271_ps_elp_sleep(wl);
5351 
5352 out:
5353 	mutex_unlock(&wl->mutex);
5354 
5355 	return ret;
5356 }
5357 
5358 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5359 				   struct ieee80211_vif *vif,
5360 				   const struct cfg80211_bitrate_mask *mask)
5361 {
5362 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5363 	struct wl1271 *wl = hw->priv;
5364 	int i, ret = 0;
5365 
5366 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5367 		mask->control[NL80211_BAND_2GHZ].legacy,
5368 		mask->control[NL80211_BAND_5GHZ].legacy);
5369 
5370 	mutex_lock(&wl->mutex);
5371 
5372 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5373 		wlvif->bitrate_masks[i] =
5374 			wl1271_tx_enabled_rates_get(wl,
5375 						    mask->control[i].legacy,
5376 						    i);
5377 
5378 	if (unlikely(wl->state != WLCORE_STATE_ON))
5379 		goto out;
5380 
5381 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5382 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5383 
5384 		ret = wl1271_ps_elp_wakeup(wl);
5385 		if (ret < 0)
5386 			goto out;
5387 
5388 		wl1271_set_band_rate(wl, wlvif);
5389 		wlvif->basic_rate =
5390 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5391 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5392 
5393 		wl1271_ps_elp_sleep(wl);
5394 	}
5395 out:
5396 	mutex_unlock(&wl->mutex);
5397 
5398 	return ret;
5399 }
5400 
5401 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5402 				     struct ieee80211_vif *vif,
5403 				     struct ieee80211_channel_switch *ch_switch)
5404 {
5405 	struct wl1271 *wl = hw->priv;
5406 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5407 	int ret;
5408 
5409 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5410 
5411 	wl1271_tx_flush(wl);
5412 
5413 	mutex_lock(&wl->mutex);
5414 
5415 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5416 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5417 			ieee80211_chswitch_done(vif, false);
5418 		goto out;
5419 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5420 		goto out;
5421 	}
5422 
5423 	ret = wl1271_ps_elp_wakeup(wl);
5424 	if (ret < 0)
5425 		goto out;
5426 
5427 	/* TODO: change mac80211 to pass vif as param */
5428 
5429 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5430 		unsigned long delay_usec;
5431 
5432 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5433 		if (ret)
5434 			goto out_sleep;
5435 
5436 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5437 
5438 		/* indicate failure 5 seconds after channel switch time */
5439 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5440 			ch_switch->count;
5441 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5442 					     usecs_to_jiffies(delay_usec) +
5443 					     msecs_to_jiffies(5000));
5444 	}
5445 
5446 out_sleep:
5447 	wl1271_ps_elp_sleep(wl);
5448 
5449 out:
5450 	mutex_unlock(&wl->mutex);
5451 }
5452 
5453 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5454 					struct wl12xx_vif *wlvif,
5455 					u8 eid)
5456 {
5457 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5458 	struct sk_buff *beacon =
5459 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5460 
5461 	if (!beacon)
5462 		return NULL;
5463 
5464 	return cfg80211_find_ie(eid,
5465 				beacon->data + ieoffset,
5466 				beacon->len - ieoffset);
5467 }
5468 
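/*
 * Read the CSA countdown value from the Channel Switch Announcement IE
 * of the vif's current beacon.
 */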
5469 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5470 				u8 *csa_count)
5471 {
5472 	const u8 *ie;
5473 	const struct ieee80211_channel_sw_ie *ie_csa;
5474 
5475 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5476 	if (!ie)
5477 		return -EINVAL;
5478 
5479 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5480 	*csa_count = ie_csa->count;
5481 
5482 	return 0;
5483 }
5484 
5485 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5486 					    struct ieee80211_vif *vif,
5487 					    struct cfg80211_chan_def *chandef)
5488 {
5489 	struct wl1271 *wl = hw->priv;
5490 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5491 	struct ieee80211_channel_switch ch_switch = {
5492 		.block_tx = true,
5493 		.chandef = *chandef,
5494 	};
5495 	int ret;
5496 
5497 	wl1271_debug(DEBUG_MAC80211,
5498 		     "mac80211 channel switch beacon (role %d)",
5499 		     wlvif->role_id);
5500 
5501 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5502 	if (ret < 0) {
5503 		wl1271_error("error getting beacon (for CSA counter)");
5504 		return;
5505 	}
5506 
5507 	mutex_lock(&wl->mutex);
5508 
5509 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5510 		ret = -EBUSY;
5511 		goto out;
5512 	}
5513 
5514 	ret = wl1271_ps_elp_wakeup(wl);
5515 	if (ret < 0)
5516 		goto out;
5517 
5518 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5519 	if (ret)
5520 		goto out_sleep;
5521 
5522 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5523 
5524 out_sleep:
5525 	wl1271_ps_elp_sleep(wl);
5526 out:
5527 	mutex_unlock(&wl->mutex);
5528 }
5529 
5530 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5531 			    u32 queues, bool drop)
5532 {
5533 	struct wl1271 *wl = hw->priv;
5534 
5535 	wl1271_tx_flush(wl);
5536 }
5537 
5538 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5539 				       struct ieee80211_vif *vif,
5540 				       struct ieee80211_channel *chan,
5541 				       int duration,
5542 				       enum ieee80211_roc_type type)
5543 {
5544 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5545 	struct wl1271 *wl = hw->priv;
5546 	int channel, active_roc, ret = 0;
5547 
5548 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5549 
5550 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5551 		     channel, wlvif->role_id);
5552 
5553 	mutex_lock(&wl->mutex);
5554 
5555 	if (unlikely(wl->state != WLCORE_STATE_ON))
5556 		goto out;
5557 
5558 	/* return EBUSY if we can't ROC right now */
5559 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5560 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5561 		wl1271_warning("active roc on role %d", active_roc);
5562 		ret = -EBUSY;
5563 		goto out;
5564 	}
5565 
5566 	ret = wl1271_ps_elp_wakeup(wl);
5567 	if (ret < 0)
5568 		goto out;
5569 
5570 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5571 	if (ret < 0)
5572 		goto out_sleep;
5573 
5574 	wl->roc_vif = vif;
5575 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5576 				     msecs_to_jiffies(duration));
5577 out_sleep:
5578 	wl1271_ps_elp_sleep(wl);
5579 out:
5580 	mutex_unlock(&wl->mutex);
5581 	return ret;
5582 }
5583 
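/*
 * Tear down the device role that was started for the remain-on-channel
 * request; called under wl->mutex with the FW awake (see
 * wlcore_roc_completed()).
 */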
5584 static int __wlcore_roc_completed(struct wl1271 *wl)
5585 {
5586 	struct wl12xx_vif *wlvif;
5587 	int ret;
5588 
5589 	/* already completed */
5590 	if (unlikely(!wl->roc_vif))
5591 		return 0;
5592 
5593 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5594 
5595 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5596 		return -EBUSY;
5597 
5598 	ret = wl12xx_stop_dev(wl, wlvif);
5599 	if (ret < 0)
5600 		return ret;
5601 
5602 	wl->roc_vif = NULL;
5603 
5604 	return 0;
5605 }
5606 
5607 static int wlcore_roc_completed(struct wl1271 *wl)
5608 {
5609 	int ret;
5610 
5611 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5612 
5613 	mutex_lock(&wl->mutex);
5614 
5615 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5616 		ret = -EBUSY;
5617 		goto out;
5618 	}
5619 
5620 	ret = wl1271_ps_elp_wakeup(wl);
5621 	if (ret < 0)
5622 		goto out;
5623 
5624 	ret = __wlcore_roc_completed(wl);
5625 
5626 	wl1271_ps_elp_sleep(wl);
5627 out:
5628 	mutex_unlock(&wl->mutex);
5629 
5630 	return ret;
5631 }
5632 
5633 static void wlcore_roc_complete_work(struct work_struct *work)
5634 {
5635 	struct delayed_work *dwork;
5636 	struct wl1271 *wl;
5637 	int ret;
5638 
5639 	dwork = to_delayed_work(work);
5640 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5641 
5642 	ret = wlcore_roc_completed(wl);
5643 	if (!ret)
5644 		ieee80211_remain_on_channel_expired(wl->hw);
5645 }
5646 
5647 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5648 {
5649 	struct wl1271 *wl = hw->priv;
5650 
5651 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5652 
5653 	/* TODO: per-vif */
5654 	wl1271_tx_flush(wl);
5655 
5656 	/*
5657 	 * we can't just flush_work here, because it might deadlock
5658 	 * (as we might get called from the same workqueue)
5659 	 */
5660 	cancel_delayed_work_sync(&wl->roc_complete_work);
5661 	wlcore_roc_completed(wl);
5662 
5663 	return 0;
5664 }
5665 
5666 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5667 				    struct ieee80211_vif *vif,
5668 				    struct ieee80211_sta *sta,
5669 				    u32 changed)
5670 {
5671 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5672 
5673 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5674 
5675 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5676 		return;
5677 
5678 	/* this callback is atomic, so defer the update to a work item */
5679 	wlvif->rc_update_bw = sta->bandwidth;
5680 	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5681 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5682 }
5683 
5684 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5685 				     struct ieee80211_vif *vif,
5686 				     struct ieee80211_sta *sta,
5687 				     struct station_info *sinfo)
5688 {
5689 	struct wl1271 *wl = hw->priv;
5690 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5691 	s8 rssi_dbm;
5692 	int ret;
5693 
5694 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5695 
5696 	mutex_lock(&wl->mutex);
5697 
5698 	if (unlikely(wl->state != WLCORE_STATE_ON))
5699 		goto out;
5700 
5701 	ret = wl1271_ps_elp_wakeup(wl);
5702 	if (ret < 0)
5703 		goto out_sleep;
5704 
5705 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5706 	if (ret < 0)
5707 		goto out_sleep;
5708 
5709 	sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5710 	sinfo->signal = rssi_dbm;
5711 
5712 out_sleep:
5713 	wl1271_ps_elp_sleep(wl);
5714 
5715 out:
5716 	mutex_unlock(&wl->mutex);
5717 }
5718 
5719 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5720 					     struct ieee80211_sta *sta)
5721 {
5722 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5723 	struct wl1271 *wl = hw->priv;
5724 	u8 hlid = wl_sta->hlid;
5725 
5726 	/* return in units of Kbps */
5727 	return (wl->links[hlid].fw_rate_mbps * 1000);
5728 }
5729 
5730 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5731 {
5732 	struct wl1271 *wl = hw->priv;
5733 	bool ret = false;
5734 
5735 	mutex_lock(&wl->mutex);
5736 
5737 	if (unlikely(wl->state != WLCORE_STATE_ON))
5738 		goto out;
5739 
5740 	/* packets are considered pending if in the TX queue or the FW */
5741 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5742 out:
5743 	mutex_unlock(&wl->mutex);
5744 
5745 	return ret;
5746 }
5747 
5748 /* can't be const, mac80211 writes to this */
5749 static struct ieee80211_rate wl1271_rates[] = {
5750 	{ .bitrate = 10,
5751 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5752 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5753 	{ .bitrate = 20,
5754 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5755 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5756 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5757 	{ .bitrate = 55,
5758 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5759 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5760 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5761 	{ .bitrate = 110,
5762 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5763 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5764 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5765 	{ .bitrate = 60,
5766 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5767 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5768 	{ .bitrate = 90,
5769 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5770 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5771 	{ .bitrate = 120,
5772 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5773 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5774 	{ .bitrate = 180,
5775 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5776 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5777 	{ .bitrate = 240,
5778 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5779 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5780 	{ .bitrate = 360,
5781 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5782 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5783 	{ .bitrate = 480,
5784 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5785 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5786 	{ .bitrate = 540,
5787 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5788 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5789 };
5790 
5791 /* can't be const, mac80211 writes to this */
5792 static struct ieee80211_channel wl1271_channels[] = {
5793 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5794 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5795 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5796 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5797 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5798 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5799 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5800 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5801 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5802 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5803 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5804 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5805 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5806 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5807 };
5808 
5809 /* can't be const, mac80211 writes to this */
5810 static struct ieee80211_supported_band wl1271_band_2ghz = {
5811 	.channels = wl1271_channels,
5812 	.n_channels = ARRAY_SIZE(wl1271_channels),
5813 	.bitrates = wl1271_rates,
5814 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5815 };
5816 
5817 /* 5 GHz data rates for WL1273 */
5818 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5819 	{ .bitrate = 60,
5820 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5821 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5822 	{ .bitrate = 90,
5823 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5824 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5825 	{ .bitrate = 120,
5826 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5827 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5828 	{ .bitrate = 180,
5829 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5830 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5831 	{ .bitrate = 240,
5832 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5833 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5834 	{ .bitrate = 360,
5835 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5836 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5837 	{ .bitrate = 480,
5838 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5839 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5840 	{ .bitrate = 540,
5841 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5842 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5843 };
5844 
5845 /* 5 GHz band channels for WL1273 */
5846 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5847 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5848 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5849 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5850 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5851 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5852 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5853 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5854 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5855 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5856 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5857 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5858 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5859 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5860 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5861 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5862 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5863 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5864 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5865 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5866 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5867 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5868 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5869 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5870 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5871 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5872 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5873 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5874 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5875 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5876 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5877 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5878 };
5879 
5880 static struct ieee80211_supported_band wl1271_band_5ghz = {
5881 	.channels = wl1271_channels_5ghz,
5882 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5883 	.bitrates = wl1271_rates_5ghz,
5884 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5885 };
5886 
5887 static const struct ieee80211_ops wl1271_ops = {
5888 	.start = wl1271_op_start,
5889 	.stop = wlcore_op_stop,
5890 	.add_interface = wl1271_op_add_interface,
5891 	.remove_interface = wl1271_op_remove_interface,
5892 	.change_interface = wl12xx_op_change_interface,
5893 #ifdef CONFIG_PM
5894 	.suspend = wl1271_op_suspend,
5895 	.resume = wl1271_op_resume,
5896 #endif
5897 	.config = wl1271_op_config,
5898 	.prepare_multicast = wl1271_op_prepare_multicast,
5899 	.configure_filter = wl1271_op_configure_filter,
5900 	.tx = wl1271_op_tx,
5901 	.set_key = wlcore_op_set_key,
5902 	.hw_scan = wl1271_op_hw_scan,
5903 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5904 	.sched_scan_start = wl1271_op_sched_scan_start,
5905 	.sched_scan_stop = wl1271_op_sched_scan_stop,
5906 	.bss_info_changed = wl1271_op_bss_info_changed,
5907 	.set_frag_threshold = wl1271_op_set_frag_threshold,
5908 	.set_rts_threshold = wl1271_op_set_rts_threshold,
5909 	.conf_tx = wl1271_op_conf_tx,
5910 	.get_tsf = wl1271_op_get_tsf,
5911 	.get_survey = wl1271_op_get_survey,
5912 	.sta_state = wl12xx_op_sta_state,
5913 	.ampdu_action = wl1271_op_ampdu_action,
5914 	.tx_frames_pending = wl1271_tx_frames_pending,
5915 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5916 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
5917 	.channel_switch = wl12xx_op_channel_switch,
5918 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
5919 	.flush = wlcore_op_flush,
5920 	.remain_on_channel = wlcore_op_remain_on_channel,
5921 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5922 	.add_chanctx = wlcore_op_add_chanctx,
5923 	.remove_chanctx = wlcore_op_remove_chanctx,
5924 	.change_chanctx = wlcore_op_change_chanctx,
5925 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5926 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5927 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5928 	.sta_rc_update = wlcore_op_sta_rc_update,
5929 	.sta_statistics = wlcore_op_sta_statistics,
5930 	.get_expected_throughput = wlcore_op_get_expected_throughput,
5931 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5932 };
5933 
5934 
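/*
 * Translate a rate index reported by the hardware into the corresponding
 * index in the band's bitrate table. Unknown or unsupported rates are
 * logged and mapped to index 0 as a safe fallback.
 */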
5935 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
5936 {
5937 	u8 idx;
5938 
5939 	BUG_ON(band >= 2);
5940 
5941 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5942 		wl1271_error("Illegal RX rate from HW: %d", rate);
5943 		return 0;
5944 	}
5945 
5946 	idx = wl->band_rate_to_idx[band][rate];
5947 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5948 		wl1271_error("Unsupported RX rate from HW: %d", rate);
5949 		return 0;
5950 	}
5951 
5952 	return idx;
5953 }
5954 
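/*
 * Derive the addresses advertised to mac80211 from a single OUI/NIC base
 * by incrementing the NIC part for each supported address. If the chip
 * supports fewer than WLCORE_NUM_MAC_ADDRESSES, the last slot reuses the
 * first address with the locally administered bit set.
 */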
5955 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5956 {
5957 	int i;
5958 
5959 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5960 		     oui, nic);
5961 
5962 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5963 		wl1271_warning("NIC part of the MAC address wraps around!");
5964 
5965 	for (i = 0; i < wl->num_mac_addr; i++) {
5966 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
5967 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
5968 		wl->addresses[i].addr[2] = (u8) oui;
5969 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
5970 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
5971 		wl->addresses[i].addr[5] = (u8) nic;
5972 		nic++;
5973 	}
5974 
5975 	/* we may be one address short at the most */
5976 	/* we may be at most one address short */
5977 
5978 	/*
5979 	 * turn on the LAA bit in the first address and use it as
5980 	 * the last address.
5981 	 */
5982 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5983 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5984 		memcpy(&wl->addresses[idx], &wl->addresses[0],
5985 		       sizeof(wl->addresses[0]));
5986 		/* LAA bit */
5987 		wl->addresses[idx].addr[0] |= BIT(1);
5988 	}
5989 
5990 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5991 	wl->hw->wiphy->addresses = wl->addresses;
5992 }
5993 
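/*
 * Read the chip id and PG version from the hardware, and fetch the fused
 * MAC address when the chip-specific driver provides a get_mac op. The
 * fuse address fields are zeroed first so they stay cleared otherwise.
 */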
5994 static int wl12xx_get_hw_info(struct wl1271 *wl)
5995 {
5996 	int ret;
5997 
5998 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5999 	if (ret < 0)
6000 		goto out;
6001 
6002 	wl->fuse_oui_addr = 0;
6003 	wl->fuse_nic_addr = 0;
6004 
6005 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6006 	if (ret < 0)
6007 		goto out;
6008 
6009 	if (wl->ops->get_mac)
6010 		ret = wl->ops->get_mac(wl);
6011 
6012 out:
6013 	return ret;
6014 }
6015 
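/*
 * Register the device with mac80211. The base MAC address comes from the
 * NVS data when available (the OUI and NIC parts live in bytes 11/10/6
 * and 5/4/3 respectively); an all-zero NVS address falls back to the
 * fused address, skipping the BD_ADDR entry.
 */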
6016 static int wl1271_register_hw(struct wl1271 *wl)
6017 {
6018 	int ret;
6019 	u32 oui_addr = 0, nic_addr = 0;
6020 
6021 	if (wl->mac80211_registered)
6022 		return 0;
6023 
6024 	if (wl->nvs_len >= 12) {
6025 		/* NOTE: The wl->nvs->nvs element must come first; to
6026 		 * simplify the casting, we assume it is at the
6027 		 * beginning of the wl->nvs structure.
6028 		 */
6029 		u8 *nvs_ptr = (u8 *)wl->nvs;
6030 
6031 		oui_addr =
6032 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6033 		nic_addr =
6034 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6035 	}
6036 
6037 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
6038 	if (oui_addr == 0 && nic_addr == 0) {
6039 		oui_addr = wl->fuse_oui_addr;
6040 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6041 		/* the fuse holds the BD_ADDR; the WLAN addresses are the next two */
6042 	}
6043 
6044 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6045 
6046 	ret = ieee80211_register_hw(wl->hw);
6047 	if (ret < 0) {
6048 		wl1271_error("unable to register mac80211 hw: %d", ret);
6049 		goto out;
6050 	}
6051 
6052 	wl->mac80211_registered = true;
6053 
6054 	wl1271_debugfs_init(wl);
6055 
6056 	wl1271_notice("loaded");
6057 
6058 out:
6059 	return ret;
6060 }
6061 
6062 static void wl1271_unregister_hw(struct wl1271 *wl)
6063 {
6064 	if (wl->plt)
6065 		wl1271_plt_stop(wl);
6066 
6067 	ieee80211_unregister_hw(wl->hw);
6068 	wl->mac80211_registered = false;
6069 
6070 }
6071 
6072 static int wl1271_init_ieee80211(struct wl1271 *wl)
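/*
 * Describe the driver's capabilities to mac80211: HW flags, cipher
 * suites, interface modes, scan limits and the per-band channel/rate
 * tables, which are copied so they can be patched per device.
 */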
6073 {
6074 	int i;
6075 	static const u32 cipher_suites[] = {
6076 		WLAN_CIPHER_SUITE_WEP40,
6077 		WLAN_CIPHER_SUITE_WEP104,
6078 		WLAN_CIPHER_SUITE_TKIP,
6079 		WLAN_CIPHER_SUITE_CCMP,
6080 		WL1271_CIPHER_SUITE_GEM,
6081 	};
6082 
6083 	/* The tx descriptor buffer */
6084 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6085 
6086 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6087 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6088 
6089 	/* FIXME: find a proper value; the mac80211 field is specified in
6090 	 * units of beacon intervals, not microseconds */
6091 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6092 
6093 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6094 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6095 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6096 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6097 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6098 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6099 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6100 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6101 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6102 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6103 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6104 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6105 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6106 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6107 
6108 	wl->hw->wiphy->cipher_suites = cipher_suites;
6109 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6110 
6111 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6112 					 BIT(NL80211_IFTYPE_AP) |
6113 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6114 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6115 #ifdef CONFIG_MAC80211_MESH
6116 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6117 #endif
6118 					 BIT(NL80211_IFTYPE_P2P_GO);
6119 
6120 	wl->hw->wiphy->max_scan_ssids = 1;
6121 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6122 	wl->hw->wiphy->max_match_sets = 16;
6123 	/*
6124 	 * The maximum length of the IEs in a scan probe request template
6125 	 * is the maximum possible template size minus the size of the
6126 	 * 802.11 header carried in the template
6127 	 */
6128 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6129 			sizeof(struct ieee80211_header);
6130 
6131 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6132 		sizeof(struct ieee80211_header);
6133 
6134 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6135 
6136 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6137 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6138 				WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6139 				WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6140 
6141 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6142 
6143 	/* make sure all our channels fit in the scanned_ch bitmask */
6144 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6145 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6146 		     WL1271_MAX_CHANNELS);
6147 	/*
6148 	 * clear channel flags from the previous usage
6149 	 * and restore max_power & max_antenna_gain values.
6150 	 */
6151 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6152 		wl1271_band_2ghz.channels[i].flags = 0;
6153 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6154 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6155 	}
6156 
6157 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6158 		wl1271_band_5ghz.channels[i].flags = 0;
6159 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6160 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6161 	}
6162 
6163 	/*
6164 	 * We keep local copies of the band structs because we need to
6165 	 * modify them on a per-device basis.
6166 	 */
6167 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6168 	       sizeof(wl1271_band_2ghz));
6169 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6170 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6171 	       sizeof(*wl->ht_cap));
6172 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6173 	       sizeof(wl1271_band_5ghz));
6174 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6175 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6176 	       sizeof(*wl->ht_cap));
6177 
6178 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6179 		&wl->bands[NL80211_BAND_2GHZ];
6180 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6181 		&wl->bands[NL80211_BAND_5GHZ];
6182 
6183 	/*
6184 	 * allow 4 data queues per supported mac address, plus
6185 	 * 1 CAB queue per mac and one global off-channel Tx queue
6186 	 */
6187 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6188 
6189 	/* the last queue is the offchannel queue */
6190 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6191 	wl->hw->max_rates = 1;
6192 
6193 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6194 
6195 	/* the FW answers probe-requests in AP-mode */
6196 	/* the FW answers probe requests in AP mode */
6197 	wl->hw->wiphy->probe_resp_offload =
6198 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6199 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6200 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6201 
6202 	/* allowed interface combinations */
6203 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6204 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6205 
6206 	/* register vendor commands */
6207 	wlcore_set_vendor_commands(wl->hw->wiphy);
6208 
6209 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6210 
6211 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6212 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6213 
6214 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6215 
6216 	return 0;
6217 }
6218 
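/*
 * Allocate the ieee80211_hw, the chip-private area and all the buffers
 * the core needs before the chip-specific driver takes over: per-link TX
 * queues, work items, the aggregation buffer, the dummy packet, the FW
 * log page and the event mailbox.
 */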
6219 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6220 				     u32 mbox_size)
6221 {
6222 	struct ieee80211_hw *hw;
6223 	struct wl1271 *wl;
6224 	int i, j, ret;
6225 	unsigned int order;
6226 
6227 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6228 	if (!hw) {
6229 		wl1271_error("could not alloc ieee80211_hw");
6230 		ret = -ENOMEM;
6231 		goto err_hw_alloc;
6232 	}
6233 
6234 	wl = hw->priv;
6235 	memset(wl, 0, sizeof(*wl));
6236 
6237 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6238 	if (!wl->priv) {
6239 		wl1271_error("could not alloc wl priv");
6240 		ret = -ENOMEM;
6241 		goto err_priv_alloc;
6242 	}
6243 
6244 	INIT_LIST_HEAD(&wl->wlvif_list);
6245 
6246 	wl->hw = hw;
6247 
6248 	/*
6249 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6250 	 * We don't allocate any additional resources here, so that's fine.
6251 	 */
6252 	for (i = 0; i < NUM_TX_QUEUES; i++)
6253 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6254 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6255 
6256 	skb_queue_head_init(&wl->deferred_rx_queue);
6257 	skb_queue_head_init(&wl->deferred_tx_queue);
6258 
6259 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6260 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6261 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6262 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6263 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6264 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6265 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6266 
6267 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6268 	if (!wl->freezable_wq) {
6269 		ret = -ENOMEM;
6270 		goto err_hw;
6271 	}
6272 
6273 	wl->channel = 0;
6274 	wl->rx_counter = 0;
6275 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6276 	wl->band = NL80211_BAND_2GHZ;
6277 	wl->channel_type = NL80211_CHAN_NO_HT;
6278 	wl->flags = 0;
6279 	wl->sg_enabled = true;
6280 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6281 	wl->recovery_count = 0;
6282 	wl->hw_pg_ver = -1;
6283 	wl->ap_ps_map = 0;
6284 	wl->ap_fw_ps_map = 0;
6285 	wl->quirks = 0;
6286 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6287 	wl->active_sta_count = 0;
6288 	wl->active_link_count = 0;
6289 	wl->fwlog_size = 0;
6290 
6291 	/* The system link is always allocated */
6292 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6293 
6294 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6295 	for (i = 0; i < wl->num_tx_desc; i++)
6296 		wl->tx_frames[i] = NULL;
6297 
6298 	spin_lock_init(&wl->wl_lock);
6299 
6300 	wl->state = WLCORE_STATE_OFF;
6301 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6302 	mutex_init(&wl->mutex);
6303 	mutex_init(&wl->flush_mutex);
6304 	init_completion(&wl->nvs_loading_complete);
6305 
6306 	order = get_order(aggr_buf_size);
6307 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6308 	if (!wl->aggr_buf) {
6309 		ret = -ENOMEM;
6310 		goto err_wq;
6311 	}
6312 	wl->aggr_buf_size = aggr_buf_size;
6313 
6314 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6315 	if (!wl->dummy_packet) {
6316 		ret = -ENOMEM;
6317 		goto err_aggr;
6318 	}
6319 
6320 	/* Allocate one page for the FW log */
6321 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6322 	if (!wl->fwlog) {
6323 		ret = -ENOMEM;
6324 		goto err_dummy_packet;
6325 	}
6326 
6327 	wl->mbox_size = mbox_size;
6328 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6329 	if (!wl->mbox) {
6330 		ret = -ENOMEM;
6331 		goto err_fwlog;
6332 	}
6333 
6334 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6335 	if (!wl->buffer_32) {
6336 		ret = -ENOMEM;
6337 		goto err_mbox;
6338 	}
6339 
6340 	return hw;
6341 
6342 err_mbox:
6343 	kfree(wl->mbox);
6344 
6345 err_fwlog:
6346 	free_page((unsigned long)wl->fwlog);
6347 
6348 err_dummy_packet:
6349 	dev_kfree_skb(wl->dummy_packet);
6350 
6351 err_aggr:
6352 	free_pages((unsigned long)wl->aggr_buf, order);
6353 
6354 err_wq:
6355 	destroy_workqueue(wl->freezable_wq);
6356 
6357 err_hw:
6358 	wl1271_debugfs_exit(wl);
6359 	kfree(wl->priv);
6360 
6361 err_priv_alloc:
6362 	ieee80211_free_hw(hw);
6363 
6364 err_hw_alloc:
6365 
6366 	return ERR_PTR(ret);
6367 }
6368 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6369 
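/*
 * Counterpart of wlcore_alloc_hw(): release the buffers allocated there
 * as well as the firmware image, NVS data and status structures that
 * were loaded later on.
 */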
6370 int wlcore_free_hw(struct wl1271 *wl)
6371 {
6372 	/* Unblock any fwlog readers */
6373 	mutex_lock(&wl->mutex);
6374 	wl->fwlog_size = -1;
6375 	mutex_unlock(&wl->mutex);
6376 
6377 	wlcore_sysfs_free(wl);
6378 
6379 	kfree(wl->buffer_32);
6380 	kfree(wl->mbox);
6381 	free_page((unsigned long)wl->fwlog);
6382 	dev_kfree_skb(wl->dummy_packet);
6383 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6384 
6385 	wl1271_debugfs_exit(wl);
6386 
6387 	vfree(wl->fw);
6388 	wl->fw = NULL;
6389 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6390 	kfree(wl->nvs);
6391 	wl->nvs = NULL;
6392 
6393 	kfree(wl->raw_fw_status);
6394 	kfree(wl->fw_status);
6395 	kfree(wl->tx_res_if);
6396 	destroy_workqueue(wl->freezable_wq);
6397 
6398 	kfree(wl->priv);
6399 	ieee80211_free_hw(wl->hw);
6400 
6401 	return 0;
6402 }
6403 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6404 
6405 #ifdef CONFIG_PM
6406 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6407 	.flags = WIPHY_WOWLAN_ANY,
6408 	.n_patterns = WL1271_MAX_RX_FILTERS,
6409 	.pattern_min_len = 1,
6410 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6411 };
6412 #endif
6413 
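/*
 * Minimal hard IRQ handler used for edge-triggered interrupt lines: it
 * simply wakes the threaded handler (level-triggered lines use
 * IRQF_ONESHOT with the default primary handler instead).
 */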
6414 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6415 {
6416 	return IRQ_WAKE_THREAD;
6417 }
6418 
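/*
 * Completion callback of the asynchronous NVS request. Most of the probe
 * sequence runs here: chip setup, IRQ wiring, hardware identification,
 * mac80211 registration and sysfs setup. Called with fw == NULL when no
 * NVS file is expected or it could not be loaded.
 */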
6419 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6420 {
6421 	struct wl1271 *wl = context;
6422 	struct platform_device *pdev = wl->pdev;
6423 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6424 	struct resource *res;
6425 
6426 	int ret;
6427 	irq_handler_t hardirq_fn = NULL;
6428 
6429 	if (fw) {
6430 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6431 		if (!wl->nvs) {
6432 			wl1271_error("Could not allocate nvs data");
6433 			goto out;
6434 		}
6435 		wl->nvs_len = fw->size;
6436 	} else if (pdev_data->family->nvs_name) {
6437 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6438 			     pdev_data->family->nvs_name);
6439 		wl->nvs = NULL;
6440 		wl->nvs_len = 0;
6441 	} else {
6442 		wl->nvs = NULL;
6443 		wl->nvs_len = 0;
6444 	}
6445 
6446 	ret = wl->ops->setup(wl);
6447 	if (ret < 0)
6448 		goto out_free_nvs;
6449 
6450 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6451 
6452 	/* adjust some runtime configuration parameters */
6453 	wlcore_adjust_conf(wl);
6454 
6455 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6456 	if (!res) {
6457 		wl1271_error("Could not get IRQ resource");
6458 		goto out_free_nvs;
6459 	}
6460 
6461 	wl->irq = res->start;
6462 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6463 	wl->if_ops = pdev_data->if_ops;
6464 
6465 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6466 		hardirq_fn = wlcore_hardirq;
6467 	else
6468 		wl->irq_flags |= IRQF_ONESHOT;
6469 
6470 	ret = wl12xx_set_power_on(wl);
6471 	if (ret < 0)
6472 		goto out_free_nvs;
6473 
6474 	ret = wl12xx_get_hw_info(wl);
6475 	if (ret < 0) {
6476 		wl1271_error("couldn't get hw info");
6477 		wl1271_power_off(wl);
6478 		goto out_free_nvs;
6479 	}
6480 
6481 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6482 				   wl->irq_flags, pdev->name, wl);
6483 	if (ret < 0) {
6484 		wl1271_error("interrupt configuration failed");
6485 		wl1271_power_off(wl);
6486 		goto out_free_nvs;
6487 	}
6488 
6489 #ifdef CONFIG_PM
6490 	ret = enable_irq_wake(wl->irq);
6491 	if (!ret) {
6492 		wl->irq_wake_enabled = true;
6493 		device_init_wakeup(wl->dev, 1);
6494 		if (pdev_data->pwr_in_suspend)
6495 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6496 	}
6497 #endif
6498 	disable_irq(wl->irq);
6499 	wl1271_power_off(wl);
6500 
6501 	ret = wl->ops->identify_chip(wl);
6502 	if (ret < 0)
6503 		goto out_irq;
6504 
6505 	ret = wl1271_init_ieee80211(wl);
6506 	if (ret)
6507 		goto out_irq;
6508 
6509 	ret = wl1271_register_hw(wl);
6510 	if (ret)
6511 		goto out_irq;
6512 
6513 	ret = wlcore_sysfs_init(wl);
6514 	if (ret)
6515 		goto out_unreg;
6516 
6517 	wl->initialized = true;
6518 	goto out;
6519 
6520 out_unreg:
6521 	wl1271_unregister_hw(wl);
6522 
6523 out_irq:
6524 	free_irq(wl->irq, wl);
6525 
6526 out_free_nvs:
6527 	kfree(wl->nvs);
6528 
6529 out:
6530 	release_firmware(fw);
6531 	complete_all(&wl->nvs_loading_complete);
6532 }
6533 
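/*
 * Common probe path for the chip-specific drivers. When the family
 * defines an NVS file name, initialization continues in wlcore_nvs_cb()
 * once the firmware request completes; otherwise the callback is invoked
 * synchronously with a NULL firmware pointer.
 */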
6534 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6535 {
6536 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6537 	const char *nvs_name;
6538 	int ret = 0;
6539 
6540 	if (!wl->ops || !wl->ptable || !pdev_data)
6541 		return -EINVAL;
6542 
6543 	wl->dev = &pdev->dev;
6544 	wl->pdev = pdev;
6545 	platform_set_drvdata(pdev, wl);
6546 
6547 	if (pdev_data->family && pdev_data->family->nvs_name) {
6548 		nvs_name = pdev_data->family->nvs_name;
6549 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6550 					      nvs_name, &pdev->dev, GFP_KERNEL,
6551 					      wl, wlcore_nvs_cb);
6552 		if (ret < 0) {
6553 			wl1271_error("request_firmware_nowait failed for %s: %d",
6554 				     nvs_name, ret);
6555 			complete_all(&wl->nvs_loading_complete);
6556 		}
6557 	} else {
6558 		wlcore_nvs_cb(NULL, wl);
6559 	}
6560 
6561 	return ret;
6562 }
6563 EXPORT_SYMBOL_GPL(wlcore_probe);
6564 
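/*
 * Undo wlcore_nvs_cb(): wait for a pending NVS request to finish,
 * disable the wake IRQ if it was enabled, unregister from mac80211,
 * release the interrupt and free the hw.
 */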
6565 int wlcore_remove(struct platform_device *pdev)
6566 {
6567 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6568 	struct wl1271 *wl = platform_get_drvdata(pdev);
6569 
6570 	if (pdev_data->family && pdev_data->family->nvs_name)
6571 		wait_for_completion(&wl->nvs_loading_complete);
6572 	if (!wl->initialized)
6573 		return 0;
6574 
6575 	if (wl->irq_wake_enabled) {
6576 		device_init_wakeup(wl->dev, 0);
6577 		disable_irq_wake(wl->irq);
6578 	}
6579 	wl1271_unregister_hw(wl);
6580 	free_irq(wl->irq, wl);
6581 	wlcore_free_hw(wl);
6582 
6583 	return 0;
6584 }
6585 EXPORT_SYMBOL_GPL(wlcore_remove);
6586 
6587 u32 wl12xx_debug_level = DEBUG_NONE;
6588 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6589 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6590 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6591 
6592 module_param_named(fwlog, fwlog_param, charp, 0);
6593 MODULE_PARM_DESC(fwlog,
6594 		 "FW logger options: continuous, dbgpins or disable");
6595 
6596 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6597 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6598 
6599 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6600 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6601 
6602 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6603 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6604 
6605 MODULE_LICENSE("GPL");
6606 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6607 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6608