xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/main.c (revision da2ef666)
1 /*
2  * This file is part of wlcore
3  *
4  * Copyright (C) 2008-2010 Nokia Corporation
5  * Copyright (C) 2011-2013 Texas Instruments Inc.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * version 2 as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19  * 02110-1301 USA
20  *
21  */
22 
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 #include <linux/pm_runtime.h>
30 
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "vendor_cmd.h"
41 #include "scan.h"
42 #include "hw_ops.h"
43 #include "sysfs.h"
44 
45 #define WL1271_BOOT_RETRIES 3
46 #define WL1271_SUSPEND_SLEEP 100
47 #define WL1271_WAKEUP_TIMEOUT 500
48 
49 static char *fwlog_param;
50 static int fwlog_mem_blocks = -1;
51 static int bug_on_recovery = -1;
52 static int no_recovery     = -1;
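/*
 * Tuning knobs consumed by wlcore_adjust_conf() below: a value of -1 (or a
 * NULL fwlog_param) means "keep the default from wl->conf". They are
 * presumably exposed as module parameters elsewhere in this file.
 */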
53 
54 static void __wl1271_op_remove_interface(struct wl1271 *wl,
55 					 struct ieee80211_vif *vif,
56 					 bool reset_tx_queues);
57 static void wlcore_op_stop_locked(struct wl1271 *wl);
58 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
59 
60 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
61 {
62 	int ret;
63 
64 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
65 		return -EINVAL;
66 
67 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
68 		return 0;
69 
70 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
71 		return 0;
72 
73 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
74 	if (ret < 0)
75 		return ret;
76 
77 	wl1271_info("Association completed.");
78 	return 0;
79 }
80 
81 static void wl1271_reg_notify(struct wiphy *wiphy,
82 			      struct regulatory_request *request)
83 {
84 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 	struct wl1271 *wl = hw->priv;
86 
87 	/* copy the current dfs region */
88 	if (request)
89 		wl->dfs_region = request->dfs_region;
90 
91 	wlcore_regdomain_config(wl);
92 }
93 
94 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
95 				   bool enable)
96 {
97 	int ret = 0;
98 
99 	/* the caller must hold wl->mutex */
100 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
101 	if (ret < 0)
102 		goto out;
103 
104 	if (enable)
105 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
106 	else
107 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
108 out:
109 	return ret;
110 }
111 
112 /*
113  * this function is called when the rx_streaming interval
114  * has been changed or rx_streaming should be disabled
115  */
116 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
117 {
118 	int ret = 0;
119 	int period = wl->conf.rx_streaming.interval;
120 
121 	/* don't reconfigure if rx_streaming is disabled */
122 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
123 		goto out;
124 
125 	/* reconfigure/disable according to new streaming_period */
126 	if (period &&
127 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
128 	    (wl->conf.rx_streaming.always ||
129 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
130 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
131 	else {
132 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
133 		/* don't cancel_work_sync since the work takes wl->mutex and we might deadlock */
134 		del_timer_sync(&wlvif->rx_streaming_timer);
135 	}
136 out:
137 	return ret;
138 }
139 
140 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
141 {
142 	int ret;
143 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
144 						rx_streaming_enable_work);
145 	struct wl1271 *wl = wlvif->wl;
146 
147 	mutex_lock(&wl->mutex);
148 
149 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
150 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
151 	    (!wl->conf.rx_streaming.always &&
152 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 		goto out;
154 
155 	if (!wl->conf.rx_streaming.interval)
156 		goto out;
157 
158 	ret = pm_runtime_get_sync(wl->dev);
159 	if (ret < 0) {
160 		pm_runtime_put_noidle(wl->dev);
161 		goto out;
162 	}
163 
164 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
165 	if (ret < 0)
166 		goto out_sleep;
167 
168 	/* stop it after some time of inactivity */
169 	mod_timer(&wlvif->rx_streaming_timer,
170 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
171 
172 out_sleep:
173 	pm_runtime_mark_last_busy(wl->dev);
174 	pm_runtime_put_autosuspend(wl->dev);
175 out:
176 	mutex_unlock(&wl->mutex);
177 }
178 
179 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
180 {
181 	int ret;
182 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
183 						rx_streaming_disable_work);
184 	struct wl1271 *wl = wlvif->wl;
185 
186 	mutex_lock(&wl->mutex);
187 
188 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
189 		goto out;
190 
191 	ret = pm_runtime_get_sync(wl->dev);
192 	if (ret < 0) {
193 		pm_runtime_put_noidle(wl->dev);
194 		goto out;
195 	}
196 
197 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
198 	if (ret)
199 		goto out_sleep;
200 
201 out_sleep:
202 	pm_runtime_mark_last_busy(wl->dev);
203 	pm_runtime_put_autosuspend(wl->dev);
204 out:
205 	mutex_unlock(&wl->mutex);
206 }
207 
208 static void wl1271_rx_streaming_timer(struct timer_list *t)
209 {
210 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
211 	struct wl1271 *wl = wlvif->wl;
212 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
213 }
214 
215 /* wl->mutex must be taken */
216 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
217 {
218 	/* if the watchdog is not armed, don't do anything */
219 	if (wl->tx_allocated_blocks == 0)
220 		return;
221 
222 	cancel_delayed_work(&wl->tx_watchdog_work);
223 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
224 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
225 }
226 
227 static void wlcore_rc_update_work(struct work_struct *work)
228 {
229 	int ret;
230 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
231 						rc_update_work);
232 	struct wl1271 *wl = wlvif->wl;
233 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
234 
235 	mutex_lock(&wl->mutex);
236 
237 	if (unlikely(wl->state != WLCORE_STATE_ON))
238 		goto out;
239 
240 	ret = pm_runtime_get_sync(wl->dev);
241 	if (ret < 0) {
242 		pm_runtime_put_noidle(wl->dev);
243 		goto out;
244 	}
245 
246 	if (ieee80211_vif_is_mesh(vif)) {
247 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
248 						     true, wlvif->sta.hlid);
249 		if (ret < 0)
250 			goto out_sleep;
251 	} else {
252 		wlcore_hw_sta_rc_update(wl, wlvif);
253 	}
254 
255 out_sleep:
256 	pm_runtime_mark_last_busy(wl->dev);
257 	pm_runtime_put_autosuspend(wl->dev);
258 out:
259 	mutex_unlock(&wl->mutex);
260 }
261 
262 static void wl12xx_tx_watchdog_work(struct work_struct *work)
263 {
264 	struct delayed_work *dwork;
265 	struct wl1271 *wl;
266 
267 	dwork = to_delayed_work(work);
268 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
269 
270 	mutex_lock(&wl->mutex);
271 
272 	if (unlikely(wl->state != WLCORE_STATE_ON))
273 		goto out;
274 
275 	/* Tx went out in the meantime - everything is ok */
276 	if (unlikely(wl->tx_allocated_blocks == 0))
277 		goto out;
278 
279 	/*
280 	 * if a ROC is in progress, we might not have any Tx for a long
281 	 * time (e.g. pending Tx on the non-ROC channels)
282 	 */
283 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
284 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
285 			     wl->conf.tx.tx_watchdog_timeout);
286 		wl12xx_rearm_tx_watchdog_locked(wl);
287 		goto out;
288 	}
289 
290 	/*
291 	 * if a scan is in progress, we might not have any Tx for a long
292 	 * time
293 	 */
294 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
295 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
296 			     wl->conf.tx.tx_watchdog_timeout);
297 		wl12xx_rearm_tx_watchdog_locked(wl);
298 		goto out;
299 	}
300 
301 	/*
302 	* AP might cache a frame for a long time for a sleeping station,
303 	* so rearm the timer if there's an AP interface with stations. If
304 	* Tx is genuinely stuck we will hopefully discover it when all
305 	* stations are removed due to inactivity.
306 	*/
307 	if (wl->active_sta_count) {
308 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
309 			     " %d stations",
310 			      wl->conf.tx.tx_watchdog_timeout,
311 			      wl->active_sta_count);
312 		wl12xx_rearm_tx_watchdog_locked(wl);
313 		goto out;
314 	}
315 
316 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
317 		     wl->conf.tx.tx_watchdog_timeout);
318 	wl12xx_queue_recovery_work(wl);
319 
320 out:
321 	mutex_unlock(&wl->mutex);
322 }
323 
324 static void wlcore_adjust_conf(struct wl1271 *wl)
325 {
326 
327 	if (fwlog_param) {
328 		if (!strcmp(fwlog_param, "continuous")) {
329 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
330 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
331 		} else if (!strcmp(fwlog_param, "dbgpins")) {
332 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
333 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
334 		} else if (!strcmp(fwlog_param, "disable")) {
335 			wl->conf.fwlog.mem_blocks = 0;
336 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
337 		} else {
338 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
339 		}
340 	}
341 
342 	if (bug_on_recovery != -1)
343 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
344 
345 	if (no_recovery != -1)
346 		wl->conf.recovery.no_recovery = (u8) no_recovery;
347 }
348 
349 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
350 					struct wl12xx_vif *wlvif,
351 					u8 hlid, u8 tx_pkts)
352 {
353 	bool fw_ps;
354 
355 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
356 
357 	/*
358 	 * Wake up from high level PS if the STA is asleep with too few
359 	 * packets in FW or if the STA is awake.
360 	 */
361 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
362 		wl12xx_ps_link_end(wl, wlvif, hlid);
363 
364 	/*
365 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
366 	 * Make an exception if this is the only connected link. In this
367 	 * case FW-memory congestion is less of a problem.
368 	 * Note that a single connected STA means 2*ap_count + 1 active links,
369 	 * since we must account for the global and broadcast AP links
370 	 * for each AP. The "fw_ps" check assures us the other link is a STA
371 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
372 	 */
373 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
374 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
375 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
376 }
377 
378 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
379 					   struct wl12xx_vif *wlvif,
380 					   struct wl_fw_status *status)
381 {
382 	unsigned long cur_fw_ps_map;
383 	u8 hlid;
384 
385 	cur_fw_ps_map = status->link_ps_bitmap;
386 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
387 		wl1271_debug(DEBUG_PSM,
388 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
389 			     wl->ap_fw_ps_map, cur_fw_ps_map,
390 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
391 
392 		wl->ap_fw_ps_map = cur_fw_ps_map;
393 	}
394 
395 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
396 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
397 					    wl->links[hlid].allocated_pkts);
398 }
399 
400 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
401 {
402 	struct wl12xx_vif *wlvif;
403 	u32 old_tx_blk_count = wl->tx_blocks_available;
404 	int avail, freed_blocks;
405 	int i;
406 	int ret;
407 	struct wl1271_link *lnk;
408 
409 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
410 				   wl->raw_fw_status,
411 				   wl->fw_status_len, false);
412 	if (ret < 0)
413 		return ret;
414 
415 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
416 
417 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
418 		     "drv_rx_counter = %d, tx_results_counter = %d)",
419 		     status->intr,
420 		     status->fw_rx_counter,
421 		     status->drv_rx_counter,
422 		     status->tx_results_counter);
423 
424 	for (i = 0; i < NUM_TX_QUEUES; i++) {
425 		/* prevent wrap-around in freed-packets counter */
426 		wl->tx_allocated_pkts[i] -=
427 				(status->counters.tx_released_pkts[i] -
428 				wl->tx_pkts_freed[i]) & 0xff;
429 
430 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
431 	}
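	/*
	 * The "& 0xff" treats the FW counter as 8 bits wide: if, for example,
	 * the last snapshot was 250 and the FW now reports 5, (5 - 250) & 0xff
	 * yields 11, the number of packets actually freed across the wrap.
	 * The per-link loop below relies on the same trick.
	 */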
432 
433 
434 	for_each_set_bit(i, wl->links_map, wl->num_links) {
435 		u8 diff;
436 		lnk = &wl->links[i];
437 
438 		/* prevent wrap-around in freed-packets counter */
439 		diff = (status->counters.tx_lnk_free_pkts[i] -
440 		       lnk->prev_freed_pkts) & 0xff;
441 
442 		if (diff == 0)
443 			continue;
444 
445 		lnk->allocated_pkts -= diff;
446 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
447 
448 		/* accumulate the prev_freed_pkts counter */
449 		lnk->total_freed_pkts += diff;
450 	}
451 
452 	/* prevent wrap-around in total blocks counter */
453 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
454 		freed_blocks = status->total_released_blks -
455 			       wl->tx_blocks_freed;
456 	else
457 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
458 			       status->total_released_blks;
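	/*
	 * Same idea for the 32-bit block counter: e.g. if tx_blocks_freed was
	 * 0xfffffff0 and the FW now reports 0x10, the wrap-around branch gives
	 * 0x100000000 - 0xfffffff0 + 0x10 = 0x20 freed blocks.
	 */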
459 
460 	wl->tx_blocks_freed = status->total_released_blks;
461 
462 	wl->tx_allocated_blocks -= freed_blocks;
463 
464 	/*
465 	 * If the FW freed some blocks:
466 	 * If we still have allocated blocks - re-arm the timer, Tx is
467 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
468 	 */
469 	if (freed_blocks) {
470 		if (wl->tx_allocated_blocks)
471 			wl12xx_rearm_tx_watchdog_locked(wl);
472 		else
473 			cancel_delayed_work(&wl->tx_watchdog_work);
474 	}
475 
476 	avail = status->tx_total - wl->tx_allocated_blocks;
477 
478 	/*
479 	 * The FW might change the total number of TX memblocks before
480 	 * we get a notification about blocks being released. Thus, the
481 	 * available blocks calculation might yield a temporary result
482 	 * which is lower than the actual available blocks. Keeping in
483 	 * mind that only blocks that were allocated can be moved from
484 	 * TX to RX, tx_blocks_available should never decrease here.
485 	 */
486 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
487 				      avail);
488 
489 	/* if more blocks are available now, tx work can be scheduled */
490 	if (wl->tx_blocks_available > old_tx_blk_count)
491 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
492 
493 	/* for AP update num of allocated TX blocks per link and ps status */
494 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
495 		wl12xx_irq_update_links_status(wl, wlvif, status);
496 	}
497 
498 	/* update the host-chipset time offset */
499 	wl->time_offset = (ktime_get_boot_ns() >> 10) -
500 		(s64)(status->fw_localtime);
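	/*
	 * The ">> 10" is a cheap nanoseconds-to-roughly-microseconds
	 * conversion (divide by 1024 instead of 1000), presumably matching
	 * the resolution of the FW's fw_localtime clock.
	 */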
501 
502 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
503 
504 	return 0;
505 }
506 
507 static void wl1271_flush_deferred_work(struct wl1271 *wl)
508 {
509 	struct sk_buff *skb;
510 
511 	/* Pass all received frames to the network stack */
512 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
513 		ieee80211_rx_ni(wl->hw, skb);
514 
515 	/* Return sent skbs to the network stack */
516 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
517 		ieee80211_tx_status_ni(wl->hw, skb);
518 }
519 
520 static void wl1271_netstack_work(struct work_struct *work)
521 {
522 	struct wl1271 *wl =
523 		container_of(work, struct wl1271, netstack_work);
524 
525 	do {
526 		wl1271_flush_deferred_work(wl);
527 	} while (skb_queue_len(&wl->deferred_rx_queue));
528 }
529 
530 #define WL1271_IRQ_MAX_LOOPS 256
531 
532 static int wlcore_irq_locked(struct wl1271 *wl)
533 {
534 	int ret = 0;
535 	u32 intr;
536 	int loopcount = WL1271_IRQ_MAX_LOOPS;
537 	bool done = false;
538 	unsigned int defer_count;
539 	unsigned long flags;
540 
541 	/*
542 	 * If an edge-triggered interrupt must be used, we cannot iterate
543 	 * more than once without introducing race conditions with the hardirq.
544 	 */
545 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
546 		loopcount = 1;
547 
548 	wl1271_debug(DEBUG_IRQ, "IRQ work");
549 
550 	if (unlikely(wl->state != WLCORE_STATE_ON))
551 		goto out;
552 
553 	ret = pm_runtime_get_sync(wl->dev);
554 	if (ret < 0) {
555 		pm_runtime_put_noidle(wl->dev);
556 		goto out;
557 	}
558 
559 	while (!done && loopcount--) {
560 		/*
561 		 * In order to avoid a race with the hardirq, clear the flag
562 		 * before acknowledging the chip.
563 		 */
564 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
565 		smp_mb__after_atomic();
566 
567 		ret = wlcore_fw_status(wl, wl->fw_status);
568 		if (ret < 0)
569 			goto out;
570 
571 		wlcore_hw_tx_immediate_compl(wl);
572 
573 		intr = wl->fw_status->intr;
574 		intr &= WLCORE_ALL_INTR_MASK;
575 		if (!intr) {
576 			done = true;
577 			continue;
578 		}
579 
580 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
581 			wl1271_error("HW watchdog interrupt received! starting recovery.");
582 			wl->watchdog_recovery = true;
583 			ret = -EIO;
584 
585 			/* restarting the chip. ignore any other interrupt. */
586 			goto out;
587 		}
588 
589 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
590 			wl1271_error("SW watchdog interrupt received! "
591 				     "starting recovery.");
592 			wl->watchdog_recovery = true;
593 			ret = -EIO;
594 
595 			/* restarting the chip. ignore any other interrupt. */
596 			goto out;
597 		}
598 
599 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
600 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
601 
602 			ret = wlcore_rx(wl, wl->fw_status);
603 			if (ret < 0)
604 				goto out;
605 
606 			/* Check if any tx blocks were freed */
607 			spin_lock_irqsave(&wl->wl_lock, flags);
608 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
609 			    wl1271_tx_total_queue_count(wl) > 0) {
610 				spin_unlock_irqrestore(&wl->wl_lock, flags);
611 				/*
612 				 * In order to avoid starvation of the TX path,
613 				 * call the work function directly.
614 				 */
615 				ret = wlcore_tx_work_locked(wl);
616 				if (ret < 0)
617 					goto out;
618 			} else {
619 				spin_unlock_irqrestore(&wl->wl_lock, flags);
620 			}
621 
622 			/* check for tx results */
623 			ret = wlcore_hw_tx_delayed_compl(wl);
624 			if (ret < 0)
625 				goto out;
626 
627 			/* Make sure the deferred queues don't get too long */
628 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
629 				      skb_queue_len(&wl->deferred_rx_queue);
630 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
631 				wl1271_flush_deferred_work(wl);
632 		}
633 
634 		if (intr & WL1271_ACX_INTR_EVENT_A) {
635 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
636 			ret = wl1271_event_handle(wl, 0);
637 			if (ret < 0)
638 				goto out;
639 		}
640 
641 		if (intr & WL1271_ACX_INTR_EVENT_B) {
642 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
643 			ret = wl1271_event_handle(wl, 1);
644 			if (ret < 0)
645 				goto out;
646 		}
647 
648 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
649 			wl1271_debug(DEBUG_IRQ,
650 				     "WL1271_ACX_INTR_INIT_COMPLETE");
651 
652 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
653 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
654 	}
655 
656 	pm_runtime_mark_last_busy(wl->dev);
657 	pm_runtime_put_autosuspend(wl->dev);
658 
659 out:
660 	return ret;
661 }
662 
663 static irqreturn_t wlcore_irq(int irq, void *cookie)
664 {
665 	int ret;
666 	unsigned long flags;
667 	struct wl1271 *wl = cookie;
668 
669 	/* complete the ELP wake-up completion, if a waiter is pending */
670 	spin_lock_irqsave(&wl->wl_lock, flags);
671 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
672 	if (wl->elp_compl) {
673 		complete(wl->elp_compl);
674 		wl->elp_compl = NULL;
675 	}
676 
677 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
678 		/* don't enqueue a work right now. mark it as pending */
679 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
680 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
681 		disable_irq_nosync(wl->irq);
682 		pm_wakeup_event(wl->dev, 0);
683 		spin_unlock_irqrestore(&wl->wl_lock, flags);
684 		return IRQ_HANDLED;
685 	}
686 	spin_unlock_irqrestore(&wl->wl_lock, flags);
687 
688 	/* TX might be handled here, avoid redundant work */
689 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
690 	cancel_work_sync(&wl->tx_work);
691 
692 	mutex_lock(&wl->mutex);
693 
694 	ret = wlcore_irq_locked(wl);
695 	if (ret)
696 		wl12xx_queue_recovery_work(wl);
697 
698 	spin_lock_irqsave(&wl->wl_lock, flags);
699 	/* In case TX was not handled here, queue TX work */
700 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
701 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
702 	    wl1271_tx_total_queue_count(wl) > 0)
703 		ieee80211_queue_work(wl->hw, &wl->tx_work);
704 	spin_unlock_irqrestore(&wl->wl_lock, flags);
705 
706 	mutex_unlock(&wl->mutex);
707 
708 	return IRQ_HANDLED;
709 }
710 
711 struct vif_counter_data {
712 	u8 counter;
713 
714 	struct ieee80211_vif *cur_vif;
715 	bool cur_vif_running;
716 };
717 
718 static void wl12xx_vif_count_iter(void *data, u8 *mac,
719 				  struct ieee80211_vif *vif)
720 {
721 	struct vif_counter_data *counter = data;
722 
723 	counter->counter++;
724 	if (counter->cur_vif == vif)
725 		counter->cur_vif_running = true;
726 }
727 
728 /* caller must not hold wl->mutex, as it might deadlock */
729 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
730 			       struct ieee80211_vif *cur_vif,
731 			       struct vif_counter_data *data)
732 {
733 	memset(data, 0, sizeof(*data));
734 	data->cur_vif = cur_vif;
735 
736 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
737 					    wl12xx_vif_count_iter, data);
738 }
739 
740 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
741 {
742 	const struct firmware *fw;
743 	const char *fw_name;
744 	enum wl12xx_fw_type fw_type;
745 	int ret;
746 
747 	if (plt) {
748 		fw_type = WL12XX_FW_TYPE_PLT;
749 		fw_name = wl->plt_fw_name;
750 	} else {
751 		/*
752 		 * we can't call wl12xx_get_vif_count() here because
753 		 * wl->mutex is taken, so use the cached last_vif_count value
754 		 */
755 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
756 			fw_type = WL12XX_FW_TYPE_MULTI;
757 			fw_name = wl->mr_fw_name;
758 		} else {
759 			fw_type = WL12XX_FW_TYPE_NORMAL;
760 			fw_name = wl->sr_fw_name;
761 		}
762 	}
763 
764 	if (wl->fw_type == fw_type)
765 		return 0;
766 
767 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
768 
769 	ret = request_firmware(&fw, fw_name, wl->dev);
770 
771 	if (ret < 0) {
772 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
773 		return ret;
774 	}
775 
776 	if (fw->size % 4) {
777 		wl1271_error("firmware size is not multiple of 32 bits: %zu",
778 			     fw->size);
779 		ret = -EILSEQ;
780 		goto out;
781 	}
782 
783 	vfree(wl->fw);
784 	wl->fw_type = WL12XX_FW_TYPE_NONE;
785 	wl->fw_len = fw->size;
786 	wl->fw = vmalloc(wl->fw_len);
787 
788 	if (!wl->fw) {
789 		wl1271_error("could not allocate memory for the firmware");
790 		ret = -ENOMEM;
791 		goto out;
792 	}
793 
794 	memcpy(wl->fw, fw->data, wl->fw_len);
795 	ret = 0;
796 	wl->fw_type = fw_type;
797 out:
798 	release_firmware(fw);
799 
800 	return ret;
801 }
802 
803 void wl12xx_queue_recovery_work(struct wl1271 *wl)
804 {
805 	/* Avoid a recursive recovery */
806 	if (wl->state == WLCORE_STATE_ON) {
807 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
808 				  &wl->flags));
809 
810 		wl->state = WLCORE_STATE_RESTARTING;
811 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
812 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
813 	}
814 }
815 
816 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
817 {
818 	size_t len;
819 
820 	/* Make sure we have enough room */
821 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
822 
823 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
824 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
825 	wl->fwlog_size += len;
826 
827 	return len;
828 }
829 
830 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
831 {
832 	u32 end_of_log = 0;
833 	int error;
834 
835 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
836 		return;
837 
838 	wl1271_info("Reading FW panic log");
839 
840 	/*
841 	 * Make sure the chip is awake and the logger isn't active.
842 	 * Do not send a stop fwlog command if the fw is hung or if
843 	 * dbgpins are used (due to some fw bug).
844 	 */
845 	error = pm_runtime_get_sync(wl->dev);
846 	if (error < 0) {
847 		pm_runtime_put_noidle(wl->dev);
848 		return;
849 	}
850 	if (!wl->watchdog_recovery &&
851 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
852 		wl12xx_cmd_stop_fwlog(wl);
853 
854 	/* Traverse the memory blocks linked list */
855 	do {
856 		end_of_log = wlcore_event_fw_logger(wl);
857 		if (end_of_log == 0) {
858 			msleep(100);
859 			end_of_log = wlcore_event_fw_logger(wl);
860 		}
861 	} while (end_of_log != 0);
862 }
863 
864 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
865 				   u8 hlid, struct ieee80211_sta *sta)
866 {
867 	struct wl1271_station *wl_sta;
868 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
869 
870 	wl_sta = (void *)sta->drv_priv;
871 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
872 
873 	/*
874 	 * increment the initial seq number on recovery to account for
875 	 * transmitted packets that we haven't yet got in the FW status
876 	 */
877 	if (wlvif->encryption_type == KEY_GEM)
878 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
879 
880 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
881 		wl_sta->total_freed_pkts += sqn_recovery_padding;
882 }
883 
884 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
885 					struct wl12xx_vif *wlvif,
886 					u8 hlid, const u8 *addr)
887 {
888 	struct ieee80211_sta *sta;
889 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
890 
891 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
892 		    is_zero_ether_addr(addr)))
893 		return;
894 
895 	rcu_read_lock();
896 	sta = ieee80211_find_sta(vif, addr);
897 	if (sta)
898 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
899 	rcu_read_unlock();
900 }
901 
902 static void wlcore_print_recovery(struct wl1271 *wl)
903 {
904 	u32 pc = 0;
905 	u32 hint_sts = 0;
906 	int ret;
907 
908 	wl1271_info("Hardware recovery in progress. FW ver: %s",
909 		    wl->chip.fw_ver_str);
910 
911 	/* change partitions momentarily so we can read the FW pc */
912 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
913 	if (ret < 0)
914 		return;
915 
916 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
917 	if (ret < 0)
918 		return;
919 
920 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
921 	if (ret < 0)
922 		return;
923 
924 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
925 				pc, hint_sts, ++wl->recovery_count);
926 
927 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
928 }
929 
930 
931 static void wl1271_recovery_work(struct work_struct *work)
932 {
933 	struct wl1271 *wl =
934 		container_of(work, struct wl1271, recovery_work);
935 	struct wl12xx_vif *wlvif;
936 	struct ieee80211_vif *vif;
937 	int error;
938 
939 	mutex_lock(&wl->mutex);
940 
941 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
942 		goto out_unlock;
943 
944 	error = pm_runtime_get_sync(wl->dev);
945 	if (error < 0) {
946 		wl1271_warning("Enable for recovery failed");
947 		pm_runtime_put_noidle(wl->dev);
948 	}
949 	wlcore_disable_interrupts_nosync(wl);
950 
951 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
952 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
953 			wl12xx_read_fwlog_panic(wl);
954 		wlcore_print_recovery(wl);
955 	}
956 
957 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
958 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
959 
960 	if (wl->conf.recovery.no_recovery) {
961 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
962 		goto out_unlock;
963 	}
964 
965 	/* Prevent spurious TX during FW restart */
966 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
967 
968 	/* reboot the chipset */
969 	while (!list_empty(&wl->wlvif_list)) {
970 		wlvif = list_first_entry(&wl->wlvif_list,
971 				       struct wl12xx_vif, list);
972 		vif = wl12xx_wlvif_to_vif(wlvif);
973 
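		/*
		 * Stash the link's freed-packets counter (plus the recovery
		 * padding) in the station entry before the interface is torn
		 * down, so the TX sequence numbers stay monotonic after the
		 * FW restart; see wlcore_save_freed_pkts() above.
		 */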
974 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
975 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
976 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
977 						    vif->bss_conf.bssid);
978 		}
979 
980 		__wl1271_op_remove_interface(wl, vif, false);
981 	}
982 
983 	wlcore_op_stop_locked(wl);
984 	pm_runtime_mark_last_busy(wl->dev);
985 	pm_runtime_put_autosuspend(wl->dev);
986 
987 	ieee80211_restart_hw(wl->hw);
988 
989 	/*
990 	 * It's safe to enable TX now - the queues are stopped after a request
991 	 * to restart the HW.
992 	 */
993 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
994 
995 out_unlock:
996 	wl->watchdog_recovery = false;
997 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
998 	mutex_unlock(&wl->mutex);
999 }
1000 
1001 static int wlcore_fw_wakeup(struct wl1271 *wl)
1002 {
1003 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1004 }
1005 
1006 static int wl1271_setup(struct wl1271 *wl)
1007 {
1008 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1009 	if (!wl->raw_fw_status)
1010 		goto err;
1011 
1012 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1013 	if (!wl->fw_status)
1014 		goto err;
1015 
1016 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1017 	if (!wl->tx_res_if)
1018 		goto err;
1019 
1020 	return 0;
1021 err:
1022 	kfree(wl->fw_status);
1023 	kfree(wl->raw_fw_status);
1024 	return -ENOMEM;
1025 }
1026 
1027 static int wl12xx_set_power_on(struct wl1271 *wl)
1028 {
1029 	int ret;
1030 
1031 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1032 	ret = wl1271_power_on(wl);
1033 	if (ret < 0)
1034 		goto out;
1035 	msleep(WL1271_POWER_ON_SLEEP);
1036 	wl1271_io_reset(wl);
1037 	wl1271_io_init(wl);
1038 
1039 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1040 	if (ret < 0)
1041 		goto fail;
1042 
1043 	/* ELP module wake up */
1044 	ret = wlcore_fw_wakeup(wl);
1045 	if (ret < 0)
1046 		goto fail;
1047 
1048 out:
1049 	return ret;
1050 
1051 fail:
1052 	wl1271_power_off(wl);
1053 	return ret;
1054 }
1055 
1056 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1057 {
1058 	int ret = 0;
1059 
1060 	ret = wl12xx_set_power_on(wl);
1061 	if (ret < 0)
1062 		goto out;
1063 
1064 	/*
1065 	 * For wl127x based devices we could use the default block
1066 	 * size (512 bytes), but due to a bug in the sdio driver, we
1067 	 * need to set it explicitly after the chip is powered on.  To
1068 	 * simplify the code and since the performance impact is
1069 	 * negligible, we use the same block size for all different
1070 	 * chip types.
1071 	 *
1072 	 * Check if the bus supports blocksize alignment and, if it
1073 	 * doesn't, make sure we don't have the quirk.
1074 	 */
1075 	if (!wl1271_set_block_size(wl))
1076 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1077 
1078 	/* TODO: make sure the lower driver has set things up correctly */
1079 
1080 	ret = wl1271_setup(wl);
1081 	if (ret < 0)
1082 		goto out;
1083 
1084 	ret = wl12xx_fetch_firmware(wl, plt);
1085 	if (ret < 0)
1086 		goto out;
1087 
1088 out:
1089 	return ret;
1090 }
1091 
1092 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1093 {
1094 	int retries = WL1271_BOOT_RETRIES;
1095 	struct wiphy *wiphy = wl->hw->wiphy;
1096 
1097 	static const char* const PLT_MODE[] = {
1098 		"PLT_OFF",
1099 		"PLT_ON",
1100 		"PLT_FEM_DETECT",
1101 		"PLT_CHIP_AWAKE"
1102 	};
1103 
1104 	int ret;
1105 
1106 	mutex_lock(&wl->mutex);
1107 
1108 	wl1271_notice("power up");
1109 
1110 	if (wl->state != WLCORE_STATE_OFF) {
1111 		wl1271_error("cannot go into PLT state because not "
1112 			     "in off state: %d", wl->state);
1113 		ret = -EBUSY;
1114 		goto out;
1115 	}
1116 
1117 	/* Indicate to lower levels that we are now in PLT mode */
1118 	wl->plt = true;
1119 	wl->plt_mode = plt_mode;
1120 
1121 	while (retries) {
1122 		retries--;
1123 		ret = wl12xx_chip_wakeup(wl, true);
1124 		if (ret < 0)
1125 			goto power_off;
1126 
1127 		if (plt_mode != PLT_CHIP_AWAKE) {
1128 			ret = wl->ops->plt_init(wl);
1129 			if (ret < 0)
1130 				goto power_off;
1131 		}
1132 
1133 		wl->state = WLCORE_STATE_ON;
1134 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1135 			      PLT_MODE[plt_mode],
1136 			      wl->chip.fw_ver_str);
1137 
1138 		/* update hw/fw version info in wiphy struct */
1139 		wiphy->hw_version = wl->chip.id;
1140 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1141 			sizeof(wiphy->fw_version));
1142 
1143 		goto out;
1144 
1145 power_off:
1146 		wl1271_power_off(wl);
1147 	}
1148 
1149 	wl->plt = false;
1150 	wl->plt_mode = PLT_OFF;
1151 
1152 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1153 		     WL1271_BOOT_RETRIES);
1154 out:
1155 	mutex_unlock(&wl->mutex);
1156 
1157 	return ret;
1158 }
1159 
1160 int wl1271_plt_stop(struct wl1271 *wl)
1161 {
1162 	int ret = 0;
1163 
1164 	wl1271_notice("power down");
1165 
1166 	/*
1167 	 * Interrupts must be disabled before setting the state to OFF.
1168 	 * Otherwise, the interrupt handler might be called and exit without
1169 	 * reading the interrupt status.
1170 	 */
1171 	wlcore_disable_interrupts(wl);
1172 	mutex_lock(&wl->mutex);
1173 	if (!wl->plt) {
1174 		mutex_unlock(&wl->mutex);
1175 
1176 		/*
1177 		 * This will not necessarily enable interrupts as interrupts
1178 		 * may have been disabled when op_stop was called. It will,
1179 		 * however, balance the above call to disable_interrupts().
1180 		 */
1181 		wlcore_enable_interrupts(wl);
1182 
1183 		wl1271_error("cannot power down because not in PLT "
1184 			     "state: %d", wl->state);
1185 		ret = -EBUSY;
1186 		goto out;
1187 	}
1188 
1189 	mutex_unlock(&wl->mutex);
1190 
1191 	wl1271_flush_deferred_work(wl);
1192 	cancel_work_sync(&wl->netstack_work);
1193 	cancel_work_sync(&wl->recovery_work);
1194 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1195 
1196 	mutex_lock(&wl->mutex);
1197 	wl1271_power_off(wl);
1198 	wl->flags = 0;
1199 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1200 	wl->state = WLCORE_STATE_OFF;
1201 	wl->plt = false;
1202 	wl->plt_mode = PLT_OFF;
1203 	wl->rx_counter = 0;
1204 	mutex_unlock(&wl->mutex);
1205 
1206 out:
1207 	return ret;
1208 }
1209 
1210 static void wl1271_op_tx(struct ieee80211_hw *hw,
1211 			 struct ieee80211_tx_control *control,
1212 			 struct sk_buff *skb)
1213 {
1214 	struct wl1271 *wl = hw->priv;
1215 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1216 	struct ieee80211_vif *vif = info->control.vif;
1217 	struct wl12xx_vif *wlvif = NULL;
1218 	unsigned long flags;
1219 	int q, mapping;
1220 	u8 hlid;
1221 
1222 	if (!vif) {
1223 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1224 		ieee80211_free_txskb(hw, skb);
1225 		return;
1226 	}
1227 
1228 	wlvif = wl12xx_vif_to_data(vif);
1229 	mapping = skb_get_queue_mapping(skb);
1230 	q = wl1271_tx_get_queue(mapping);
1231 
1232 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1233 
1234 	spin_lock_irqsave(&wl->wl_lock, flags);
1235 
1236 	/*
1237 	 * drop the packet if the link is invalid or the queue is stopped
1238 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1239 	 * allow these packets through.
1240 	 */
1241 	if (hlid == WL12XX_INVALID_LINK_ID ||
1242 	    (!test_bit(hlid, wlvif->links_map)) ||
1243 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1244 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1245 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1246 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1247 		ieee80211_free_txskb(hw, skb);
1248 		goto out;
1249 	}
1250 
1251 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1252 		     hlid, q, skb->len);
1253 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1254 
1255 	wl->tx_queue_count[q]++;
1256 	wlvif->tx_queue_count[q]++;
1257 
1258 	/*
1259 	 * The workqueue is slow to process the tx_queue and we need to stop
1260 	 * the queue here, otherwise the queue will get too long.
1261 	 */
1262 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1263 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1264 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1265 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1266 		wlcore_stop_queue_locked(wl, wlvif, q,
1267 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1268 	}
1269 
1270 	/*
1271 	 * The chip specific setup must run before the first TX packet -
1272 	 * before that, the tx_work will not be initialized!
1273 	 */
1274 
1275 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1276 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1277 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1278 
1279 out:
1280 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1281 }
1282 
1283 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1284 {
1285 	unsigned long flags;
1286 	int q;
1287 
1288 	/* no need to queue a new dummy packet if one is already pending */
1289 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1290 		return 0;
1291 
1292 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1293 
1294 	spin_lock_irqsave(&wl->wl_lock, flags);
1295 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1296 	wl->tx_queue_count[q]++;
1297 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1298 
1299 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1300 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1301 		return wlcore_tx_work_locked(wl);
1302 
1303 	/*
1304 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1305 	 * interrupt handler function
1306 	 */
1307 	return 0;
1308 }
1309 
1310 /*
1311  * The size of the dummy packet should be at least 1400 bytes. However, in
1312  * order to minimize the number of bus transactions, aligning it to 512-byte
1313  * boundaries could be beneficial, performance-wise.
1314  */
1315 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
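/* ALIGN(1400, 512) rounds up to the next 512-byte multiple, i.e. 1536 bytes */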
1316 
1317 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1318 {
1319 	struct sk_buff *skb;
1320 	struct ieee80211_hdr_3addr *hdr;
1321 	unsigned int dummy_packet_size;
1322 
1323 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1324 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1325 
1326 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1327 	if (!skb) {
1328 		wl1271_warning("Failed to allocate a dummy packet skb");
1329 		return NULL;
1330 	}
1331 
1332 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1333 
1334 	hdr = skb_put_zero(skb, sizeof(*hdr));
1335 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1336 					 IEEE80211_STYPE_NULLFUNC |
1337 					 IEEE80211_FCTL_TODS);
1338 
1339 	skb_put_zero(skb, dummy_packet_size);
1340 
1341 	/* Dummy packets require the TID to be management */
1342 	skb->priority = WL1271_TID_MGMT;
1343 
1344 	/* Initialize all fields that might be used */
1345 	skb_set_queue_mapping(skb, 0);
1346 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1347 
1348 	return skb;
1349 }
1350 
1351 
1352 static int
1353 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1354 {
1355 	int num_fields = 0, in_field = 0, fields_size = 0;
1356 	int i, pattern_len = 0;
1357 
1358 	if (!p->mask) {
1359 		wl1271_warning("No mask in WoWLAN pattern");
1360 		return -EINVAL;
1361 	}
1362 
1363 	/*
1364 	 * The pattern is broken up into segments of bytes at different offsets
1365 	 * that need to be checked by the FW filter. Each segment is called
1366 	 * a field in the FW API. We verify that the total number of fields
1367 	 * required for this pattern won't exceed FW limits (8)
1368 	 * and that the total fields buffer won't exceed the FW limit.
1369 	 * Note that if a pattern crosses the Ethernet/IP header
1370 	 * boundary, a new field is required.
1371 	 */
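	/*
	 * For example (assuming WL1271_RX_FILTER_ETH_HEADER_SIZE is 14): a
	 * mask selecting bytes 12..16 of the pattern crosses the Ethernet/IP
	 * boundary and therefore counts as two fields, bytes 12-13 in the
	 * Ethernet header and bytes 14-16 at the start of the IP header.
	 */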
1372 	for (i = 0; i < p->pattern_len; i++) {
1373 		if (test_bit(i, (unsigned long *)p->mask)) {
1374 			if (!in_field) {
1375 				in_field = 1;
1376 				pattern_len = 1;
1377 			} else {
1378 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1379 					num_fields++;
1380 					fields_size += pattern_len +
1381 						RX_FILTER_FIELD_OVERHEAD;
1382 					pattern_len = 1;
1383 				} else
1384 					pattern_len++;
1385 			}
1386 		} else {
1387 			if (in_field) {
1388 				in_field = 0;
1389 				fields_size += pattern_len +
1390 					RX_FILTER_FIELD_OVERHEAD;
1391 				num_fields++;
1392 			}
1393 		}
1394 	}
1395 
1396 	if (in_field) {
1397 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1398 		num_fields++;
1399 	}
1400 
1401 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1402 		wl1271_warning("RX Filter too complex. Too many segments");
1403 		return -EINVAL;
1404 	}
1405 
1406 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1407 		wl1271_warning("RX filter pattern is too big");
1408 		return -E2BIG;
1409 	}
1410 
1411 	return 0;
1412 }
1413 
1414 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1415 {
1416 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1417 }
1418 
1419 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1420 {
1421 	int i;
1422 
1423 	if (filter == NULL)
1424 		return;
1425 
1426 	for (i = 0; i < filter->num_fields; i++)
1427 		kfree(filter->fields[i].pattern);
1428 
1429 	kfree(filter);
1430 }
1431 
1432 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1433 				 u16 offset, u8 flags,
1434 				 const u8 *pattern, u8 len)
1435 {
1436 	struct wl12xx_rx_filter_field *field;
1437 
1438 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1439 		wl1271_warning("Max fields per RX filter. can't alloc another");
1440 		return -EINVAL;
1441 	}
1442 
1443 	field = &filter->fields[filter->num_fields];
1444 
1445 	field->pattern = kzalloc(len, GFP_KERNEL);
1446 	if (!field->pattern) {
1447 		wl1271_warning("Failed to allocate RX filter pattern");
1448 		return -ENOMEM;
1449 	}
1450 
1451 	filter->num_fields++;
1452 
1453 	field->offset = cpu_to_le16(offset);
1454 	field->flags = flags;
1455 	field->len = len;
1456 	memcpy(field->pattern, pattern, len);
1457 
1458 	return 0;
1459 }
1460 
1461 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1462 {
1463 	int i, fields_size = 0;
1464 
1465 	for (i = 0; i < filter->num_fields; i++)
1466 		fields_size += filter->fields[i].len +
1467 			sizeof(struct wl12xx_rx_filter_field) -
1468 			sizeof(u8 *);
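	/*
	 * Each flattened field occupies the struct minus the pattern pointer
	 * (presumably its last member), with the pattern bytes appended
	 * in-line instead; this must match the layout produced by
	 * wl1271_rx_filter_flatten_fields() below.
	 */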
1469 
1470 	return fields_size;
1471 }
1472 
1473 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1474 				    u8 *buf)
1475 {
1476 	int i;
1477 	struct wl12xx_rx_filter_field *field;
1478 
1479 	for (i = 0; i < filter->num_fields; i++) {
1480 		field = (struct wl12xx_rx_filter_field *)buf;
1481 
1482 		field->offset = filter->fields[i].offset;
1483 		field->flags = filter->fields[i].flags;
1484 		field->len = filter->fields[i].len;
1485 
1486 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1487 		buf += sizeof(struct wl12xx_rx_filter_field) -
1488 			sizeof(u8 *) + field->len;
1489 	}
1490 }
1491 
1492 /*
1493  * Allocates an RX filter, returned through *f, which must be
1494  * freed with wl1271_rx_filter_free()
1495  */
1496 static int
1497 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1498 					   struct wl12xx_rx_filter **f)
1499 {
1500 	int i, j, ret = 0;
1501 	struct wl12xx_rx_filter *filter;
1502 	u16 offset;
1503 	u8 flags, len;
1504 
1505 	filter = wl1271_rx_filter_alloc();
1506 	if (!filter) {
1507 		wl1271_warning("Failed to alloc rx filter");
1508 		ret = -ENOMEM;
1509 		goto err;
1510 	}
1511 
1512 	i = 0;
1513 	while (i < p->pattern_len) {
1514 		if (!test_bit(i, (unsigned long *)p->mask)) {
1515 			i++;
1516 			continue;
1517 		}
1518 
1519 		for (j = i; j < p->pattern_len; j++) {
1520 			if (!test_bit(j, (unsigned long *)p->mask))
1521 				break;
1522 
1523 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1524 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1525 				break;
1526 		}
1527 
1528 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1529 			offset = i;
1530 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1531 		} else {
1532 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1533 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1534 		}
1535 
1536 		len = j - i;
1537 
1538 		ret = wl1271_rx_filter_alloc_field(filter,
1539 						   offset,
1540 						   flags,
1541 						   &p->pattern[i], len);
1542 		if (ret)
1543 			goto err;
1544 
1545 		i = j;
1546 	}
1547 
1548 	filter->action = FILTER_SIGNAL;
1549 
1550 	*f = filter;
1551 	return 0;
1552 
1553 err:
1554 	wl1271_rx_filter_free(filter);
1555 	*f = NULL;
1556 
1557 	return ret;
1558 }
1559 
1560 static int wl1271_configure_wowlan(struct wl1271 *wl,
1561 				   struct cfg80211_wowlan *wow)
1562 {
1563 	int i, ret;
1564 
1565 	if (!wow || wow->any || !wow->n_patterns) {
1566 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1567 							  FILTER_SIGNAL);
1568 		if (ret)
1569 			goto out;
1570 
1571 		ret = wl1271_rx_filter_clear_all(wl);
1572 		if (ret)
1573 			goto out;
1574 
1575 		return 0;
1576 	}
1577 
1578 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1579 		return -EINVAL;
1580 
1581 	/* Validate all incoming patterns before clearing current FW state */
1582 	for (i = 0; i < wow->n_patterns; i++) {
1583 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1584 		if (ret) {
1585 			wl1271_warning("Bad wowlan pattern %d", i);
1586 			return ret;
1587 		}
1588 	}
1589 
1590 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1591 	if (ret)
1592 		goto out;
1593 
1594 	ret = wl1271_rx_filter_clear_all(wl);
1595 	if (ret)
1596 		goto out;
1597 
1598 	/* Translate WoWLAN patterns into filters */
1599 	for (i = 0; i < wow->n_patterns; i++) {
1600 		struct cfg80211_pkt_pattern *p;
1601 		struct wl12xx_rx_filter *filter = NULL;
1602 
1603 		p = &wow->patterns[i];
1604 
1605 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1606 		if (ret) {
1607 			wl1271_warning("Failed to create an RX filter from "
1608 				       "wowlan pattern %d", i);
1609 			goto out;
1610 		}
1611 
1612 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1613 
1614 		wl1271_rx_filter_free(filter);
1615 		if (ret)
1616 			goto out;
1617 	}
1618 
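	/*
	 * With the patterns installed, flip the default action to DROP: while
	 * suspended, only frames matching one of the filters above (whose
	 * action is FILTER_SIGNAL) are passed up and can wake the host.
	 */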
1619 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1620 
1621 out:
1622 	return ret;
1623 }
1624 
1625 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1626 					struct wl12xx_vif *wlvif,
1627 					struct cfg80211_wowlan *wow)
1628 {
1629 	int ret = 0;
1630 
1631 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1632 		goto out;
1633 
1634 	ret = wl1271_configure_wowlan(wl, wow);
1635 	if (ret < 0)
1636 		goto out;
1637 
1638 	if ((wl->conf.conn.suspend_wake_up_event ==
1639 	     wl->conf.conn.wake_up_event) &&
1640 	    (wl->conf.conn.suspend_listen_interval ==
1641 	     wl->conf.conn.listen_interval))
1642 		goto out;
1643 
1644 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1645 				    wl->conf.conn.suspend_wake_up_event,
1646 				    wl->conf.conn.suspend_listen_interval);
1647 
1648 	if (ret < 0)
1649 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1650 out:
1651 	return ret;
1652 
1653 }
1654 
1655 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1656 					struct wl12xx_vif *wlvif,
1657 					struct cfg80211_wowlan *wow)
1658 {
1659 	int ret = 0;
1660 
1661 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1662 		goto out;
1663 
1664 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1665 	if (ret < 0)
1666 		goto out;
1667 
1668 	ret = wl1271_configure_wowlan(wl, wow);
1669 	if (ret < 0)
1670 		goto out;
1671 
1672 out:
1673 	return ret;
1674 
1675 }
1676 
1677 static int wl1271_configure_suspend(struct wl1271 *wl,
1678 				    struct wl12xx_vif *wlvif,
1679 				    struct cfg80211_wowlan *wow)
1680 {
1681 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1682 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1683 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1684 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1685 	return 0;
1686 }
1687 
1688 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1689 {
1690 	int ret = 0;
1691 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1692 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1693 
1694 	if ((!is_ap) && (!is_sta))
1695 		return;
1696 
1697 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1698 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1699 		return;
1700 
1701 	wl1271_configure_wowlan(wl, NULL);
1702 
1703 	if (is_sta) {
1704 		if ((wl->conf.conn.suspend_wake_up_event ==
1705 		     wl->conf.conn.wake_up_event) &&
1706 		    (wl->conf.conn.suspend_listen_interval ==
1707 		     wl->conf.conn.listen_interval))
1708 			return;
1709 
1710 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1711 				    wl->conf.conn.wake_up_event,
1712 				    wl->conf.conn.listen_interval);
1713 
1714 		if (ret < 0)
1715 			wl1271_error("resume: wake up conditions failed: %d",
1716 				     ret);
1717 
1718 	} else if (is_ap) {
1719 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1720 	}
1721 }
1722 
1723 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1724 					    struct cfg80211_wowlan *wow)
1725 {
1726 	struct wl1271 *wl = hw->priv;
1727 	struct wl12xx_vif *wlvif;
1728 	unsigned long flags;
1729 	int ret;
1730 
1731 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1732 	WARN_ON(!wow);
1733 
1734 	/* we want to perform the recovery before suspending */
1735 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1736 		wl1271_warning("postponing suspend to perform recovery");
1737 		return -EBUSY;
1738 	}
1739 
1740 	wl1271_tx_flush(wl);
1741 
1742 	mutex_lock(&wl->mutex);
1743 
1744 	ret = pm_runtime_get_sync(wl->dev);
1745 	if (ret < 0) {
1746 		pm_runtime_put_noidle(wl->dev);
1747 		mutex_unlock(&wl->mutex);
1748 		return ret;
1749 	}
1750 
1751 	wl->wow_enabled = true;
1752 	wl12xx_for_each_wlvif(wl, wlvif) {
1753 		if (wlcore_is_p2p_mgmt(wlvif))
1754 			continue;
1755 
1756 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1757 		if (ret < 0) {
1758 			mutex_unlock(&wl->mutex);
1759 			wl1271_warning("couldn't prepare device to suspend");
1760 			return ret;
1761 		}
1762 	}
1763 
1764 	/* disable fast link flow control notifications from FW */
1765 	ret = wlcore_hw_interrupt_notify(wl, false);
1766 	if (ret < 0)
1767 		goto out_sleep;
1768 
1769 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1770 	ret = wlcore_hw_rx_ba_filter(wl,
1771 				     !!wl->conf.conn.suspend_rx_ba_activity);
1772 	if (ret < 0)
1773 		goto out_sleep;
1774 
1775 out_sleep:
1776 	pm_runtime_put_noidle(wl->dev);
1777 	mutex_unlock(&wl->mutex);
1778 
1779 	if (ret < 0) {
1780 		wl1271_warning("couldn't prepare device to suspend");
1781 		return ret;
1782 	}
1783 
1784 	/* flush any remaining work */
1785 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1786 
1787 	flush_work(&wl->tx_work);
1788 
1789 	/*
1790 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1791 	 * it on resume anyway.
1792 	 */
1793 	cancel_delayed_work(&wl->tx_watchdog_work);
1794 
1795 	/*
1796 	 * set suspended flag to avoid triggering a new threaded_irq
1797 	 * work.
1798 	 */
1799 	spin_lock_irqsave(&wl->wl_lock, flags);
1800 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1801 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1802 
1803 	return pm_runtime_force_suspend(wl->dev);
1804 }
1805 
1806 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1807 {
1808 	struct wl1271 *wl = hw->priv;
1809 	struct wl12xx_vif *wlvif;
1810 	unsigned long flags;
1811 	bool run_irq_work = false, pending_recovery;
1812 	int ret;
1813 
1814 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1815 		     wl->wow_enabled);
1816 	WARN_ON(!wl->wow_enabled);
1817 
1818 	ret = pm_runtime_force_resume(wl->dev);
1819 	if (ret < 0) {
1820 		wl1271_error("ELP wakeup failure!");
1821 		goto out_sleep;
1822 	}
1823 
1824 	/*
1825 	 * re-enable irq_work enqueuing, and call irq_work directly if
1826 	 * there is a pending work.
1827 	 */
1828 	spin_lock_irqsave(&wl->wl_lock, flags);
1829 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1830 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1831 		run_irq_work = true;
1832 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1833 
1834 	mutex_lock(&wl->mutex);
1835 
1836 	/* test the recovery flag before calling any SDIO functions */
1837 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1838 				    &wl->flags);
1839 
1840 	if (run_irq_work) {
1841 		wl1271_debug(DEBUG_MAC80211,
1842 			     "run postponed irq_work directly");
1843 
1844 		/* don't talk to the HW if recovery is pending */
1845 		if (!pending_recovery) {
1846 			ret = wlcore_irq_locked(wl);
1847 			if (ret)
1848 				wl12xx_queue_recovery_work(wl);
1849 		}
1850 
1851 		wlcore_enable_interrupts(wl);
1852 	}
1853 
1854 	if (pending_recovery) {
1855 		wl1271_warning("queuing forgotten recovery on resume");
1856 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1857 		goto out_sleep;
1858 	}
1859 
1860 	ret = pm_runtime_get_sync(wl->dev);
1861 	if (ret < 0) {
1862 		pm_runtime_put_noidle(wl->dev);
1863 		goto out;
1864 	}
1865 
1866 	wl12xx_for_each_wlvif(wl, wlvif) {
1867 		if (wlcore_is_p2p_mgmt(wlvif))
1868 			continue;
1869 
1870 		wl1271_configure_resume(wl, wlvif);
1871 	}
1872 
1873 	ret = wlcore_hw_interrupt_notify(wl, true);
1874 	if (ret < 0)
1875 		goto out_sleep;
1876 
1877 	/* stop dropping RX BA frames (undo the suspend-time filtering, if any) */
1878 	ret = wlcore_hw_rx_ba_filter(wl, false);
1879 	if (ret < 0)
1880 		goto out_sleep;
1881 
1882 out_sleep:
1883 	pm_runtime_mark_last_busy(wl->dev);
1884 	pm_runtime_put_autosuspend(wl->dev);
1885 
1886 out:
1887 	wl->wow_enabled = false;
1888 
1889 	/*
1890 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1891 	 * That way we avoid possible conditions where Tx-complete interrupts
1892 	 * fail to arrive and we perform a spurious recovery.
1893 	 */
1894 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1895 	mutex_unlock(&wl->mutex);
1896 
1897 	return 0;
1898 }
1899 
1900 static int wl1271_op_start(struct ieee80211_hw *hw)
1901 {
1902 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1903 
1904 	/*
1905 	 * We have to delay the booting of the hardware because
1906 	 * we need to know the local MAC address before downloading and
1907 	 * initializing the firmware. The MAC address cannot be changed
1908 	 * after boot, and without the proper MAC address, the firmware
1909 	 * will not function properly.
1910 	 *
1911 	 * The MAC address is first known when the corresponding interface
1912 	 * is added. That is where we will initialize the hardware.
1913 	 */
1914 
1915 	return 0;
1916 }
1917 
1918 static void wlcore_op_stop_locked(struct wl1271 *wl)
1919 {
1920 	int i;
1921 
1922 	if (wl->state == WLCORE_STATE_OFF) {
1923 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1924 					&wl->flags))
1925 			wlcore_enable_interrupts(wl);
1926 
1927 		return;
1928 	}
1929 
1930 	/*
1931 	 * this must be before the cancel_work calls below, so that the work
1932 	 * functions don't perform further work.
1933 	 */
1934 	wl->state = WLCORE_STATE_OFF;
1935 
1936 	/*
1937 	 * Use the nosync variant to disable interrupts, so the mutex could be
1938 	 * held while doing so without deadlocking.
1939 	 */
1940 	wlcore_disable_interrupts_nosync(wl);
1941 
1942 	mutex_unlock(&wl->mutex);
1943 
1944 	wlcore_synchronize_interrupts(wl);
1945 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1946 		cancel_work_sync(&wl->recovery_work);
1947 	wl1271_flush_deferred_work(wl);
1948 	cancel_delayed_work_sync(&wl->scan_complete_work);
1949 	cancel_work_sync(&wl->netstack_work);
1950 	cancel_work_sync(&wl->tx_work);
1951 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1952 
1953 	/* let's notify MAC80211 about the remaining pending TX frames */
1954 	mutex_lock(&wl->mutex);
1955 	wl12xx_tx_reset(wl);
1956 
1957 	wl1271_power_off(wl);
1958 	/*
1959 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1960 	 * an interrupt storm. Now that the power is down, it is safe to
1961 	 * re-enable interrupts to balance the disable depth
1962 	 */
1963 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1964 		wlcore_enable_interrupts(wl);
1965 
1966 	wl->band = NL80211_BAND_2GHZ;
1967 
1968 	wl->rx_counter = 0;
1969 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1970 	wl->channel_type = NL80211_CHAN_NO_HT;
1971 	wl->tx_blocks_available = 0;
1972 	wl->tx_allocated_blocks = 0;
1973 	wl->tx_results_count = 0;
1974 	wl->tx_packets_count = 0;
1975 	wl->time_offset = 0;
1976 	wl->ap_fw_ps_map = 0;
1977 	wl->ap_ps_map = 0;
1978 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1979 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1980 	memset(wl->links_map, 0, sizeof(wl->links_map));
1981 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1982 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1983 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1984 	wl->active_sta_count = 0;
1985 	wl->active_link_count = 0;
1986 
1987 	/* The system link is always allocated */
1988 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1989 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1990 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1991 
1992 	/*
1993 	 * this is performed after the cancel_work calls and the associated
1994 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1995 	 * get executed before all these vars have been reset.
1996 	 */
1997 	wl->flags = 0;
1998 
1999 	wl->tx_blocks_freed = 0;
2000 
2001 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2002 		wl->tx_pkts_freed[i] = 0;
2003 		wl->tx_allocated_pkts[i] = 0;
2004 	}
2005 
2006 	wl1271_debugfs_reset(wl);
2007 
2008 	kfree(wl->raw_fw_status);
2009 	wl->raw_fw_status = NULL;
2010 	kfree(wl->fw_status);
2011 	wl->fw_status = NULL;
2012 	kfree(wl->tx_res_if);
2013 	wl->tx_res_if = NULL;
2014 	kfree(wl->target_mem_map);
2015 	wl->target_mem_map = NULL;
2016 
2017 	/*
2018 	 * FW channels must be re-calibrated after recovery,
2019 	 * save current Reg-Domain channel configuration and clear it.
2020 	 */
2021 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2022 	       sizeof(wl->reg_ch_conf_pending));
2023 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2024 }
2025 
2026 static void wlcore_op_stop(struct ieee80211_hw *hw)
2027 {
2028 	struct wl1271 *wl = hw->priv;
2029 
2030 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2031 
2032 	mutex_lock(&wl->mutex);
2033 
2034 	wlcore_op_stop_locked(wl);
2035 
2036 	mutex_unlock(&wl->mutex);
2037 }
2038 
2039 static void wlcore_channel_switch_work(struct work_struct *work)
2040 {
2041 	struct delayed_work *dwork;
2042 	struct wl1271 *wl;
2043 	struct ieee80211_vif *vif;
2044 	struct wl12xx_vif *wlvif;
2045 	int ret;
2046 
2047 	dwork = to_delayed_work(work);
2048 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2049 	wl = wlvif->wl;
2050 
2051 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2052 
2053 	mutex_lock(&wl->mutex);
2054 
2055 	if (unlikely(wl->state != WLCORE_STATE_ON))
2056 		goto out;
2057 
2058 	/* check the channel switch is still ongoing */
2059 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2060 		goto out;
2061 
2062 	vif = wl12xx_wlvif_to_vif(wlvif);
2063 	ieee80211_chswitch_done(vif, false);
2064 
2065 	ret = pm_runtime_get_sync(wl->dev);
2066 	if (ret < 0) {
2067 		pm_runtime_put_noidle(wl->dev);
2068 		goto out;
2069 	}
2070 
2071 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2072 
2073 	pm_runtime_mark_last_busy(wl->dev);
2074 	pm_runtime_put_autosuspend(wl->dev);
2075 out:
2076 	mutex_unlock(&wl->mutex);
2077 }
2078 
2079 static void wlcore_connection_loss_work(struct work_struct *work)
2080 {
2081 	struct delayed_work *dwork;
2082 	struct wl1271 *wl;
2083 	struct ieee80211_vif *vif;
2084 	struct wl12xx_vif *wlvif;
2085 
2086 	dwork = to_delayed_work(work);
2087 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2088 	wl = wlvif->wl;
2089 
2090 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2091 
2092 	mutex_lock(&wl->mutex);
2093 
2094 	if (unlikely(wl->state != WLCORE_STATE_ON))
2095 		goto out;
2096 
2097 	/* Call mac80211 connection loss */
2098 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2099 		goto out;
2100 
2101 	vif = wl12xx_wlvif_to_vif(wlvif);
2102 	ieee80211_connection_loss(vif);
2103 out:
2104 	mutex_unlock(&wl->mutex);
2105 }
2106 
2107 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2108 {
2109 	struct delayed_work *dwork;
2110 	struct wl1271 *wl;
2111 	struct wl12xx_vif *wlvif;
2112 	unsigned long time_spare;
2113 	int ret;
2114 
2115 	dwork = to_delayed_work(work);
2116 	wlvif = container_of(dwork, struct wl12xx_vif,
2117 			     pending_auth_complete_work);
2118 	wl = wlvif->wl;
2119 
2120 	mutex_lock(&wl->mutex);
2121 
2122 	if (unlikely(wl->state != WLCORE_STATE_ON))
2123 		goto out;
2124 
2125 	/*
2126 	 * Make sure the full timeout really passed since the last auth reply;
2127 	 * another auth reply may have arrived while we were stuck on the mutex.
2128 	 * Check for a little less than the timeout to protect against scheduler
2129 	 * irregularities.
2130 	 */
2131 	time_spare = jiffies +
2132 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2133 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2134 		goto out;
2135 
2136 	ret = pm_runtime_get_sync(wl->dev);
2137 	if (ret < 0) {
2138 		pm_runtime_put_noidle(wl->dev);
2139 		goto out;
2140 	}
2141 
2142 	/* cancel the ROC if active */
2143 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2144 
2145 	pm_runtime_mark_last_busy(wl->dev);
2146 	pm_runtime_put_autosuspend(wl->dev);
2147 out:
2148 	mutex_unlock(&wl->mutex);
2149 }
2150 
2151 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2152 {
2153 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2154 					WL12XX_MAX_RATE_POLICIES);
2155 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2156 		return -EBUSY;
2157 
2158 	__set_bit(policy, wl->rate_policies_map);
2159 	*idx = policy;
2160 	return 0;
2161 }
2162 
2163 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2164 {
2165 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2166 		return;
2167 
2168 	__clear_bit(*idx, wl->rate_policies_map);
2169 	*idx = WL12XX_MAX_RATE_POLICIES;
2170 }
2171 
2172 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2173 {
2174 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2175 					WLCORE_MAX_KLV_TEMPLATES);
2176 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2177 		return -EBUSY;
2178 
2179 	__set_bit(policy, wl->klv_templates_map);
2180 	*idx = policy;
2181 	return 0;
2182 }
2183 
2184 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2185 {
2186 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2187 		return;
2188 
2189 	__clear_bit(*idx, wl->klv_templates_map);
2190 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2191 }
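
/*
 * Note on the helpers above: rate policies and keep-alive (KLV) templates are
 * both managed as small bitmap allocators. Allocation returns the first clear
 * bit; freeing clears that bit and resets the caller's index to the
 * out-of-range maximum, which doubles as the "invalid" marker.
 */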
2192 
2193 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2194 {
2195 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2196 
2197 	switch (wlvif->bss_type) {
2198 	case BSS_TYPE_AP_BSS:
2199 		if (wlvif->p2p)
2200 			return WL1271_ROLE_P2P_GO;
2201 		else if (ieee80211_vif_is_mesh(vif))
2202 			return WL1271_ROLE_MESH_POINT;
2203 		else
2204 			return WL1271_ROLE_AP;
2205 
2206 	case BSS_TYPE_STA_BSS:
2207 		if (wlvif->p2p)
2208 			return WL1271_ROLE_P2P_CL;
2209 		else
2210 			return WL1271_ROLE_STA;
2211 
2212 	case BSS_TYPE_IBSS:
2213 		return WL1271_ROLE_IBSS;
2214 
2215 	default:
2216 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2217 	}
2218 	return WL12XX_INVALID_ROLE_TYPE;
2219 }
2220 
2221 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2222 {
2223 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2224 	int i;
2225 
2226 	/* clear everything but the persistent data */
2227 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2228 
2229 	switch (ieee80211_vif_type_p2p(vif)) {
2230 	case NL80211_IFTYPE_P2P_CLIENT:
2231 		wlvif->p2p = 1;
2232 		/* fall-through */
2233 	case NL80211_IFTYPE_STATION:
2234 	case NL80211_IFTYPE_P2P_DEVICE:
2235 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2236 		break;
2237 	case NL80211_IFTYPE_ADHOC:
2238 		wlvif->bss_type = BSS_TYPE_IBSS;
2239 		break;
2240 	case NL80211_IFTYPE_P2P_GO:
2241 		wlvif->p2p = 1;
2242 		/* fall-through */
2243 	case NL80211_IFTYPE_AP:
2244 	case NL80211_IFTYPE_MESH_POINT:
2245 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2246 		break;
2247 	default:
2248 		wlvif->bss_type = MAX_BSS_TYPE;
2249 		return -EOPNOTSUPP;
2250 	}
2251 
2252 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2253 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2254 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2255 
2256 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2257 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2258 		/* init sta/ibss data */
2259 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2260 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2261 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2262 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2263 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2264 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2265 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2266 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2267 	} else {
2268 		/* init ap data */
2269 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2270 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2271 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2272 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2273 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2274 			wl12xx_allocate_rate_policy(wl,
2275 						&wlvif->ap.ucast_rate_idx[i]);
2276 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2277 		/*
2278 		 * TODO: check if basic_rate shouldn't be
2279 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2280 		 * instead (the same thing for STA above).
2281 		 */
2282 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2283 		/* TODO: this seems to be used only for STA, check it */
2284 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2285 	}
2286 
2287 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2288 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2289 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2290 
2291 	/*
2292 	 * mac80211 configures some values globally, while we treat them
2293 	 * per-interface. thus, on init, we have to copy them from wl
2294 	 */
2295 	wlvif->band = wl->band;
2296 	wlvif->channel = wl->channel;
2297 	wlvif->power_level = wl->power_level;
2298 	wlvif->channel_type = wl->channel_type;
2299 
2300 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2301 		  wl1271_rx_streaming_enable_work);
2302 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2303 		  wl1271_rx_streaming_disable_work);
2304 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2305 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2306 			  wlcore_channel_switch_work);
2307 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2308 			  wlcore_connection_loss_work);
2309 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2310 			  wlcore_pending_auth_complete_work);
2311 	INIT_LIST_HEAD(&wlvif->list);
2312 
2313 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2314 	return 0;
2315 }
2316 
2317 static int wl12xx_init_fw(struct wl1271 *wl)
2318 {
2319 	int retries = WL1271_BOOT_RETRIES;
2320 	bool booted = false;
2321 	struct wiphy *wiphy = wl->hw->wiphy;
2322 	int ret;
2323 
2324 	while (retries) {
2325 		retries--;
2326 		ret = wl12xx_chip_wakeup(wl, false);
2327 		if (ret < 0)
2328 			goto power_off;
2329 
2330 		ret = wl->ops->boot(wl);
2331 		if (ret < 0)
2332 			goto power_off;
2333 
2334 		ret = wl1271_hw_init(wl);
2335 		if (ret < 0)
2336 			goto irq_disable;
2337 
2338 		booted = true;
2339 		break;
2340 
2341 irq_disable:
2342 		mutex_unlock(&wl->mutex);
2343 		/* Unlocking the mutex in the middle of handling is
2344 		   inherently unsafe. In this case we deem it safe to do,
2345 		   because we need to let any possibly pending IRQ out of
2346 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2347 		   work function will not do anything.) Also, any other
2348 		   possible concurrent operations will fail due to the
2349 		   current state, hence the wl1271 struct should be safe. */
2350 		wlcore_disable_interrupts(wl);
2351 		wl1271_flush_deferred_work(wl);
2352 		cancel_work_sync(&wl->netstack_work);
2353 		mutex_lock(&wl->mutex);
2354 power_off:
2355 		wl1271_power_off(wl);
2356 	}
2357 
2358 	if (!booted) {
2359 		wl1271_error("firmware boot failed despite %d retries",
2360 			     WL1271_BOOT_RETRIES);
2361 		goto out;
2362 	}
2363 
2364 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2365 
2366 	/* update hw/fw version info in wiphy struct */
2367 	wiphy->hw_version = wl->chip.id;
2368 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2369 		sizeof(wiphy->fw_version));
2370 
2371 	/*
2372 	 * Now we know if 11a is supported (info from the NVS), so disable
2373 	 * 11a channels if not supported
2374 	 */
2375 	if (!wl->enable_11a)
2376 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2377 
2378 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2379 		     wl->enable_11a ? "" : "not ");
2380 
2381 	wl->state = WLCORE_STATE_ON;
2382 out:
2383 	return ret;
2384 }
2385 
2386 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2387 {
2388 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2389 }
2390 
2391 /*
2392  * Check whether a fw switch (i.e. moving from one loaded
2393  * fw to another) is needed. This function is also responsible
2394  * for updating wl->last_vif_count, so it must be called before
2395  * loading a non-plt fw, so that the correct (single-role/multi-role)
2396  * fw will be used.
2397  */
2398 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2399 				  struct vif_counter_data vif_counter_data,
2400 				  bool add)
2401 {
2402 	enum wl12xx_fw_type current_fw = wl->fw_type;
2403 	u8 vif_count = vif_counter_data.counter;
2404 
2405 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2406 		return false;
2407 
2408 	/* increase the vif count if this is a new vif */
2409 	if (add && !vif_counter_data.cur_vif_running)
2410 		vif_count++;
2411 
2412 	wl->last_vif_count = vif_count;
2413 
2414 	/* no need for fw change if the device is OFF */
2415 	if (wl->state == WLCORE_STATE_OFF)
2416 		return false;
2417 
2418 	/* no need for fw change if a single fw is used */
2419 	if (!wl->mr_fw_name)
2420 		return false;
2421 
2422 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2423 		return true;
2424 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2425 		return true;
2426 
2427 	return false;
2428 }
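
/*
 * Example of the switch logic above: with the single-role fw loaded
 * (WL12XX_FW_TYPE_NORMAL) and one running vif, adding a second vif makes
 * vif_count = 2, so this returns true and the caller triggers an intended
 * recovery that reboots into the multi-role fw. The reverse happens when the
 * vif count drops back to one while the multi-role fw is loaded.
 */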
2429 
2430 /*
2431  * Enter "forced psm". Make sure the sta is in psm against the ap,
2432  * so that the connection is more likely to survive the fw switch.
2433  */
2434 static void wl12xx_force_active_psm(struct wl1271 *wl)
2435 {
2436 	struct wl12xx_vif *wlvif;
2437 
2438 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2439 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2440 	}
2441 }
2442 
2443 struct wlcore_hw_queue_iter_data {
2444 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2445 	/* current vif */
2446 	struct ieee80211_vif *vif;
2447 	/* is the current vif among those iterated */
2448 	bool cur_running;
2449 };
2450 
2451 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2452 				 struct ieee80211_vif *vif)
2453 {
2454 	struct wlcore_hw_queue_iter_data *iter_data = data;
2455 
2456 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2457 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2458 		return;
2459 
2460 	if (iter_data->cur_running || vif == iter_data->vif) {
2461 		iter_data->cur_running = true;
2462 		return;
2463 	}
2464 
2465 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2466 }
2467 
2468 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2469 					 struct wl12xx_vif *wlvif)
2470 {
2471 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2472 	struct wlcore_hw_queue_iter_data iter_data = {};
2473 	int i, q_base;
2474 
2475 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2476 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2477 		return 0;
2478 	}
2479 
2480 	iter_data.vif = vif;
2481 
2482 	/* mark all bits taken by active interfaces */
2483 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2484 					IEEE80211_IFACE_ITER_RESUME_ALL,
2485 					wlcore_hw_queue_iter, &iter_data);
2486 
2487 	/* the current vif is already running in mac80211 (resume/recovery) */
2488 	if (iter_data.cur_running) {
2489 		wlvif->hw_queue_base = vif->hw_queue[0];
2490 		wl1271_debug(DEBUG_MAC80211,
2491 			     "using pre-allocated hw queue base %d",
2492 			     wlvif->hw_queue_base);
2493 
2494 		/* the interface might have changed its type */
2495 		goto adjust_cab_queue;
2496 	}
2497 
2498 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2499 				     WLCORE_NUM_MAC_ADDRESSES);
2500 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2501 		return -EBUSY;
2502 
2503 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2504 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2505 		     wlvif->hw_queue_base);
2506 
2507 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2508 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2509 		/* register hw queues in mac80211 */
2510 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2511 	}
2512 
2513 adjust_cab_queue:
2514 	/* the last places are reserved for cab queues per interface */
2515 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2516 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2517 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2518 	else
2519 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2520 
2521 	return 0;
2522 }
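
/*
 * Queue numbering sketch (assuming the usual NUM_TX_QUEUES = 4 and
 * WLCORE_NUM_MAC_ADDRESSES = 3): each interface is assigned a base of 0, 4
 * or 8, so vif->hw_queue[0..3] map to base..base+3, and AP interfaces
 * additionally get cab_queue = 4 * 3 + base / 4, i.e. one of the reserved
 * trailing slots 12..14.
 */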
2523 
2524 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2525 				   struct ieee80211_vif *vif)
2526 {
2527 	struct wl1271 *wl = hw->priv;
2528 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2529 	struct vif_counter_data vif_count;
2530 	int ret = 0;
2531 	u8 role_type;
2532 
2533 	if (wl->plt) {
2534 		wl1271_error("Adding Interface not allowed while in PLT mode");
2535 		return -EBUSY;
2536 	}
2537 
2538 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2539 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2540 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2541 
2542 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2543 		     ieee80211_vif_type_p2p(vif), vif->addr);
2544 
2545 	wl12xx_get_vif_count(hw, vif, &vif_count);
2546 
2547 	mutex_lock(&wl->mutex);
2548 
2549 	/*
2550 	 * in some very rare corner-case HW recovery scenarios it's possible to
2551 	 * get here before __wl1271_op_remove_interface is complete, so
2552 	 * opt out if that is the case.
2553 	 */
2554 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2555 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2556 		ret = -EBUSY;
2557 		goto out;
2558 	}
2559 
2560 
2561 	ret = wl12xx_init_vif_data(wl, vif);
2562 	if (ret < 0)
2563 		goto out;
2564 
2565 	wlvif->wl = wl;
2566 	role_type = wl12xx_get_role_type(wl, wlvif);
2567 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2568 		ret = -EINVAL;
2569 		goto out;
2570 	}
2571 
2572 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2573 	if (ret < 0)
2574 		goto out;
2575 
2576 	/*
2577 	 * TODO: once the nvs issue is solved, move this block
2578 	 * to start(), and make sure the driver is ON here.
2579 	 */
2580 	if (wl->state == WLCORE_STATE_OFF) {
2581 		/*
2582 		 * we still need this in order to configure the fw
2583 		 * while uploading the nvs
2584 		 */
2585 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2586 
2587 		ret = wl12xx_init_fw(wl);
2588 		if (ret < 0)
2589 			goto out;
2590 	}
2591 
2592 	/*
2593 	 * Call runtime PM only after possible wl12xx_init_fw() above
2594 	 * is done. Otherwise we do not have interrupts enabled.
2595 	 */
2596 	ret = pm_runtime_get_sync(wl->dev);
2597 	if (ret < 0) {
2598 		pm_runtime_put_noidle(wl->dev);
2599 		goto out_unlock;
2600 	}
2601 
2602 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2603 		wl12xx_force_active_psm(wl);
2604 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2605 		mutex_unlock(&wl->mutex);
2606 		wl1271_recovery_work(&wl->recovery_work);
2607 		return 0;
2608 	}
2609 
2610 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2611 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2612 					     role_type, &wlvif->role_id);
2613 		if (ret < 0)
2614 			goto out;
2615 
2616 		ret = wl1271_init_vif_specific(wl, vif);
2617 		if (ret < 0)
2618 			goto out;
2619 
2620 	} else {
2621 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2622 					     &wlvif->dev_role_id);
2623 		if (ret < 0)
2624 			goto out;
2625 
2626 		/* needed mainly for configuring rate policies */
2627 		ret = wl1271_sta_hw_init(wl, wlvif);
2628 		if (ret < 0)
2629 			goto out;
2630 	}
2631 
2632 	list_add(&wlvif->list, &wl->wlvif_list);
2633 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2634 
2635 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2636 		wl->ap_count++;
2637 	else
2638 		wl->sta_count++;
2639 out:
2640 	pm_runtime_mark_last_busy(wl->dev);
2641 	pm_runtime_put_autosuspend(wl->dev);
2642 out_unlock:
2643 	mutex_unlock(&wl->mutex);
2644 
2645 	return ret;
2646 }
2647 
2648 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2649 					 struct ieee80211_vif *vif,
2650 					 bool reset_tx_queues)
2651 {
2652 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2653 	int i, ret;
2654 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2655 
2656 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2657 
2658 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2659 		return;
2660 
2661 	/* because of hardware recovery, we may get here twice */
2662 	if (wl->state == WLCORE_STATE_OFF)
2663 		return;
2664 
2665 	wl1271_info("down");
2666 
2667 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2668 	    wl->scan_wlvif == wlvif) {
2669 		struct cfg80211_scan_info info = {
2670 			.aborted = true,
2671 		};
2672 
2673 		/*
2674 		 * Rearm the tx watchdog just before idling scan. This
2675 		 * prevents just-finished scans from triggering the watchdog
2676 		 */
2677 		wl12xx_rearm_tx_watchdog_locked(wl);
2678 
2679 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2680 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2681 		wl->scan_wlvif = NULL;
2682 		wl->scan.req = NULL;
2683 		ieee80211_scan_completed(wl->hw, &info);
2684 	}
2685 
2686 	if (wl->sched_vif == wlvif)
2687 		wl->sched_vif = NULL;
2688 
2689 	if (wl->roc_vif == vif) {
2690 		wl->roc_vif = NULL;
2691 		ieee80211_remain_on_channel_expired(wl->hw);
2692 	}
2693 
2694 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2695 		/* disable active roles */
2696 		ret = pm_runtime_get_sync(wl->dev);
2697 		if (ret < 0) {
2698 			pm_runtime_put_noidle(wl->dev);
2699 			goto deinit;
2700 		}
2701 
2702 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2703 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2704 			if (wl12xx_dev_role_started(wlvif))
2705 				wl12xx_stop_dev(wl, wlvif);
2706 		}
2707 
2708 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2709 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2710 			if (ret < 0)
2711 				goto deinit;
2712 		} else {
2713 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2714 			if (ret < 0)
2715 				goto deinit;
2716 		}
2717 
2718 		pm_runtime_mark_last_busy(wl->dev);
2719 		pm_runtime_put_autosuspend(wl->dev);
2720 	}
2721 deinit:
2722 	wl12xx_tx_reset_wlvif(wl, wlvif);
2723 
2724 	/* clear all hlids (except system_hlid) */
2725 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2726 
2727 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2728 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2729 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2730 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2731 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2732 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2733 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2734 	} else {
2735 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2736 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2737 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2738 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2739 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2740 			wl12xx_free_rate_policy(wl,
2741 						&wlvif->ap.ucast_rate_idx[i]);
2742 		wl1271_free_ap_keys(wl, wlvif);
2743 	}
2744 
2745 	dev_kfree_skb(wlvif->probereq);
2746 	wlvif->probereq = NULL;
2747 	if (wl->last_wlvif == wlvif)
2748 		wl->last_wlvif = NULL;
2749 	list_del(&wlvif->list);
2750 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2751 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2752 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2753 
2754 	if (is_ap)
2755 		wl->ap_count--;
2756 	else
2757 		wl->sta_count--;
2758 
2759 	/*
2760 	 * Last AP went down but stations remain: configure sleep auth according
2761 	 * to the STA setting. Don't do this on unintended recovery.
2762 	 */
2763 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2764 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2765 		goto unlock;
2766 
2767 	if (wl->ap_count == 0 && is_ap) {
2768 		/* mask ap events */
2769 		wl->event_mask &= ~wl->ap_event_mask;
2770 		wl1271_event_unmask(wl);
2771 	}
2772 
2773 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2774 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2775 		/* Configure for power according to debugfs */
2776 		if (sta_auth != WL1271_PSM_ILLEGAL)
2777 			wl1271_acx_sleep_auth(wl, sta_auth);
2778 		/* Configure for ELP power saving */
2779 		else
2780 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2781 	}
2782 
2783 unlock:
2784 	mutex_unlock(&wl->mutex);
2785 
2786 	del_timer_sync(&wlvif->rx_streaming_timer);
2787 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2788 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2789 	cancel_work_sync(&wlvif->rc_update_work);
2790 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2791 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2792 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2793 
2794 	mutex_lock(&wl->mutex);
2795 }
2796 
2797 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2798 				       struct ieee80211_vif *vif)
2799 {
2800 	struct wl1271 *wl = hw->priv;
2801 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2802 	struct wl12xx_vif *iter;
2803 	struct vif_counter_data vif_count;
2804 
2805 	wl12xx_get_vif_count(hw, vif, &vif_count);
2806 	mutex_lock(&wl->mutex);
2807 
2808 	if (wl->state == WLCORE_STATE_OFF ||
2809 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2810 		goto out;
2811 
2812 	/*
2813 	 * wl->vif can be null here if someone shuts down the interface
2814 	 * just when hardware recovery has been started.
2815 	 */
2816 	wl12xx_for_each_wlvif(wl, iter) {
2817 		if (iter != wlvif)
2818 			continue;
2819 
2820 		__wl1271_op_remove_interface(wl, vif, true);
2821 		break;
2822 	}
2823 	WARN_ON(iter != wlvif);
2824 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2825 		wl12xx_force_active_psm(wl);
2826 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2827 		wl12xx_queue_recovery_work(wl);
2828 	}
2829 out:
2830 	mutex_unlock(&wl->mutex);
2831 }
2832 
2833 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2834 				      struct ieee80211_vif *vif,
2835 				      enum nl80211_iftype new_type, bool p2p)
2836 {
2837 	struct wl1271 *wl = hw->priv;
2838 	int ret;
2839 
2840 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2841 	wl1271_op_remove_interface(hw, vif);
2842 
2843 	vif->type = new_type;
2844 	vif->p2p = p2p;
2845 	ret = wl1271_op_add_interface(hw, vif);
2846 
2847 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2848 	return ret;
2849 }
2850 
2851 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2852 {
2853 	int ret;
2854 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2855 
2856 	/*
2857 	 * One of the side effects of the JOIN command is that it clears
2858 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2859 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2860 	 * Currently the only valid scenario for JOIN during association
2861 	 * is on roaming, in which case we will also be given new keys.
2862 	 * Keep the below message for now, unless it starts bothering
2863 	 * users who really like to roam a lot :)
2864 	 */
2865 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2866 		wl1271_info("JOIN while associated.");
2867 
2868 	/* clear encryption type */
2869 	wlvif->encryption_type = KEY_NONE;
2870 
2871 	if (is_ibss)
2872 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2873 	else {
2874 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2875 			/*
2876 			 * TODO: this is an ugly workaround for wl12xx fw
2877 			 * bug - we are not able to tx/rx after the first
2878 			 * start_sta, so make dummy start+stop calls,
2879 			 * and then call start_sta again.
2880 			 * this should be fixed in the fw.
2881 			 */
2882 			wl12xx_cmd_role_start_sta(wl, wlvif);
2883 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2884 		}
2885 
2886 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2887 	}
2888 
2889 	return ret;
2890 }
2891 
2892 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2893 			    int offset)
2894 {
2895 	u8 ssid_len;
2896 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2897 					 skb->len - offset);
2898 
2899 	if (!ptr) {
2900 		wl1271_error("No SSID in IEs!");
2901 		return -ENOENT;
2902 	}
2903 
2904 	ssid_len = ptr[1];
2905 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2906 		wl1271_error("SSID is too long!");
2907 		return -EINVAL;
2908 	}
2909 
2910 	wlvif->ssid_len = ssid_len;
2911 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2912 	return 0;
2913 }
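
/*
 * For illustration: an SSID IE of { WLAN_EID_SSID, 0x04, 'w', 'l', 'a', 'n' }
 * found at ptr yields ssid_len = ptr[1] = 4, and the four SSID bytes starting
 * at ptr + 2 are copied into wlvif->ssid.
 */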
2914 
2915 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2916 {
2917 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2918 	struct sk_buff *skb;
2919 	int ieoffset;
2920 
2921 	/* we currently only support setting the ssid from the ap probe req */
2922 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2923 		return -EINVAL;
2924 
2925 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2926 	if (!skb)
2927 		return -EINVAL;
2928 
2929 	ieoffset = offsetof(struct ieee80211_mgmt,
2930 			    u.probe_req.variable);
2931 	wl1271_ssid_set(wlvif, skb, ieoffset);
2932 	dev_kfree_skb(skb);
2933 
2934 	return 0;
2935 }
2936 
2937 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2938 			    struct ieee80211_bss_conf *bss_conf,
2939 			    u32 sta_rate_set)
2940 {
2941 	int ieoffset;
2942 	int ret;
2943 
2944 	wlvif->aid = bss_conf->aid;
2945 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2946 	wlvif->beacon_int = bss_conf->beacon_int;
2947 	wlvif->wmm_enabled = bss_conf->qos;
2948 
2949 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2950 
2951 	/*
2952 	 * with wl1271, we don't need to update the
2953 	 * beacon_int and dtim_period, because the firmware
2954 	 * updates them by itself when the first beacon is
2955 	 * received after a join.
2956 	 */
2957 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2958 	if (ret < 0)
2959 		return ret;
2960 
2961 	/*
2962 	 * Get a template for hardware connection maintenance
2963 	 */
2964 	dev_kfree_skb(wlvif->probereq);
2965 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2966 							wlvif,
2967 							NULL);
2968 	ieoffset = offsetof(struct ieee80211_mgmt,
2969 			    u.probe_req.variable);
2970 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2971 
2972 	/* enable the connection monitoring feature */
2973 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2974 	if (ret < 0)
2975 		return ret;
2976 
2977 	/*
2978 	 * The join command disables the keep-alive mode, shuts down its process,
2979 	 * and also clears the template config, so we need to reset it all after
2980 	 * the join. The acx_aid starts the keep-alive process, and the order
2981 	 * of the commands below is relevant.
2982 	 */
2983 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2984 	if (ret < 0)
2985 		return ret;
2986 
2987 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2988 	if (ret < 0)
2989 		return ret;
2990 
2991 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2992 	if (ret < 0)
2993 		return ret;
2994 
2995 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2996 					   wlvif->sta.klv_template_id,
2997 					   ACX_KEEP_ALIVE_TPL_VALID);
2998 	if (ret < 0)
2999 		return ret;
3000 
3001 	/*
3002 	 * The default fw psm configuration is AUTO, while mac80211 default
3003 	 * setting is off (ACTIVE), so sync the fw with the correct value.
3004 	 */
3005 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3006 	if (ret < 0)
3007 		return ret;
3008 
3009 	if (sta_rate_set) {
3010 		wlvif->rate_set =
3011 			wl1271_tx_enabled_rates_get(wl,
3012 						    sta_rate_set,
3013 						    wlvif->band);
3014 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3015 		if (ret < 0)
3016 			return ret;
3017 	}
3018 
3019 	return ret;
3020 }
3021 
3022 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3023 {
3024 	int ret;
3025 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3026 
3027 	/* make sure we are associated (sta) */
3028 	if (sta &&
3029 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3030 		return false;
3031 
3032 	/* make sure we are joined (ibss) */
3033 	if (!sta &&
3034 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3035 		return false;
3036 
3037 	if (sta) {
3038 		/* use defaults when not associated */
3039 		wlvif->aid = 0;
3040 
3041 		/* free probe-request template */
3042 		dev_kfree_skb(wlvif->probereq);
3043 		wlvif->probereq = NULL;
3044 
3045 		/* disable connection monitor features */
3046 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3047 		if (ret < 0)
3048 			return ret;
3049 
3050 		/* Disable the keep-alive feature */
3051 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3052 		if (ret < 0)
3053 			return ret;
3054 
3055 		/* disable beacon filtering */
3056 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3057 		if (ret < 0)
3058 			return ret;
3059 	}
3060 
3061 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3062 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3063 
3064 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3065 		ieee80211_chswitch_done(vif, false);
3066 		cancel_delayed_work(&wlvif->channel_switch_work);
3067 	}
3068 
3069 	/* invalidate keep-alive template */
3070 	wl1271_acx_keep_alive_config(wl, wlvif,
3071 				     wlvif->sta.klv_template_id,
3072 				     ACX_KEEP_ALIVE_TPL_INVALID);
3073 
3074 	return 0;
3075 }
3076 
3077 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3078 {
3079 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3080 	wlvif->rate_set = wlvif->basic_rate_set;
3081 }
3082 
3083 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3084 				   bool idle)
3085 {
3086 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3087 
3088 	if (idle == cur_idle)
3089 		return;
3090 
3091 	if (idle) {
3092 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3093 	} else {
3094 		/* The current firmware only supports sched_scan in idle */
3095 		if (wl->sched_vif == wlvif)
3096 			wl->ops->sched_scan_stop(wl, wlvif);
3097 
3098 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3099 	}
3100 }
3101 
3102 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3103 			     struct ieee80211_conf *conf, u32 changed)
3104 {
3105 	int ret;
3106 
3107 	if (wlcore_is_p2p_mgmt(wlvif))
3108 		return 0;
3109 
3110 	if (conf->power_level != wlvif->power_level) {
3111 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3112 		if (ret < 0)
3113 			return ret;
3114 
3115 		wlvif->power_level = conf->power_level;
3116 	}
3117 
3118 	return 0;
3119 }
3120 
3121 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3122 {
3123 	struct wl1271 *wl = hw->priv;
3124 	struct wl12xx_vif *wlvif;
3125 	struct ieee80211_conf *conf = &hw->conf;
3126 	int ret = 0;
3127 
3128 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3129 		     " changed 0x%x",
3130 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3131 		     conf->power_level,
3132 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3133 			 changed);
3134 
3135 	mutex_lock(&wl->mutex);
3136 
3137 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3138 		wl->power_level = conf->power_level;
3139 
3140 	if (unlikely(wl->state != WLCORE_STATE_ON))
3141 		goto out;
3142 
3143 	ret = pm_runtime_get_sync(wl->dev);
3144 	if (ret < 0) {
3145 		pm_runtime_put_noidle(wl->dev);
3146 		goto out;
3147 	}
3148 
3149 	/* configure each interface */
3150 	wl12xx_for_each_wlvif(wl, wlvif) {
3151 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3152 		if (ret < 0)
3153 			goto out_sleep;
3154 	}
3155 
3156 out_sleep:
3157 	pm_runtime_mark_last_busy(wl->dev);
3158 	pm_runtime_put_autosuspend(wl->dev);
3159 
3160 out:
3161 	mutex_unlock(&wl->mutex);
3162 
3163 	return ret;
3164 }
3165 
3166 struct wl1271_filter_params {
3167 	bool enabled;
3168 	int mc_list_length;
3169 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3170 };
3171 
3172 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3173 				       struct netdev_hw_addr_list *mc_list)
3174 {
3175 	struct wl1271_filter_params *fp;
3176 	struct netdev_hw_addr *ha;
3177 
3178 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3179 	if (!fp) {
3180 		wl1271_error("Out of memory setting filters.");
3181 		return 0;
3182 	}
3183 
3184 	/* update multicast filtering parameters */
3185 	fp->mc_list_length = 0;
3186 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3187 		fp->enabled = false;
3188 	} else {
3189 		fp->enabled = true;
3190 		netdev_hw_addr_list_for_each(ha, mc_list) {
3191 			memcpy(fp->mc_list[fp->mc_list_length],
3192 					ha->addr, ETH_ALEN);
3193 			fp->mc_list_length++;
3194 		}
3195 	}
3196 
3197 	return (u64)(unsigned long)fp;
3198 }
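
/*
 * The (u64)(unsigned long) cast above smuggles the heap-allocated filter
 * parameters through mac80211's opaque "multicast" cookie; the cookie is cast
 * back to a pointer and kfree()d in wl1271_op_configure_filter() below.
 */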
3199 
3200 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3201 				  FIF_FCSFAIL | \
3202 				  FIF_BCN_PRBRESP_PROMISC | \
3203 				  FIF_CONTROL | \
3204 				  FIF_OTHER_BSS)
3205 
3206 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3207 				       unsigned int changed,
3208 				       unsigned int *total, u64 multicast)
3209 {
3210 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3211 	struct wl1271 *wl = hw->priv;
3212 	struct wl12xx_vif *wlvif;
3213 
3214 	int ret;
3215 
3216 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3217 		     " total %x", changed, *total);
3218 
3219 	mutex_lock(&wl->mutex);
3220 
3221 	*total &= WL1271_SUPPORTED_FILTERS;
3222 	changed &= WL1271_SUPPORTED_FILTERS;
3223 
3224 	if (unlikely(wl->state != WLCORE_STATE_ON))
3225 		goto out;
3226 
3227 	ret = pm_runtime_get_sync(wl->dev);
3228 	if (ret < 0) {
3229 		pm_runtime_put_noidle(wl->dev);
3230 		goto out;
3231 	}
3232 
3233 	wl12xx_for_each_wlvif(wl, wlvif) {
3234 		if (wlcore_is_p2p_mgmt(wlvif))
3235 			continue;
3236 
3237 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3238 			if (*total & FIF_ALLMULTI)
3239 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3240 								   false,
3241 								   NULL, 0);
3242 			else if (fp)
3243 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3244 							fp->enabled,
3245 							fp->mc_list,
3246 							fp->mc_list_length);
3247 			if (ret < 0)
3248 				goto out_sleep;
3249 		}
3250 
3251 		/*
3252 		 * If the interface is in AP mode and was created with allmulticast,
3253 		 * disable the firmware filters so that all multicast packets are
3254 		 * passed. This is mandatory for mDNS-based discovery protocols.
3255 		 */
3256 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3257 			if (*total & FIF_ALLMULTI) {
3258 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3259 							false,
3260 							NULL, 0);
3261 				if (ret < 0)
3262 					goto out_sleep;
3263 			}
3264 		}
3265 	}
3266 
3267 	/*
3268 	 * the fw doesn't provide an api to configure the filters. instead,
3269 	 * the filters configuration is based on the active roles / ROC
3270 	 * state.
3271 	 */
3272 
3273 out_sleep:
3274 	pm_runtime_mark_last_busy(wl->dev);
3275 	pm_runtime_put_autosuspend(wl->dev);
3276 
3277 out:
3278 	mutex_unlock(&wl->mutex);
3279 	kfree(fp);
3280 }
3281 
3282 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3283 				u8 id, u8 key_type, u8 key_size,
3284 				const u8 *key, u8 hlid, u32 tx_seq_32,
3285 				u16 tx_seq_16)
3286 {
3287 	struct wl1271_ap_key *ap_key;
3288 	int i;
3289 
3290 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3291 
3292 	if (key_size > MAX_KEY_SIZE)
3293 		return -EINVAL;
3294 
3295 	/*
3296 	 * Find next free entry in ap_keys. Also check we are not replacing
3297 	 * an existing key.
3298 	 */
3299 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3300 		if (wlvif->ap.recorded_keys[i] == NULL)
3301 			break;
3302 
3303 		if (wlvif->ap.recorded_keys[i]->id == id) {
3304 			wl1271_warning("trying to record key replacement");
3305 			return -EINVAL;
3306 		}
3307 	}
3308 
3309 	if (i == MAX_NUM_KEYS)
3310 		return -EBUSY;
3311 
3312 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3313 	if (!ap_key)
3314 		return -ENOMEM;
3315 
3316 	ap_key->id = id;
3317 	ap_key->key_type = key_type;
3318 	ap_key->key_size = key_size;
3319 	memcpy(ap_key->key, key, key_size);
3320 	ap_key->hlid = hlid;
3321 	ap_key->tx_seq_32 = tx_seq_32;
3322 	ap_key->tx_seq_16 = tx_seq_16;
3323 
3324 	wlvif->ap.recorded_keys[i] = ap_key;
3325 	return 0;
3326 }
3327 
3328 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3329 {
3330 	int i;
3331 
3332 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3333 		kfree(wlvif->ap.recorded_keys[i]);
3334 		wlvif->ap.recorded_keys[i] = NULL;
3335 	}
3336 }
3337 
3338 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3339 {
3340 	int i, ret = 0;
3341 	struct wl1271_ap_key *key;
3342 	bool wep_key_added = false;
3343 
3344 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3345 		u8 hlid;
3346 		if (wlvif->ap.recorded_keys[i] == NULL)
3347 			break;
3348 
3349 		key = wlvif->ap.recorded_keys[i];
3350 		hlid = key->hlid;
3351 		if (hlid == WL12XX_INVALID_LINK_ID)
3352 			hlid = wlvif->ap.bcast_hlid;
3353 
3354 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3355 					    key->id, key->key_type,
3356 					    key->key_size, key->key,
3357 					    hlid, key->tx_seq_32,
3358 					    key->tx_seq_16);
3359 		if (ret < 0)
3360 			goto out;
3361 
3362 		if (key->key_type == KEY_WEP)
3363 			wep_key_added = true;
3364 	}
3365 
3366 	if (wep_key_added) {
3367 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3368 						     wlvif->ap.bcast_hlid);
3369 		if (ret < 0)
3370 			goto out;
3371 	}
3372 
3373 out:
3374 	wl1271_free_ap_keys(wl, wlvif);
3375 	return ret;
3376 }
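
/*
 * Deferred AP key flow: keys set by mac80211 before WLVIF_FLAG_AP_STARTED is
 * set are only recorded (see wl1271_record_ap_key(), called from
 * wl1271_set_key() below). Once the AP comes up, wl1271_ap_init_hwenc()
 * replays the recorded keys to the firmware and programs the default WEP key
 * if one was recorded.
 */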
3377 
3378 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3379 		       u16 action, u8 id, u8 key_type,
3380 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3381 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3382 {
3383 	int ret;
3384 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3385 
3386 	if (is_ap) {
3387 		struct wl1271_station *wl_sta;
3388 		u8 hlid;
3389 
3390 		if (sta) {
3391 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3392 			hlid = wl_sta->hlid;
3393 		} else {
3394 			hlid = wlvif->ap.bcast_hlid;
3395 		}
3396 
3397 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3398 			/*
3399 			 * We do not support removing keys after AP shutdown.
3400 			 * Pretend we do to make mac80211 happy.
3401 			 */
3402 			if (action != KEY_ADD_OR_REPLACE)
3403 				return 0;
3404 
3405 			ret = wl1271_record_ap_key(wl, wlvif, id,
3406 					     key_type, key_size,
3407 					     key, hlid, tx_seq_32,
3408 					     tx_seq_16);
3409 		} else {
3410 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3411 					     id, key_type, key_size,
3412 					     key, hlid, tx_seq_32,
3413 					     tx_seq_16);
3414 		}
3415 
3416 		if (ret < 0)
3417 			return ret;
3418 	} else {
3419 		const u8 *addr;
3420 		static const u8 bcast_addr[ETH_ALEN] = {
3421 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3422 		};
3423 
3424 		addr = sta ? sta->addr : bcast_addr;
3425 
3426 		if (is_zero_ether_addr(addr)) {
3427 			/* We don't support TX-only encryption */
3428 			return -EOPNOTSUPP;
3429 		}
3430 
3431 		/* The wl1271 does not allow removing unicast keys - they
3432 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3433 		   request silently, as we don't want mac80211 to emit
3434 		   an error message. */
3435 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3436 			return 0;
3437 
3438 		/* don't remove key if hlid was already deleted */
3439 		if (action == KEY_REMOVE &&
3440 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3441 			return 0;
3442 
3443 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3444 					     id, key_type, key_size,
3445 					     key, addr, tx_seq_32,
3446 					     tx_seq_16);
3447 		if (ret < 0)
3448 			return ret;
3449 
3450 	}
3451 
3452 	return 0;
3453 }
3454 
3455 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3456 			     struct ieee80211_vif *vif,
3457 			     struct ieee80211_sta *sta,
3458 			     struct ieee80211_key_conf *key_conf)
3459 {
3460 	struct wl1271 *wl = hw->priv;
3461 	int ret;
3462 	bool might_change_spare =
3463 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3464 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3465 
3466 	if (might_change_spare) {
3467 		/*
3468 		 * stop the queues and flush to ensure the next packets are
3469 		 * in sync with FW spare block accounting
3470 		 */
3471 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3472 		wl1271_tx_flush(wl);
3473 	}
3474 
3475 	mutex_lock(&wl->mutex);
3476 
3477 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3478 		ret = -EAGAIN;
3479 		goto out_wake_queues;
3480 	}
3481 
3482 	ret = pm_runtime_get_sync(wl->dev);
3483 	if (ret < 0) {
3484 		pm_runtime_put_noidle(wl->dev);
3485 		goto out_wake_queues;
3486 	}
3487 
3488 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3489 
3490 	pm_runtime_mark_last_busy(wl->dev);
3491 	pm_runtime_put_autosuspend(wl->dev);
3492 
3493 out_wake_queues:
3494 	if (might_change_spare)
3495 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3496 
3497 	mutex_unlock(&wl->mutex);
3498 
3499 	return ret;
3500 }
3501 
3502 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3503 		   struct ieee80211_vif *vif,
3504 		   struct ieee80211_sta *sta,
3505 		   struct ieee80211_key_conf *key_conf)
3506 {
3507 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3508 	int ret;
3509 	u32 tx_seq_32 = 0;
3510 	u16 tx_seq_16 = 0;
3511 	u8 key_type;
3512 	u8 hlid;
3513 
3514 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3515 
3516 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3517 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3518 		     key_conf->cipher, key_conf->keyidx,
3519 		     key_conf->keylen, key_conf->flags);
3520 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3521 
3522 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3523 		if (sta) {
3524 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3525 			hlid = wl_sta->hlid;
3526 		} else {
3527 			hlid = wlvif->ap.bcast_hlid;
3528 		}
3529 	else
3530 		hlid = wlvif->sta.hlid;
3531 
3532 	if (hlid != WL12XX_INVALID_LINK_ID) {
3533 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3534 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3535 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3536 	}
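	/*
	 * total_freed_pkts counts packets already sent on this link; splitting
	 * it into a 32-bit high part and a 16-bit low part lets the firmware
	 * continue the TX security sequence counter from where the link left
	 * off, e.g. when keys are re-installed after recovery or roaming.
	 */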
3537 
3538 	switch (key_conf->cipher) {
3539 	case WLAN_CIPHER_SUITE_WEP40:
3540 	case WLAN_CIPHER_SUITE_WEP104:
3541 		key_type = KEY_WEP;
3542 
3543 		key_conf->hw_key_idx = key_conf->keyidx;
3544 		break;
3545 	case WLAN_CIPHER_SUITE_TKIP:
3546 		key_type = KEY_TKIP;
3547 		key_conf->hw_key_idx = key_conf->keyidx;
3548 		break;
3549 	case WLAN_CIPHER_SUITE_CCMP:
3550 		key_type = KEY_AES;
3551 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3552 		break;
3553 	case WL1271_CIPHER_SUITE_GEM:
3554 		key_type = KEY_GEM;
3555 		break;
3556 	default:
3557 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3558 
3559 		return -EOPNOTSUPP;
3560 	}
3561 
3562 	switch (cmd) {
3563 	case SET_KEY:
3564 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3565 				 key_conf->keyidx, key_type,
3566 				 key_conf->keylen, key_conf->key,
3567 				 tx_seq_32, tx_seq_16, sta);
3568 		if (ret < 0) {
3569 			wl1271_error("Could not add or replace key");
3570 			return ret;
3571 		}
3572 
3573 		/*
3574 		 * reconfiguring arp response if the unicast (or common)
3575 		 * encryption key type was changed
3576 		 */
3577 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3578 		    (sta || key_type == KEY_WEP) &&
3579 		    wlvif->encryption_type != key_type) {
3580 			wlvif->encryption_type = key_type;
3581 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3582 			if (ret < 0) {
3583 				wl1271_warning("build arp rsp failed: %d", ret);
3584 				return ret;
3585 			}
3586 		}
3587 		break;
3588 
3589 	case DISABLE_KEY:
3590 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3591 				     key_conf->keyidx, key_type,
3592 				     key_conf->keylen, key_conf->key,
3593 				     0, 0, sta);
3594 		if (ret < 0) {
3595 			wl1271_error("Could not remove key");
3596 			return ret;
3597 		}
3598 		break;
3599 
3600 	default:
3601 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3602 		return -EOPNOTSUPP;
3603 	}
3604 
3605 	return ret;
3606 }
3607 EXPORT_SYMBOL_GPL(wlcore_set_key);
3608 
3609 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3610 					  struct ieee80211_vif *vif,
3611 					  int key_idx)
3612 {
3613 	struct wl1271 *wl = hw->priv;
3614 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3615 	int ret;
3616 
3617 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3618 		     key_idx);
3619 
3620 	/* we don't handle unsetting of default key */
3621 	if (key_idx == -1)
3622 		return;
3623 
3624 	mutex_lock(&wl->mutex);
3625 
3626 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3627 		ret = -EAGAIN;
3628 		goto out_unlock;
3629 	}
3630 
3631 	ret = pm_runtime_get_sync(wl->dev);
3632 	if (ret < 0) {
3633 		pm_runtime_put_noidle(wl->dev);
3634 		goto out_unlock;
3635 	}
3636 
3637 	wlvif->default_key = key_idx;
3638 
3639 	/* the default WEP key needs to be configured at least once */
3640 	if (wlvif->encryption_type == KEY_WEP) {
3641 		ret = wl12xx_cmd_set_default_wep_key(wl,
3642 				key_idx,
3643 				wlvif->sta.hlid);
3644 		if (ret < 0)
3645 			goto out_sleep;
3646 	}
3647 
3648 out_sleep:
3649 	pm_runtime_mark_last_busy(wl->dev);
3650 	pm_runtime_put_autosuspend(wl->dev);
3651 
3652 out_unlock:
3653 	mutex_unlock(&wl->mutex);
3654 }
3655 
3656 void wlcore_regdomain_config(struct wl1271 *wl)
3657 {
3658 	int ret;
3659 
3660 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3661 		return;
3662 
3663 	mutex_lock(&wl->mutex);
3664 
3665 	if (unlikely(wl->state != WLCORE_STATE_ON))
3666 		goto out;
3667 
3668 	ret = pm_runtime_get_sync(wl->dev);
3669 	if (ret < 0)
3670 		goto out;
3671 
3672 	ret = wlcore_cmd_regdomain_config_locked(wl);
3673 	if (ret < 0) {
3674 		wl12xx_queue_recovery_work(wl);
3675 		goto out;
3676 	}
3677 
3678 	pm_runtime_mark_last_busy(wl->dev);
3679 	pm_runtime_put_autosuspend(wl->dev);
3680 out:
3681 	mutex_unlock(&wl->mutex);
3682 }
3683 
3684 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3685 			     struct ieee80211_vif *vif,
3686 			     struct ieee80211_scan_request *hw_req)
3687 {
3688 	struct cfg80211_scan_request *req = &hw_req->req;
3689 	struct wl1271 *wl = hw->priv;
3690 	int ret;
3691 	u8 *ssid = NULL;
3692 	size_t len = 0;
3693 
3694 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3695 
3696 	if (req->n_ssids) {
3697 		ssid = req->ssids[0].ssid;
3698 		len = req->ssids[0].ssid_len;
3699 	}
3700 
3701 	mutex_lock(&wl->mutex);
3702 
3703 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3704 		/*
3705 		 * We cannot return -EBUSY here because cfg80211 will expect
3706 		 * a call to ieee80211_scan_completed if we do - in this case
3707 		 * there won't be any call.
3708 		 */
3709 		ret = -EAGAIN;
3710 		goto out;
3711 	}
3712 
3713 	ret = pm_runtime_get_sync(wl->dev);
3714 	if (ret < 0) {
3715 		pm_runtime_put_noidle(wl->dev);
3716 		goto out;
3717 	}
3718 
3719 	/* fail if there is any role in ROC */
3720 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3721 		/* don't allow scanning right now */
3722 		ret = -EBUSY;
3723 		goto out_sleep;
3724 	}
3725 
3726 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3727 out_sleep:
3728 	pm_runtime_mark_last_busy(wl->dev);
3729 	pm_runtime_put_autosuspend(wl->dev);
3730 out:
3731 	mutex_unlock(&wl->mutex);
3732 
3733 	return ret;
3734 }
3735 
3736 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3737 				     struct ieee80211_vif *vif)
3738 {
3739 	struct wl1271 *wl = hw->priv;
3740 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3741 	struct cfg80211_scan_info info = {
3742 		.aborted = true,
3743 	};
3744 	int ret;
3745 
3746 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3747 
3748 	mutex_lock(&wl->mutex);
3749 
3750 	if (unlikely(wl->state != WLCORE_STATE_ON))
3751 		goto out;
3752 
3753 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3754 		goto out;
3755 
3756 	ret = pm_runtime_get_sync(wl->dev);
3757 	if (ret < 0) {
3758 		pm_runtime_put_noidle(wl->dev);
3759 		goto out;
3760 	}
3761 
3762 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3763 		ret = wl->ops->scan_stop(wl, wlvif);
3764 		if (ret < 0)
3765 			goto out_sleep;
3766 	}
3767 
3768 	/*
3769 	 * Rearm the tx watchdog just before idling scan. This
3770 	 * prevents just-finished scans from triggering the watchdog
3771 	 */
3772 	wl12xx_rearm_tx_watchdog_locked(wl);
3773 
3774 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3775 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3776 	wl->scan_wlvif = NULL;
3777 	wl->scan.req = NULL;
3778 	ieee80211_scan_completed(wl->hw, &info);
3779 
3780 out_sleep:
3781 	pm_runtime_mark_last_busy(wl->dev);
3782 	pm_runtime_put_autosuspend(wl->dev);
3783 out:
3784 	mutex_unlock(&wl->mutex);
3785 
3786 	cancel_delayed_work_sync(&wl->scan_complete_work);
3787 }
3788 
3789 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3790 				      struct ieee80211_vif *vif,
3791 				      struct cfg80211_sched_scan_request *req,
3792 				      struct ieee80211_scan_ies *ies)
3793 {
3794 	struct wl1271 *wl = hw->priv;
3795 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3796 	int ret;
3797 
3798 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3799 
3800 	mutex_lock(&wl->mutex);
3801 
3802 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3803 		ret = -EAGAIN;
3804 		goto out;
3805 	}
3806 
3807 	ret = pm_runtime_get_sync(wl->dev);
3808 	if (ret < 0) {
3809 		pm_runtime_put_noidle(wl->dev);
3810 		goto out;
3811 	}
3812 
3813 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3814 	if (ret < 0)
3815 		goto out_sleep;
3816 
3817 	wl->sched_vif = wlvif;
3818 
3819 out_sleep:
3820 	pm_runtime_mark_last_busy(wl->dev);
3821 	pm_runtime_put_autosuspend(wl->dev);
3822 out:
3823 	mutex_unlock(&wl->mutex);
3824 	return ret;
3825 }
3826 
3827 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3828 				     struct ieee80211_vif *vif)
3829 {
3830 	struct wl1271 *wl = hw->priv;
3831 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3832 	int ret;
3833 
3834 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3835 
3836 	mutex_lock(&wl->mutex);
3837 
3838 	if (unlikely(wl->state != WLCORE_STATE_ON))
3839 		goto out;
3840 
3841 	ret = pm_runtime_get_sync(wl->dev);
3842 	if (ret < 0) {
3843 		pm_runtime_put_noidle(wl->dev);
3844 		goto out;
3845 	}
3846 
3847 	wl->ops->sched_scan_stop(wl, wlvif);
3848 
3849 	pm_runtime_mark_last_busy(wl->dev);
3850 	pm_runtime_put_autosuspend(wl->dev);
3851 out:
3852 	mutex_unlock(&wl->mutex);
3853 
3854 	return 0;
3855 }
3856 
3857 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3858 {
3859 	struct wl1271 *wl = hw->priv;
3860 	int ret = 0;
3861 
3862 	mutex_lock(&wl->mutex);
3863 
3864 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3865 		ret = -EAGAIN;
3866 		goto out;
3867 	}
3868 
3869 	ret = pm_runtime_get_sync(wl->dev);
3870 	if (ret < 0) {
3871 		pm_runtime_put_noidle(wl->dev);
3872 		goto out;
3873 	}
3874 
3875 	ret = wl1271_acx_frag_threshold(wl, value);
3876 	if (ret < 0)
3877 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3878 
3879 	pm_runtime_mark_last_busy(wl->dev);
3880 	pm_runtime_put_autosuspend(wl->dev);
3881 
3882 out:
3883 	mutex_unlock(&wl->mutex);
3884 
3885 	return ret;
3886 }
3887 
3888 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3889 {
3890 	struct wl1271 *wl = hw->priv;
3891 	struct wl12xx_vif *wlvif;
3892 	int ret = 0;
3893 
3894 	mutex_lock(&wl->mutex);
3895 
3896 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3897 		ret = -EAGAIN;
3898 		goto out;
3899 	}
3900 
3901 	ret = pm_runtime_get_sync(wl->dev);
3902 	if (ret < 0) {
3903 		pm_runtime_put_noidle(wl->dev);
3904 		goto out;
3905 	}
3906 
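	/* the RTS threshold is a global setting; apply it to every active role */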
3907 	wl12xx_for_each_wlvif(wl, wlvif) {
3908 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3909 		if (ret < 0)
3910 			wl1271_warning("set rts threshold failed: %d", ret);
3911 	}
3912 	pm_runtime_mark_last_busy(wl->dev);
3913 	pm_runtime_put_autosuspend(wl->dev);
3914 
3915 out:
3916 	mutex_unlock(&wl->mutex);
3917 
3918 	return ret;
3919 }
3920 
3921 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3922 {
3923 	int len;
3924 	const u8 *next, *end = skb->data + skb->len;
3925 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3926 					skb->len - ieoffset);
3927 	if (!ie)
3928 		return;
3929 	len = ie[1] + 2;
3930 	next = ie + len;
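	/* shift the remaining data over the removed IE and trim the skb */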
3931 	memmove(ie, next, end - next);
3932 	skb_trim(skb, skb->len - len);
3933 }
3934 
3935 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3936 					    unsigned int oui, u8 oui_type,
3937 					    int ieoffset)
3938 {
3939 	int len;
3940 	const u8 *next, *end = skb->data + skb->len;
3941 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3942 					       skb->data + ieoffset,
3943 					       skb->len - ieoffset);
3944 	if (!ie)
3945 		return;
3946 	len = ie[1] + 2;
3947 	next = ie + len;
3948 	memmove(ie, next, end - next);
3949 	skb_trim(skb, skb->len - len);
3950 }
3951 
3952 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3953 					 struct ieee80211_vif *vif)
3954 {
3955 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3956 	struct sk_buff *skb;
3957 	int ret;
3958 
3959 	skb = ieee80211_proberesp_get(wl->hw, vif);
3960 	if (!skb)
3961 		return -EOPNOTSUPP;
3962 
3963 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3964 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3965 				      skb->data,
3966 				      skb->len, 0,
3967 				      rates);
3968 	dev_kfree_skb(skb);
3969 
3970 	if (ret < 0)
3971 		goto out;
3972 
3973 	wl1271_debug(DEBUG_AP, "probe response updated");
3974 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3975 
3976 out:
3977 	return ret;
3978 }
3979 
3980 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3981 					     struct ieee80211_vif *vif,
3982 					     u8 *probe_rsp_data,
3983 					     size_t probe_rsp_len,
3984 					     u32 rates)
3985 {
3986 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3987 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3988 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3989 	int ssid_ie_offset, ie_offset, templ_len;
3990 	const u8 *ptr;
3991 
3992 	/* no need to change probe response if the SSID is set correctly */
3993 	if (wlvif->ssid_len > 0)
3994 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3995 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3996 					       probe_rsp_data,
3997 					       probe_rsp_len, 0,
3998 					       rates);
3999 
4000 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4001 		wl1271_error("probe_rsp template too big");
4002 		return -EINVAL;
4003 	}
4004 
4005 	/* start searching from IE offset */
4006 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4007 
4008 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4009 			       probe_rsp_len - ie_offset);
4010 	if (!ptr) {
4011 		wl1271_error("No SSID in beacon!");
4012 		return -EINVAL;
4013 	}
4014 
4015 	ssid_ie_offset = ptr - probe_rsp_data;
4016 	ptr += (ptr[1] + 2);
4017 
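	/* copy the frame up to the original SSID IE, which gets replaced below */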
4018 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4019 
4020 	/* insert SSID from bss_conf */
4021 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4022 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4023 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4024 	       bss_conf->ssid, bss_conf->ssid_len);
4025 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4026 
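	/* append the IEs that followed the original SSID IE */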
4027 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4028 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4029 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4030 
4031 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4032 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4033 				       probe_rsp_templ,
4034 				       templ_len, 0,
4035 				       rates);
4036 }
4037 
4038 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4039 				       struct ieee80211_vif *vif,
4040 				       struct ieee80211_bss_conf *bss_conf,
4041 				       u32 changed)
4042 {
4043 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4044 	int ret = 0;
4045 
4046 	if (changed & BSS_CHANGED_ERP_SLOT) {
4047 		if (bss_conf->use_short_slot)
4048 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4049 		else
4050 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4051 		if (ret < 0) {
4052 			wl1271_warning("Set slot time failed %d", ret);
4053 			goto out;
4054 		}
4055 	}
4056 
4057 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4058 		if (bss_conf->use_short_preamble)
4059 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4060 		else
4061 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4062 	}
4063 
4064 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4065 		if (bss_conf->use_cts_prot)
4066 			ret = wl1271_acx_cts_protect(wl, wlvif,
4067 						     CTSPROTECT_ENABLE);
4068 		else
4069 			ret = wl1271_acx_cts_protect(wl, wlvif,
4070 						     CTSPROTECT_DISABLE);
4071 		if (ret < 0) {
4072 			wl1271_warning("Set ctsprotect failed %d", ret);
4073 			goto out;
4074 		}
4075 	}
4076 
4077 out:
4078 	return ret;
4079 }
4080 
4081 static int wlcore_set_beacon_template(struct wl1271 *wl,
4082 				      struct ieee80211_vif *vif,
4083 				      bool is_ap)
4084 {
4085 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4086 	struct ieee80211_hdr *hdr;
4087 	u32 min_rate;
4088 	int ret;
4089 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4090 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4091 	u16 tmpl_id;
4092 
4093 	if (!beacon) {
4094 		ret = -EINVAL;
4095 		goto out;
4096 	}
4097 
4098 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4099 
4100 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4101 	if (ret < 0) {
4102 		dev_kfree_skb(beacon);
4103 		goto out;
4104 	}
4105 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4106 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4107 		CMD_TEMPL_BEACON;
4108 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4109 				      beacon->data,
4110 				      beacon->len, 0,
4111 				      min_rate);
4112 	if (ret < 0) {
4113 		dev_kfree_skb(beacon);
4114 		goto out;
4115 	}
4116 
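	/* note whether the beacon carries the WMM vendor-specific IE */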
4117 	wlvif->wmm_enabled =
4118 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4119 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4120 					beacon->data + ieoffset,
4121 					beacon->len - ieoffset);
4122 
4123 	/*
4124 	 * In case we already have a probe-resp template set explicitly
4125 	 * by userspace, don't use the beacon data.
4126 	 */
4127 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4128 		goto end_bcn;
4129 
4130 	/* remove TIM ie from probe response */
4131 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4132 
4133 	/*
4134 	 * remove p2p ie from probe response.
4135 	 * the fw responds to probe requests that don't include
4136 	 * the p2p ie. probe requests with a p2p ie will be passed up
4137 	 * and answered by the supplicant (the spec
4138 	 * forbids including the p2p ie when responding to probe
4139 	 * requests that didn't include it).
4140 	 */
4141 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4142 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4143 
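	/* turn the stripped beacon into a probe response template */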
4144 	hdr = (struct ieee80211_hdr *) beacon->data;
4145 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4146 					 IEEE80211_STYPE_PROBE_RESP);
4147 	if (is_ap)
4148 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4149 							   beacon->data,
4150 							   beacon->len,
4151 							   min_rate);
4152 	else
4153 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4154 					      CMD_TEMPL_PROBE_RESPONSE,
4155 					      beacon->data,
4156 					      beacon->len, 0,
4157 					      min_rate);
4158 end_bcn:
4159 	dev_kfree_skb(beacon);
4160 	if (ret < 0)
4161 		goto out;
4162 
4163 out:
4164 	return ret;
4165 }
4166 
4167 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4168 					  struct ieee80211_vif *vif,
4169 					  struct ieee80211_bss_conf *bss_conf,
4170 					  u32 changed)
4171 {
4172 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4173 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4174 	int ret = 0;
4175 
4176 	if (changed & BSS_CHANGED_BEACON_INT) {
4177 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4178 			bss_conf->beacon_int);
4179 
4180 		wlvif->beacon_int = bss_conf->beacon_int;
4181 	}
4182 
4183 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4184 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4185 
4186 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4187 	}
4188 
4189 	if (changed & BSS_CHANGED_BEACON) {
4190 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4191 		if (ret < 0)
4192 			goto out;
4193 
4194 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4195 				       &wlvif->flags)) {
4196 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4197 			if (ret < 0)
4198 				goto out;
4199 		}
4200 	}
4201 out:
4202 	if (ret != 0)
4203 		wl1271_error("beacon info change failed: %d", ret);
4204 	return ret;
4205 }
4206 
4207 /* AP mode changes */
4208 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4209 				       struct ieee80211_vif *vif,
4210 				       struct ieee80211_bss_conf *bss_conf,
4211 				       u32 changed)
4212 {
4213 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4214 	int ret = 0;
4215 
4216 	if (changed & BSS_CHANGED_BASIC_RATES) {
4217 		u32 rates = bss_conf->basic_rates;
4218 
4219 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4220 								 wlvif->band);
4221 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4222 							wlvif->basic_rate_set);
4223 
4224 		ret = wl1271_init_ap_rates(wl, wlvif);
4225 		if (ret < 0) {
4226 			wl1271_error("AP rate policy change failed %d", ret);
4227 			goto out;
4228 		}
4229 
4230 		ret = wl1271_ap_init_templates(wl, vif);
4231 		if (ret < 0)
4232 			goto out;
4233 
4234 		/* No need to set probe resp template for mesh */
4235 		if (!ieee80211_vif_is_mesh(vif)) {
4236 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4237 							    wlvif->basic_rate,
4238 							    vif);
4239 			if (ret < 0)
4240 				goto out;
4241 		}
4242 
4243 		ret = wlcore_set_beacon_template(wl, vif, true);
4244 		if (ret < 0)
4245 			goto out;
4246 	}
4247 
4248 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4249 	if (ret < 0)
4250 		goto out;
4251 
4252 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4253 		if (bss_conf->enable_beacon) {
4254 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4255 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4256 				if (ret < 0)
4257 					goto out;
4258 
4259 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4260 				if (ret < 0)
4261 					goto out;
4262 
4263 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4264 				wl1271_debug(DEBUG_AP, "started AP");
4265 			}
4266 		} else {
4267 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4268 				/*
4269 				 * AP might be in ROC in case we have just
4270 				 * sent an auth reply. Handle it.
4271 				 */
4272 				if (test_bit(wlvif->role_id, wl->roc_map))
4273 					wl12xx_croc(wl, wlvif->role_id);
4274 
4275 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4276 				if (ret < 0)
4277 					goto out;
4278 
4279 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4280 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4281 					  &wlvif->flags);
4282 				wl1271_debug(DEBUG_AP, "stopped AP");
4283 			}
4284 		}
4285 	}
4286 
4287 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4288 	if (ret < 0)
4289 		goto out;
4290 
4291 	/* Handle HT information change */
4292 	if ((changed & BSS_CHANGED_HT) &&
4293 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4294 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4295 					bss_conf->ht_operation_mode);
4296 		if (ret < 0) {
4297 			wl1271_warning("Set ht information failed %d", ret);
4298 			goto out;
4299 		}
4300 	}
4301 
4302 out:
4303 	return;
4304 }
4305 
4306 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4307 			    struct ieee80211_bss_conf *bss_conf,
4308 			    u32 sta_rate_set)
4309 {
4310 	u32 rates;
4311 	int ret;
4312 
4313 	wl1271_debug(DEBUG_MAC80211,
4314 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4315 	     bss_conf->bssid, bss_conf->aid,
4316 	     bss_conf->beacon_int,
4317 	     bss_conf->basic_rates, sta_rate_set);
4318 
4319 	wlvif->beacon_int = bss_conf->beacon_int;
4320 	rates = bss_conf->basic_rates;
4321 	wlvif->basic_rate_set =
4322 		wl1271_tx_enabled_rates_get(wl, rates,
4323 					    wlvif->band);
4324 	wlvif->basic_rate =
4325 		wl1271_tx_min_rate_get(wl,
4326 				       wlvif->basic_rate_set);
4327 
4328 	if (sta_rate_set)
4329 		wlvif->rate_set =
4330 			wl1271_tx_enabled_rates_get(wl,
4331 						sta_rate_set,
4332 						wlvif->band);
4333 
4334 	/* we only support sched_scan while not connected */
4335 	if (wl->sched_vif == wlvif)
4336 		wl->ops->sched_scan_stop(wl, wlvif);
4337 
4338 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4339 	if (ret < 0)
4340 		return ret;
4341 
4342 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4343 	if (ret < 0)
4344 		return ret;
4345 
4346 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4347 	if (ret < 0)
4348 		return ret;
4349 
4350 	wlcore_set_ssid(wl, wlvif);
4351 
4352 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4353 
4354 	return 0;
4355 }
4356 
4357 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4358 {
4359 	int ret;
4360 
4361 	/* revert to the minimum rates for the current band */
4362 	wl1271_set_band_rate(wl, wlvif);
4363 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4364 
4365 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4366 	if (ret < 0)
4367 		return ret;
4368 
4369 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4370 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4371 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4372 		if (ret < 0)
4373 			return ret;
4374 	}
4375 
4376 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4377 	return 0;
4378 }
4379 /* STA/IBSS mode changes */
4380 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4381 					struct ieee80211_vif *vif,
4382 					struct ieee80211_bss_conf *bss_conf,
4383 					u32 changed)
4384 {
4385 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4386 	bool do_join = false;
4387 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4388 	bool ibss_joined = false;
4389 	u32 sta_rate_set = 0;
4390 	int ret;
4391 	struct ieee80211_sta *sta;
4392 	bool sta_exists = false;
4393 	struct ieee80211_sta_ht_cap sta_ht_cap;
4394 
4395 	if (is_ibss) {
4396 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4397 						     changed);
4398 		if (ret < 0)
4399 			goto out;
4400 	}
4401 
4402 	if (changed & BSS_CHANGED_IBSS) {
4403 		if (bss_conf->ibss_joined) {
4404 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4405 			ibss_joined = true;
4406 		} else {
4407 			wlcore_unset_assoc(wl, wlvif);
4408 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4409 		}
4410 	}
4411 
4412 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4413 		do_join = true;
4414 
4415 	/* Need to update the SSID (for filtering etc) */
4416 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4417 		do_join = true;
4418 
4419 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4420 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4421 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4422 
4423 		do_join = true;
4424 	}
4425 
4426 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4427 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4428 
4429 	if (changed & BSS_CHANGED_CQM) {
4430 		bool enable = false;
4431 		if (bss_conf->cqm_rssi_thold)
4432 			enable = true;
4433 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4434 						  bss_conf->cqm_rssi_thold,
4435 						  bss_conf->cqm_rssi_hyst);
4436 		if (ret < 0)
4437 			goto out;
4438 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4439 	}
4440 
4441 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4442 		       BSS_CHANGED_ASSOC)) {
4443 		rcu_read_lock();
4444 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4445 		if (sta) {
4446 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4447 
4448 			/* save the supp_rates of the ap */
4449 			sta_rate_set = sta->supp_rates[wlvif->band];
4450 			if (sta->ht_cap.ht_supported)
4451 				sta_rate_set |=
4452 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4453 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4454 			sta_ht_cap = sta->ht_cap;
4455 			sta_exists = true;
4456 		}
4457 
4458 		rcu_read_unlock();
4459 	}
4460 
4461 	if (changed & BSS_CHANGED_BSSID) {
4462 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4463 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4464 					       sta_rate_set);
4465 			if (ret < 0)
4466 				goto out;
4467 
4468 			/* Need to update the BSSID (for filtering etc) */
4469 			do_join = true;
4470 		} else {
4471 			ret = wlcore_clear_bssid(wl, wlvif);
4472 			if (ret < 0)
4473 				goto out;
4474 		}
4475 	}
4476 
4477 	if (changed & BSS_CHANGED_IBSS) {
4478 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4479 			     bss_conf->ibss_joined);
4480 
4481 		if (bss_conf->ibss_joined) {
4482 			u32 rates = bss_conf->basic_rates;
4483 			wlvif->basic_rate_set =
4484 				wl1271_tx_enabled_rates_get(wl, rates,
4485 							    wlvif->band);
4486 			wlvif->basic_rate =
4487 				wl1271_tx_min_rate_get(wl,
4488 						       wlvif->basic_rate_set);
4489 
4490 			/* by default, use 11b + OFDM rates */
4491 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4492 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4493 			if (ret < 0)
4494 				goto out;
4495 		}
4496 	}
4497 
4498 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4499 		/* enable beacon filtering */
4500 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4501 		if (ret < 0)
4502 			goto out;
4503 	}
4504 
4505 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4506 	if (ret < 0)
4507 		goto out;
4508 
4509 	if (do_join) {
4510 		ret = wlcore_join(wl, wlvif);
4511 		if (ret < 0) {
4512 			wl1271_warning("cmd join failed %d", ret);
4513 			goto out;
4514 		}
4515 	}
4516 
4517 	if (changed & BSS_CHANGED_ASSOC) {
4518 		if (bss_conf->assoc) {
4519 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4520 					       sta_rate_set);
4521 			if (ret < 0)
4522 				goto out;
4523 
4524 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4525 				wl12xx_set_authorized(wl, wlvif);
4526 		} else {
4527 			wlcore_unset_assoc(wl, wlvif);
4528 		}
4529 	}
4530 
4531 	if (changed & BSS_CHANGED_PS) {
4532 		if ((bss_conf->ps) &&
4533 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4534 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4535 			int ps_mode;
4536 			char *ps_mode_str;
4537 
4538 			if (wl->conf.conn.forced_ps) {
4539 				ps_mode = STATION_POWER_SAVE_MODE;
4540 				ps_mode_str = "forced";
4541 			} else {
4542 				ps_mode = STATION_AUTO_PS_MODE;
4543 				ps_mode_str = "auto";
4544 			}
4545 
4546 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4547 
4548 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4549 			if (ret < 0)
4550 				wl1271_warning("enter %s ps failed %d",
4551 					       ps_mode_str, ret);
4552 		} else if (!bss_conf->ps &&
4553 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4554 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4555 
4556 			ret = wl1271_ps_set_mode(wl, wlvif,
4557 						 STATION_ACTIVE_MODE);
4558 			if (ret < 0)
4559 				wl1271_warning("exit auto ps failed %d", ret);
4560 		}
4561 	}
4562 
4563 	/* Handle new association with HT. Do this after join. */
4564 	if (sta_exists) {
4565 		bool enabled =
4566 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4567 
4568 		ret = wlcore_hw_set_peer_cap(wl,
4569 					     &sta_ht_cap,
4570 					     enabled,
4571 					     wlvif->rate_set,
4572 					     wlvif->sta.hlid);
4573 		if (ret < 0) {
4574 			wl1271_warning("Set ht cap failed %d", ret);
4575 			goto out;
4576 
4577 		}
4578 
4579 		if (enabled) {
4580 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4581 						bss_conf->ht_operation_mode);
4582 			if (ret < 0) {
4583 				wl1271_warning("Set ht information failed %d",
4584 					       ret);
4585 				goto out;
4586 			}
4587 		}
4588 	}
4589 
4590 	/* Handle arp filtering. Done after join. */
4591 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4592 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4593 		__be32 addr = bss_conf->arp_addr_list[0];
4594 		wlvif->sta.qos = bss_conf->qos;
4595 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4596 
4597 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4598 			wlvif->ip_addr = addr;
4599 			/*
4600 			 * The template should have been configured only upon
4601 			 * association. However, it seems that the correct ip
4602 			 * isn't being set (when sending), so we have to
4603 			 * reconfigure the template upon every ip change.
4604 			 */
4605 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4606 			if (ret < 0) {
4607 				wl1271_warning("build arp rsp failed: %d", ret);
4608 				goto out;
4609 			}
4610 
4611 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4612 				(ACX_ARP_FILTER_ARP_FILTERING |
4613 				 ACX_ARP_FILTER_AUTO_ARP),
4614 				addr);
4615 		} else {
4616 			wlvif->ip_addr = 0;
4617 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4618 		}
4619 
4620 		if (ret < 0)
4621 			goto out;
4622 	}
4623 
4624 out:
4625 	return;
4626 }
4627 
4628 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4629 				       struct ieee80211_vif *vif,
4630 				       struct ieee80211_bss_conf *bss_conf,
4631 				       u32 changed)
4632 {
4633 	struct wl1271 *wl = hw->priv;
4634 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4635 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4636 	int ret;
4637 
4638 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4639 		     wlvif->role_id, (int)changed);
4640 
4641 	/*
4642 	 * make sure to cancel pending disconnections if our association
4643 	 * state changed
4644 	 */
4645 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4646 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4647 
4648 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4649 	    !bss_conf->enable_beacon)
4650 		wl1271_tx_flush(wl);
4651 
4652 	mutex_lock(&wl->mutex);
4653 
4654 	if (unlikely(wl->state != WLCORE_STATE_ON))
4655 		goto out;
4656 
4657 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4658 		goto out;
4659 
4660 	ret = pm_runtime_get_sync(wl->dev);
4661 	if (ret < 0) {
4662 		pm_runtime_put_noidle(wl->dev);
4663 		goto out;
4664 	}
4665 
4666 	if ((changed & BSS_CHANGED_TXPOWER) &&
4667 	    bss_conf->txpower != wlvif->power_level) {
4668 
4669 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4670 		if (ret < 0)
4671 			goto out;
4672 
4673 		wlvif->power_level = bss_conf->txpower;
4674 	}
4675 
4676 	if (is_ap)
4677 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4678 	else
4679 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4680 
4681 	pm_runtime_mark_last_busy(wl->dev);
4682 	pm_runtime_put_autosuspend(wl->dev);
4683 
4684 out:
4685 	mutex_unlock(&wl->mutex);
4686 }
4687 
4688 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4689 				 struct ieee80211_chanctx_conf *ctx)
4690 {
4691 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4692 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4693 		     cfg80211_get_chandef_type(&ctx->def));
4694 	return 0;
4695 }
4696 
4697 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4698 				     struct ieee80211_chanctx_conf *ctx)
4699 {
4700 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4701 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4702 		     cfg80211_get_chandef_type(&ctx->def));
4703 }
4704 
4705 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4706 				     struct ieee80211_chanctx_conf *ctx,
4707 				     u32 changed)
4708 {
4709 	struct wl1271 *wl = hw->priv;
4710 	struct wl12xx_vif *wlvif;
4711 	int ret;
4712 	int channel = ieee80211_frequency_to_channel(
4713 		ctx->def.chan->center_freq);
4714 
4715 	wl1271_debug(DEBUG_MAC80211,
4716 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4717 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4718 
4719 	mutex_lock(&wl->mutex);
4720 
4721 	ret = pm_runtime_get_sync(wl->dev);
4722 	if (ret < 0) {
4723 		pm_runtime_put_noidle(wl->dev);
4724 		goto out;
4725 	}
4726 
4727 	wl12xx_for_each_wlvif(wl, wlvif) {
4728 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4729 
4730 		rcu_read_lock();
4731 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4732 			rcu_read_unlock();
4733 			continue;
4734 		}
4735 		rcu_read_unlock();
4736 
4737 		/* start radar if needed */
4738 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4739 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4740 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4741 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4742 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4743 			wlcore_hw_set_cac(wl, wlvif, true);
4744 			wlvif->radar_enabled = true;
4745 		}
4746 	}
4747 
4748 	pm_runtime_mark_last_busy(wl->dev);
4749 	pm_runtime_put_autosuspend(wl->dev);
4750 out:
4751 	mutex_unlock(&wl->mutex);
4752 }
4753 
4754 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4755 					struct ieee80211_vif *vif,
4756 					struct ieee80211_chanctx_conf *ctx)
4757 {
4758 	struct wl1271 *wl = hw->priv;
4759 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4760 	int channel = ieee80211_frequency_to_channel(
4761 		ctx->def.chan->center_freq);
4762 	int ret = -EINVAL;
4763 
4764 	wl1271_debug(DEBUG_MAC80211,
4765 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4766 		     wlvif->role_id, channel,
4767 		     cfg80211_get_chandef_type(&ctx->def),
4768 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4769 
4770 	mutex_lock(&wl->mutex);
4771 
4772 	if (unlikely(wl->state != WLCORE_STATE_ON))
4773 		goto out;
4774 
4775 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4776 		goto out;
4777 
4778 	ret = pm_runtime_get_sync(wl->dev);
4779 	if (ret < 0) {
4780 		pm_runtime_put_noidle(wl->dev);
4781 		goto out;
4782 	}
4783 
4784 	wlvif->band = ctx->def.chan->band;
4785 	wlvif->channel = channel;
4786 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4787 
4788 	/* update default rates according to the band */
4789 	wl1271_set_band_rate(wl, wlvif);
4790 
4791 	if (ctx->radar_enabled &&
4792 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4793 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4794 		wlcore_hw_set_cac(wl, wlvif, true);
4795 		wlvif->radar_enabled = true;
4796 	}
4797 
4798 	pm_runtime_mark_last_busy(wl->dev);
4799 	pm_runtime_put_autosuspend(wl->dev);
4800 out:
4801 	mutex_unlock(&wl->mutex);
4802 
4803 	return 0;
4804 }
4805 
4806 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4807 					   struct ieee80211_vif *vif,
4808 					   struct ieee80211_chanctx_conf *ctx)
4809 {
4810 	struct wl1271 *wl = hw->priv;
4811 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4812 	int ret;
4813 
4814 	wl1271_debug(DEBUG_MAC80211,
4815 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4816 		     wlvif->role_id,
4817 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4818 		     cfg80211_get_chandef_type(&ctx->def));
4819 
4820 	wl1271_tx_flush(wl);
4821 
4822 	mutex_lock(&wl->mutex);
4823 
4824 	if (unlikely(wl->state != WLCORE_STATE_ON))
4825 		goto out;
4826 
4827 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4828 		goto out;
4829 
4830 	ret = pm_runtime_get_sync(wl->dev);
4831 	if (ret < 0) {
4832 		pm_runtime_put_noidle(wl->dev);
4833 		goto out;
4834 	}
4835 
4836 	if (wlvif->radar_enabled) {
4837 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4838 		wlcore_hw_set_cac(wl, wlvif, false);
4839 		wlvif->radar_enabled = false;
4840 	}
4841 
4842 	pm_runtime_mark_last_busy(wl->dev);
4843 	pm_runtime_put_autosuspend(wl->dev);
4844 out:
4845 	mutex_unlock(&wl->mutex);
4846 }
4847 
4848 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4849 				    struct wl12xx_vif *wlvif,
4850 				    struct ieee80211_chanctx_conf *new_ctx)
4851 {
4852 	int channel = ieee80211_frequency_to_channel(
4853 		new_ctx->def.chan->center_freq);
4854 
4855 	wl1271_debug(DEBUG_MAC80211,
4856 		     "switch vif (role %d) %d -> %d chan_type: %d",
4857 		     wlvif->role_id, wlvif->channel, channel,
4858 		     cfg80211_get_chandef_type(&new_ctx->def));
4859 
4860 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4861 		return 0;
4862 
4863 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4864 
4865 	if (wlvif->radar_enabled) {
4866 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4867 		wlcore_hw_set_cac(wl, wlvif, false);
4868 		wlvif->radar_enabled = false;
4869 	}
4870 
4871 	wlvif->band = new_ctx->def.chan->band;
4872 	wlvif->channel = channel;
4873 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4874 
4875 	/* start radar if needed */
4876 	if (new_ctx->radar_enabled) {
4877 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4878 		wlcore_hw_set_cac(wl, wlvif, true);
4879 		wlvif->radar_enabled = true;
4880 	}
4881 
4882 	return 0;
4883 }
4884 
4885 static int
4886 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4887 			     struct ieee80211_vif_chanctx_switch *vifs,
4888 			     int n_vifs,
4889 			     enum ieee80211_chanctx_switch_mode mode)
4890 {
4891 	struct wl1271 *wl = hw->priv;
4892 	int i, ret;
4893 
4894 	wl1271_debug(DEBUG_MAC80211,
4895 		     "mac80211 switch chanctx n_vifs %d mode %d",
4896 		     n_vifs, mode);
4897 
4898 	mutex_lock(&wl->mutex);
4899 
4900 	ret = pm_runtime_get_sync(wl->dev);
4901 	if (ret < 0) {
4902 		pm_runtime_put_noidle(wl->dev);
4903 		goto out;
4904 	}
4905 
4906 	for (i = 0; i < n_vifs; i++) {
4907 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4908 
4909 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4910 		if (ret)
4911 			goto out_sleep;
4912 	}
4913 out_sleep:
4914 	pm_runtime_mark_last_busy(wl->dev);
4915 	pm_runtime_put_autosuspend(wl->dev);
4916 out:
4917 	mutex_unlock(&wl->mutex);
4918 
4919 	return 0;
4920 }
4921 
4922 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4923 			     struct ieee80211_vif *vif, u16 queue,
4924 			     const struct ieee80211_tx_queue_params *params)
4925 {
4926 	struct wl1271 *wl = hw->priv;
4927 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4928 	u8 ps_scheme;
4929 	int ret = 0;
4930 
4931 	if (wlcore_is_p2p_mgmt(wlvif))
4932 		return 0;
4933 
4934 	mutex_lock(&wl->mutex);
4935 
4936 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4937 
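	/* U-APSD queues use the UPSD trigger scheme; all others use legacy PS */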
4938 	if (params->uapsd)
4939 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4940 	else
4941 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4942 
4943 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4944 		goto out;
4945 
4946 	ret = pm_runtime_get_sync(wl->dev);
4947 	if (ret < 0) {
4948 		pm_runtime_put_noidle(wl->dev);
4949 		goto out;
4950 	}
4951 
4952 	/*
4953 	 * mac80211 configures the txop in units of 32us, but we
4954 	 * need microseconds, hence the << 5 below.
4955 	 */
4956 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4957 				params->cw_min, params->cw_max,
4958 				params->aifs, params->txop << 5);
4959 	if (ret < 0)
4960 		goto out_sleep;
4961 
4962 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4963 				 CONF_CHANNEL_TYPE_EDCF,
4964 				 wl1271_tx_get_queue(queue),
4965 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4966 				 0, 0);
4967 
4968 out_sleep:
4969 	pm_runtime_mark_last_busy(wl->dev);
4970 	pm_runtime_put_autosuspend(wl->dev);
4971 
4972 out:
4973 	mutex_unlock(&wl->mutex);
4974 
4975 	return ret;
4976 }
4977 
4978 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4979 			     struct ieee80211_vif *vif)
4980 {
4981 
4982 	struct wl1271 *wl = hw->priv;
4983 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4984 	u64 mactime = ULLONG_MAX;
4985 	int ret;
4986 
4987 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4988 
4989 	mutex_lock(&wl->mutex);
4990 
4991 	if (unlikely(wl->state != WLCORE_STATE_ON))
4992 		goto out;
4993 
4994 	ret = pm_runtime_get_sync(wl->dev);
4995 	if (ret < 0) {
4996 		pm_runtime_put_noidle(wl->dev);
4997 		goto out;
4998 	}
4999 
5000 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5001 	if (ret < 0)
5002 		goto out_sleep;
5003 
5004 out_sleep:
5005 	pm_runtime_mark_last_busy(wl->dev);
5006 	pm_runtime_put_autosuspend(wl->dev);
5007 
5008 out:
5009 	mutex_unlock(&wl->mutex);
5010 	return mactime;
5011 }
5012 
5013 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5014 				struct survey_info *survey)
5015 {
5016 	struct ieee80211_conf *conf = &hw->conf;
5017 
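	/* report only the current channel; no survey statistics are collected */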
5018 	if (idx != 0)
5019 		return -ENOENT;
5020 
5021 	survey->channel = conf->chandef.chan;
5022 	survey->filled = 0;
5023 	return 0;
5024 }
5025 
5026 static int wl1271_allocate_sta(struct wl1271 *wl,
5027 			     struct wl12xx_vif *wlvif,
5028 			     struct ieee80211_sta *sta)
5029 {
5030 	struct wl1271_station *wl_sta;
5031 	int ret;
5032 
5033 
5034 	if (wl->active_sta_count >= wl->max_ap_stations) {
5035 		wl1271_warning("could not allocate HLID - too many stations");
5036 		return -EBUSY;
5037 	}
5038 
5039 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5040 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5041 	if (ret < 0) {
5042 		wl1271_warning("could not allocate HLID - too many links");
5043 		return -EBUSY;
5044 	}
5045 
5046 	/* use the previous security seq, if this is a recovery/resume */
5047 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5048 
5049 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5050 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5051 	wl->active_sta_count++;
5052 	return 0;
5053 }
5054 
5055 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5056 {
5057 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5058 		return;
5059 
5060 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5061 	__clear_bit(hlid, &wl->ap_ps_map);
5062 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5063 
5064 	/*
5065 	 * save the last used PN in the private part of ieee80211_sta,
5066 	 * in case of recovery/suspend
5067 	 */
5068 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5069 
5070 	wl12xx_free_link(wl, wlvif, &hlid);
5071 	wl->active_sta_count--;
5072 
5073 	/*
5074 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5075 	 * chance to return STA-buffered packets before complaining.
5076 	 */
5077 	if (wl->active_sta_count == 0)
5078 		wl12xx_rearm_tx_watchdog_locked(wl);
5079 }
5080 
5081 static int wl12xx_sta_add(struct wl1271 *wl,
5082 			  struct wl12xx_vif *wlvif,
5083 			  struct ieee80211_sta *sta)
5084 {
5085 	struct wl1271_station *wl_sta;
5086 	int ret = 0;
5087 	u8 hlid;
5088 
5089 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5090 
5091 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5092 	if (ret < 0)
5093 		return ret;
5094 
5095 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5096 	hlid = wl_sta->hlid;
5097 
5098 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5099 	if (ret < 0)
5100 		wl1271_free_sta(wl, wlvif, hlid);
5101 
5102 	return ret;
5103 }
5104 
5105 static int wl12xx_sta_remove(struct wl1271 *wl,
5106 			     struct wl12xx_vif *wlvif,
5107 			     struct ieee80211_sta *sta)
5108 {
5109 	struct wl1271_station *wl_sta;
5110 	int ret = 0, id;
5111 
5112 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5113 
5114 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5115 	id = wl_sta->hlid;
5116 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5117 		return -EINVAL;
5118 
5119 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5120 	if (ret < 0)
5121 		return ret;
5122 
5123 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5124 	return ret;
5125 }
5126 
5127 static void wlcore_roc_if_possible(struct wl1271 *wl,
5128 				   struct wl12xx_vif *wlvif)
5129 {
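	/* only one ROC at a time: bail out if any role is already on-channel */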
5130 	if (find_first_bit(wl->roc_map,
5131 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5132 		return;
5133 
5134 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5135 		return;
5136 
5137 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5138 }
5139 
5140 /*
5141  * when wl_sta is NULL, we treat this call as if coming from a
5142  * pending auth reply.
5143  * wl->mutex must be taken and the FW must be awake when the call
5144  * takes place.
5145  */
5146 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5147 			      struct wl1271_station *wl_sta, bool in_conn)
5148 {
5149 	if (in_conn) {
5150 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5151 			return;
5152 
5153 		if (!wlvif->ap_pending_auth_reply &&
5154 		    !wlvif->inconn_count)
5155 			wlcore_roc_if_possible(wl, wlvif);
5156 
5157 		if (wl_sta) {
5158 			wl_sta->in_connection = true;
5159 			wlvif->inconn_count++;
5160 		} else {
5161 			wlvif->ap_pending_auth_reply = true;
5162 		}
5163 	} else {
5164 		if (wl_sta && !wl_sta->in_connection)
5165 			return;
5166 
5167 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5168 			return;
5169 
5170 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5171 			return;
5172 
5173 		if (wl_sta) {
5174 			wl_sta->in_connection = false;
5175 			wlvif->inconn_count--;
5176 		} else {
5177 			wlvif->ap_pending_auth_reply = false;
5178 		}
5179 
5180 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5181 		    test_bit(wlvif->role_id, wl->roc_map))
5182 			wl12xx_croc(wl, wlvif->role_id);
5183 	}
5184 }
5185 
5186 static int wl12xx_update_sta_state(struct wl1271 *wl,
5187 				   struct wl12xx_vif *wlvif,
5188 				   struct ieee80211_sta *sta,
5189 				   enum ieee80211_sta_state old_state,
5190 				   enum ieee80211_sta_state new_state)
5191 {
5192 	struct wl1271_station *wl_sta;
5193 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5194 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5195 	int ret;
5196 
5197 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5198 
5199 	/* Add station (AP mode) */
5200 	if (is_ap &&
5201 	    old_state == IEEE80211_STA_NOTEXIST &&
5202 	    new_state == IEEE80211_STA_NONE) {
5203 		ret = wl12xx_sta_add(wl, wlvif, sta);
5204 		if (ret)
5205 			return ret;
5206 
5207 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5208 	}
5209 
5210 	/* Remove station (AP mode) */
5211 	if (is_ap &&
5212 	    old_state == IEEE80211_STA_NONE &&
5213 	    new_state == IEEE80211_STA_NOTEXIST) {
5214 		/* must not fail */
5215 		wl12xx_sta_remove(wl, wlvif, sta);
5216 
5217 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5218 	}
5219 
5220 	/* Authorize station (AP mode) */
5221 	if (is_ap &&
5222 	    new_state == IEEE80211_STA_AUTHORIZED) {
5223 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5224 		if (ret < 0)
5225 			return ret;
5226 
5227 		/* reconfigure rates */
5228 		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5229 		if (ret < 0)
5230 			return ret;
5231 
5232 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5233 						     wl_sta->hlid);
5234 		if (ret)
5235 			return ret;
5236 
5237 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5238 	}
5239 
5240 	/* Authorize station */
5241 	if (is_sta &&
5242 	    new_state == IEEE80211_STA_AUTHORIZED) {
5243 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5244 		ret = wl12xx_set_authorized(wl, wlvif);
5245 		if (ret)
5246 			return ret;
5247 	}
5248 
5249 	if (is_sta &&
5250 	    old_state == IEEE80211_STA_AUTHORIZED &&
5251 	    new_state == IEEE80211_STA_ASSOC) {
5252 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5253 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5254 	}
5255 
5256 	/* save seq number on disassoc (suspend) */
5257 	if (is_sta &&
5258 	    old_state == IEEE80211_STA_ASSOC &&
5259 	    new_state == IEEE80211_STA_AUTH) {
5260 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5261 		wlvif->total_freed_pkts = 0;
5262 	}
5263 
5264 	/* restore seq number on assoc (resume) */
5265 	if (is_sta &&
5266 	    old_state == IEEE80211_STA_AUTH &&
5267 	    new_state == IEEE80211_STA_ASSOC) {
5268 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5269 	}
5270 
5271 	/* clear ROCs on failure or authorization */
5272 	if (is_sta &&
5273 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5274 	     new_state == IEEE80211_STA_NOTEXIST)) {
5275 		if (test_bit(wlvif->role_id, wl->roc_map))
5276 			wl12xx_croc(wl, wlvif->role_id);
5277 	}
5278 
5279 	if (is_sta &&
5280 	    old_state == IEEE80211_STA_NOTEXIST &&
5281 	    new_state == IEEE80211_STA_NONE) {
5282 		if (find_first_bit(wl->roc_map,
5283 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5284 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5285 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5286 				   wlvif->band, wlvif->channel);
5287 		}
5288 	}
5289 	return 0;
5290 }
5291 
5292 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5293 			       struct ieee80211_vif *vif,
5294 			       struct ieee80211_sta *sta,
5295 			       enum ieee80211_sta_state old_state,
5296 			       enum ieee80211_sta_state new_state)
5297 {
5298 	struct wl1271 *wl = hw->priv;
5299 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5300 	int ret;
5301 
5302 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5303 		     sta->aid, old_state, new_state);
5304 
5305 	mutex_lock(&wl->mutex);
5306 
5307 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5308 		ret = -EBUSY;
5309 		goto out;
5310 	}
5311 
5312 	ret = pm_runtime_get_sync(wl->dev);
5313 	if (ret < 0) {
5314 		pm_runtime_put_noidle(wl->dev);
5315 		goto out;
5316 	}
5317 
5318 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5319 
5320 	pm_runtime_mark_last_busy(wl->dev);
5321 	pm_runtime_put_autosuspend(wl->dev);
5322 out:
5323 	mutex_unlock(&wl->mutex);
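	/* mac80211 expects transitions to a lower state to always succeed */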
5324 	if (new_state < old_state)
5325 		return 0;
5326 	return ret;
5327 }
5328 
5329 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5330 				  struct ieee80211_vif *vif,
5331 				  struct ieee80211_ampdu_params *params)
5332 {
5333 	struct wl1271 *wl = hw->priv;
5334 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5335 	int ret;
5336 	u8 hlid, *ba_bitmap;
5337 	struct ieee80211_sta *sta = params->sta;
5338 	enum ieee80211_ampdu_mlme_action action = params->action;
5339 	u16 tid = params->tid;
5340 	u16 *ssn = &params->ssn;
5341 
5342 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5343 		     tid);
5344 
5345 	/* sanity check - the fields in FW are only 8bits wide */
5346 	/* sanity check - the fields in FW are only 8 bits wide */
5347 		return -ENOTSUPP;
5348 
5349 	mutex_lock(&wl->mutex);
5350 
5351 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5352 		ret = -EAGAIN;
5353 		goto out;
5354 	}
5355 
5356 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5357 		hlid = wlvif->sta.hlid;
5358 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5359 		struct wl1271_station *wl_sta;
5360 
5361 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5362 		hlid = wl_sta->hlid;
5363 	} else {
5364 		ret = -EINVAL;
5365 		goto out;
5366 	}
5367 
5368 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5369 
5370 	ret = pm_runtime_get_sync(wl->dev);
5371 	if (ret < 0) {
5372 		pm_runtime_put_noidle(wl->dev);
5373 		goto out;
5374 	}
5375 
5376 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5377 		     tid, action);
5378 
5379 	switch (action) {
5380 	case IEEE80211_AMPDU_RX_START:
5381 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5382 			ret = -ENOTSUPP;
5383 			break;
5384 		}
5385 
5386 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5387 			ret = -EBUSY;
5388 			wl1271_error("exceeded max RX BA sessions");
5389 			break;
5390 		}
5391 
5392 		if (*ba_bitmap & BIT(tid)) {
5393 			ret = -EINVAL;
5394 			wl1271_error("cannot enable RX BA session on active "
5395 				     "tid: %d", tid);
5396 			break;
5397 		}
5398 
5399 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5400 				hlid,
5401 				params->buf_size);
5402 
5403 		if (!ret) {
5404 			*ba_bitmap |= BIT(tid);
5405 			wl->ba_rx_session_count++;
5406 		}
5407 		break;
5408 
5409 	case IEEE80211_AMPDU_RX_STOP:
5410 		if (!(*ba_bitmap & BIT(tid))) {
5411 			/*
5412 			 * this happens on reconfig - so only output a debug
5413 			 * message for now, and don't fail the function.
5414 			 */
5415 			wl1271_debug(DEBUG_MAC80211,
5416 				     "no active RX BA session on tid: %d",
5417 				     tid);
5418 			ret = 0;
5419 			break;
5420 		}
5421 
5422 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5423 							 hlid, 0);
5424 		if (!ret) {
5425 			*ba_bitmap &= ~BIT(tid);
5426 			wl->ba_rx_session_count--;
5427 		}
5428 		break;
5429 
5430 	/*
5431 	 * The BA initiator session is managed by the FW independently.
5432 	 * Fall through here on purpose for all TX AMPDU actions.
5433 	 */
5434 	case IEEE80211_AMPDU_TX_START:
5435 	case IEEE80211_AMPDU_TX_STOP_CONT:
5436 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5437 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5438 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5439 		ret = -EINVAL;
5440 		break;
5441 
5442 	default:
5443 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5444 		ret = -EINVAL;
5445 	}
5446 
5447 	pm_runtime_mark_last_busy(wl->dev);
5448 	pm_runtime_put_autosuspend(wl->dev);
5449 
5450 out:
5451 	mutex_unlock(&wl->mutex);
5452 
5453 	return ret;
5454 }
5455 
5456 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5457 				   struct ieee80211_vif *vif,
5458 				   const struct cfg80211_bitrate_mask *mask)
5459 {
5460 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5461 	struct wl1271 *wl = hw->priv;
5462 	int i, ret = 0;
5463 
5464 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5465 		mask->control[NL80211_BAND_2GHZ].legacy,
5466 		mask->control[NL80211_BAND_5GHZ].legacy);
5467 
5468 	mutex_lock(&wl->mutex);
5469 
5470 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5471 		wlvif->bitrate_masks[i] =
5472 			wl1271_tx_enabled_rates_get(wl,
5473 						    mask->control[i].legacy,
5474 						    i);
5475 
5476 	if (unlikely(wl->state != WLCORE_STATE_ON))
5477 		goto out;
5478 
5479 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5480 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5481 
5482 		ret = pm_runtime_get_sync(wl->dev);
5483 		if (ret < 0) {
5484 			pm_runtime_put_noidle(wl->dev);
5485 			goto out;
5486 		}
5487 
5488 		wl1271_set_band_rate(wl, wlvif);
5489 		wlvif->basic_rate =
5490 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5491 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5492 
5493 		pm_runtime_mark_last_busy(wl->dev);
5494 		pm_runtime_put_autosuspend(wl->dev);
5495 	}
5496 out:
5497 	mutex_unlock(&wl->mutex);
5498 
5499 	return ret;
5500 }
5501 
5502 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5503 				     struct ieee80211_vif *vif,
5504 				     struct ieee80211_channel_switch *ch_switch)
5505 {
5506 	struct wl1271 *wl = hw->priv;
5507 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5508 	int ret;
5509 
5510 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5511 
5512 	wl1271_tx_flush(wl);
5513 
5514 	mutex_lock(&wl->mutex);
5515 
5516 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5517 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5518 			ieee80211_chswitch_done(vif, false);
5519 		goto out;
5520 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5521 		goto out;
5522 	}
5523 
5524 	ret = pm_runtime_get_sync(wl->dev);
5525 	if (ret < 0) {
5526 		pm_runtime_put_noidle(wl->dev);
5527 		goto out;
5528 	}
5529 
5530 	/* TODO: change mac80211 to pass vif as param */
5531 
5532 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5533 		unsigned long delay_usec;
5534 
5535 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5536 		if (ret)
5537 			goto out_sleep;
5538 
5539 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5540 
5541 		/* indicate failure 5 seconds after channel switch time */
5542 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5543 			ch_switch->count;
5544 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5545 					     usecs_to_jiffies(delay_usec) +
5546 					     msecs_to_jiffies(5000));
5547 	}
5548 
5549 out_sleep:
5550 	pm_runtime_mark_last_busy(wl->dev);
5551 	pm_runtime_put_autosuspend(wl->dev);
5552 
5553 out:
5554 	mutex_unlock(&wl->mutex);
5555 }
5556 
5557 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5558 					struct wl12xx_vif *wlvif,
5559 					u8 eid)
5560 {
5561 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5562 	struct sk_buff *beacon =
5563 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5564 
5565 	if (!beacon)
5566 		return NULL;
5567 
5568 	return cfg80211_find_ie(eid,
5569 				beacon->data + ieoffset,
5570 				beacon->len - ieoffset);
5571 }
5572 
5573 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5574 				u8 *csa_count)
5575 {
5576 	const u8 *ie;
5577 	const struct ieee80211_channel_sw_ie *ie_csa;
5578 
5579 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5580 	if (!ie)
5581 		return -EINVAL;
5582 
5583 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5584 	*csa_count = ie_csa->count;
5585 
5586 	return 0;
5587 }
5588 
5589 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5590 					    struct ieee80211_vif *vif,
5591 					    struct cfg80211_chan_def *chandef)
5592 {
5593 	struct wl1271 *wl = hw->priv;
5594 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5595 	struct ieee80211_channel_switch ch_switch = {
5596 		.block_tx = true,
5597 		.chandef = *chandef,
5598 	};
5599 	int ret;
5600 
5601 	wl1271_debug(DEBUG_MAC80211,
5602 		     "mac80211 channel switch beacon (role %d)",
5603 		     wlvif->role_id);
5604 
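	/* take the CSA counter from the CSA IE in our own beacon */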
5605 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5606 	if (ret < 0) {
5607 		wl1271_error("error getting beacon (for CSA counter)");
5608 		return;
5609 	}
5610 
5611 	mutex_lock(&wl->mutex);
5612 
5613 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5614 		ret = -EBUSY;
5615 		goto out;
5616 	}
5617 
5618 	ret = pm_runtime_get_sync(wl->dev);
5619 	if (ret < 0) {
5620 		pm_runtime_put_noidle(wl->dev);
5621 		goto out;
5622 	}
5623 
5624 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5625 	if (ret)
5626 		goto out_sleep;
5627 
5628 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5629 
5630 out_sleep:
5631 	pm_runtime_mark_last_busy(wl->dev);
5632 	pm_runtime_put_autosuspend(wl->dev);
5633 out:
5634 	mutex_unlock(&wl->mutex);
5635 }
5636 
5637 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5638 			    u32 queues, bool drop)
5639 {
5640 	struct wl1271 *wl = hw->priv;
5641 
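	/* queues/drop are ignored; flush all pending frames */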
5642 	wl1271_tx_flush(wl);
5643 }
5644 
5645 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5646 				       struct ieee80211_vif *vif,
5647 				       struct ieee80211_channel *chan,
5648 				       int duration,
5649 				       enum ieee80211_roc_type type)
5650 {
5651 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5652 	struct wl1271 *wl = hw->priv;
5653 	int channel, active_roc, ret = 0;
5654 
5655 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5656 
5657 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5658 		     channel, wlvif->role_id);
5659 
5660 	mutex_lock(&wl->mutex);
5661 
5662 	if (unlikely(wl->state != WLCORE_STATE_ON))
5663 		goto out;
5664 
5665 	/* return EBUSY if we can't ROC right now */
5666 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5667 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5668 		wl1271_warning("active roc on role %d", active_roc);
5669 		ret = -EBUSY;
5670 		goto out;
5671 	}
5672 
5673 	ret = pm_runtime_get_sync(wl->dev);
5674 	if (ret < 0) {
5675 		pm_runtime_put_noidle(wl->dev);
5676 		goto out;
5677 	}
5678 
5679 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5680 	if (ret < 0)
5681 		goto out_sleep;
5682 
5683 	wl->roc_vif = vif;
5684 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5685 				     msecs_to_jiffies(duration));
5686 out_sleep:
5687 	pm_runtime_mark_last_busy(wl->dev);
5688 	pm_runtime_put_autosuspend(wl->dev);
5689 out:
5690 	mutex_unlock(&wl->mutex);
5691 	return ret;
5692 }
5693 
5694 static int __wlcore_roc_completed(struct wl1271 *wl)
5695 {
5696 	struct wl12xx_vif *wlvif;
5697 	int ret;
5698 
5699 	/* already completed */
5700 	if (unlikely(!wl->roc_vif))
5701 		return 0;
5702 
5703 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5704 
5705 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5706 		return -EBUSY;
5707 
5708 	ret = wl12xx_stop_dev(wl, wlvif);
5709 	if (ret < 0)
5710 		return ret;
5711 
5712 	wl->roc_vif = NULL;
5713 
5714 	return 0;
5715 }
5716 
5717 static int wlcore_roc_completed(struct wl1271 *wl)
5718 {
5719 	int ret;
5720 
5721 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5722 
5723 	mutex_lock(&wl->mutex);
5724 
5725 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5726 		ret = -EBUSY;
5727 		goto out;
5728 	}
5729 
5730 	ret = pm_runtime_get_sync(wl->dev);
5731 	if (ret < 0) {
5732 		pm_runtime_put_noidle(wl->dev);
5733 		goto out;
5734 	}
5735 
5736 	ret = __wlcore_roc_completed(wl);
5737 
5738 	pm_runtime_mark_last_busy(wl->dev);
5739 	pm_runtime_put_autosuspend(wl->dev);
5740 out:
5741 	mutex_unlock(&wl->mutex);
5742 
5743 	return ret;
5744 }
5745 
5746 static void wlcore_roc_complete_work(struct work_struct *work)
5747 {
5748 	struct delayed_work *dwork;
5749 	struct wl1271 *wl;
5750 	int ret;
5751 
5752 	dwork = to_delayed_work(work);
5753 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5754 
5755 	ret = wlcore_roc_completed(wl);
5756 	if (!ret)
5757 		ieee80211_remain_on_channel_expired(wl->hw);
5758 }
5759 
5760 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5761 {
5762 	struct wl1271 *wl = hw->priv;
5763 
5764 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5765 
5766 	/* TODO: per-vif */
5767 	wl1271_tx_flush(wl);
5768 
5769 	/*
5770 	 * we can't just flush_work here, because it might deadlock
5771 	 * (as we might get called from the same workqueue)
5772 	 */
5773 	cancel_delayed_work_sync(&wl->roc_complete_work);
5774 	wlcore_roc_completed(wl);
5775 
5776 	return 0;
5777 }
5778 
5779 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5780 				    struct ieee80211_vif *vif,
5781 				    struct ieee80211_sta *sta,
5782 				    u32 changed)
5783 {
5784 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5785 
5786 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5787 
5788 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5789 		return;
5790 
5791 	/* this callback is atomic, so schedule a new work */
5792 	wlvif->rc_update_bw = sta->bandwidth;
5793 	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5794 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5795 }
5796 
5797 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5798 				     struct ieee80211_vif *vif,
5799 				     struct ieee80211_sta *sta,
5800 				     struct station_info *sinfo)
5801 {
5802 	struct wl1271 *wl = hw->priv;
5803 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5804 	s8 rssi_dbm;
5805 	int ret;
5806 
5807 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5808 
5809 	mutex_lock(&wl->mutex);
5810 
5811 	if (unlikely(wl->state != WLCORE_STATE_ON))
5812 		goto out;
5813 
5814 	ret = pm_runtime_get_sync(wl->dev);
5815 	if (ret < 0) {
5816 		pm_runtime_put_noidle(wl->dev);
5817 		goto out_sleep;
5818 	}
5819 
5820 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5821 	if (ret < 0)
5822 		goto out_sleep;
5823 
5824 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5825 	sinfo->signal = rssi_dbm;
5826 
5827 out_sleep:
5828 	pm_runtime_mark_last_busy(wl->dev);
5829 	pm_runtime_put_autosuspend(wl->dev);
5830 
5831 out:
5832 	mutex_unlock(&wl->mutex);
5833 }
5834 
5835 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5836 					     struct ieee80211_sta *sta)
5837 {
5838 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5839 	struct wl1271 *wl = hw->priv;
5840 	u8 hlid = wl_sta->hlid;
5841 
5842 	/* return in units of Kbps */
5843 	return (wl->links[hlid].fw_rate_mbps * 1000);
5844 }
5845 
5846 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5847 {
5848 	struct wl1271 *wl = hw->priv;
5849 	bool ret = false;
5850 
5851 	mutex_lock(&wl->mutex);
5852 
5853 	if (unlikely(wl->state != WLCORE_STATE_ON))
5854 		goto out;
5855 
5856 	/* packets are considered pending if in the TX queue or the FW */
5857 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5858 out:
5859 	mutex_unlock(&wl->mutex);
5860 
5861 	return ret;
5862 }
5863 
5864 /* can't be const, mac80211 writes to this */
5865 static struct ieee80211_rate wl1271_rates[] = {
5866 	{ .bitrate = 10,
5867 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5868 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5869 	{ .bitrate = 20,
5870 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5871 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5872 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5873 	{ .bitrate = 55,
5874 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5875 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5876 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5877 	{ .bitrate = 110,
5878 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5879 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5880 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5881 	{ .bitrate = 60,
5882 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5883 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5884 	{ .bitrate = 90,
5885 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5886 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5887 	{ .bitrate = 120,
5888 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5889 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5890 	{ .bitrate = 180,
5891 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5892 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5893 	{ .bitrate = 240,
5894 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5895 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5896 	{ .bitrate = 360,
5897 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5898 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5899 	{ .bitrate = 480,
5900 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5901 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5902 	{ .bitrate = 540,
5903 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5904 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5905 };
5906 
5907 /* can't be const, mac80211 writes to this */
5908 static struct ieee80211_channel wl1271_channels[] = {
5909 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5910 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5911 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5912 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5913 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5914 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5915 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5916 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5917 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5918 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5919 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5920 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5921 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5922 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5923 };
5924 
5925 /* can't be const, mac80211 writes to this */
5926 static struct ieee80211_supported_band wl1271_band_2ghz = {
5927 	.channels = wl1271_channels,
5928 	.n_channels = ARRAY_SIZE(wl1271_channels),
5929 	.bitrates = wl1271_rates,
5930 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5931 };
5932 
5933 /* 5 GHz data rates for WL1273 */
5934 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5935 	{ .bitrate = 60,
5936 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5937 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5938 	{ .bitrate = 90,
5939 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5940 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5941 	{ .bitrate = 120,
5942 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5943 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5944 	{ .bitrate = 180,
5945 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5946 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5947 	{ .bitrate = 240,
5948 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5949 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5950 	{ .bitrate = 360,
5951 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5952 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5953 	{ .bitrate = 480,
5954 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5955 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5956 	{ .bitrate = 540,
5957 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5958 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5959 };
5960 
5961 /* 5 GHz band channels for WL1273 */
5962 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5963 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5964 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5965 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5966 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5967 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5968 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5969 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5970 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5971 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5972 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5973 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5974 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5975 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5976 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5977 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5987 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5988 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5989 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5990 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5991 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5992 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5993 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5994 };
5995 
5996 static struct ieee80211_supported_band wl1271_band_5ghz = {
5997 	.channels = wl1271_channels_5ghz,
5998 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5999 	.bitrates = wl1271_rates_5ghz,
6000 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6001 };
6002 
6003 static const struct ieee80211_ops wl1271_ops = {
6004 	.start = wl1271_op_start,
6005 	.stop = wlcore_op_stop,
6006 	.add_interface = wl1271_op_add_interface,
6007 	.remove_interface = wl1271_op_remove_interface,
6008 	.change_interface = wl12xx_op_change_interface,
6009 #ifdef CONFIG_PM
6010 	.suspend = wl1271_op_suspend,
6011 	.resume = wl1271_op_resume,
6012 #endif
6013 	.config = wl1271_op_config,
6014 	.prepare_multicast = wl1271_op_prepare_multicast,
6015 	.configure_filter = wl1271_op_configure_filter,
6016 	.tx = wl1271_op_tx,
6017 	.set_key = wlcore_op_set_key,
6018 	.hw_scan = wl1271_op_hw_scan,
6019 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6020 	.sched_scan_start = wl1271_op_sched_scan_start,
6021 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6022 	.bss_info_changed = wl1271_op_bss_info_changed,
6023 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6024 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6025 	.conf_tx = wl1271_op_conf_tx,
6026 	.get_tsf = wl1271_op_get_tsf,
6027 	.get_survey = wl1271_op_get_survey,
6028 	.sta_state = wl12xx_op_sta_state,
6029 	.ampdu_action = wl1271_op_ampdu_action,
6030 	.tx_frames_pending = wl1271_tx_frames_pending,
6031 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6032 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6033 	.channel_switch = wl12xx_op_channel_switch,
6034 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6035 	.flush = wlcore_op_flush,
6036 	.remain_on_channel = wlcore_op_remain_on_channel,
6037 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6038 	.add_chanctx = wlcore_op_add_chanctx,
6039 	.remove_chanctx = wlcore_op_remove_chanctx,
6040 	.change_chanctx = wlcore_op_change_chanctx,
6041 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6042 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6043 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6044 	.sta_rc_update = wlcore_op_sta_rc_update,
6045 	.sta_statistics = wlcore_op_sta_statistics,
6046 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6047 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6048 };
6049 
6050 
6051 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6052 {
6053 	u8 idx;
6054 
6055 	BUG_ON(band >= 2);
6056 
6057 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6058 		wl1271_error("Illegal RX rate from HW: %d", rate);
6059 		return 0;
6060 	}
6061 
6062 	idx = wl->band_rate_to_idx[band][rate];
6063 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6064 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6065 		return 0;
6066 	}
6067 
6068 	return idx;
6069 }
6070 
6071 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6072 {
6073 	int i;
6074 
6075 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6076 		     oui, nic);
6077 
6078 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6079 		wl1271_warning("NIC part of the MAC address wraps around!");
6080 
6081 	for (i = 0; i < wl->num_mac_addr; i++) {
6082 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6083 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6084 		wl->addresses[i].addr[2] = (u8) oui;
6085 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6086 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6087 		wl->addresses[i].addr[5] = (u8) nic;
6088 		nic++;
6089 	}
6090 
6091 	/* we may be at most one address short */
6092 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6093 
6094 	/*
6095 	 * turn on the LAA bit in the first address and use it as
6096 	 * the last address.
6097 	 */
6098 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6099 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6100 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6101 		       sizeof(wl->addresses[0]));
6102 		/* set the locally administered (LAA) address bit */
6103 		wl->addresses[idx].addr[0] |= BIT(1);
6104 	}
6105 
6106 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6107 	wl->hw->wiphy->addresses = wl->addresses;
6108 }
6109 
6110 static int wl12xx_get_hw_info(struct wl1271 *wl)
6111 {
6112 	int ret;
6113 
6114 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6115 	if (ret < 0)
6116 		goto out;
6117 
6118 	wl->fuse_oui_addr = 0;
6119 	wl->fuse_nic_addr = 0;
6120 
6121 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6122 	if (ret < 0)
6123 		goto out;
6124 
6125 	if (wl->ops->get_mac)
6126 		ret = wl->ops->get_mac(wl);
6127 
6128 out:
6129 	return ret;
6130 }
6131 
6132 static int wl1271_register_hw(struct wl1271 *wl)
6133 {
6134 	int ret;
6135 	u32 oui_addr = 0, nic_addr = 0;
6136 	struct platform_device *pdev = wl->pdev;
6137 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6138 
6139 	if (wl->mac80211_registered)
6140 		return 0;
6141 
6142 	if (wl->nvs_len >= 12) {
6143 		/* NOTE: to simplify the casting we assume that the
6144 		 * wl->nvs->nvs element is first, i.e. located at the
6145 		 * very beginning of the wl->nvs structure.
6146 		 */
6147 		u8 *nvs_ptr = (u8 *)wl->nvs;
6148 
6149 		oui_addr =
6150 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6151 		nic_addr =
6152 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6153 	}
6154 
6155 	/* if the MAC address is zeroed in the NVS, derive it from fuse */
6156 	if (oui_addr == 0 && nic_addr == 0) {
6157 		oui_addr = wl->fuse_oui_addr;
6158 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6159 		nic_addr = wl->fuse_nic_addr + 1;
6160 	}
6161 
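	/*
	 * de:ad:be:ef:00:00 is the placeholder address shipped in the
	 * default (uncalibrated) NVS image, so treat it as unconfigured too.
	 */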
6162 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6163 		wl1271_warning("Detected unconfigured mac address in nvs, deriving from fuse instead.");
6164 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6165 			wl1271_warning("This default nvs file can be removed from the file system");
6166 		} else {
6167 			wl1271_warning("Your device performance is not optimized.");
6168 			wl1271_warning("Please use the calibrator tool to configure your device.");
6169 		}
6170 
6171 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6172 			wl1271_warning("Fuse mac address is zero. Using random mac");
6173 			/* Use TI oui and a random nic */
6174 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6175 			nic_addr = get_random_int();
6176 		} else {
6177 			oui_addr = wl->fuse_oui_addr;
6178 			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6179 			nic_addr = wl->fuse_nic_addr + 1;
6180 		}
6181 	}
6182 
6183 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6184 
6185 	ret = ieee80211_register_hw(wl->hw);
6186 	if (ret < 0) {
6187 		wl1271_error("unable to register mac80211 hw: %d", ret);
6188 		goto out;
6189 	}
6190 
6191 	wl->mac80211_registered = true;
6192 
6193 	wl1271_debugfs_init(wl);
6194 
6195 	wl1271_notice("loaded");
6196 
6197 out:
6198 	return ret;
6199 }
6200 
6201 static void wl1271_unregister_hw(struct wl1271 *wl)
6202 {
6203 	if (wl->plt)
6204 		wl1271_plt_stop(wl);
6205 
6206 	ieee80211_unregister_hw(wl->hw);
6207 	wl->mac80211_registered = false;
6208 
6209 }
6210 
6211 static int wl1271_init_ieee80211(struct wl1271 *wl)
6212 {
6213 	int i;
6214 	static const u32 cipher_suites[] = {
6215 		WLAN_CIPHER_SUITE_WEP40,
6216 		WLAN_CIPHER_SUITE_WEP104,
6217 		WLAN_CIPHER_SUITE_TKIP,
6218 		WLAN_CIPHER_SUITE_CCMP,
6219 		WL1271_CIPHER_SUITE_GEM,
6220 	};
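	/*
	 * WL1271_CIPHER_SUITE_GEM is a TI-specific cipher, advertised in
	 * addition to the standard WEP/TKIP/CCMP suites.
	 */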
6221 
6222 	/* The tx descriptor buffer */
6223 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6224 
6225 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6226 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6227 
6228 	/* unit us */
6229 	/* FIXME: find a proper value */
6230 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6231 
6232 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6233 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6234 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6235 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6236 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6237 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6238 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6239 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6240 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6241 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6242 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6243 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6244 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6245 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6246 
6247 	wl->hw->wiphy->cipher_suites = cipher_suites;
6248 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6249 
6250 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6251 					 BIT(NL80211_IFTYPE_AP) |
6252 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6253 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6254 #ifdef CONFIG_MAC80211_MESH
6255 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6256 #endif
6257 					 BIT(NL80211_IFTYPE_P2P_GO);
6258 
6259 	wl->hw->wiphy->max_scan_ssids = 1;
6260 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6261 	wl->hw->wiphy->max_match_sets = 16;
6262 	/*
6263 	 * Maximum length of elements in scanning probe request templates
6264 	 * should be the maximum length possible for a template, without
6265 	 * the IEEE80211 header of the template
6266 	 */
6267 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6268 			sizeof(struct ieee80211_header);
6269 
6270 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6271 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6272 		sizeof(struct ieee80211_header);
6273 
6274 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6275 
6276 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6277 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6278 				WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6279 
6280 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6281 
6282 	/* make sure all our channels fit in the scanned_ch bitmask */
6283 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6284 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6285 		     WL1271_MAX_CHANNELS);
6286 	/*
6287 	 * clear channel flags from the previous usage
6288 	 * and restore max_power & max_antenna_gain values.
6289 	 */
6290 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6291 		wl1271_band_2ghz.channels[i].flags = 0;
6292 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6293 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6294 	}
6295 
6296 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6297 		wl1271_band_5ghz.channels[i].flags = 0;
6298 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6299 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6300 	}
6301 
6302 	/*
6303 	 * We keep local copies of the band structs because we need to
6304 	 * modify them on a per-device basis.
6305 	 */
6306 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6307 	       sizeof(wl1271_band_2ghz));
6308 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6309 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6310 	       sizeof(*wl->ht_cap));
6311 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6312 	       sizeof(wl1271_band_5ghz));
6313 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6314 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6315 	       sizeof(*wl->ht_cap));
6316 
6317 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6318 		&wl->bands[NL80211_BAND_2GHZ];
6319 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6320 		&wl->bands[NL80211_BAND_5GHZ];
6321 
6322 	/*
6323 	 * allow 4 queues per mac address we support +
6324 	 * 1 cab queue per mac + one global offchannel Tx queue
6325 	 */
6326 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
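	/* e.g. with 4 TX queues and 3 MAC addresses: (4 + 1) * 3 + 1 = 16 */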
6327 
6328 	/* the last queue is the offchannel queue */
6329 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6330 	wl->hw->max_rates = 1;
6331 
6332 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6333 
6334 	/* the FW answers probe-requests in AP-mode */
6335 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6336 	wl->hw->wiphy->probe_resp_offload =
6337 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6338 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6339 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6340 
6341 	/* allowed interface combinations */
6342 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6343 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6344 
6345 	/* register vendor commands */
6346 	wlcore_set_vendor_commands(wl->hw->wiphy);
6347 
6348 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6349 
6350 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6351 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6352 
6353 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6354 
6355 	return 0;
6356 }
6357 
6358 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6359 				     u32 mbox_size)
6360 {
6361 	struct ieee80211_hw *hw;
6362 	struct wl1271 *wl;
6363 	int i, j, ret;
6364 	unsigned int order;
6365 
6366 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6367 	if (!hw) {
6368 		wl1271_error("could not alloc ieee80211_hw");
6369 		ret = -ENOMEM;
6370 		goto err_hw_alloc;
6371 	}
6372 
6373 	wl = hw->priv;
6374 	memset(wl, 0, sizeof(*wl));
6375 
6376 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6377 	if (!wl->priv) {
6378 		wl1271_error("could not alloc wl priv");
6379 		ret = -ENOMEM;
6380 		goto err_priv_alloc;
6381 	}
6382 
6383 	INIT_LIST_HEAD(&wl->wlvif_list);
6384 
6385 	wl->hw = hw;
6386 
6387 	/*
6388 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6389 	 * we don't allocate any additional resource here, so that's fine.
6390 	 */
6391 	for (i = 0; i < NUM_TX_QUEUES; i++)
6392 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6393 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6394 
6395 	skb_queue_head_init(&wl->deferred_rx_queue);
6396 	skb_queue_head_init(&wl->deferred_tx_queue);
6397 
6398 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6399 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6400 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6401 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6402 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6403 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6404 
6405 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6406 	if (!wl->freezable_wq) {
6407 		ret = -ENOMEM;
6408 		goto err_hw;
6409 	}
6410 
6411 	wl->channel = 0;
6412 	wl->rx_counter = 0;
6413 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6414 	wl->band = NL80211_BAND_2GHZ;
6415 	wl->channel_type = NL80211_CHAN_NO_HT;
6416 	wl->flags = 0;
6417 	wl->sg_enabled = true;
6418 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6419 	wl->recovery_count = 0;
6420 	wl->hw_pg_ver = -1;
6421 	wl->ap_ps_map = 0;
6422 	wl->ap_fw_ps_map = 0;
6423 	wl->quirks = 0;
6424 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6425 	wl->active_sta_count = 0;
6426 	wl->active_link_count = 0;
6427 	wl->fwlog_size = 0;
6428 
6429 	/* The system link is always allocated */
6430 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6431 
6432 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6433 	for (i = 0; i < wl->num_tx_desc; i++)
6434 		wl->tx_frames[i] = NULL;
6435 
6436 	spin_lock_init(&wl->wl_lock);
6437 
6438 	wl->state = WLCORE_STATE_OFF;
6439 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6440 	mutex_init(&wl->mutex);
6441 	mutex_init(&wl->flush_mutex);
6442 	init_completion(&wl->nvs_loading_complete);
6443 
6444 	order = get_order(aggr_buf_size);
6445 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6446 	if (!wl->aggr_buf) {
6447 		ret = -ENOMEM;
6448 		goto err_wq;
6449 	}
6450 	wl->aggr_buf_size = aggr_buf_size;
6451 
6452 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6453 	if (!wl->dummy_packet) {
6454 		ret = -ENOMEM;
6455 		goto err_aggr;
6456 	}
6457 
6458 	/* Allocate one page for the FW log */
6459 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6460 	if (!wl->fwlog) {
6461 		ret = -ENOMEM;
6462 		goto err_dummy_packet;
6463 	}
6464 
6465 	wl->mbox_size = mbox_size;
6466 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6467 	if (!wl->mbox) {
6468 		ret = -ENOMEM;
6469 		goto err_fwlog;
6470 	}
6471 
6472 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6473 	if (!wl->buffer_32) {
6474 		ret = -ENOMEM;
6475 		goto err_mbox;
6476 	}
6477 
6478 	return hw;
6479 
6480 err_mbox:
6481 	kfree(wl->mbox);
6482 
6483 err_fwlog:
6484 	free_page((unsigned long)wl->fwlog);
6485 
6486 err_dummy_packet:
6487 	dev_kfree_skb(wl->dummy_packet);
6488 
6489 err_aggr:
6490 	free_pages((unsigned long)wl->aggr_buf, order);
6491 
6492 err_wq:
6493 	destroy_workqueue(wl->freezable_wq);
6494 
6495 err_hw:
6496 	wl1271_debugfs_exit(wl);
6497 	kfree(wl->priv);
6498 
6499 err_priv_alloc:
6500 	ieee80211_free_hw(hw);
6501 
6502 err_hw_alloc:
6503 
6504 	return ERR_PTR(ret);
6505 }
6506 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6507 
6508 int wlcore_free_hw(struct wl1271 *wl)
6509 {
6510 	/* Unblock any fwlog readers */
6511 	mutex_lock(&wl->mutex);
6512 	wl->fwlog_size = -1;
6513 	mutex_unlock(&wl->mutex);
6514 
6515 	wlcore_sysfs_free(wl);
6516 
6517 	kfree(wl->buffer_32);
6518 	kfree(wl->mbox);
6519 	free_page((unsigned long)wl->fwlog);
6520 	dev_kfree_skb(wl->dummy_packet);
6521 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6522 
6523 	wl1271_debugfs_exit(wl);
6524 
6525 	vfree(wl->fw);
6526 	wl->fw = NULL;
6527 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6528 	kfree(wl->nvs);
6529 	wl->nvs = NULL;
6530 
6531 	kfree(wl->raw_fw_status);
6532 	kfree(wl->fw_status);
6533 	kfree(wl->tx_res_if);
6534 	destroy_workqueue(wl->freezable_wq);
6535 
6536 	kfree(wl->priv);
6537 	ieee80211_free_hw(wl->hw);
6538 
6539 	return 0;
6540 }
6541 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6542 
6543 #ifdef CONFIG_PM
6544 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6545 	.flags = WIPHY_WOWLAN_ANY,
6546 	.n_patterns = WL1271_MAX_RX_FILTERS,
6547 	.pattern_min_len = 1,
6548 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6549 };
6550 #endif
6551 
6552 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6553 {
6554 	return IRQ_WAKE_THREAD;
6555 }
6556 
6557 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6558 {
6559 	struct wl1271 *wl = context;
6560 	struct platform_device *pdev = wl->pdev;
6561 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6562 	struct resource *res;
6563 
6564 	int ret;
6565 	irq_handler_t hardirq_fn = NULL;
6566 
6567 	if (fw) {
6568 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6569 		if (!wl->nvs) {
6570 			wl1271_error("Could not allocate nvs data");
6571 			goto out;
6572 		}
6573 		wl->nvs_len = fw->size;
6574 	} else if (pdev_data->family->nvs_name) {
6575 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6576 			     pdev_data->family->nvs_name);
6577 		wl->nvs = NULL;
6578 		wl->nvs_len = 0;
6579 	} else {
6580 		wl->nvs = NULL;
6581 		wl->nvs_len = 0;
6582 	}
6583 
6584 	ret = wl->ops->setup(wl);
6585 	if (ret < 0)
6586 		goto out_free_nvs;
6587 
6588 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6589 
6590 	/* adjust some runtime configuration parameters */
6591 	wlcore_adjust_conf(wl);
6592 
6593 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6594 	if (!res) {
6595 		wl1271_error("Could not get IRQ resource");
6596 		goto out_free_nvs;
6597 	}
6598 
6599 	wl->irq = res->start;
6600 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6601 	wl->if_ops = pdev_data->if_ops;
6602 
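	/*
	 * Edge-triggered interrupts need a primary handler that just wakes
	 * the IRQ thread; level-triggered ones use the default primary
	 * handler, which requires IRQF_ONESHOT.
	 */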
6603 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6604 		hardirq_fn = wlcore_hardirq;
6605 	else
6606 		wl->irq_flags |= IRQF_ONESHOT;
6607 
6608 	ret = wl12xx_set_power_on(wl);
6609 	if (ret < 0)
6610 		goto out_free_nvs;
6611 
6612 	ret = wl12xx_get_hw_info(wl);
6613 	if (ret < 0) {
6614 		wl1271_error("couldn't get hw info");
6615 		wl1271_power_off(wl);
6616 		goto out_free_nvs;
6617 	}
6618 
6619 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6620 				   wl->irq_flags, pdev->name, wl);
6621 	if (ret < 0) {
6622 		wl1271_error("interrupt configuration failed");
6623 		wl1271_power_off(wl);
6624 		goto out_free_nvs;
6625 	}
6626 
6627 #ifdef CONFIG_PM
6628 	ret = enable_irq_wake(wl->irq);
6629 	if (!ret) {
6630 		wl->irq_wake_enabled = true;
6631 		device_init_wakeup(wl->dev, 1);
6632 		if (pdev_data->pwr_in_suspend)
6633 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6634 	}
6635 #endif
6636 	disable_irq(wl->irq);
6637 	wl1271_power_off(wl);
6638 
6639 	ret = wl->ops->identify_chip(wl);
6640 	if (ret < 0)
6641 		goto out_irq;
6642 
6643 	ret = wl1271_init_ieee80211(wl);
6644 	if (ret)
6645 		goto out_irq;
6646 
6647 	ret = wl1271_register_hw(wl);
6648 	if (ret)
6649 		goto out_irq;
6650 
6651 	ret = wlcore_sysfs_init(wl);
6652 	if (ret)
6653 		goto out_unreg;
6654 
6655 	wl->initialized = true;
6656 	goto out;
6657 
6658 out_unreg:
6659 	wl1271_unregister_hw(wl);
6660 
6661 out_irq:
6662 	free_irq(wl->irq, wl);
6663 
6664 out_free_nvs:
6665 	kfree(wl->nvs);
6666 
6667 out:
6668 	release_firmware(fw);
6669 	complete_all(&wl->nvs_loading_complete);
6670 }
6671 
6672 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6673 {
6674 	struct wl1271 *wl = dev_get_drvdata(dev);
6675 	struct wl12xx_vif *wlvif;
6676 	int error;
6677 
6678 	/* We do not enter elp sleep in PLT mode */
6679 	if (wl->plt)
6680 		return 0;
6681 
6682 	/* Nothing to do if no ELP mode requested */
6683 	if (wl->sleep_auth != WL1271_PSM_ELP)
6684 		return 0;
6685 
6686 	wl12xx_for_each_wlvif(wl, wlvif) {
6687 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6688 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6689 			return -EBUSY;
6690 	}
6691 
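	/* every vif is either unused or already in PS, so ELP is allowed */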
6692 	wl1271_debug(DEBUG_PSM, "chip to elp");
6693 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6694 	if (error < 0) {
6695 		wl12xx_queue_recovery_work(wl);
6696 
6697 		return error;
6698 	}
6699 
6700 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6701 
6702 	return 0;
6703 }
6704 
6705 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6706 {
6707 	struct wl1271 *wl = dev_get_drvdata(dev);
6708 	DECLARE_COMPLETION_ONSTACK(compl);
6709 	unsigned long flags;
6710 	int ret;
6711 	unsigned long start_time = jiffies;
6712 	bool pending = false;
6713 
6714 	/* Nothing to do if no ELP mode requested */
6715 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6716 		return 0;
6717 
6718 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6719 
6720 	spin_lock_irqsave(&wl->wl_lock, flags);
6721 	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
6722 		pending = true;
6723 	else
6724 		wl->elp_compl = &compl;
6725 	spin_unlock_irqrestore(&wl->wl_lock, flags);
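	/*
	 * If the IRQ thread is already running it will pick up the wakeup
	 * event itself; otherwise the completion registered above is
	 * completed from the IRQ path once the chip has woken up.
	 */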
6726 
6727 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6728 	if (ret < 0) {
6729 		wl12xx_queue_recovery_work(wl);
6730 		goto err;
6731 	}
6732 
6733 	if (!pending) {
6734 		ret = wait_for_completion_timeout(&compl,
6735 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6736 		if (ret == 0) {
6737 			wl1271_error("ELP wakeup timeout!");
6738 			wl12xx_queue_recovery_work(wl);
6739 
6740 			/* don't report an error to runtime PM; recovery will handle it */
6741 			return 0;
6742 		}
6743 	}
6744 
6745 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6746 
6747 	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6748 		     jiffies_to_msecs(jiffies - start_time));
6749 
6750 	return 0;
6751 
6752 err:
6753 	spin_lock_irqsave(&wl->wl_lock, flags);
6754 	wl->elp_compl = NULL;
6755 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6756 	return ret;
6757 }
6758 
6759 static const struct dev_pm_ops wlcore_pm_ops = {
6760 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6761 			   wlcore_runtime_resume,
6762 			   NULL)
6763 };
6764 
6765 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6766 {
6767 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6768 	const char *nvs_name;
6769 	int ret = 0;
6770 
6771 	if (!wl->ops || !wl->ptable || !pdev_data)
6772 		return -EINVAL;
6773 
6774 	wl->dev = &pdev->dev;
6775 	wl->pdev = pdev;
6776 	platform_set_drvdata(pdev, wl);
6777 
6778 	if (pdev_data->family && pdev_data->family->nvs_name) {
6779 		nvs_name = pdev_data->family->nvs_name;
6780 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6781 					      nvs_name, &pdev->dev, GFP_KERNEL,
6782 					      wl, wlcore_nvs_cb);
6783 		if (ret < 0) {
6784 			wl1271_error("request_firmware_nowait failed for %s: %d",
6785 				     nvs_name, ret);
6786 			complete_all(&wl->nvs_loading_complete);
6787 		}
6788 	} else {
6789 		wlcore_nvs_cb(NULL, wl);
6790 	}
6791 
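	/*
	 * Enable runtime PM with a short (50 ms) autosuspend delay so the
	 * chip can drop back into its low-power state between accesses.
	 */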
6792 	wl->dev->driver->pm = &wlcore_pm_ops;
6793 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6794 	pm_runtime_use_autosuspend(wl->dev);
6795 	pm_runtime_enable(wl->dev);
6796 
6797 	return ret;
6798 }
6799 EXPORT_SYMBOL_GPL(wlcore_probe);
6800 
6801 int wlcore_remove(struct platform_device *pdev)
6802 {
6803 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6804 	struct wl1271 *wl = platform_get_drvdata(pdev);
6805 	int error;
6806 
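	/* wake the device up for teardown; warn but carry on if that fails */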
6807 	error = pm_runtime_get_sync(wl->dev);
6808 	if (error < 0)
6809 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6810 
6811 	wl->dev->driver->pm = NULL;
6812 
6813 	if (pdev_data->family && pdev_data->family->nvs_name)
6814 		wait_for_completion(&wl->nvs_loading_complete);
6815 	if (!wl->initialized)
6816 		return 0;
6817 
6818 	if (wl->irq_wake_enabled) {
6819 		device_init_wakeup(wl->dev, 0);
6820 		disable_irq_wake(wl->irq);
6821 	}
6822 	wl1271_unregister_hw(wl);
6823 
6824 	pm_runtime_put_sync(wl->dev);
6825 	pm_runtime_dont_use_autosuspend(wl->dev);
6826 	pm_runtime_disable(wl->dev);
6827 
6828 	free_irq(wl->irq, wl);
6829 	wlcore_free_hw(wl);
6830 
6831 	return 0;
6832 }
6833 EXPORT_SYMBOL_GPL(wlcore_remove);
6834 
6835 u32 wl12xx_debug_level = DEBUG_NONE;
6836 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6837 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6838 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6839 
6840 module_param_named(fwlog, fwlog_param, charp, 0);
6841 MODULE_PARM_DESC(fwlog,
6842 		 "FW logger options: continuous, dbgpins or disable");
6843 
6844 module_param(fwlog_mem_blocks, int, 0600);
6845 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6846 
6847 module_param(bug_on_recovery, int, 0600);
6848 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6849 
6850 module_param(no_recovery, int, 0600);
6851 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6852 
6853 MODULE_LICENSE("GPL");
6854 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6855 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6856