xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/main.c (revision 151f4e2b)
1 /*
2  * This file is part of wlcore
3  *
4  * Copyright (C) 2008-2010 Nokia Corporation
5  * Copyright (C) 2011-2013 Texas Instruments Inc.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * version 2 as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19  * 02110-1301 USA
20  *
21  */
22 
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/pm_wakeirq.h>
31 
32 #include "wlcore.h"
33 #include "debug.h"
34 #include "wl12xx_80211.h"
35 #include "io.h"
36 #include "tx.h"
37 #include "ps.h"
38 #include "init.h"
39 #include "debugfs.h"
40 #include "testmode.h"
41 #include "vendor_cmd.h"
42 #include "scan.h"
43 #include "hw_ops.h"
44 #include "sysfs.h"
45 
46 #define WL1271_BOOT_RETRIES 3
47 #define WL1271_SUSPEND_SLEEP 100
48 #define WL1271_WAKEUP_TIMEOUT 500
49 
50 static char *fwlog_param;
51 static int fwlog_mem_blocks = -1;
52 static int bug_on_recovery = -1;
53 static int no_recovery     = -1;
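/*
 * A value of -1 (or a NULL fwlog_param) means "not set on the module command
 * line"; wlcore_adjust_conf() only overrides the platform conf for parameters
 * that were actually supplied.
 */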
54 
55 static void __wl1271_op_remove_interface(struct wl1271 *wl,
56 					 struct ieee80211_vif *vif,
57 					 bool reset_tx_queues);
58 static void wlcore_op_stop_locked(struct wl1271 *wl);
59 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
60 
61 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
62 {
63 	int ret;
64 
65 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
66 		return -EINVAL;
67 
68 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
69 		return 0;
70 
71 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
72 		return 0;
73 
74 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
75 	if (ret < 0)
76 		return ret;
77 
78 	wl1271_info("Association completed.");
79 	return 0;
80 }
81 
82 static void wl1271_reg_notify(struct wiphy *wiphy,
83 			      struct regulatory_request *request)
84 {
85 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
86 	struct wl1271 *wl = hw->priv;
87 
88 	/* copy the current dfs region */
89 	if (request)
90 		wl->dfs_region = request->dfs_region;
91 
92 	wlcore_regdomain_config(wl);
93 }
94 
95 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
96 				   bool enable)
97 {
98 	int ret = 0;
99 
100 	/* we should hold wl->mutex */
101 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
102 	if (ret < 0)
103 		goto out;
104 
105 	if (enable)
106 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
107 	else
108 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
109 out:
110 	return ret;
111 }
112 
113 /*
114  * this function is called when the rx_streaming interval
115  * has been changed or rx_streaming should be disabled
116  */
117 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
118 {
119 	int ret = 0;
120 	int period = wl->conf.rx_streaming.interval;
121 
122 	/* don't reconfigure if rx_streaming is disabled */
123 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
124 		goto out;
125 
126 	/* reconfigure/disable according to new streaming_period */
127 	if (period &&
128 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
129 	    (wl->conf.rx_streaming.always ||
130 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
131 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
132 	else {
133 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
134 		/* don't cancel_work_sync since we might deadlock */
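		/*
		 * (both rx_streaming works take wl->mutex, which callers of
		 * this function already hold, so only the timer can safely
		 * be stopped synchronously here)
		 */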
135 		del_timer_sync(&wlvif->rx_streaming_timer);
136 	}
137 out:
138 	return ret;
139 }
140 
141 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
142 {
143 	int ret;
144 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
145 						rx_streaming_enable_work);
146 	struct wl1271 *wl = wlvif->wl;
147 
148 	mutex_lock(&wl->mutex);
149 
150 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
151 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
152 	    (!wl->conf.rx_streaming.always &&
153 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
154 		goto out;
155 
156 	if (!wl->conf.rx_streaming.interval)
157 		goto out;
158 
159 	ret = pm_runtime_get_sync(wl->dev);
160 	if (ret < 0) {
161 		pm_runtime_put_noidle(wl->dev);
162 		goto out;
163 	}
164 
165 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
166 	if (ret < 0)
167 		goto out_sleep;
168 
169 	/* stop it after some time of inactivity */
170 	mod_timer(&wlvif->rx_streaming_timer,
171 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
172 
173 out_sleep:
174 	pm_runtime_mark_last_busy(wl->dev);
175 	pm_runtime_put_autosuspend(wl->dev);
176 out:
177 	mutex_unlock(&wl->mutex);
178 }
179 
180 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
181 {
182 	int ret;
183 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
184 						rx_streaming_disable_work);
185 	struct wl1271 *wl = wlvif->wl;
186 
187 	mutex_lock(&wl->mutex);
188 
189 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
190 		goto out;
191 
192 	ret = pm_runtime_get_sync(wl->dev);
193 	if (ret < 0) {
194 		pm_runtime_put_noidle(wl->dev);
195 		goto out;
196 	}
197 
198 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
199 	if (ret)
200 		goto out_sleep;
201 
202 out_sleep:
203 	pm_runtime_mark_last_busy(wl->dev);
204 	pm_runtime_put_autosuspend(wl->dev);
205 out:
206 	mutex_unlock(&wl->mutex);
207 }
208 
209 static void wl1271_rx_streaming_timer(struct timer_list *t)
210 {
211 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
212 	struct wl1271 *wl = wlvif->wl;
213 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
214 }
215 
216 /* wl->mutex must be taken */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
218 {
219 	/* if the watchdog is not armed, don't do anything */
220 	if (wl->tx_allocated_blocks == 0)
221 		return;
222 
223 	cancel_delayed_work(&wl->tx_watchdog_work);
224 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
226 }
227 
228 static void wlcore_rc_update_work(struct work_struct *work)
229 {
230 	int ret;
231 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
232 						rc_update_work);
233 	struct wl1271 *wl = wlvif->wl;
234 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
235 
236 	mutex_lock(&wl->mutex);
237 
238 	if (unlikely(wl->state != WLCORE_STATE_ON))
239 		goto out;
240 
241 	ret = pm_runtime_get_sync(wl->dev);
242 	if (ret < 0) {
243 		pm_runtime_put_noidle(wl->dev);
244 		goto out;
245 	}
246 
247 	if (ieee80211_vif_is_mesh(vif)) {
248 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
249 						     true, wlvif->sta.hlid);
250 		if (ret < 0)
251 			goto out_sleep;
252 	} else {
253 		wlcore_hw_sta_rc_update(wl, wlvif);
254 	}
255 
256 out_sleep:
257 	pm_runtime_mark_last_busy(wl->dev);
258 	pm_runtime_put_autosuspend(wl->dev);
259 out:
260 	mutex_unlock(&wl->mutex);
261 }
262 
263 static void wl12xx_tx_watchdog_work(struct work_struct *work)
264 {
265 	struct delayed_work *dwork;
266 	struct wl1271 *wl;
267 
268 	dwork = to_delayed_work(work);
269 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
270 
271 	mutex_lock(&wl->mutex);
272 
273 	if (unlikely(wl->state != WLCORE_STATE_ON))
274 		goto out;
275 
276 	/* Tx went out in the meantime - everything is ok */
277 	if (unlikely(wl->tx_allocated_blocks == 0))
278 		goto out;
279 
280 	/*
281 	 * if a ROC is in progress, we might not have any Tx for a long
282 	 * time (e.g. pending Tx on the non-ROC channels)
283 	 */
284 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
285 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
286 			     wl->conf.tx.tx_watchdog_timeout);
287 		wl12xx_rearm_tx_watchdog_locked(wl);
288 		goto out;
289 	}
290 
291 	/*
292 	 * if a scan is in progress, we might not have any Tx for a long
293 	 * time
294 	 */
295 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
296 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
297 			     wl->conf.tx.tx_watchdog_timeout);
298 		wl12xx_rearm_tx_watchdog_locked(wl);
299 		goto out;
300 	}
301 
302 	/*
303 	 * AP might cache a frame for a long time for a sleeping station,
304 	 * so rearm the timer if there's an AP interface with stations. If
305 	 * Tx is genuinely stuck we will hopefully discover it when all
306 	 * stations are removed due to inactivity.
307 	 */
308 	if (wl->active_sta_count) {
309 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
310 			     "%d stations",
311 			      wl->conf.tx.tx_watchdog_timeout,
312 			      wl->active_sta_count);
313 		wl12xx_rearm_tx_watchdog_locked(wl);
314 		goto out;
315 	}
316 
317 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
318 		     wl->conf.tx.tx_watchdog_timeout);
319 	wl12xx_queue_recovery_work(wl);
320 
321 out:
322 	mutex_unlock(&wl->mutex);
323 }
324 
325 static void wlcore_adjust_conf(struct wl1271 *wl)
326 {
327 
328 	if (fwlog_param) {
329 		if (!strcmp(fwlog_param, "continuous")) {
330 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
331 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
332 		} else if (!strcmp(fwlog_param, "dbgpins")) {
333 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
334 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
335 		} else if (!strcmp(fwlog_param, "disable")) {
336 			wl->conf.fwlog.mem_blocks = 0;
337 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
338 		} else {
339 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
340 		}
341 	}
342 
343 	if (bug_on_recovery != -1)
344 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
345 
346 	if (no_recovery != -1)
347 		wl->conf.recovery.no_recovery = (u8) no_recovery;
348 }
349 
350 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
351 					struct wl12xx_vif *wlvif,
352 					u8 hlid, u8 tx_pkts)
353 {
354 	bool fw_ps;
355 
356 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
357 
358 	/*
359 	 * Wake up from high-level PS if the STA is asleep with too few
360 	 * packets in FW or if the STA is awake.
361 	 */
362 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
363 		wl12xx_ps_link_end(wl, wlvif, hlid);
364 
365 	/*
366 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
367 	 * Make an exception if this is the only connected link. In this
368 	 * case FW-memory congestion is less of a problem.
369 	 * Note that a single connected STA means 2*ap_count + 1 active links,
370 	 * since we must account for the global and broadcast AP links
371 	 * for each AP. The "fw_ps" check assures us the other link is a STA
372 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
373 	 */
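	/*
	 * Example: with a single AP (ap_count == 1) one connected STA gives
	 * three active links (global + broadcast + STA), which is not above
	 * 2*ap_count + 1, so high-level PS is not started; a second connected
	 * STA raises the count to four and allows it.
	 */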
374 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
375 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
376 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
377 }
378 
379 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
380 					   struct wl12xx_vif *wlvif,
381 					   struct wl_fw_status *status)
382 {
383 	unsigned long cur_fw_ps_map;
384 	u8 hlid;
385 
386 	cur_fw_ps_map = status->link_ps_bitmap;
387 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
388 		wl1271_debug(DEBUG_PSM,
389 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
390 			     wl->ap_fw_ps_map, cur_fw_ps_map,
391 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
392 
393 		wl->ap_fw_ps_map = cur_fw_ps_map;
394 	}
395 
396 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
397 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
398 					    wl->links[hlid].allocated_pkts);
399 }
400 
401 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
402 {
403 	struct wl12xx_vif *wlvif;
404 	u32 old_tx_blk_count = wl->tx_blocks_available;
405 	int avail, freed_blocks;
406 	int i;
407 	int ret;
408 	struct wl1271_link *lnk;
409 
410 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
411 				   wl->raw_fw_status,
412 				   wl->fw_status_len, false);
413 	if (ret < 0)
414 		return ret;
415 
416 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
417 
418 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
419 		     "drv_rx_counter = %d, tx_results_counter = %d)",
420 		     status->intr,
421 		     status->fw_rx_counter,
422 		     status->drv_rx_counter,
423 		     status->tx_results_counter);
424 
425 	for (i = 0; i < NUM_TX_QUEUES; i++) {
426 		/* prevent wrap-around in freed-packets counter */
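		/*
		 * The "& 0xff" makes the subtraction modulo 256, so the
		 * counters may wrap: e.g. released 0x02 with a previously
		 * seen 0xfe means 4 packets were freed since the last status.
		 */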
427 		wl->tx_allocated_pkts[i] -=
428 				(status->counters.tx_released_pkts[i] -
429 				wl->tx_pkts_freed[i]) & 0xff;
430 
431 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
432 	}
433 
434 
435 	for_each_set_bit(i, wl->links_map, wl->num_links) {
436 		u8 diff;
437 		lnk = &wl->links[i];
438 
439 		/* prevent wrap-around in freed-packets counter */
440 		diff = (status->counters.tx_lnk_free_pkts[i] -
441 		       lnk->prev_freed_pkts) & 0xff;
442 
443 		if (diff == 0)
444 			continue;
445 
446 		lnk->allocated_pkts -= diff;
447 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
448 
449 		/* accumulate the prev_freed_pkts counter */
450 		lnk->total_freed_pkts += diff;
451 	}
452 
453 	/* prevent wrap-around in total blocks counter */
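	/*
	 * total_released_blks wraps at 2^32: e.g. a previous freed count of
	 * 0xfffffff0 and a reported total of 0x10 means 0x20 blocks were
	 * freed since the last status read.
	 */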
454 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
455 		freed_blocks = status->total_released_blks -
456 			       wl->tx_blocks_freed;
457 	else
458 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
459 			       status->total_released_blks;
460 
461 	wl->tx_blocks_freed = status->total_released_blks;
462 
463 	wl->tx_allocated_blocks -= freed_blocks;
464 
465 	/*
466 	 * If the FW freed some blocks:
467 	 * If we still have allocated blocks, re-arm the timer - Tx is
468 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
469 	 */
470 	if (freed_blocks) {
471 		if (wl->tx_allocated_blocks)
472 			wl12xx_rearm_tx_watchdog_locked(wl);
473 		else
474 			cancel_delayed_work(&wl->tx_watchdog_work);
475 	}
476 
477 	avail = status->tx_total - wl->tx_allocated_blocks;
478 
479 	/*
480 	 * The FW might change the total number of TX memblocks before
481 	 * we get a notification about blocks being released. Thus, the
482 	 * available blocks calculation might yield a temporary result
483 	 * which is lower than the actual available blocks. Keeping in
484 	 * mind that only blocks that were allocated can be moved from
485 	 * TX to RX, tx_blocks_available should never decrease here.
486 	 */
487 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
488 				      avail);
489 
490 	/* if more blocks are available now, tx work can be scheduled */
491 	if (wl->tx_blocks_available > old_tx_blk_count)
492 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
493 
494 	/* for AP update num of allocated TX blocks per link and ps status */
495 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
496 		wl12xx_irq_update_links_status(wl, wlvif, status);
497 	}
498 
499 	/* update the host-chipset time offset */
500 	wl->time_offset = (ktime_get_boot_ns() >> 10) -
501 		(s64)(status->fw_localtime);
502 
503 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
504 
505 	return 0;
506 }
507 
508 static void wl1271_flush_deferred_work(struct wl1271 *wl)
509 {
510 	struct sk_buff *skb;
511 
512 	/* Pass all received frames to the network stack */
513 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
514 		ieee80211_rx_ni(wl->hw, skb);
515 
516 	/* Return sent skbs to the network stack */
517 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
518 		ieee80211_tx_status_ni(wl->hw, skb);
519 }
520 
521 static void wl1271_netstack_work(struct work_struct *work)
522 {
523 	struct wl1271 *wl =
524 		container_of(work, struct wl1271, netstack_work);
525 
526 	do {
527 		wl1271_flush_deferred_work(wl);
528 	} while (skb_queue_len(&wl->deferred_rx_queue));
529 }
530 
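/*
 * Upper bound on how many times wlcore_irq_locked() re-reads the FW status
 * in one invocation (level-triggered IRQs); edge-triggered IRQs are limited
 * to a single pass below.
 */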
531 #define WL1271_IRQ_MAX_LOOPS 256
532 
533 static int wlcore_irq_locked(struct wl1271 *wl)
534 {
535 	int ret = 0;
536 	u32 intr;
537 	int loopcount = WL1271_IRQ_MAX_LOOPS;
538 	bool done = false;
539 	unsigned int defer_count;
540 	unsigned long flags;
541 
542 	/*
543 	 * If an edge-triggered interrupt must be used, we cannot iterate
544 	 * more than once without introducing race conditions with the hardirq.
545 	 */
546 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
547 		loopcount = 1;
548 
549 	wl1271_debug(DEBUG_IRQ, "IRQ work");
550 
551 	if (unlikely(wl->state != WLCORE_STATE_ON))
552 		goto out;
553 
554 	ret = pm_runtime_get_sync(wl->dev);
555 	if (ret < 0) {
556 		pm_runtime_put_noidle(wl->dev);
557 		goto out;
558 	}
559 
560 	while (!done && loopcount--) {
561 		/*
562 		 * In order to avoid a race with the hardirq, clear the flag
563 		 * before acknowledging the chip.
564 		 */
565 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
566 		smp_mb__after_atomic();
567 
568 		ret = wlcore_fw_status(wl, wl->fw_status);
569 		if (ret < 0)
570 			goto out;
571 
572 		wlcore_hw_tx_immediate_compl(wl);
573 
574 		intr = wl->fw_status->intr;
575 		intr &= WLCORE_ALL_INTR_MASK;
576 		if (!intr) {
577 			done = true;
578 			continue;
579 		}
580 
581 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
582 			wl1271_error("HW watchdog interrupt received! starting recovery.");
583 			wl->watchdog_recovery = true;
584 			ret = -EIO;
585 
586 			/* restarting the chip. ignore any other interrupt. */
587 			goto out;
588 		}
589 
590 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
591 			wl1271_error("SW watchdog interrupt received! starting recovery.");
593 			wl->watchdog_recovery = true;
594 			ret = -EIO;
595 
596 			/* restarting the chip. ignore any other interrupt. */
597 			goto out;
598 		}
599 
600 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
601 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
602 
603 			ret = wlcore_rx(wl, wl->fw_status);
604 			if (ret < 0)
605 				goto out;
606 
607 			/* Check if any tx blocks were freed */
608 			spin_lock_irqsave(&wl->wl_lock, flags);
609 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
610 			    wl1271_tx_total_queue_count(wl) > 0) {
611 				spin_unlock_irqrestore(&wl->wl_lock, flags);
612 				/*
613 				 * In order to avoid starvation of the TX path,
614 				 * call the work function directly.
615 				 */
616 				ret = wlcore_tx_work_locked(wl);
617 				if (ret < 0)
618 					goto out;
619 			} else {
620 				spin_unlock_irqrestore(&wl->wl_lock, flags);
621 			}
622 
623 			/* check for tx results */
624 			ret = wlcore_hw_tx_delayed_compl(wl);
625 			if (ret < 0)
626 				goto out;
627 
628 			/* Make sure the deferred queues don't get too long */
629 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
630 				      skb_queue_len(&wl->deferred_rx_queue);
631 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
632 				wl1271_flush_deferred_work(wl);
633 		}
634 
635 		if (intr & WL1271_ACX_INTR_EVENT_A) {
636 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
637 			ret = wl1271_event_handle(wl, 0);
638 			if (ret < 0)
639 				goto out;
640 		}
641 
642 		if (intr & WL1271_ACX_INTR_EVENT_B) {
643 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
644 			ret = wl1271_event_handle(wl, 1);
645 			if (ret < 0)
646 				goto out;
647 		}
648 
649 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
650 			wl1271_debug(DEBUG_IRQ,
651 				     "WL1271_ACX_INTR_INIT_COMPLETE");
652 
653 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
654 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
655 	}
656 
657 	pm_runtime_mark_last_busy(wl->dev);
658 	pm_runtime_put_autosuspend(wl->dev);
659 
660 out:
661 	return ret;
662 }
663 
664 static irqreturn_t wlcore_irq(int irq, void *cookie)
665 {
666 	int ret;
667 	unsigned long flags;
668 	struct wl1271 *wl = cookie;
669 
670 	/* complete the ELP completion */
671 	spin_lock_irqsave(&wl->wl_lock, flags);
672 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
673 	if (wl->elp_compl) {
674 		complete(wl->elp_compl);
675 		wl->elp_compl = NULL;
676 	}
677 
678 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
679 		/* don't enqueue work right now; mark it as pending */
680 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
681 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
682 		disable_irq_nosync(wl->irq);
683 		pm_wakeup_event(wl->dev, 0);
684 		spin_unlock_irqrestore(&wl->wl_lock, flags);
685 		return IRQ_HANDLED;
686 	}
687 	spin_unlock_irqrestore(&wl->wl_lock, flags);
688 
689 	/* TX might be handled here, avoid redundant work */
690 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
691 	cancel_work_sync(&wl->tx_work);
692 
693 	mutex_lock(&wl->mutex);
694 
695 	ret = wlcore_irq_locked(wl);
696 	if (ret)
697 		wl12xx_queue_recovery_work(wl);
698 
699 	spin_lock_irqsave(&wl->wl_lock, flags);
700 	/* In case TX was not handled here, queue TX work */
701 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
702 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
703 	    wl1271_tx_total_queue_count(wl) > 0)
704 		ieee80211_queue_work(wl->hw, &wl->tx_work);
705 	spin_unlock_irqrestore(&wl->wl_lock, flags);
706 
707 	mutex_unlock(&wl->mutex);
708 
709 	return IRQ_HANDLED;
710 }
711 
712 struct vif_counter_data {
713 	u8 counter;
714 
715 	struct ieee80211_vif *cur_vif;
716 	bool cur_vif_running;
717 };
718 
719 static void wl12xx_vif_count_iter(void *data, u8 *mac,
720 				  struct ieee80211_vif *vif)
721 {
722 	struct vif_counter_data *counter = data;
723 
724 	counter->counter++;
725 	if (counter->cur_vif == vif)
726 		counter->cur_vif_running = true;
727 }
728 
729 /* caller must not hold wl->mutex, as it might deadlock */
730 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
731 			       struct ieee80211_vif *cur_vif,
732 			       struct vif_counter_data *data)
733 {
734 	memset(data, 0, sizeof(*data));
735 	data->cur_vif = cur_vif;
736 
737 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
738 					    wl12xx_vif_count_iter, data);
739 }
740 
741 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
742 {
743 	const struct firmware *fw;
744 	const char *fw_name;
745 	enum wl12xx_fw_type fw_type;
746 	int ret;
747 
748 	if (plt) {
749 		fw_type = WL12XX_FW_TYPE_PLT;
750 		fw_name = wl->plt_fw_name;
751 	} else {
752 		/*
753 		 * we can't call wl12xx_get_vif_count() here because
754 		 * wl->mutex is taken, so use the cached last_vif_count value
755 		 */
756 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
757 			fw_type = WL12XX_FW_TYPE_MULTI;
758 			fw_name = wl->mr_fw_name;
759 		} else {
760 			fw_type = WL12XX_FW_TYPE_NORMAL;
761 			fw_name = wl->sr_fw_name;
762 		}
763 	}
764 
765 	if (wl->fw_type == fw_type)
766 		return 0;
767 
768 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
769 
770 	ret = request_firmware(&fw, fw_name, wl->dev);
771 
772 	if (ret < 0) {
773 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
774 		return ret;
775 	}
776 
777 	if (fw->size % 4) {
778 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
779 			     fw->size);
780 		ret = -EILSEQ;
781 		goto out;
782 	}
783 
784 	vfree(wl->fw);
785 	wl->fw_type = WL12XX_FW_TYPE_NONE;
786 	wl->fw_len = fw->size;
787 	wl->fw = vmalloc(wl->fw_len);
788 
789 	if (!wl->fw) {
790 		wl1271_error("could not allocate memory for the firmware");
791 		ret = -ENOMEM;
792 		goto out;
793 	}
794 
795 	memcpy(wl->fw, fw->data, wl->fw_len);
796 	ret = 0;
797 	wl->fw_type = fw_type;
798 out:
799 	release_firmware(fw);
800 
801 	return ret;
802 }
803 
804 void wl12xx_queue_recovery_work(struct wl1271 *wl)
805 {
806 	/* Avoid a recursive recovery */
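	/*
	 * (moving to WLCORE_STATE_RESTARTING below makes any further calls
	 * no-ops until the recovery work has run)
	 */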
807 	if (wl->state == WLCORE_STATE_ON) {
808 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
809 				  &wl->flags));
810 
811 		wl->state = WLCORE_STATE_RESTARTING;
812 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
813 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
814 	}
815 }
816 
817 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
818 {
819 	size_t len;
820 
821 	/* Make sure we have enough room */
822 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
823 
824 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
825 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
826 	wl->fwlog_size += len;
827 
828 	return len;
829 }
830 
831 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
832 {
833 	u32 end_of_log = 0;
834 	int error;
835 
836 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
837 		return;
838 
839 	wl1271_info("Reading FW panic log");
840 
841 	/*
842 	 * Make sure the chip is awake and the logger isn't active.
843 	 * Do not send a stop fwlog command if the fw is hung or if
844 	 * dbgpins are used (due to some fw bug).
845 	 */
846 	error = pm_runtime_get_sync(wl->dev);
847 	if (error < 0) {
848 		pm_runtime_put_noidle(wl->dev);
849 		return;
850 	}
851 	if (!wl->watchdog_recovery &&
852 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
853 		wl12xx_cmd_stop_fwlog(wl);
854 
855 	/* Traverse the memory blocks linked list */
856 	do {
857 		end_of_log = wlcore_event_fw_logger(wl);
858 		if (end_of_log == 0) {
859 			msleep(100);
860 			end_of_log = wlcore_event_fw_logger(wl);
861 		}
862 	} while (end_of_log != 0);
863 }
864 
865 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
866 				   u8 hlid, struct ieee80211_sta *sta)
867 {
868 	struct wl1271_station *wl_sta;
869 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
870 
871 	wl_sta = (void *)sta->drv_priv;
872 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
873 
874 	/*
875 	 * increment the initial seq number on recovery to account for
876 	 * transmitted packets that have not yet shown up in the FW status
877 	 */
878 	if (wlvif->encryption_type == KEY_GEM)
879 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
880 
881 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
882 		wl_sta->total_freed_pkts += sqn_recovery_padding;
883 }
884 
885 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
886 					struct wl12xx_vif *wlvif,
887 					u8 hlid, const u8 *addr)
888 {
889 	struct ieee80211_sta *sta;
890 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
891 
892 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
893 		    is_zero_ether_addr(addr)))
894 		return;
895 
896 	rcu_read_lock();
897 	sta = ieee80211_find_sta(vif, addr);
898 	if (sta)
899 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
900 	rcu_read_unlock();
901 }
902 
903 static void wlcore_print_recovery(struct wl1271 *wl)
904 {
905 	u32 pc = 0;
906 	u32 hint_sts = 0;
907 	int ret;
908 
909 	wl1271_info("Hardware recovery in progress. FW ver: %s",
910 		    wl->chip.fw_ver_str);
911 
912 	/* change partitions momentarily so we can read the FW pc */
913 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
914 	if (ret < 0)
915 		return;
916 
917 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
918 	if (ret < 0)
919 		return;
920 
921 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
922 	if (ret < 0)
923 		return;
924 
925 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
926 				pc, hint_sts, ++wl->recovery_count);
927 
928 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
929 }
930 
931 
932 static void wl1271_recovery_work(struct work_struct *work)
933 {
934 	struct wl1271 *wl =
935 		container_of(work, struct wl1271, recovery_work);
936 	struct wl12xx_vif *wlvif;
937 	struct ieee80211_vif *vif;
938 	int error;
939 
940 	mutex_lock(&wl->mutex);
941 
942 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
943 		goto out_unlock;
944 
945 	error = pm_runtime_get_sync(wl->dev);
946 	if (error < 0) {
947 		wl1271_warning("Enable for recovery failed");
948 		pm_runtime_put_noidle(wl->dev);
949 	}
950 	wlcore_disable_interrupts_nosync(wl);
951 
952 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
953 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
954 			wl12xx_read_fwlog_panic(wl);
955 		wlcore_print_recovery(wl);
956 	}
957 
958 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
959 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
960 
961 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
962 
963 	if (wl->conf.recovery.no_recovery) {
964 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
965 		goto out_unlock;
966 	}
967 
968 	/* Prevent spurious TX during FW restart */
969 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
970 
971 	/* reboot the chipset */
972 	while (!list_empty(&wl->wlvif_list)) {
973 		wlvif = list_first_entry(&wl->wlvif_list,
974 				       struct wl12xx_vif, list);
975 		vif = wl12xx_wlvif_to_vif(wlvif);
976 
977 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
978 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
979 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
980 						    vif->bss_conf.bssid);
981 		}
982 
983 		__wl1271_op_remove_interface(wl, vif, false);
984 	}
985 
986 	wlcore_op_stop_locked(wl);
987 	pm_runtime_mark_last_busy(wl->dev);
988 	pm_runtime_put_autosuspend(wl->dev);
989 
990 	ieee80211_restart_hw(wl->hw);
991 
992 	/*
993 	 * It's safe to enable TX now - the queues are stopped after a request
994 	 * to restart the HW.
995 	 */
996 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
997 
998 out_unlock:
999 	wl->watchdog_recovery = false;
1000 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1001 	mutex_unlock(&wl->mutex);
1002 }
1003 
1004 static int wlcore_fw_wakeup(struct wl1271 *wl)
1005 {
1006 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1007 }
1008 
1009 static int wl1271_setup(struct wl1271 *wl)
1010 {
1011 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1012 	if (!wl->raw_fw_status)
1013 		goto err;
1014 
1015 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1016 	if (!wl->fw_status)
1017 		goto err;
1018 
1019 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1020 	if (!wl->tx_res_if)
1021 		goto err;
1022 
1023 	return 0;
1024 err:
1025 	kfree(wl->fw_status);
1026 	kfree(wl->raw_fw_status);
1027 	return -ENOMEM;
1028 }
1029 
1030 static int wl12xx_set_power_on(struct wl1271 *wl)
1031 {
1032 	int ret;
1033 
1034 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1035 	ret = wl1271_power_on(wl);
1036 	if (ret < 0)
1037 		goto out;
1038 	msleep(WL1271_POWER_ON_SLEEP);
1039 	wl1271_io_reset(wl);
1040 	wl1271_io_init(wl);
1041 
1042 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1043 	if (ret < 0)
1044 		goto fail;
1045 
1046 	/* ELP module wake up */
1047 	ret = wlcore_fw_wakeup(wl);
1048 	if (ret < 0)
1049 		goto fail;
1050 
1051 out:
1052 	return ret;
1053 
1054 fail:
1055 	wl1271_power_off(wl);
1056 	return ret;
1057 }
1058 
1059 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1060 {
1061 	int ret = 0;
1062 
1063 	ret = wl12xx_set_power_on(wl);
1064 	if (ret < 0)
1065 		goto out;
1066 
1067 	/*
1068 	 * For wl127x based devices we could use the default block
1069 	 * size (512 bytes), but due to a bug in the sdio driver, we
1070 	 * need to set it explicitly after the chip is powered on.  To
1071 	 * simplify the code and since the performance impact is
1072 	 * negligible, we use the same block size for all different
1073 	 * chip types.
1074 	 *
1075 	 * Check if the bus supports blocksize alignment and, if it
1076 	 * doesn't, make sure we don't have the quirk.
1077 	 */
1078 	if (!wl1271_set_block_size(wl))
1079 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1080 
1081 	/* TODO: make sure the lower driver has set things up correctly */
1082 
1083 	ret = wl1271_setup(wl);
1084 	if (ret < 0)
1085 		goto out;
1086 
1087 	ret = wl12xx_fetch_firmware(wl, plt);
1088 	if (ret < 0) {
1089 		kfree(wl->fw_status);
1090 		kfree(wl->raw_fw_status);
1091 		kfree(wl->tx_res_if);
1092 	}
1093 
1094 out:
1095 	return ret;
1096 }
1097 
1098 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1099 {
1100 	int retries = WL1271_BOOT_RETRIES;
1101 	struct wiphy *wiphy = wl->hw->wiphy;
1102 
1103 	static const char* const PLT_MODE[] = {
1104 		"PLT_OFF",
1105 		"PLT_ON",
1106 		"PLT_FEM_DETECT",
1107 		"PLT_CHIP_AWAKE"
1108 	};
1109 
1110 	int ret;
1111 
1112 	mutex_lock(&wl->mutex);
1113 
1114 	wl1271_notice("power up");
1115 
1116 	if (wl->state != WLCORE_STATE_OFF) {
1117 		wl1271_error("cannot go into PLT state because not "
1118 			     "in off state: %d", wl->state);
1119 		ret = -EBUSY;
1120 		goto out;
1121 	}
1122 
1123 	/* Indicate to lower levels that we are now in PLT mode */
1124 	wl->plt = true;
1125 	wl->plt_mode = plt_mode;
1126 
1127 	while (retries) {
1128 		retries--;
1129 		ret = wl12xx_chip_wakeup(wl, true);
1130 		if (ret < 0)
1131 			goto power_off;
1132 
1133 		if (plt_mode != PLT_CHIP_AWAKE) {
1134 			ret = wl->ops->plt_init(wl);
1135 			if (ret < 0)
1136 				goto power_off;
1137 		}
1138 
1139 		wl->state = WLCORE_STATE_ON;
1140 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1141 			      PLT_MODE[plt_mode],
1142 			      wl->chip.fw_ver_str);
1143 
1144 		/* update hw/fw version info in wiphy struct */
1145 		wiphy->hw_version = wl->chip.id;
1146 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1147 			sizeof(wiphy->fw_version));
1148 
1149 		goto out;
1150 
1151 power_off:
1152 		wl1271_power_off(wl);
1153 	}
1154 
1155 	wl->plt = false;
1156 	wl->plt_mode = PLT_OFF;
1157 
1158 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1159 		     WL1271_BOOT_RETRIES);
1160 out:
1161 	mutex_unlock(&wl->mutex);
1162 
1163 	return ret;
1164 }
1165 
1166 int wl1271_plt_stop(struct wl1271 *wl)
1167 {
1168 	int ret = 0;
1169 
1170 	wl1271_notice("power down");
1171 
1172 	/*
1173 	 * Interrupts must be disabled before setting the state to OFF.
1174 	 * Otherwise, the interrupt handler might be called and exit without
1175 	 * reading the interrupt status.
1176 	 */
1177 	wlcore_disable_interrupts(wl);
1178 	mutex_lock(&wl->mutex);
1179 	if (!wl->plt) {
1180 		mutex_unlock(&wl->mutex);
1181 
1182 		/*
1183 		 * This will not necessarily enable interrupts as interrupts
1184 		 * may have been disabled when op_stop was called. It will,
1185 		 * however, balance the above call to disable_interrupts().
1186 		 */
1187 		wlcore_enable_interrupts(wl);
1188 
1189 		wl1271_error("cannot power down because not in PLT "
1190 			     "state: %d", wl->state);
1191 		ret = -EBUSY;
1192 		goto out;
1193 	}
1194 
1195 	mutex_unlock(&wl->mutex);
1196 
1197 	wl1271_flush_deferred_work(wl);
1198 	cancel_work_sync(&wl->netstack_work);
1199 	cancel_work_sync(&wl->recovery_work);
1200 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1201 
1202 	mutex_lock(&wl->mutex);
1203 	wl1271_power_off(wl);
1204 	wl->flags = 0;
1205 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1206 	wl->state = WLCORE_STATE_OFF;
1207 	wl->plt = false;
1208 	wl->plt_mode = PLT_OFF;
1209 	wl->rx_counter = 0;
1210 	mutex_unlock(&wl->mutex);
1211 
1212 out:
1213 	return ret;
1214 }
1215 
1216 static void wl1271_op_tx(struct ieee80211_hw *hw,
1217 			 struct ieee80211_tx_control *control,
1218 			 struct sk_buff *skb)
1219 {
1220 	struct wl1271 *wl = hw->priv;
1221 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1222 	struct ieee80211_vif *vif = info->control.vif;
1223 	struct wl12xx_vif *wlvif = NULL;
1224 	unsigned long flags;
1225 	int q, mapping;
1226 	u8 hlid;
1227 
1228 	if (!vif) {
1229 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1230 		ieee80211_free_txskb(hw, skb);
1231 		return;
1232 	}
1233 
1234 	wlvif = wl12xx_vif_to_data(vif);
1235 	mapping = skb_get_queue_mapping(skb);
1236 	q = wl1271_tx_get_queue(mapping);
1237 
1238 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1239 
1240 	spin_lock_irqsave(&wl->wl_lock, flags);
1241 
1242 	/*
1243 	 * drop the packet if the link is invalid or the queue is stopped
1244 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1245 	 * allow these packets through.
1246 	 */
1247 	if (hlid == WL12XX_INVALID_LINK_ID ||
1248 	    (!test_bit(hlid, wlvif->links_map)) ||
1249 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1250 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1251 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1252 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1253 		ieee80211_free_txskb(hw, skb);
1254 		goto out;
1255 	}
1256 
1257 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1258 		     hlid, q, skb->len);
1259 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1260 
1261 	wl->tx_queue_count[q]++;
1262 	wlvif->tx_queue_count[q]++;
1263 
1264 	/*
1265 	 * The workqueue is slow to process the tx_queue, so we need to stop
1266 	 * the queue here; otherwise it will get too long.
1267 	 */
1268 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1269 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1270 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1271 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1272 		wlcore_stop_queue_locked(wl, wlvif, q,
1273 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1274 	}
1275 
1276 	/*
1277 	 * The chip-specific setup must run before the first TX packet -
1278 	 * before that, the tx_work will not be initialized!
1279 	 */
1280 
1281 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1282 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1283 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1284 
1285 out:
1286 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1287 }
1288 
1289 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1290 {
1291 	unsigned long flags;
1292 	int q;
1293 
1294 	/* no need to queue a new dummy packet if one is already pending */
1295 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1296 		return 0;
1297 
1298 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1299 
1300 	spin_lock_irqsave(&wl->wl_lock, flags);
1301 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1302 	wl->tx_queue_count[q]++;
1303 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1304 
1305 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1306 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1307 		return wlcore_tx_work_locked(wl);
1308 
1309 	/*
1310 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1311 	 * interrupt handler function
1312 	 */
1313 	return 0;
1314 }
1315 
1316 /*
1317  * The size of the dummy packet should be at least 1400 bytes. However, in
1318  * order to minimize the number of bus transactions, aligning it to 512-byte
1319  * boundaries could be beneficial, performance-wise.
1320  */
1321 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
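/* With the values above, ALIGN(1400, 512) works out to 1536 bytes. */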
1322 
1323 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1324 {
1325 	struct sk_buff *skb;
1326 	struct ieee80211_hdr_3addr *hdr;
1327 	unsigned int dummy_packet_size;
1328 
1329 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1330 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1331 
1332 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1333 	if (!skb) {
1334 		wl1271_warning("Failed to allocate a dummy packet skb");
1335 		return NULL;
1336 	}
1337 
1338 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1339 
1340 	hdr = skb_put_zero(skb, sizeof(*hdr));
1341 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1342 					 IEEE80211_STYPE_NULLFUNC |
1343 					 IEEE80211_FCTL_TODS);
1344 
1345 	skb_put_zero(skb, dummy_packet_size);
1346 
1347 	/* Dummy packets require the TID to be management */
1348 	skb->priority = WL1271_TID_MGMT;
1349 
1350 	/* Initialize all fields that might be used */
1351 	skb_set_queue_mapping(skb, 0);
1352 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1353 
1354 	return skb;
1355 }
1356 
1357 
1358 static int
1359 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1360 {
1361 	int num_fields = 0, in_field = 0, fields_size = 0;
1362 	int i, pattern_len = 0;
1363 
1364 	if (!p->mask) {
1365 		wl1271_warning("No mask in WoWLAN pattern");
1366 		return -EINVAL;
1367 	}
1368 
1369 	/*
1370 	 * The pattern is broken up into segments of bytes at different offsets
1371 	 * that need to be checked by the FW filter. Each segment is called
1372 	 * a field in the FW API. We verify that the total number of fields
1373 	 * required for this pattern won't exceed the FW limit (8) and that
1374 	 * the total fields buffer won't exceed the FW size limit.
1375 	 * Note that if a pattern crosses the Ethernet/IP header
1376 	 * boundary, a new field is required.
1377 	 */
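	/*
	 * For example, a contiguous run of four masked bytes, two on each
	 * side of the Ethernet/IP boundary, is counted as two 2-byte fields
	 * and contributes 4 + 2 * RX_FILTER_FIELD_OVERHEAD bytes to
	 * fields_size.
	 */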
1378 	for (i = 0; i < p->pattern_len; i++) {
1379 		if (test_bit(i, (unsigned long *)p->mask)) {
1380 			if (!in_field) {
1381 				in_field = 1;
1382 				pattern_len = 1;
1383 			} else {
1384 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1385 					num_fields++;
1386 					fields_size += pattern_len +
1387 						RX_FILTER_FIELD_OVERHEAD;
1388 					pattern_len = 1;
1389 				} else
1390 					pattern_len++;
1391 			}
1392 		} else {
1393 			if (in_field) {
1394 				in_field = 0;
1395 				fields_size += pattern_len +
1396 					RX_FILTER_FIELD_OVERHEAD;
1397 				num_fields++;
1398 			}
1399 		}
1400 	}
1401 
1402 	if (in_field) {
1403 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1404 		num_fields++;
1405 	}
1406 
1407 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1408 		wl1271_warning("RX Filter too complex. Too many segments");
1409 		return -EINVAL;
1410 	}
1411 
1412 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1413 		wl1271_warning("RX filter pattern is too big");
1414 		return -E2BIG;
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1421 {
1422 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1423 }
1424 
1425 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1426 {
1427 	int i;
1428 
1429 	if (filter == NULL)
1430 		return;
1431 
1432 	for (i = 0; i < filter->num_fields; i++)
1433 		kfree(filter->fields[i].pattern);
1434 
1435 	kfree(filter);
1436 }
1437 
1438 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1439 				 u16 offset, u8 flags,
1440 				 const u8 *pattern, u8 len)
1441 {
1442 	struct wl12xx_rx_filter_field *field;
1443 
1444 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1445 		wl1271_warning("Max fields per RX filter. can't alloc another");
1446 		return -EINVAL;
1447 	}
1448 
1449 	field = &filter->fields[filter->num_fields];
1450 
1451 	field->pattern = kzalloc(len, GFP_KERNEL);
1452 	if (!field->pattern) {
1453 		wl1271_warning("Failed to allocate RX filter pattern");
1454 		return -ENOMEM;
1455 	}
1456 
1457 	filter->num_fields++;
1458 
1459 	field->offset = cpu_to_le16(offset);
1460 	field->flags = flags;
1461 	field->len = len;
1462 	memcpy(field->pattern, pattern, len);
1463 
1464 	return 0;
1465 }
1466 
1467 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1468 {
1469 	int i, fields_size = 0;
1470 
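	/*
	 * In the flattened layout produced by wl1271_rx_filter_flatten_fields()
	 * each field is the field struct with the pattern pointer replaced by
	 * the pattern bytes themselves, hence the "- sizeof(u8 *)" below.
	 */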
1471 	for (i = 0; i < filter->num_fields; i++)
1472 		fields_size += filter->fields[i].len +
1473 			sizeof(struct wl12xx_rx_filter_field) -
1474 			sizeof(u8 *);
1475 
1476 	return fields_size;
1477 }
1478 
1479 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1480 				    u8 *buf)
1481 {
1482 	int i;
1483 	struct wl12xx_rx_filter_field *field;
1484 
1485 	for (i = 0; i < filter->num_fields; i++) {
1486 		field = (struct wl12xx_rx_filter_field *)buf;
1487 
1488 		field->offset = filter->fields[i].offset;
1489 		field->flags = filter->fields[i].flags;
1490 		field->len = filter->fields[i].len;
1491 
1492 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1493 		buf += sizeof(struct wl12xx_rx_filter_field) -
1494 			sizeof(u8 *) + field->len;
1495 	}
1496 }
1497 
1498 /*
1499  * Allocates an RX filter, returned through f,
1500  * which needs to be freed using wl1271_rx_filter_free()
1501  */
1502 static int
1503 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1504 					   struct wl12xx_rx_filter **f)
1505 {
1506 	int i, j, ret = 0;
1507 	struct wl12xx_rx_filter *filter;
1508 	u16 offset;
1509 	u8 flags, len;
1510 
1511 	filter = wl1271_rx_filter_alloc();
1512 	if (!filter) {
1513 		wl1271_warning("Failed to alloc rx filter");
1514 		ret = -ENOMEM;
1515 		goto err;
1516 	}
1517 
1518 	i = 0;
1519 	while (i < p->pattern_len) {
1520 		if (!test_bit(i, (unsigned long *)p->mask)) {
1521 			i++;
1522 			continue;
1523 		}
1524 
1525 		for (j = i; j < p->pattern_len; j++) {
1526 			if (!test_bit(j, (unsigned long *)p->mask))
1527 				break;
1528 
1529 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1530 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1531 				break;
1532 		}
1533 
1534 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1535 			offset = i;
1536 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1537 		} else {
1538 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1539 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1540 		}
1541 
1542 		len = j - i;
1543 
1544 		ret = wl1271_rx_filter_alloc_field(filter,
1545 						   offset,
1546 						   flags,
1547 						   &p->pattern[i], len);
1548 		if (ret)
1549 			goto err;
1550 
1551 		i = j;
1552 	}
1553 
1554 	filter->action = FILTER_SIGNAL;
1555 
1556 	*f = filter;
1557 	return 0;
1558 
1559 err:
1560 	wl1271_rx_filter_free(filter);
1561 	*f = NULL;
1562 
1563 	return ret;
1564 }
1565 
1566 static int wl1271_configure_wowlan(struct wl1271 *wl,
1567 				   struct cfg80211_wowlan *wow)
1568 {
1569 	int i, ret;
1570 
1571 	if (!wow || wow->any || !wow->n_patterns) {
1572 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1573 							  FILTER_SIGNAL);
1574 		if (ret)
1575 			goto out;
1576 
1577 		ret = wl1271_rx_filter_clear_all(wl);
1578 		if (ret)
1579 			goto out;
1580 
1581 		return 0;
1582 	}
1583 
1584 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1585 		return -EINVAL;
1586 
1587 	/* Validate all incoming patterns before clearing current FW state */
1588 	for (i = 0; i < wow->n_patterns; i++) {
1589 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1590 		if (ret) {
1591 			wl1271_warning("Bad wowlan pattern %d", i);
1592 			return ret;
1593 		}
1594 	}
1595 
1596 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1597 	if (ret)
1598 		goto out;
1599 
1600 	ret = wl1271_rx_filter_clear_all(wl);
1601 	if (ret)
1602 		goto out;
1603 
1604 	/* Translate WoWLAN patterns into filters */
1605 	for (i = 0; i < wow->n_patterns; i++) {
1606 		struct cfg80211_pkt_pattern *p;
1607 		struct wl12xx_rx_filter *filter = NULL;
1608 
1609 		p = &wow->patterns[i];
1610 
1611 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1612 		if (ret) {
1613 			wl1271_warning("Failed to create an RX filter from "
1614 				       "wowlan pattern %d", i);
1615 			goto out;
1616 		}
1617 
1618 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1619 
1620 		wl1271_rx_filter_free(filter);
1621 		if (ret)
1622 			goto out;
1623 	}
1624 
1625 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1626 
1627 out:
1628 	return ret;
1629 }
1630 
1631 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1632 					struct wl12xx_vif *wlvif,
1633 					struct cfg80211_wowlan *wow)
1634 {
1635 	int ret = 0;
1636 
1637 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1638 		goto out;
1639 
1640 	ret = wl1271_configure_wowlan(wl, wow);
1641 	if (ret < 0)
1642 		goto out;
1643 
1644 	if ((wl->conf.conn.suspend_wake_up_event ==
1645 	     wl->conf.conn.wake_up_event) &&
1646 	    (wl->conf.conn.suspend_listen_interval ==
1647 	     wl->conf.conn.listen_interval))
1648 		goto out;
1649 
1650 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1651 				    wl->conf.conn.suspend_wake_up_event,
1652 				    wl->conf.conn.suspend_listen_interval);
1653 
1654 	if (ret < 0)
1655 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1656 out:
1657 	return ret;
1658 
1659 }
1660 
1661 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1662 					struct wl12xx_vif *wlvif,
1663 					struct cfg80211_wowlan *wow)
1664 {
1665 	int ret = 0;
1666 
1667 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1668 		goto out;
1669 
1670 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1671 	if (ret < 0)
1672 		goto out;
1673 
1674 	ret = wl1271_configure_wowlan(wl, wow);
1675 	if (ret < 0)
1676 		goto out;
1677 
1678 out:
1679 	return ret;
1680 
1681 }
1682 
1683 static int wl1271_configure_suspend(struct wl1271 *wl,
1684 				    struct wl12xx_vif *wlvif,
1685 				    struct cfg80211_wowlan *wow)
1686 {
1687 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1688 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1689 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1690 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1691 	return 0;
1692 }
1693 
1694 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1695 {
1696 	int ret = 0;
1697 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1698 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1699 
1700 	if ((!is_ap) && (!is_sta))
1701 		return;
1702 
1703 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1704 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1705 		return;
1706 
1707 	wl1271_configure_wowlan(wl, NULL);
1708 
1709 	if (is_sta) {
1710 		if ((wl->conf.conn.suspend_wake_up_event ==
1711 		     wl->conf.conn.wake_up_event) &&
1712 		    (wl->conf.conn.suspend_listen_interval ==
1713 		     wl->conf.conn.listen_interval))
1714 			return;
1715 
1716 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1717 				    wl->conf.conn.wake_up_event,
1718 				    wl->conf.conn.listen_interval);
1719 
1720 		if (ret < 0)
1721 			wl1271_error("resume: wake up conditions failed: %d",
1722 				     ret);
1723 
1724 	} else if (is_ap) {
1725 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1726 	}
1727 }
1728 
1729 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1730 					    struct cfg80211_wowlan *wow)
1731 {
1732 	struct wl1271 *wl = hw->priv;
1733 	struct wl12xx_vif *wlvif;
1734 	unsigned long flags;
1735 	int ret;
1736 
1737 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1738 	WARN_ON(!wow);
1739 
1740 	/* we want to perform the recovery before suspending */
1741 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1742 		wl1271_warning("postponing suspend to perform recovery");
1743 		return -EBUSY;
1744 	}
1745 
1746 	wl1271_tx_flush(wl);
1747 
1748 	mutex_lock(&wl->mutex);
1749 
1750 	ret = pm_runtime_get_sync(wl->dev);
1751 	if (ret < 0) {
1752 		pm_runtime_put_noidle(wl->dev);
1753 		mutex_unlock(&wl->mutex);
1754 		return ret;
1755 	}
1756 
1757 	wl->wow_enabled = true;
1758 	wl12xx_for_each_wlvif(wl, wlvif) {
1759 		if (wlcore_is_p2p_mgmt(wlvif))
1760 			continue;
1761 
1762 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1763 		if (ret < 0) {
1764 			mutex_unlock(&wl->mutex);
1765 			wl1271_warning("couldn't prepare device to suspend");
1766 			return ret;
1767 		}
1768 	}
1769 
1770 	/* disable fast link flow control notifications from FW */
1771 	ret = wlcore_hw_interrupt_notify(wl, false);
1772 	if (ret < 0)
1773 		goto out_sleep;
1774 
1775 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1776 	ret = wlcore_hw_rx_ba_filter(wl,
1777 				     !!wl->conf.conn.suspend_rx_ba_activity);
1778 	if (ret < 0)
1779 		goto out_sleep;
1780 
1781 out_sleep:
1782 	pm_runtime_put_noidle(wl->dev);
1783 	mutex_unlock(&wl->mutex);
1784 
1785 	if (ret < 0) {
1786 		wl1271_warning("couldn't prepare device to suspend");
1787 		return ret;
1788 	}
1789 
1790 	/* flush any remaining work */
1791 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1792 
1793 	flush_work(&wl->tx_work);
1794 
1795 	/*
1796 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1797 	 * it on resume anyway.
1798 	 */
1799 	cancel_delayed_work(&wl->tx_watchdog_work);
1800 
1801 	/*
1802 	 * set suspended flag to avoid triggering a new threaded_irq
1803 	 * work.
1804 	 */
1805 	spin_lock_irqsave(&wl->wl_lock, flags);
1806 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1807 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1808 
1809 	return pm_runtime_force_suspend(wl->dev);
1810 }
1811 
1812 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1813 {
1814 	struct wl1271 *wl = hw->priv;
1815 	struct wl12xx_vif *wlvif;
1816 	unsigned long flags;
1817 	bool run_irq_work = false, pending_recovery;
1818 	int ret;
1819 
1820 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1821 		     wl->wow_enabled);
1822 	WARN_ON(!wl->wow_enabled);
1823 
1824 	ret = pm_runtime_force_resume(wl->dev);
1825 	if (ret < 0) {
1826 		wl1271_error("ELP wakeup failure!");
1827 		goto out_sleep;
1828 	}
1829 
1830 	/*
1831 	 * re-enable irq_work enqueuing, and call irq_work directly if
1832 	 * there is pending work.
1833 	 */
1834 	spin_lock_irqsave(&wl->wl_lock, flags);
1835 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1836 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1837 		run_irq_work = true;
1838 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1839 
1840 	mutex_lock(&wl->mutex);
1841 
1842 	/* test the recovery flag before calling any SDIO functions */
1843 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1844 				    &wl->flags);
1845 
1846 	if (run_irq_work) {
1847 		wl1271_debug(DEBUG_MAC80211,
1848 			     "run postponed irq_work directly");
1849 
1850 		/* don't talk to the HW if recovery is pending */
1851 		if (!pending_recovery) {
1852 			ret = wlcore_irq_locked(wl);
1853 			if (ret)
1854 				wl12xx_queue_recovery_work(wl);
1855 		}
1856 
1857 		wlcore_enable_interrupts(wl);
1858 	}
1859 
1860 	if (pending_recovery) {
1861 		wl1271_warning("queuing forgotten recovery on resume");
1862 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1863 		goto out_sleep;
1864 	}
1865 
1866 	ret = pm_runtime_get_sync(wl->dev);
1867 	if (ret < 0) {
1868 		pm_runtime_put_noidle(wl->dev);
1869 		goto out;
1870 	}
1871 
1872 	wl12xx_for_each_wlvif(wl, wlvif) {
1873 		if (wlcore_is_p2p_mgmt(wlvif))
1874 			continue;
1875 
1876 		wl1271_configure_resume(wl, wlvif);
1877 	}
1878 
1879 	ret = wlcore_hw_interrupt_notify(wl, true);
1880 	if (ret < 0)
1881 		goto out_sleep;
1882 
1883 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1884 	ret = wlcore_hw_rx_ba_filter(wl, false);
1885 	if (ret < 0)
1886 		goto out_sleep;
1887 
1888 out_sleep:
1889 	pm_runtime_mark_last_busy(wl->dev);
1890 	pm_runtime_put_autosuspend(wl->dev);
1891 
1892 out:
1893 	wl->wow_enabled = false;
1894 
1895 	/*
1896 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1897 	 * That way we avoid possible conditions where Tx-complete interrupts
1898 	 * fail to arrive and we perform a spurious recovery.
1899 	 */
1900 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1901 	mutex_unlock(&wl->mutex);
1902 
1903 	return 0;
1904 }
1905 
1906 static int wl1271_op_start(struct ieee80211_hw *hw)
1907 {
1908 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1909 
1910 	/*
1911 	 * We have to delay the booting of the hardware because
1912 	 * we need to know the local MAC address before downloading and
1913 	 * initializing the firmware. The MAC address cannot be changed
1914 	 * after boot, and without the proper MAC address, the firmware
1915 	 * will not function properly.
1916 	 *
1917 	 * The MAC address is first known when the corresponding interface
1918 	 * is added. That is where we will initialize the hardware.
1919 	 */
1920 
1921 	return 0;
1922 }
1923 
1924 static void wlcore_op_stop_locked(struct wl1271 *wl)
1925 {
1926 	int i;
1927 
1928 	if (wl->state == WLCORE_STATE_OFF) {
1929 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1930 					&wl->flags))
1931 			wlcore_enable_interrupts(wl);
1932 
1933 		return;
1934 	}
1935 
1936 	/*
1937 	 * this must be before the cancel_work calls below, so that the work
1938 	 * functions don't perform further work.
1939 	 */
1940 	wl->state = WLCORE_STATE_OFF;
1941 
1942 	/*
1943 	 * Use the nosync variant to disable interrupts, so the mutex can be
1944 	 * held while doing so without deadlocking.
1945 	 */
1946 	wlcore_disable_interrupts_nosync(wl);
1947 
1948 	mutex_unlock(&wl->mutex);
1949 
1950 	wlcore_synchronize_interrupts(wl);
1951 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1952 		cancel_work_sync(&wl->recovery_work);
1953 	wl1271_flush_deferred_work(wl);
1954 	cancel_delayed_work_sync(&wl->scan_complete_work);
1955 	cancel_work_sync(&wl->netstack_work);
1956 	cancel_work_sync(&wl->tx_work);
1957 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1958 
1959 	/* let's notify MAC80211 about the remaining pending TX frames */
1960 	mutex_lock(&wl->mutex);
1961 	wl12xx_tx_reset(wl);
1962 
1963 	wl1271_power_off(wl);
1964 	/*
1965 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1966 	 * an interrupt storm. Now that the power is down, it is safe to
1967 	 * re-enable interrupts to balance the disable depth
1968 	 */
1969 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1970 		wlcore_enable_interrupts(wl);
1971 
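	/* reset the driver state back to its defaults */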
1972 	wl->band = NL80211_BAND_2GHZ;
1973 
1974 	wl->rx_counter = 0;
1975 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1976 	wl->channel_type = NL80211_CHAN_NO_HT;
1977 	wl->tx_blocks_available = 0;
1978 	wl->tx_allocated_blocks = 0;
1979 	wl->tx_results_count = 0;
1980 	wl->tx_packets_count = 0;
1981 	wl->time_offset = 0;
1982 	wl->ap_fw_ps_map = 0;
1983 	wl->ap_ps_map = 0;
1984 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1985 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1986 	memset(wl->links_map, 0, sizeof(wl->links_map));
1987 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1988 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1989 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1990 	wl->active_sta_count = 0;
1991 	wl->active_link_count = 0;
1992 
1993 	/* The system link is always allocated */
1994 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1995 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1996 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1997 
1998 	/*
1999 	 * this is performed after the cancel_work calls and the associated
2000 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2001 	 * get executed before all these vars have been reset.
2002 	 */
2003 	wl->flags = 0;
2004 
2005 	wl->tx_blocks_freed = 0;
2006 
2007 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2008 		wl->tx_pkts_freed[i] = 0;
2009 		wl->tx_allocated_pkts[i] = 0;
2010 	}
2011 
2012 	wl1271_debugfs_reset(wl);
2013 
2014 	kfree(wl->raw_fw_status);
2015 	wl->raw_fw_status = NULL;
2016 	kfree(wl->fw_status);
2017 	wl->fw_status = NULL;
2018 	kfree(wl->tx_res_if);
2019 	wl->tx_res_if = NULL;
2020 	kfree(wl->target_mem_map);
2021 	wl->target_mem_map = NULL;
2022 
2023 	/*
2024 	 * FW channels must be re-calibrated after recovery;
2025 	 * save the current Reg-Domain channel configuration and clear it.
2026 	 */
2027 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2028 	       sizeof(wl->reg_ch_conf_pending));
2029 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2030 }
2031 
2032 static void wlcore_op_stop(struct ieee80211_hw *hw)
2033 {
2034 	struct wl1271 *wl = hw->priv;
2035 
2036 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2037 
2038 	mutex_lock(&wl->mutex);
2039 
2040 	wlcore_op_stop_locked(wl);
2041 
2042 	mutex_unlock(&wl->mutex);
2043 }
2044 
2045 static void wlcore_channel_switch_work(struct work_struct *work)
2046 {
2047 	struct delayed_work *dwork;
2048 	struct wl1271 *wl;
2049 	struct ieee80211_vif *vif;
2050 	struct wl12xx_vif *wlvif;
2051 	int ret;
2052 
2053 	dwork = to_delayed_work(work);
2054 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2055 	wl = wlvif->wl;
2056 
2057 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2058 
2059 	mutex_lock(&wl->mutex);
2060 
2061 	if (unlikely(wl->state != WLCORE_STATE_ON))
2062 		goto out;
2063 
2064 	/* check the channel switch is still ongoing */
2065 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2066 		goto out;
2067 
2068 	vif = wl12xx_wlvif_to_vif(wlvif);
2069 	ieee80211_chswitch_done(vif, false);
2070 
2071 	ret = pm_runtime_get_sync(wl->dev);
2072 	if (ret < 0) {
2073 		pm_runtime_put_noidle(wl->dev);
2074 		goto out;
2075 	}
2076 
2077 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2078 
2079 	pm_runtime_mark_last_busy(wl->dev);
2080 	pm_runtime_put_autosuspend(wl->dev);
2081 out:
2082 	mutex_unlock(&wl->mutex);
2083 }
2084 
2085 static void wlcore_connection_loss_work(struct work_struct *work)
2086 {
2087 	struct delayed_work *dwork;
2088 	struct wl1271 *wl;
2089 	struct ieee80211_vif *vif;
2090 	struct wl12xx_vif *wlvif;
2091 
2092 	dwork = to_delayed_work(work);
2093 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2094 	wl = wlvif->wl;
2095 
2096 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2097 
2098 	mutex_lock(&wl->mutex);
2099 
2100 	if (unlikely(wl->state != WLCORE_STATE_ON))
2101 		goto out;
2102 
2103 	/* Call mac80211 connection loss */
2104 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2105 		goto out;
2106 
2107 	vif = wl12xx_wlvif_to_vif(wlvif);
2108 	ieee80211_connection_loss(vif);
2109 out:
2110 	mutex_unlock(&wl->mutex);
2111 }
2112 
2113 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2114 {
2115 	struct delayed_work *dwork;
2116 	struct wl1271 *wl;
2117 	struct wl12xx_vif *wlvif;
2118 	unsigned long time_spare;
2119 	int ret;
2120 
2121 	dwork = to_delayed_work(work);
2122 	wlvif = container_of(dwork, struct wl12xx_vif,
2123 			     pending_auth_complete_work);
2124 	wl = wlvif->wl;
2125 
2126 	mutex_lock(&wl->mutex);
2127 
2128 	if (unlikely(wl->state != WLCORE_STATE_ON))
2129 		goto out;
2130 
2131 	/*
2132 	 * Make sure a second really passed since the last auth reply. Maybe
2133 	 * a second auth reply arrived while we were stuck on the mutex.
2134 	 * Check for a little less than the timeout to protect from scheduler
2135 	 * irregularities.
2136 	 */
2137 	time_spare = jiffies +
2138 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2139 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2140 		goto out;
2141 
2142 	ret = pm_runtime_get_sync(wl->dev);
2143 	if (ret < 0) {
2144 		pm_runtime_put_noidle(wl->dev);
2145 		goto out;
2146 	}
2147 
2148 	/* cancel the ROC if active */
2149 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2150 
2151 	pm_runtime_mark_last_busy(wl->dev);
2152 	pm_runtime_put_autosuspend(wl->dev);
2153 out:
2154 	mutex_unlock(&wl->mutex);
2155 }
2156 
2157 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2158 {
2159 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2160 					WL12XX_MAX_RATE_POLICIES);
2161 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2162 		return -EBUSY;
2163 
2164 	__set_bit(policy, wl->rate_policies_map);
2165 	*idx = policy;
2166 	return 0;
2167 }
2168 
2169 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2170 {
2171 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2172 		return;
2173 
2174 	__clear_bit(*idx, wl->rate_policies_map);
2175 	*idx = WL12XX_MAX_RATE_POLICIES;
2176 }
2177 
2178 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2179 {
2180 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2181 					WLCORE_MAX_KLV_TEMPLATES);
2182 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2183 		return -EBUSY;
2184 
2185 	__set_bit(policy, wl->klv_templates_map);
2186 	*idx = policy;
2187 	return 0;
2188 }
2189 
2190 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2191 {
2192 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2193 		return;
2194 
2195 	__clear_bit(*idx, wl->klv_templates_map);
2196 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2197 }
2198 
2199 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2200 {
2201 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2202 
2203 	switch (wlvif->bss_type) {
2204 	case BSS_TYPE_AP_BSS:
2205 		if (wlvif->p2p)
2206 			return WL1271_ROLE_P2P_GO;
2207 		else if (ieee80211_vif_is_mesh(vif))
2208 			return WL1271_ROLE_MESH_POINT;
2209 		else
2210 			return WL1271_ROLE_AP;
2211 
2212 	case BSS_TYPE_STA_BSS:
2213 		if (wlvif->p2p)
2214 			return WL1271_ROLE_P2P_CL;
2215 		else
2216 			return WL1271_ROLE_STA;
2217 
2218 	case BSS_TYPE_IBSS:
2219 		return WL1271_ROLE_IBSS;
2220 
2221 	default:
2222 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2223 	}
2224 	return WL12XX_INVALID_ROLE_TYPE;
2225 }
2226 
2227 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2228 {
2229 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2230 	int i;
2231 
2232 	/* clear everything but the persistent data */
2233 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2234 
2235 	switch (ieee80211_vif_type_p2p(vif)) {
2236 	case NL80211_IFTYPE_P2P_CLIENT:
2237 		wlvif->p2p = 1;
2238 		/* fall-through */
2239 	case NL80211_IFTYPE_STATION:
2240 	case NL80211_IFTYPE_P2P_DEVICE:
2241 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2242 		break;
2243 	case NL80211_IFTYPE_ADHOC:
2244 		wlvif->bss_type = BSS_TYPE_IBSS;
2245 		break;
2246 	case NL80211_IFTYPE_P2P_GO:
2247 		wlvif->p2p = 1;
2248 		/* fall-through */
2249 	case NL80211_IFTYPE_AP:
2250 	case NL80211_IFTYPE_MESH_POINT:
2251 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2252 		break;
2253 	default:
2254 		wlvif->bss_type = MAX_BSS_TYPE;
2255 		return -EOPNOTSUPP;
2256 	}
2257 
2258 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2259 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2260 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2261 
2262 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2263 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2264 		/* init sta/ibss data */
2265 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2266 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2267 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2268 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2269 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2270 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2271 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2272 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2273 	} else {
2274 		/* init ap data */
2275 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2276 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2277 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2278 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2279 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2280 			wl12xx_allocate_rate_policy(wl,
2281 						&wlvif->ap.ucast_rate_idx[i]);
2282 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2283 		/*
2284 		 * TODO: check if basic_rate shouldn't be
2285 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2286 		 * instead (the same thing for STA above).
2287 		 */
2288 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2289 		/* TODO: this seems to be used only for STA, check it */
2290 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2291 	}
2292 
2293 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2294 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2295 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2296 
2297 	/*
2298 	 * mac80211 configures some values globally, while we treat them
2299 	 * per-interface. Thus, on init, we have to copy them from wl.
2300 	 */
2301 	wlvif->band = wl->band;
2302 	wlvif->channel = wl->channel;
2303 	wlvif->power_level = wl->power_level;
2304 	wlvif->channel_type = wl->channel_type;
2305 
2306 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2307 		  wl1271_rx_streaming_enable_work);
2308 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2309 		  wl1271_rx_streaming_disable_work);
2310 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2311 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2312 			  wlcore_channel_switch_work);
2313 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2314 			  wlcore_connection_loss_work);
2315 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2316 			  wlcore_pending_auth_complete_work);
2317 	INIT_LIST_HEAD(&wlvif->list);
2318 
2319 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2320 	return 0;
2321 }
2322 
2323 static int wl12xx_init_fw(struct wl1271 *wl)
2324 {
2325 	int retries = WL1271_BOOT_RETRIES;
2326 	bool booted = false;
2327 	struct wiphy *wiphy = wl->hw->wiphy;
2328 	int ret;
2329 
2330 	while (retries) {
2331 		retries--;
2332 		ret = wl12xx_chip_wakeup(wl, false);
2333 		if (ret < 0)
2334 			goto power_off;
2335 
2336 		ret = wl->ops->boot(wl);
2337 		if (ret < 0)
2338 			goto power_off;
2339 
2340 		ret = wl1271_hw_init(wl);
2341 		if (ret < 0)
2342 			goto irq_disable;
2343 
2344 		booted = true;
2345 		break;
2346 
2347 irq_disable:
2348 		mutex_unlock(&wl->mutex);
2349 		/* Unlocking the mutex in the middle of handling is
2350 		   inherently unsafe. In this case we deem it safe to do,
2351 		   because we need to let any possibly pending IRQ out of
2352 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2353 		   work function will not do anything.) Also, any other
2354 		   possible concurrent operations will fail due to the
2355 		   current state, hence the wl1271 struct should be safe. */
2356 		wlcore_disable_interrupts(wl);
2357 		wl1271_flush_deferred_work(wl);
2358 		cancel_work_sync(&wl->netstack_work);
2359 		mutex_lock(&wl->mutex);
2360 power_off:
2361 		wl1271_power_off(wl);
2362 	}
2363 
2364 	if (!booted) {
2365 		wl1271_error("firmware boot failed despite %d retries",
2366 			     WL1271_BOOT_RETRIES);
2367 		goto out;
2368 	}
2369 
2370 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2371 
2372 	/* update hw/fw version info in wiphy struct */
2373 	wiphy->hw_version = wl->chip.id;
2374 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2375 		sizeof(wiphy->fw_version));
2376 
2377 	/*
2378 	 * Now we know if 11a is supported (info from the NVS), so disable
2379 	 * 11a channels if not supported
2380 	 */
2381 	if (!wl->enable_11a)
2382 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2383 
2384 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2385 		     wl->enable_11a ? "" : "not ");
2386 
2387 	wl->state = WLCORE_STATE_ON;
2388 out:
2389 	return ret;
2390 }
2391 
2392 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2393 {
2394 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2395 }
2396 
2397 /*
2398  * Check whether a fw switch (i.e. moving from one loaded
2399  * fw to another) is needed. This function is also responsible
2400  * for updating wl->last_vif_count, so it must be called before
2401  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2402  * will be used).
2403  */
2404 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2405 				  struct vif_counter_data vif_counter_data,
2406 				  bool add)
2407 {
2408 	enum wl12xx_fw_type current_fw = wl->fw_type;
2409 	u8 vif_count = vif_counter_data.counter;
2410 
2411 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2412 		return false;
2413 
2414 	/* increase the vif count if this is a new vif */
2415 	if (add && !vif_counter_data.cur_vif_running)
2416 		vif_count++;
2417 
2418 	wl->last_vif_count = vif_count;
2419 
2420 	/* no need for fw change if the device is OFF */
2421 	if (wl->state == WLCORE_STATE_OFF)
2422 		return false;
2423 
2424 	/* no need for fw change if a single fw is used */
2425 	if (!wl->mr_fw_name)
2426 		return false;
2427 
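	/* multiple vifs need the multi-role fw; a single vif the single-role fw */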
2428 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2429 		return true;
2430 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2431 		return true;
2432 
2433 	return false;
2434 }
2435 
2436 /*
2437  * Enter "forced psm". Make sure the sta is in psm against the ap,
2438  * to make the fw switch a bit more disconnection-persistent.
2439  */
2440 static void wl12xx_force_active_psm(struct wl1271 *wl)
2441 {
2442 	struct wl12xx_vif *wlvif;
2443 
2444 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2445 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2446 	}
2447 }
2448 
2449 struct wlcore_hw_queue_iter_data {
2450 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2451 	/* current vif */
2452 	struct ieee80211_vif *vif;
2453 	/* is the current vif among those iterated */
2454 	bool cur_running;
2455 };
2456 
2457 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2458 				 struct ieee80211_vif *vif)
2459 {
2460 	struct wlcore_hw_queue_iter_data *iter_data = data;
2461 
2462 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2463 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2464 		return;
2465 
2466 	if (iter_data->cur_running || vif == iter_data->vif) {
2467 		iter_data->cur_running = true;
2468 		return;
2469 	}
2470 
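	/* each vif owns a block of NUM_TX_QUEUES hw queues; mark this vif's block */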
2471 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2472 }
2473 
2474 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2475 					 struct wl12xx_vif *wlvif)
2476 {
2477 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2478 	struct wlcore_hw_queue_iter_data iter_data = {};
2479 	int i, q_base;
2480 
2481 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2482 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2483 		return 0;
2484 	}
2485 
2486 	iter_data.vif = vif;
2487 
2488 	/* mark all bits taken by active interfaces */
2489 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2490 					IEEE80211_IFACE_ITER_RESUME_ALL,
2491 					wlcore_hw_queue_iter, &iter_data);
2492 
2493 	/* the current vif is already running in mac80211 (resume/recovery) */
2494 	if (iter_data.cur_running) {
2495 		wlvif->hw_queue_base = vif->hw_queue[0];
2496 		wl1271_debug(DEBUG_MAC80211,
2497 			     "using pre-allocated hw queue base %d",
2498 			     wlvif->hw_queue_base);
2499 
2500 		/* the interface might have changed type */
2501 		goto adjust_cab_queue;
2502 	}
2503 
2504 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2505 				     WLCORE_NUM_MAC_ADDRESSES);
2506 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2507 		return -EBUSY;
2508 
2509 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2510 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2511 		     wlvif->hw_queue_base);
2512 
2513 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2514 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2515 		/* register hw queues in mac80211 */
2516 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2517 	}
2518 
2519 adjust_cab_queue:
2520 	/* the last places are reserved for cab queues per interface */
2521 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2522 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2523 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2524 	else
2525 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2526 
2527 	return 0;
2528 }
2529 
2530 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2531 				   struct ieee80211_vif *vif)
2532 {
2533 	struct wl1271 *wl = hw->priv;
2534 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2535 	struct vif_counter_data vif_count;
2536 	int ret = 0;
2537 	u8 role_type;
2538 
2539 	if (wl->plt) {
2540 		wl1271_error("Adding Interface not allowed while in PLT mode");
2541 		return -EBUSY;
2542 	}
2543 
2544 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2545 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2546 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2547 
2548 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2549 		     ieee80211_vif_type_p2p(vif), vif->addr);
2550 
2551 	wl12xx_get_vif_count(hw, vif, &vif_count);
2552 
2553 	mutex_lock(&wl->mutex);
2554 
2555 	/*
2556 	 * In some rare corner-case HW recovery scenarios it's possible to
2557 	 * get here before __wl1271_op_remove_interface has completed, so
2558 	 * bail out if that is the case.
2559 	 */
2560 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2561 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2562 		ret = -EBUSY;
2563 		goto out;
2564 	}
2565 
2566 
2567 	ret = wl12xx_init_vif_data(wl, vif);
2568 	if (ret < 0)
2569 		goto out;
2570 
2571 	wlvif->wl = wl;
2572 	role_type = wl12xx_get_role_type(wl, wlvif);
2573 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2574 		ret = -EINVAL;
2575 		goto out;
2576 	}
2577 
2578 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2579 	if (ret < 0)
2580 		goto out;
2581 
2582 	/*
2583 	 * TODO: after the nvs issue is solved, move this block
2584 	 * to start(), and make sure the driver is ON here.
2585 	 */
2586 	if (wl->state == WLCORE_STATE_OFF) {
2587 		/*
2588 		 * we still need this in order to configure the fw
2589 		 * while uploading the nvs
2590 		 */
2591 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2592 
2593 		ret = wl12xx_init_fw(wl);
2594 		if (ret < 0)
2595 			goto out;
2596 	}
2597 
2598 	/*
2599 	 * Call runtime PM only after possible wl12xx_init_fw() above
2600 	 * is done. Otherwise we do not have interrupts enabled.
2601 	 */
2602 	ret = pm_runtime_get_sync(wl->dev);
2603 	if (ret < 0) {
2604 		pm_runtime_put_noidle(wl->dev);
2605 		goto out_unlock;
2606 	}
2607 
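	/* a single/multi-role fw switch is performed via an intended recovery */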
2608 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2609 		wl12xx_force_active_psm(wl);
2610 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2611 		mutex_unlock(&wl->mutex);
2612 		wl1271_recovery_work(&wl->recovery_work);
2613 		return 0;
2614 	}
2615 
2616 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2617 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2618 					     role_type, &wlvif->role_id);
2619 		if (ret < 0)
2620 			goto out;
2621 
2622 		ret = wl1271_init_vif_specific(wl, vif);
2623 		if (ret < 0)
2624 			goto out;
2625 
2626 	} else {
2627 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2628 					     &wlvif->dev_role_id);
2629 		if (ret < 0)
2630 			goto out;
2631 
2632 		/* needed mainly for configuring rate policies */
2633 		ret = wl1271_sta_hw_init(wl, wlvif);
2634 		if (ret < 0)
2635 			goto out;
2636 	}
2637 
2638 	list_add(&wlvif->list, &wl->wlvif_list);
2639 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2640 
2641 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2642 		wl->ap_count++;
2643 	else
2644 		wl->sta_count++;
2645 out:
2646 	pm_runtime_mark_last_busy(wl->dev);
2647 	pm_runtime_put_autosuspend(wl->dev);
2648 out_unlock:
2649 	mutex_unlock(&wl->mutex);
2650 
2651 	return ret;
2652 }
2653 
2654 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2655 					 struct ieee80211_vif *vif,
2656 					 bool reset_tx_queues)
2657 {
2658 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2659 	int i, ret;
2660 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2661 
2662 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2663 
2664 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2665 		return;
2666 
2667 	/* because of hardware recovery, we may get here twice */
2668 	if (wl->state == WLCORE_STATE_OFF)
2669 		return;
2670 
2671 	wl1271_info("down");
2672 
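	/* if a scan is still running on this vif, abort it and notify mac80211 */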
2673 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2674 	    wl->scan_wlvif == wlvif) {
2675 		struct cfg80211_scan_info info = {
2676 			.aborted = true,
2677 		};
2678 
2679 		/*
2680 		 * Rearm the tx watchdog just before idling scan. This
2681 		 * prevents just-finished scans from triggering the watchdog
2682 		 */
2683 		wl12xx_rearm_tx_watchdog_locked(wl);
2684 
2685 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2686 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2687 		wl->scan_wlvif = NULL;
2688 		wl->scan.req = NULL;
2689 		ieee80211_scan_completed(wl->hw, &info);
2690 	}
2691 
2692 	if (wl->sched_vif == wlvif)
2693 		wl->sched_vif = NULL;
2694 
2695 	if (wl->roc_vif == vif) {
2696 		wl->roc_vif = NULL;
2697 		ieee80211_remain_on_channel_expired(wl->hw);
2698 	}
2699 
2700 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2701 		/* disable active roles */
2702 		ret = pm_runtime_get_sync(wl->dev);
2703 		if (ret < 0) {
2704 			pm_runtime_put_noidle(wl->dev);
2705 			goto deinit;
2706 		}
2707 
2708 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2709 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2710 			if (wl12xx_dev_role_started(wlvif))
2711 				wl12xx_stop_dev(wl, wlvif);
2712 		}
2713 
2714 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2715 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2716 			if (ret < 0)
2717 				goto deinit;
2718 		} else {
2719 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2720 			if (ret < 0)
2721 				goto deinit;
2722 		}
2723 
2724 		pm_runtime_mark_last_busy(wl->dev);
2725 		pm_runtime_put_autosuspend(wl->dev);
2726 	}
2727 deinit:
2728 	wl12xx_tx_reset_wlvif(wl, wlvif);
2729 
2730 	/* clear all hlids (except system_hlid) */
2731 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2732 
2733 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2734 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2735 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2736 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2737 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2738 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2739 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2740 	} else {
2741 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2742 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2743 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2744 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2745 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2746 			wl12xx_free_rate_policy(wl,
2747 						&wlvif->ap.ucast_rate_idx[i]);
2748 		wl1271_free_ap_keys(wl, wlvif);
2749 	}
2750 
2751 	dev_kfree_skb(wlvif->probereq);
2752 	wlvif->probereq = NULL;
2753 	if (wl->last_wlvif == wlvif)
2754 		wl->last_wlvif = NULL;
2755 	list_del(&wlvif->list);
2756 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2757 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2758 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2759 
2760 	if (is_ap)
2761 		wl->ap_count--;
2762 	else
2763 		wl->sta_count--;
2764 
2765 	/*
2766 	 * Last AP is gone, stations remain; configure sleep auth according to STA.
2767 	 * Don't do this on unintended recovery.
2768 	 */
2769 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2770 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2771 		goto unlock;
2772 
2773 	if (wl->ap_count == 0 && is_ap) {
2774 		/* mask ap events */
2775 		wl->event_mask &= ~wl->ap_event_mask;
2776 		wl1271_event_unmask(wl);
2777 	}
2778 
2779 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2780 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2781 		/* Configure for power according to debugfs */
2782 		if (sta_auth != WL1271_PSM_ILLEGAL)
2783 			wl1271_acx_sleep_auth(wl, sta_auth);
2784 		/* Configure for ELP power saving */
2785 		else
2786 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2787 	}
2788 
2789 unlock:
2790 	mutex_unlock(&wl->mutex);
2791 
2792 	del_timer_sync(&wlvif->rx_streaming_timer);
2793 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2794 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2795 	cancel_work_sync(&wlvif->rc_update_work);
2796 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2797 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2798 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2799 
2800 	mutex_lock(&wl->mutex);
2801 }
2802 
2803 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2804 				       struct ieee80211_vif *vif)
2805 {
2806 	struct wl1271 *wl = hw->priv;
2807 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2808 	struct wl12xx_vif *iter;
2809 	struct vif_counter_data vif_count;
2810 
2811 	wl12xx_get_vif_count(hw, vif, &vif_count);
2812 	mutex_lock(&wl->mutex);
2813 
2814 	if (wl->state == WLCORE_STATE_OFF ||
2815 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2816 		goto out;
2817 
2818 	/*
2819 	 * wl->vif can be null here if someone shuts down the interface
2820 	 * just when hardware recovery has been started.
2821 	 */
2822 	wl12xx_for_each_wlvif(wl, iter) {
2823 		if (iter != wlvif)
2824 			continue;
2825 
2826 		__wl1271_op_remove_interface(wl, vif, true);
2827 		break;
2828 	}
2829 	WARN_ON(iter != wlvif);
2830 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2831 		wl12xx_force_active_psm(wl);
2832 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2833 		wl12xx_queue_recovery_work(wl);
2834 	}
2835 out:
2836 	mutex_unlock(&wl->mutex);
2837 }
2838 
2839 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2840 				      struct ieee80211_vif *vif,
2841 				      enum nl80211_iftype new_type, bool p2p)
2842 {
2843 	struct wl1271 *wl = hw->priv;
2844 	int ret;
2845 
2846 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2847 	wl1271_op_remove_interface(hw, vif);
2848 
2849 	vif->type = new_type;
2850 	vif->p2p = p2p;
2851 	ret = wl1271_op_add_interface(hw, vif);
2852 
2853 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2854 	return ret;
2855 }
2856 
2857 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2858 {
2859 	int ret;
2860 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2861 
2862 	/*
2863 	 * One of the side effects of the JOIN command is that it clears
2864 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2865 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2866 	 * Currently the only valid scenario for JOIN during association
2867 	 * is on roaming, in which case we will also be given new keys.
2868 	 * Keep the below message for now, unless it starts bothering
2869 	 * users who really like to roam a lot :)
2870 	 */
2871 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2872 		wl1271_info("JOIN while associated.");
2873 
2874 	/* clear encryption type */
2875 	wlvif->encryption_type = KEY_NONE;
2876 
2877 	if (is_ibss)
2878 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2879 	else {
2880 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2881 			/*
2882 			 * TODO: this is an ugly workaround for a wl12xx fw
2883 			 * bug - we are not able to tx/rx after the first
2884 			 * start_sta, so make dummy start+stop calls,
2885 			 * and then call start_sta again.
2886 			 * This should be fixed in the fw.
2887 			 */
2888 			wl12xx_cmd_role_start_sta(wl, wlvif);
2889 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2890 		}
2891 
2892 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2893 	}
2894 
2895 	return ret;
2896 }
2897 
2898 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2899 			    int offset)
2900 {
2901 	u8 ssid_len;
2902 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2903 					 skb->len - offset);
2904 
2905 	if (!ptr) {
2906 		wl1271_error("No SSID in IEs!");
2907 		return -ENOENT;
2908 	}
2909 
2910 	ssid_len = ptr[1];
2911 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2912 		wl1271_error("SSID is too long!");
2913 		return -EINVAL;
2914 	}
2915 
2916 	wlvif->ssid_len = ssid_len;
2917 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2918 	return 0;
2919 }
2920 
2921 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2922 {
2923 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2924 	struct sk_buff *skb;
2925 	int ieoffset;
2926 
2927 	/* we currently only support setting the ssid from the ap probe req */
2928 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2929 		return -EINVAL;
2930 
2931 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2932 	if (!skb)
2933 		return -EINVAL;
2934 
2935 	ieoffset = offsetof(struct ieee80211_mgmt,
2936 			    u.probe_req.variable);
2937 	wl1271_ssid_set(wlvif, skb, ieoffset);
2938 	dev_kfree_skb(skb);
2939 
2940 	return 0;
2941 }
2942 
2943 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2944 			    struct ieee80211_bss_conf *bss_conf,
2945 			    u32 sta_rate_set)
2946 {
2947 	int ieoffset;
2948 	int ret;
2949 
2950 	wlvif->aid = bss_conf->aid;
2951 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2952 	wlvif->beacon_int = bss_conf->beacon_int;
2953 	wlvif->wmm_enabled = bss_conf->qos;
2954 
2955 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2956 
2957 	/*
2958 	 * With wl1271, we don't need to update the
2959 	 * beacon_int and dtim_period, because the firmware
2960 	 * updates them by itself when the first beacon is
2961 	 * received after a join.
2962 	 */
2963 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2964 	if (ret < 0)
2965 		return ret;
2966 
2967 	/*
2968 	 * Get a template for hardware connection maintenance
2969 	 */
2970 	dev_kfree_skb(wlvif->probereq);
2971 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2972 							wlvif,
2973 							NULL);
2974 	ieoffset = offsetof(struct ieee80211_mgmt,
2975 			    u.probe_req.variable);
2976 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2977 
2978 	/* enable the connection monitoring feature */
2979 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2980 	if (ret < 0)
2981 		return ret;
2982 
2983 	/*
2984 	 * The join command disables the keep-alive mode, shuts down its process,
2985 	 * and also clears the template config, so we need to reset it all after
2986 	 * the join. The acx_aid starts the keep-alive process, and the order
2987 	 * of the commands below is relevant.
2988 	 */
2989 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2990 	if (ret < 0)
2991 		return ret;
2992 
2993 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2994 	if (ret < 0)
2995 		return ret;
2996 
2997 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2998 	if (ret < 0)
2999 		return ret;
3000 
3001 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
3002 					   wlvif->sta.klv_template_id,
3003 					   ACX_KEEP_ALIVE_TPL_VALID);
3004 	if (ret < 0)
3005 		return ret;
3006 
3007 	/*
3008 	 * The default fw psm configuration is AUTO, while mac80211 default
3009 	 * setting is off (ACTIVE), so sync the fw with the correct value.
3010 	 */
3011 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3012 	if (ret < 0)
3013 		return ret;
3014 
3015 	if (sta_rate_set) {
3016 		wlvif->rate_set =
3017 			wl1271_tx_enabled_rates_get(wl,
3018 						    sta_rate_set,
3019 						    wlvif->band);
3020 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3021 		if (ret < 0)
3022 			return ret;
3023 	}
3024 
3025 	return ret;
3026 }
3027 
3028 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3029 {
3030 	int ret;
3031 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3032 
3033 	/* make sure we are associated (sta) */
3034 	if (sta &&
3035 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3036 		return false;
3037 
3038 	/* make sure we are joined (ibss) */
3039 	if (!sta &&
3040 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3041 		return false;
3042 
3043 	if (sta) {
3044 		/* use defaults when not associated */
3045 		wlvif->aid = 0;
3046 
3047 		/* free probe-request template */
3048 		dev_kfree_skb(wlvif->probereq);
3049 		wlvif->probereq = NULL;
3050 
3051 		/* disable connection monitor features */
3052 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3053 		if (ret < 0)
3054 			return ret;
3055 
3056 		/* Disable the keep-alive feature */
3057 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3058 		if (ret < 0)
3059 			return ret;
3060 
3061 		/* disable beacon filtering */
3062 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3063 		if (ret < 0)
3064 			return ret;
3065 	}
3066 
3067 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3068 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3069 
3070 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3071 		ieee80211_chswitch_done(vif, false);
3072 		cancel_delayed_work(&wlvif->channel_switch_work);
3073 	}
3074 
3075 	/* invalidate keep-alive template */
3076 	wl1271_acx_keep_alive_config(wl, wlvif,
3077 				     wlvif->sta.klv_template_id,
3078 				     ACX_KEEP_ALIVE_TPL_INVALID);
3079 
3080 	return 0;
3081 }
3082 
3083 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3084 {
3085 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3086 	wlvif->rate_set = wlvif->basic_rate_set;
3087 }
3088 
3089 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3090 				   bool idle)
3091 {
3092 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3093 
3094 	if (idle == cur_idle)
3095 		return;
3096 
3097 	if (idle) {
3098 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3099 	} else {
3100 		/* The current firmware only supports sched_scan in idle */
3101 		if (wl->sched_vif == wlvif)
3102 			wl->ops->sched_scan_stop(wl, wlvif);
3103 
3104 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3105 	}
3106 }
3107 
3108 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3109 			     struct ieee80211_conf *conf, u32 changed)
3110 {
3111 	int ret;
3112 
3113 	if (wlcore_is_p2p_mgmt(wlvif))
3114 		return 0;
3115 
3116 	if (conf->power_level != wlvif->power_level) {
3117 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3118 		if (ret < 0)
3119 			return ret;
3120 
3121 		wlvif->power_level = conf->power_level;
3122 	}
3123 
3124 	return 0;
3125 }
3126 
3127 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3128 {
3129 	struct wl1271 *wl = hw->priv;
3130 	struct wl12xx_vif *wlvif;
3131 	struct ieee80211_conf *conf = &hw->conf;
3132 	int ret = 0;
3133 
3134 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3135 		     " changed 0x%x",
3136 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3137 		     conf->power_level,
3138 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3139 			 changed);
3140 
3141 	mutex_lock(&wl->mutex);
3142 
3143 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3144 		wl->power_level = conf->power_level;
3145 
3146 	if (unlikely(wl->state != WLCORE_STATE_ON))
3147 		goto out;
3148 
3149 	ret = pm_runtime_get_sync(wl->dev);
3150 	if (ret < 0) {
3151 		pm_runtime_put_noidle(wl->dev);
3152 		goto out;
3153 	}
3154 
3155 	/* configure each interface */
3156 	wl12xx_for_each_wlvif(wl, wlvif) {
3157 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3158 		if (ret < 0)
3159 			goto out_sleep;
3160 	}
3161 
3162 out_sleep:
3163 	pm_runtime_mark_last_busy(wl->dev);
3164 	pm_runtime_put_autosuspend(wl->dev);
3165 
3166 out:
3167 	mutex_unlock(&wl->mutex);
3168 
3169 	return ret;
3170 }
3171 
3172 struct wl1271_filter_params {
3173 	bool enabled;
3174 	int mc_list_length;
3175 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3176 };
3177 
3178 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3179 				       struct netdev_hw_addr_list *mc_list)
3180 {
3181 	struct wl1271_filter_params *fp;
3182 	struct netdev_hw_addr *ha;
3183 
3184 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3185 	if (!fp) {
3186 		wl1271_error("Out of memory setting filters.");
3187 		return 0;
3188 	}
3189 
3190 	/* update multicast filtering parameters */
3191 	fp->mc_list_length = 0;
3192 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3193 		fp->enabled = false;
3194 	} else {
3195 		fp->enabled = true;
3196 		netdev_hw_addr_list_for_each(ha, mc_list) {
3197 			memcpy(fp->mc_list[fp->mc_list_length],
3198 					ha->addr, ETH_ALEN);
3199 			fp->mc_list_length++;
3200 		}
3201 	}
3202 
3203 	return (u64)(unsigned long)fp;
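	/* mac80211 hands this cookie back to wl1271_op_configure_filter() */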
3204 }
3205 
3206 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3207 				  FIF_FCSFAIL | \
3208 				  FIF_BCN_PRBRESP_PROMISC | \
3209 				  FIF_CONTROL | \
3210 				  FIF_OTHER_BSS)
3211 
3212 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3213 				       unsigned int changed,
3214 				       unsigned int *total, u64 multicast)
3215 {
3216 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3217 	struct wl1271 *wl = hw->priv;
3218 	struct wl12xx_vif *wlvif;
3219 
3220 	int ret;
3221 
3222 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3223 		     " total %x", changed, *total);
3224 
3225 	mutex_lock(&wl->mutex);
3226 
3227 	*total &= WL1271_SUPPORTED_FILTERS;
3228 	changed &= WL1271_SUPPORTED_FILTERS;
3229 
3230 	if (unlikely(wl->state != WLCORE_STATE_ON))
3231 		goto out;
3232 
3233 	ret = pm_runtime_get_sync(wl->dev);
3234 	if (ret < 0) {
3235 		pm_runtime_put_noidle(wl->dev);
3236 		goto out;
3237 	}
3238 
3239 	wl12xx_for_each_wlvif(wl, wlvif) {
3240 		if (wlcore_is_p2p_mgmt(wlvif))
3241 			continue;
3242 
3243 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3244 			if (*total & FIF_ALLMULTI)
3245 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3246 								   false,
3247 								   NULL, 0);
3248 			else if (fp)
3249 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3250 							fp->enabled,
3251 							fp->mc_list,
3252 							fp->mc_list_length);
3253 			if (ret < 0)
3254 				goto out_sleep;
3255 		}
3256 
3257 		/*
3258 		 * If the interface is in AP mode and was created with allmulticast,
3259 		 * disable the firmware filters so that all multicast packets are
3260 		 * passed. This is mandatory for mDNS-based discovery protocols.
3261 		 */
3262 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3263 			if (*total & FIF_ALLMULTI) {
3264 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3265 							false,
3266 							NULL, 0);
3267 				if (ret < 0)
3268 					goto out_sleep;
3269 			}
3270 		}
3271 	}
3272 
3273 	/*
3274 	 * The fw doesn't provide an API to configure the filters. Instead,
3275 	 * the filter configuration is based on the active roles / ROC
3276 	 * state.
3277 	 */
3278 
3279 out_sleep:
3280 	pm_runtime_mark_last_busy(wl->dev);
3281 	pm_runtime_put_autosuspend(wl->dev);
3282 
3283 out:
3284 	mutex_unlock(&wl->mutex);
3285 	kfree(fp);
3286 }
3287 
3288 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3289 				u8 id, u8 key_type, u8 key_size,
3290 				const u8 *key, u8 hlid, u32 tx_seq_32,
3291 				u16 tx_seq_16)
3292 {
3293 	struct wl1271_ap_key *ap_key;
3294 	int i;
3295 
3296 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3297 
3298 	if (key_size > MAX_KEY_SIZE)
3299 		return -EINVAL;
3300 
3301 	/*
3302 	 * Find next free entry in ap_keys. Also check we are not replacing
3303 	 * an existing key.
3304 	 */
3305 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3306 		if (wlvif->ap.recorded_keys[i] == NULL)
3307 			break;
3308 
3309 		if (wlvif->ap.recorded_keys[i]->id == id) {
3310 			wl1271_warning("trying to record key replacement");
3311 			return -EINVAL;
3312 		}
3313 	}
3314 
3315 	if (i == MAX_NUM_KEYS)
3316 		return -EBUSY;
3317 
3318 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3319 	if (!ap_key)
3320 		return -ENOMEM;
3321 
3322 	ap_key->id = id;
3323 	ap_key->key_type = key_type;
3324 	ap_key->key_size = key_size;
3325 	memcpy(ap_key->key, key, key_size);
3326 	ap_key->hlid = hlid;
3327 	ap_key->tx_seq_32 = tx_seq_32;
3328 	ap_key->tx_seq_16 = tx_seq_16;
3329 
3330 	wlvif->ap.recorded_keys[i] = ap_key;
3331 	return 0;
3332 }
3333 
3334 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3335 {
3336 	int i;
3337 
3338 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3339 		kfree(wlvif->ap.recorded_keys[i]);
3340 		wlvif->ap.recorded_keys[i] = NULL;
3341 	}
3342 }
3343 
3344 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3345 {
3346 	int i, ret = 0;
3347 	struct wl1271_ap_key *key;
3348 	bool wep_key_added = false;
3349 
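	/* replay the keys that were recorded before the AP was started */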
3350 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3351 		u8 hlid;
3352 		if (wlvif->ap.recorded_keys[i] == NULL)
3353 			break;
3354 
3355 		key = wlvif->ap.recorded_keys[i];
3356 		hlid = key->hlid;
3357 		if (hlid == WL12XX_INVALID_LINK_ID)
3358 			hlid = wlvif->ap.bcast_hlid;
3359 
3360 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3361 					    key->id, key->key_type,
3362 					    key->key_size, key->key,
3363 					    hlid, key->tx_seq_32,
3364 					    key->tx_seq_16);
3365 		if (ret < 0)
3366 			goto out;
3367 
3368 		if (key->key_type == KEY_WEP)
3369 			wep_key_added = true;
3370 	}
3371 
3372 	if (wep_key_added) {
3373 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3374 						     wlvif->ap.bcast_hlid);
3375 		if (ret < 0)
3376 			goto out;
3377 	}
3378 
3379 out:
3380 	wl1271_free_ap_keys(wl, wlvif);
3381 	return ret;
3382 }
3383 
3384 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3385 		       u16 action, u8 id, u8 key_type,
3386 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3387 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3388 {
3389 	int ret;
3390 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3391 
3392 	if (is_ap) {
3393 		struct wl1271_station *wl_sta;
3394 		u8 hlid;
3395 
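		/* unicast keys target the sta link, group keys the bcast link */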
3396 		if (sta) {
3397 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3398 			hlid = wl_sta->hlid;
3399 		} else {
3400 			hlid = wlvif->ap.bcast_hlid;
3401 		}
3402 
3403 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3404 			/*
3405 			 * We do not support removing keys after AP shutdown.
3406 			 * Pretend we do to make mac80211 happy.
3407 			 */
3408 			if (action != KEY_ADD_OR_REPLACE)
3409 				return 0;
3410 
3411 			ret = wl1271_record_ap_key(wl, wlvif, id,
3412 					     key_type, key_size,
3413 					     key, hlid, tx_seq_32,
3414 					     tx_seq_16);
3415 		} else {
3416 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3417 					     id, key_type, key_size,
3418 					     key, hlid, tx_seq_32,
3419 					     tx_seq_16);
3420 		}
3421 
3422 		if (ret < 0)
3423 			return ret;
3424 	} else {
3425 		const u8 *addr;
3426 		static const u8 bcast_addr[ETH_ALEN] = {
3427 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3428 		};
3429 
3430 		addr = sta ? sta->addr : bcast_addr;
3431 
3432 		if (is_zero_ether_addr(addr)) {
3433 			/* We don't support TX-only encryption */
3434 			return -EOPNOTSUPP;
3435 		}
3436 
3437 		/* The wl1271 does not allow removing unicast keys - they
3438 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3439 		   request silently, as we don't want mac80211 to emit
3440 		   an error message. */
3441 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3442 			return 0;
3443 
3444 		/* don't remove key if hlid was already deleted */
3445 		if (action == KEY_REMOVE &&
3446 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3447 			return 0;
3448 
3449 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3450 					     id, key_type, key_size,
3451 					     key, addr, tx_seq_32,
3452 					     tx_seq_16);
3453 		if (ret < 0)
3454 			return ret;
3455 
3456 	}
3457 
3458 	return 0;
3459 }
3460 
3461 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3462 			     struct ieee80211_vif *vif,
3463 			     struct ieee80211_sta *sta,
3464 			     struct ieee80211_key_conf *key_conf)
3465 {
3466 	struct wl1271 *wl = hw->priv;
3467 	int ret;
3468 	bool might_change_spare =
3469 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3470 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3471 
3472 	if (might_change_spare) {
3473 		/*
3474 		 * stop the queues and flush to ensure the next packets are
3475 		 * in sync with FW spare block accounting
3476 		 */
3477 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3478 		wl1271_tx_flush(wl);
3479 	}
3480 
3481 	mutex_lock(&wl->mutex);
3482 
3483 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3484 		ret = -EAGAIN;
3485 		goto out_wake_queues;
3486 	}
3487 
3488 	ret = pm_runtime_get_sync(wl->dev);
3489 	if (ret < 0) {
3490 		pm_runtime_put_noidle(wl->dev);
3491 		goto out_wake_queues;
3492 	}
3493 
3494 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3495 
3496 	pm_runtime_mark_last_busy(wl->dev);
3497 	pm_runtime_put_autosuspend(wl->dev);
3498 
3499 out_wake_queues:
3500 	if (might_change_spare)
3501 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3502 
3503 	mutex_unlock(&wl->mutex);
3504 
3505 	return ret;
3506 }
3507 
3508 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3509 		   struct ieee80211_vif *vif,
3510 		   struct ieee80211_sta *sta,
3511 		   struct ieee80211_key_conf *key_conf)
3512 {
3513 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3514 	int ret;
3515 	u32 tx_seq_32 = 0;
3516 	u16 tx_seq_16 = 0;
3517 	u8 key_type;
3518 	u8 hlid;
3519 
3520 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3521 
3522 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3523 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3524 		     key_conf->cipher, key_conf->keyidx,
3525 		     key_conf->keylen, key_conf->flags);
3526 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3527 
3528 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3529 		if (sta) {
3530 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3531 			hlid = wl_sta->hlid;
3532 		} else {
3533 			hlid = wlvif->ap.bcast_hlid;
3534 		}
3535 	else
3536 		hlid = wlvif->sta.hlid;
3537 
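	/* seed the key's TX sequence counter from the link's freed pkts count */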
3538 	if (hlid != WL12XX_INVALID_LINK_ID) {
3539 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3540 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3541 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3542 	}
3543 
3544 	switch (key_conf->cipher) {
3545 	case WLAN_CIPHER_SUITE_WEP40:
3546 	case WLAN_CIPHER_SUITE_WEP104:
3547 		key_type = KEY_WEP;
3548 
3549 		key_conf->hw_key_idx = key_conf->keyidx;
3550 		break;
3551 	case WLAN_CIPHER_SUITE_TKIP:
3552 		key_type = KEY_TKIP;
3553 		key_conf->hw_key_idx = key_conf->keyidx;
3554 		break;
3555 	case WLAN_CIPHER_SUITE_CCMP:
3556 		key_type = KEY_AES;
3557 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3558 		break;
3559 	case WL1271_CIPHER_SUITE_GEM:
3560 		key_type = KEY_GEM;
3561 		break;
3562 	default:
3563 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3564 
3565 		return -EOPNOTSUPP;
3566 	}
3567 
3568 	switch (cmd) {
3569 	case SET_KEY:
3570 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3571 				 key_conf->keyidx, key_type,
3572 				 key_conf->keylen, key_conf->key,
3573 				 tx_seq_32, tx_seq_16, sta);
3574 		if (ret < 0) {
3575 			wl1271_error("Could not add or replace key");
3576 			return ret;
3577 		}
3578 
3579 		/*
3580 		 * reconfiguring arp response if the unicast (or common)
3581 		 * encryption key type was changed
3582 		 */
3583 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3584 		    (sta || key_type == KEY_WEP) &&
3585 		    wlvif->encryption_type != key_type) {
3586 			wlvif->encryption_type = key_type;
3587 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3588 			if (ret < 0) {
3589 				wl1271_warning("build arp rsp failed: %d", ret);
3590 				return ret;
3591 			}
3592 		}
3593 		break;
3594 
3595 	case DISABLE_KEY:
3596 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3597 				     key_conf->keyidx, key_type,
3598 				     key_conf->keylen, key_conf->key,
3599 				     0, 0, sta);
3600 		if (ret < 0) {
3601 			wl1271_error("Could not remove key");
3602 			return ret;
3603 		}
3604 		break;
3605 
3606 	default:
3607 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3608 		return -EOPNOTSUPP;
3609 	}
3610 
3611 	return ret;
3612 }
3613 EXPORT_SYMBOL_GPL(wlcore_set_key);
3614 
3615 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3616 					  struct ieee80211_vif *vif,
3617 					  int key_idx)
3618 {
3619 	struct wl1271 *wl = hw->priv;
3620 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3621 	int ret;
3622 
3623 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3624 		     key_idx);
3625 
3626 	/* we don't handle unsetting of default key */
3627 	if (key_idx == -1)
3628 		return;
3629 
3630 	mutex_lock(&wl->mutex);
3631 
3632 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3633 		ret = -EAGAIN;
3634 		goto out_unlock;
3635 	}
3636 
3637 	ret = pm_runtime_get_sync(wl->dev);
3638 	if (ret < 0) {
3639 		pm_runtime_put_noidle(wl->dev);
3640 		goto out_unlock;
3641 	}
3642 
3643 	wlvif->default_key = key_idx;
3644 
3645 	/* the default WEP key needs to be configured at least once */
3646 	if (wlvif->encryption_type == KEY_WEP) {
3647 		ret = wl12xx_cmd_set_default_wep_key(wl,
3648 				key_idx,
3649 				wlvif->sta.hlid);
3650 		if (ret < 0)
3651 			goto out_sleep;
3652 	}
3653 
3654 out_sleep:
3655 	pm_runtime_mark_last_busy(wl->dev);
3656 	pm_runtime_put_autosuspend(wl->dev);
3657 
3658 out_unlock:
3659 	mutex_unlock(&wl->mutex);
3660 }
3661 
3662 void wlcore_regdomain_config(struct wl1271 *wl)
3663 {
3664 	int ret;
3665 
3666 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3667 		return;
3668 
3669 	mutex_lock(&wl->mutex);
3670 
3671 	if (unlikely(wl->state != WLCORE_STATE_ON))
3672 		goto out;
3673 
3674 	ret = pm_runtime_get_sync(wl->dev);
3675 	if (ret < 0)
3676 		goto out;
3677 
3678 	ret = wlcore_cmd_regdomain_config_locked(wl);
3679 	if (ret < 0) {
3680 		wl12xx_queue_recovery_work(wl);
3681 		goto out;
3682 	}
3683 
3684 	pm_runtime_mark_last_busy(wl->dev);
3685 	pm_runtime_put_autosuspend(wl->dev);
3686 out:
3687 	mutex_unlock(&wl->mutex);
3688 }
3689 
3690 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3691 			     struct ieee80211_vif *vif,
3692 			     struct ieee80211_scan_request *hw_req)
3693 {
3694 	struct cfg80211_scan_request *req = &hw_req->req;
3695 	struct wl1271 *wl = hw->priv;
3696 	int ret;
3697 	u8 *ssid = NULL;
3698 	size_t len = 0;
3699 
3700 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3701 
3702 	if (req->n_ssids) {
3703 		ssid = req->ssids[0].ssid;
3704 		len = req->ssids[0].ssid_len;
3705 	}
3706 
3707 	mutex_lock(&wl->mutex);
3708 
3709 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3710 		/*
3711 		 * We cannot return -EBUSY here because cfg80211 will expect
3712 		 * a call to ieee80211_scan_completed if we do - in this case
3713 		 * there won't be any call.
3714 		 */
3715 		ret = -EAGAIN;
3716 		goto out;
3717 	}
3718 
3719 	ret = pm_runtime_get_sync(wl->dev);
3720 	if (ret < 0) {
3721 		pm_runtime_put_noidle(wl->dev);
3722 		goto out;
3723 	}
3724 
3725 	/* fail if there is any role in ROC */
3726 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3727 		/* don't allow scanning right now */
3728 		ret = -EBUSY;
3729 		goto out_sleep;
3730 	}
3731 
3732 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3733 out_sleep:
3734 	pm_runtime_mark_last_busy(wl->dev);
3735 	pm_runtime_put_autosuspend(wl->dev);
3736 out:
3737 	mutex_unlock(&wl->mutex);
3738 
3739 	return ret;
3740 }
3741 
3742 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3743 				     struct ieee80211_vif *vif)
3744 {
3745 	struct wl1271 *wl = hw->priv;
3746 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3747 	struct cfg80211_scan_info info = {
3748 		.aborted = true,
3749 	};
3750 	int ret;
3751 
3752 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3753 
3754 	mutex_lock(&wl->mutex);
3755 
3756 	if (unlikely(wl->state != WLCORE_STATE_ON))
3757 		goto out;
3758 
3759 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3760 		goto out;
3761 
3762 	ret = pm_runtime_get_sync(wl->dev);
3763 	if (ret < 0) {
3764 		pm_runtime_put_noidle(wl->dev);
3765 		goto out;
3766 	}
3767 
3768 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3769 		ret = wl->ops->scan_stop(wl, wlvif);
3770 		if (ret < 0)
3771 			goto out_sleep;
3772 	}
3773 
3774 	/*
3775 	 * Rearm the tx watchdog just before idling scan. This
3776 	 * prevents just-finished scans from triggering the watchdog
3777 	 */
3778 	wl12xx_rearm_tx_watchdog_locked(wl);
3779 
3780 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3781 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3782 	wl->scan_wlvif = NULL;
3783 	wl->scan.req = NULL;
3784 	ieee80211_scan_completed(wl->hw, &info);
3785 
3786 out_sleep:
3787 	pm_runtime_mark_last_busy(wl->dev);
3788 	pm_runtime_put_autosuspend(wl->dev);
3789 out:
3790 	mutex_unlock(&wl->mutex);
3791 
3792 	cancel_delayed_work_sync(&wl->scan_complete_work);
3793 }
3794 
3795 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3796 				      struct ieee80211_vif *vif,
3797 				      struct cfg80211_sched_scan_request *req,
3798 				      struct ieee80211_scan_ies *ies)
3799 {
3800 	struct wl1271 *wl = hw->priv;
3801 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3802 	int ret;
3803 
3804 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3805 
3806 	mutex_lock(&wl->mutex);
3807 
3808 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3809 		ret = -EAGAIN;
3810 		goto out;
3811 	}
3812 
3813 	ret = pm_runtime_get_sync(wl->dev);
3814 	if (ret < 0) {
3815 		pm_runtime_put_noidle(wl->dev);
3816 		goto out;
3817 	}
3818 
3819 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3820 	if (ret < 0)
3821 		goto out_sleep;
3822 
3823 	wl->sched_vif = wlvif;
3824 
3825 out_sleep:
3826 	pm_runtime_mark_last_busy(wl->dev);
3827 	pm_runtime_put_autosuspend(wl->dev);
3828 out:
3829 	mutex_unlock(&wl->mutex);
3830 	return ret;
3831 }
3832 
3833 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3834 				     struct ieee80211_vif *vif)
3835 {
3836 	struct wl1271 *wl = hw->priv;
3837 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3838 	int ret;
3839 
3840 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3841 
3842 	mutex_lock(&wl->mutex);
3843 
3844 	if (unlikely(wl->state != WLCORE_STATE_ON))
3845 		goto out;
3846 
3847 	ret = pm_runtime_get_sync(wl->dev);
3848 	if (ret < 0) {
3849 		pm_runtime_put_noidle(wl->dev);
3850 		goto out;
3851 	}
3852 
3853 	wl->ops->sched_scan_stop(wl, wlvif);
3854 
3855 	pm_runtime_mark_last_busy(wl->dev);
3856 	pm_runtime_put_autosuspend(wl->dev);
3857 out:
3858 	mutex_unlock(&wl->mutex);
3859 
3860 	return 0;
3861 }
3862 
3863 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3864 {
3865 	struct wl1271 *wl = hw->priv;
3866 	int ret = 0;
3867 
3868 	mutex_lock(&wl->mutex);
3869 
3870 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3871 		ret = -EAGAIN;
3872 		goto out;
3873 	}
3874 
3875 	ret = pm_runtime_get_sync(wl->dev);
3876 	if (ret < 0) {
3877 		pm_runtime_put_noidle(wl->dev);
3878 		goto out;
3879 	}
3880 
3881 	ret = wl1271_acx_frag_threshold(wl, value);
3882 	if (ret < 0)
3883 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3884 
3885 	pm_runtime_mark_last_busy(wl->dev);
3886 	pm_runtime_put_autosuspend(wl->dev);
3887 
3888 out:
3889 	mutex_unlock(&wl->mutex);
3890 
3891 	return ret;
3892 }
3893 
3894 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3895 {
3896 	struct wl1271 *wl = hw->priv;
3897 	struct wl12xx_vif *wlvif;
3898 	int ret = 0;
3899 
3900 	mutex_lock(&wl->mutex);
3901 
3902 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3903 		ret = -EAGAIN;
3904 		goto out;
3905 	}
3906 
3907 	ret = pm_runtime_get_sync(wl->dev);
3908 	if (ret < 0) {
3909 		pm_runtime_put_noidle(wl->dev);
3910 		goto out;
3911 	}
3912 
3913 	wl12xx_for_each_wlvif(wl, wlvif) {
3914 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3915 		if (ret < 0)
3916 			wl1271_warning("set rts threshold failed: %d", ret);
3917 	}
3918 	pm_runtime_mark_last_busy(wl->dev);
3919 	pm_runtime_put_autosuspend(wl->dev);
3920 
3921 out:
3922 	mutex_unlock(&wl->mutex);
3923 
3924 	return ret;
3925 }
3926 
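/*
 * 802.11 IEs are TLV encoded: a one-byte element ID, a one-byte length and
 * then the payload. The two helpers below therefore drop ie[1] + 2 bytes
 * (payload plus the two-byte header), close the gap with memmove() and
 * shrink the skb accordingly.
 */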
3927 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3928 {
3929 	int len;
3930 	const u8 *next, *end = skb->data + skb->len;
3931 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3932 					skb->len - ieoffset);
3933 	if (!ie)
3934 		return;
3935 	len = ie[1] + 2;
3936 	next = ie + len;
3937 	memmove(ie, next, end - next);
3938 	skb_trim(skb, skb->len - len);
3939 }
3940 
3941 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3942 					    unsigned int oui, u8 oui_type,
3943 					    int ieoffset)
3944 {
3945 	int len;
3946 	const u8 *next, *end = skb->data + skb->len;
3947 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3948 					       skb->data + ieoffset,
3949 					       skb->len - ieoffset);
3950 	if (!ie)
3951 		return;
3952 	len = ie[1] + 2;
3953 	next = ie + len;
3954 	memmove(ie, next, end - next);
3955 	skb_trim(skb, skb->len - len);
3956 }
3957 
3958 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3959 					 struct ieee80211_vif *vif)
3960 {
3961 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3962 	struct sk_buff *skb;
3963 	int ret;
3964 
3965 	skb = ieee80211_proberesp_get(wl->hw, vif);
3966 	if (!skb)
3967 		return -EOPNOTSUPP;
3968 
3969 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3970 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3971 				      skb->data,
3972 				      skb->len, 0,
3973 				      rates);
3974 	dev_kfree_skb(skb);
3975 
3976 	if (ret < 0)
3977 		goto out;
3978 
3979 	wl1271_debug(DEBUG_AP, "probe response updated");
3980 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3981 
3982 out:
3983 	return ret;
3984 }
3985 
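/*
 * Build a probe-response template that carries the SSID from bss_conf
 * instead of the (possibly hidden/zero-length) SSID found in the frame:
 *
 *   [ header + fixed fields | IEs up to SSID | SSID IE from bss_conf | remaining IEs ]
 *
 * The original SSID IE is skipped by advancing ptr past ptr[1] + 2 bytes.
 */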
3986 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3987 					     struct ieee80211_vif *vif,
3988 					     u8 *probe_rsp_data,
3989 					     size_t probe_rsp_len,
3990 					     u32 rates)
3991 {
3992 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3993 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3994 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3995 	int ssid_ie_offset, ie_offset, templ_len;
3996 	const u8 *ptr;
3997 
3998 	/* no need to change probe response if the SSID is set correctly */
3999 	if (wlvif->ssid_len > 0)
4000 		return wl1271_cmd_template_set(wl, wlvif->role_id,
4001 					       CMD_TEMPL_AP_PROBE_RESPONSE,
4002 					       probe_rsp_data,
4003 					       probe_rsp_len, 0,
4004 					       rates);
4005 
4006 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4007 		wl1271_error("probe_rsp template too big");
4008 		return -EINVAL;
4009 	}
4010 
4011 	/* start searching from IE offset */
4012 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4013 
4014 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4015 			       probe_rsp_len - ie_offset);
4016 	if (!ptr) {
4017 		wl1271_error("No SSID in beacon!");
4018 		return -EINVAL;
4019 	}
4020 
4021 	ssid_ie_offset = ptr - probe_rsp_data;
4022 	ptr += (ptr[1] + 2);
4023 
4024 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4025 
4026 	/* insert SSID from bss_conf */
4027 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4028 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4029 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4030 	       bss_conf->ssid, bss_conf->ssid_len);
4031 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4032 
4033 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4034 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4035 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4036 
4037 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4038 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4039 				       probe_rsp_templ,
4040 				       templ_len, 0,
4041 				       rates);
4042 }
4043 
4044 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4045 				       struct ieee80211_vif *vif,
4046 				       struct ieee80211_bss_conf *bss_conf,
4047 				       u32 changed)
4048 {
4049 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4050 	int ret = 0;
4051 
4052 	if (changed & BSS_CHANGED_ERP_SLOT) {
4053 		if (bss_conf->use_short_slot)
4054 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4055 		else
4056 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4057 		if (ret < 0) {
4058 			wl1271_warning("Set slot time failed %d", ret);
4059 			goto out;
4060 		}
4061 	}
4062 
4063 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4064 		if (bss_conf->use_short_preamble)
4065 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4066 		else
4067 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4068 	}
4069 
4070 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4071 		if (bss_conf->use_cts_prot)
4072 			ret = wl1271_acx_cts_protect(wl, wlvif,
4073 						     CTSPROTECT_ENABLE);
4074 		else
4075 			ret = wl1271_acx_cts_protect(wl, wlvif,
4076 						     CTSPROTECT_DISABLE);
4077 		if (ret < 0) {
4078 			wl1271_warning("Set ctsprotect failed %d", ret);
4079 			goto out;
4080 		}
4081 	}
4082 
4083 out:
4084 	return ret;
4085 }
4086 
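/*
 * Upload the beacon as a template and, unless userspace already provided
 * an explicit probe response, derive a probe-response template from it:
 * strip the TIM IE and the P2P vendor IE, then rewrite the frame control
 * field to IEEE80211_STYPE_PROBE_RESP.
 */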
4087 static int wlcore_set_beacon_template(struct wl1271 *wl,
4088 				      struct ieee80211_vif *vif,
4089 				      bool is_ap)
4090 {
4091 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4092 	struct ieee80211_hdr *hdr;
4093 	u32 min_rate;
4094 	int ret;
4095 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4096 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4097 	u16 tmpl_id;
4098 
4099 	if (!beacon) {
4100 		ret = -EINVAL;
4101 		goto out;
4102 	}
4103 
4104 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4105 
4106 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4107 	if (ret < 0) {
4108 		dev_kfree_skb(beacon);
4109 		goto out;
4110 	}
4111 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4112 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4113 		CMD_TEMPL_BEACON;
4114 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4115 				      beacon->data,
4116 				      beacon->len, 0,
4117 				      min_rate);
4118 	if (ret < 0) {
4119 		dev_kfree_skb(beacon);
4120 		goto out;
4121 	}
4122 
4123 	wlvif->wmm_enabled =
4124 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4125 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4126 					beacon->data + ieoffset,
4127 					beacon->len - ieoffset);
4128 
4129 	/*
4130 	 * In case a probe-resp template has already been set explicitly
4131 	 * by userspace, don't derive one from the beacon data.
4132 	 */
4133 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4134 		goto end_bcn;
4135 
4136 	/* remove TIM ie from probe response */
4137 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4138 
4139 	/*
4140 	 * remove p2p ie from probe response.
4141 	 * the fw responds to probe requests that don't include
4142 	 * the p2p ie. probe requests that do include the p2p ie are
4143 	 * passed up and answered by the supplicant (the spec
4144 	 * forbids including the p2p ie when responding to probe
4145 	 * requests that didn't include it).
4146 	 */
4147 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4148 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4149 
4150 	hdr = (struct ieee80211_hdr *) beacon->data;
4151 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4152 					 IEEE80211_STYPE_PROBE_RESP);
4153 	if (is_ap)
4154 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4155 							   beacon->data,
4156 							   beacon->len,
4157 							   min_rate);
4158 	else
4159 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4160 					      CMD_TEMPL_PROBE_RESPONSE,
4161 					      beacon->data,
4162 					      beacon->len, 0,
4163 					      min_rate);
4164 end_bcn:
4165 	dev_kfree_skb(beacon);
4166 	if (ret < 0)
4167 		goto out;
4168 
4169 out:
4170 	return ret;
4171 }
4172 
4173 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4174 					  struct ieee80211_vif *vif,
4175 					  struct ieee80211_bss_conf *bss_conf,
4176 					  u32 changed)
4177 {
4178 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4179 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4180 	int ret = 0;
4181 
4182 	if (changed & BSS_CHANGED_BEACON_INT) {
4183 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4184 			bss_conf->beacon_int);
4185 
4186 		wlvif->beacon_int = bss_conf->beacon_int;
4187 	}
4188 
4189 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4190 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4191 
4192 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4193 	}
4194 
4195 	if (changed & BSS_CHANGED_BEACON) {
4196 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4197 		if (ret < 0)
4198 			goto out;
4199 
4200 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4201 				       &wlvif->flags)) {
4202 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4203 			if (ret < 0)
4204 				goto out;
4205 		}
4206 	}
4207 out:
4208 	if (ret != 0)
4209 		wl1271_error("beacon info change failed: %d", ret);
4210 	return ret;
4211 }
4212 
4213 /* AP mode changes */
4214 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4215 				       struct ieee80211_vif *vif,
4216 				       struct ieee80211_bss_conf *bss_conf,
4217 				       u32 changed)
4218 {
4219 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4220 	int ret = 0;
4221 
4222 	if (changed & BSS_CHANGED_BASIC_RATES) {
4223 		u32 rates = bss_conf->basic_rates;
4224 
4225 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4226 								 wlvif->band);
4227 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4228 							wlvif->basic_rate_set);
4229 
4230 		ret = wl1271_init_ap_rates(wl, wlvif);
4231 		if (ret < 0) {
4232 			wl1271_error("AP rate policy change failed %d", ret);
4233 			goto out;
4234 		}
4235 
4236 		ret = wl1271_ap_init_templates(wl, vif);
4237 		if (ret < 0)
4238 			goto out;
4239 
4240 		/* No need to set probe resp template for mesh */
4241 		if (!ieee80211_vif_is_mesh(vif)) {
4242 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4243 							    wlvif->basic_rate,
4244 							    vif);
4245 			if (ret < 0)
4246 				goto out;
4247 		}
4248 
4249 		ret = wlcore_set_beacon_template(wl, vif, true);
4250 		if (ret < 0)
4251 			goto out;
4252 	}
4253 
4254 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4255 	if (ret < 0)
4256 		goto out;
4257 
4258 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4259 		if (bss_conf->enable_beacon) {
4260 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4261 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4262 				if (ret < 0)
4263 					goto out;
4264 
4265 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4266 				if (ret < 0)
4267 					goto out;
4268 
4269 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4270 				wl1271_debug(DEBUG_AP, "started AP");
4271 			}
4272 		} else {
4273 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4274 				/*
4275 				 * AP might be in ROC in case we have just
4276 				 * sent an auth reply. Handle it.
4277 				 */
4278 				if (test_bit(wlvif->role_id, wl->roc_map))
4279 					wl12xx_croc(wl, wlvif->role_id);
4280 
4281 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4282 				if (ret < 0)
4283 					goto out;
4284 
4285 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4286 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4287 					  &wlvif->flags);
4288 				wl1271_debug(DEBUG_AP, "stopped AP");
4289 			}
4290 		}
4291 	}
4292 
4293 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4294 	if (ret < 0)
4295 		goto out;
4296 
4297 	/* Handle HT information change */
4298 	if ((changed & BSS_CHANGED_HT) &&
4299 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4300 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4301 					bss_conf->ht_operation_mode);
4302 		if (ret < 0) {
4303 			wl1271_warning("Set ht information failed %d", ret);
4304 			goto out;
4305 		}
4306 	}
4307 
4308 out:
4309 	return;
4310 }
4311 
4312 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4313 			    struct ieee80211_bss_conf *bss_conf,
4314 			    u32 sta_rate_set)
4315 {
4316 	u32 rates;
4317 	int ret;
4318 
4319 	wl1271_debug(DEBUG_MAC80211,
4320 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4321 	     bss_conf->bssid, bss_conf->aid,
4322 	     bss_conf->beacon_int,
4323 	     bss_conf->basic_rates, sta_rate_set);
4324 
4325 	wlvif->beacon_int = bss_conf->beacon_int;
4326 	rates = bss_conf->basic_rates;
4327 	wlvif->basic_rate_set =
4328 		wl1271_tx_enabled_rates_get(wl, rates,
4329 					    wlvif->band);
4330 	wlvif->basic_rate =
4331 		wl1271_tx_min_rate_get(wl,
4332 				       wlvif->basic_rate_set);
4333 
4334 	if (sta_rate_set)
4335 		wlvif->rate_set =
4336 			wl1271_tx_enabled_rates_get(wl,
4337 						sta_rate_set,
4338 						wlvif->band);
4339 
4340 	/* we only support sched_scan while not connected */
4341 	if (wl->sched_vif == wlvif)
4342 		wl->ops->sched_scan_stop(wl, wlvif);
4343 
4344 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4345 	if (ret < 0)
4346 		return ret;
4347 
4348 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4349 	if (ret < 0)
4350 		return ret;
4351 
4352 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4353 	if (ret < 0)
4354 		return ret;
4355 
4356 	wlcore_set_ssid(wl, wlvif);
4357 
4358 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4359 
4360 	return 0;
4361 }
4362 
4363 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4364 {
4365 	int ret;
4366 
4367 	/* revert back to minimum rates for the current band */
4368 	wl1271_set_band_rate(wl, wlvif);
4369 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4370 
4371 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4372 	if (ret < 0)
4373 		return ret;
4374 
4375 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4376 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4377 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4378 		if (ret < 0)
4379 			return ret;
4380 	}
4381 
4382 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4383 	return 0;
4384 }
4385 /* STA/IBSS mode changes */
4386 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4387 					struct ieee80211_vif *vif,
4388 					struct ieee80211_bss_conf *bss_conf,
4389 					u32 changed)
4390 {
4391 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4392 	bool do_join = false;
4393 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4394 	bool ibss_joined = false;
4395 	u32 sta_rate_set = 0;
4396 	int ret;
4397 	struct ieee80211_sta *sta;
4398 	bool sta_exists = false;
4399 	struct ieee80211_sta_ht_cap sta_ht_cap;
4400 
4401 	if (is_ibss) {
4402 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4403 						     changed);
4404 		if (ret < 0)
4405 			goto out;
4406 	}
4407 
4408 	if (changed & BSS_CHANGED_IBSS) {
4409 		if (bss_conf->ibss_joined) {
4410 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4411 			ibss_joined = true;
4412 		} else {
4413 			wlcore_unset_assoc(wl, wlvif);
4414 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4415 		}
4416 	}
4417 
4418 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4419 		do_join = true;
4420 
4421 	/* Need to update the SSID (for filtering etc) */
4422 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4423 		do_join = true;
4424 
4425 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4426 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4427 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4428 
4429 		do_join = true;
4430 	}
4431 
4432 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4433 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4434 
4435 	if (changed & BSS_CHANGED_CQM) {
4436 		bool enable = false;
4437 		if (bss_conf->cqm_rssi_thold)
4438 			enable = true;
4439 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4440 						  bss_conf->cqm_rssi_thold,
4441 						  bss_conf->cqm_rssi_hyst);
4442 		if (ret < 0)
4443 			goto out;
4444 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4445 	}
4446 
4447 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4448 		       BSS_CHANGED_ASSOC)) {
4449 		rcu_read_lock();
4450 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4451 		if (sta) {
4452 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4453 
4454 			/* save the supp_rates of the ap */
4455 			sta_rate_set = sta->supp_rates[wlvif->band];
4456 			if (sta->ht_cap.ht_supported)
4457 				sta_rate_set |=
4458 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4459 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4460 			sta_ht_cap = sta->ht_cap;
4461 			sta_exists = true;
4462 		}
4463 
4464 		rcu_read_unlock();
4465 	}
4466 
4467 	if (changed & BSS_CHANGED_BSSID) {
4468 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4469 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4470 					       sta_rate_set);
4471 			if (ret < 0)
4472 				goto out;
4473 
4474 			/* Need to update the BSSID (for filtering etc) */
4475 			do_join = true;
4476 		} else {
4477 			ret = wlcore_clear_bssid(wl, wlvif);
4478 			if (ret < 0)
4479 				goto out;
4480 		}
4481 	}
4482 
4483 	if (changed & BSS_CHANGED_IBSS) {
4484 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4485 			     bss_conf->ibss_joined);
4486 
4487 		if (bss_conf->ibss_joined) {
4488 			u32 rates = bss_conf->basic_rates;
4489 			wlvif->basic_rate_set =
4490 				wl1271_tx_enabled_rates_get(wl, rates,
4491 							    wlvif->band);
4492 			wlvif->basic_rate =
4493 				wl1271_tx_min_rate_get(wl,
4494 						       wlvif->basic_rate_set);
4495 
4496 			/* by default, use 11b + OFDM rates */
4497 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4498 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4499 			if (ret < 0)
4500 				goto out;
4501 		}
4502 	}
4503 
4504 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4505 		/* enable beacon filtering */
4506 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4507 		if (ret < 0)
4508 			goto out;
4509 	}
4510 
4511 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4512 	if (ret < 0)
4513 		goto out;
4514 
4515 	if (do_join) {
4516 		ret = wlcore_join(wl, wlvif);
4517 		if (ret < 0) {
4518 			wl1271_warning("cmd join failed %d", ret);
4519 			goto out;
4520 		}
4521 	}
4522 
4523 	if (changed & BSS_CHANGED_ASSOC) {
4524 		if (bss_conf->assoc) {
4525 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4526 					       sta_rate_set);
4527 			if (ret < 0)
4528 				goto out;
4529 
4530 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4531 				wl12xx_set_authorized(wl, wlvif);
4532 		} else {
4533 			wlcore_unset_assoc(wl, wlvif);
4534 		}
4535 	}
4536 
4537 	if (changed & BSS_CHANGED_PS) {
4538 		if ((bss_conf->ps) &&
4539 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4540 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4541 			int ps_mode;
4542 			char *ps_mode_str;
4543 
4544 			if (wl->conf.conn.forced_ps) {
4545 				ps_mode = STATION_POWER_SAVE_MODE;
4546 				ps_mode_str = "forced";
4547 			} else {
4548 				ps_mode = STATION_AUTO_PS_MODE;
4549 				ps_mode_str = "auto";
4550 			}
4551 
4552 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4553 
4554 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4555 			if (ret < 0)
4556 				wl1271_warning("enter %s ps failed %d",
4557 					       ps_mode_str, ret);
4558 		} else if (!bss_conf->ps &&
4559 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4560 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4561 
4562 			ret = wl1271_ps_set_mode(wl, wlvif,
4563 						 STATION_ACTIVE_MODE);
4564 			if (ret < 0)
4565 				wl1271_warning("exit auto ps failed %d", ret);
4566 		}
4567 	}
4568 
4569 	/* Handle new association with HT. Do this after join. */
4570 	if (sta_exists) {
4571 		bool enabled =
4572 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4573 
4574 		ret = wlcore_hw_set_peer_cap(wl,
4575 					     &sta_ht_cap,
4576 					     enabled,
4577 					     wlvif->rate_set,
4578 					     wlvif->sta.hlid);
4579 		if (ret < 0) {
4580 			wl1271_warning("Set ht cap failed %d", ret);
4581 			goto out;
4582 
4583 		}
4584 
4585 		if (enabled) {
4586 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4587 						bss_conf->ht_operation_mode);
4588 			if (ret < 0) {
4589 				wl1271_warning("Set ht information failed %d",
4590 					       ret);
4591 				goto out;
4592 			}
4593 		}
4594 	}
4595 
4596 	/* Handle arp filtering. Done after join. */
4597 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4598 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4599 		__be32 addr = bss_conf->arp_addr_list[0];
4600 		wlvif->sta.qos = bss_conf->qos;
4601 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4602 
4603 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4604 			wlvif->ip_addr = addr;
4605 			/*
4606 			 * The template should have been configured only upon
4607 			 * association. However, it seems that the correct IP
4608 			 * isn't being set (when sending), so we have to
4609 			 * reconfigure the template upon every IP change.
4610 			 */
4611 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4612 			if (ret < 0) {
4613 				wl1271_warning("build arp rsp failed: %d", ret);
4614 				goto out;
4615 			}
4616 
4617 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4618 				(ACX_ARP_FILTER_ARP_FILTERING |
4619 				 ACX_ARP_FILTER_AUTO_ARP),
4620 				addr);
4621 		} else {
4622 			wlvif->ip_addr = 0;
4623 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4624 		}
4625 
4626 		if (ret < 0)
4627 			goto out;
4628 	}
4629 
4630 out:
4631 	return;
4632 }
4633 
4634 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4635 				       struct ieee80211_vif *vif,
4636 				       struct ieee80211_bss_conf *bss_conf,
4637 				       u32 changed)
4638 {
4639 	struct wl1271 *wl = hw->priv;
4640 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4641 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4642 	int ret;
4643 
4644 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4645 		     wlvif->role_id, (int)changed);
4646 
4647 	/*
4648 	 * make sure to cancel pending disconnections if our association
4649 	 * state changed
4650 	 */
4651 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4652 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4653 
4654 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4655 	    !bss_conf->enable_beacon)
4656 		wl1271_tx_flush(wl);
4657 
4658 	mutex_lock(&wl->mutex);
4659 
4660 	if (unlikely(wl->state != WLCORE_STATE_ON))
4661 		goto out;
4662 
4663 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4664 		goto out;
4665 
4666 	ret = pm_runtime_get_sync(wl->dev);
4667 	if (ret < 0) {
4668 		pm_runtime_put_noidle(wl->dev);
4669 		goto out;
4670 	}
4671 
4672 	if ((changed & BSS_CHANGED_TXPOWER) &&
4673 	    bss_conf->txpower != wlvif->power_level) {
4674 
4675 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4676 		if (ret < 0)
4677 			goto out;
4678 
4679 		wlvif->power_level = bss_conf->txpower;
4680 	}
4681 
4682 	if (is_ap)
4683 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4684 	else
4685 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4686 
4687 	pm_runtime_mark_last_busy(wl->dev);
4688 	pm_runtime_put_autosuspend(wl->dev);
4689 
4690 out:
4691 	mutex_unlock(&wl->mutex);
4692 }
4693 
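/*
 * chanctx add/remove are only logged; the driver keeps channel state
 * per-vif instead, updated in the assign/unassign, change and
 * switch_vif_chanctx callbacks below.
 */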
4694 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4695 				 struct ieee80211_chanctx_conf *ctx)
4696 {
4697 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4698 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4699 		     cfg80211_get_chandef_type(&ctx->def));
4700 	return 0;
4701 }
4702 
4703 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4704 				     struct ieee80211_chanctx_conf *ctx)
4705 {
4706 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4707 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4708 		     cfg80211_get_chandef_type(&ctx->def));
4709 }
4710 
4711 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4712 				     struct ieee80211_chanctx_conf *ctx,
4713 				     u32 changed)
4714 {
4715 	struct wl1271 *wl = hw->priv;
4716 	struct wl12xx_vif *wlvif;
4717 	int ret;
4718 	int channel = ieee80211_frequency_to_channel(
4719 		ctx->def.chan->center_freq);
4720 
4721 	wl1271_debug(DEBUG_MAC80211,
4722 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4723 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4724 
4725 	mutex_lock(&wl->mutex);
4726 
4727 	ret = pm_runtime_get_sync(wl->dev);
4728 	if (ret < 0) {
4729 		pm_runtime_put_noidle(wl->dev);
4730 		goto out;
4731 	}
4732 
4733 	wl12xx_for_each_wlvif(wl, wlvif) {
4734 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4735 
4736 		rcu_read_lock();
4737 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4738 			rcu_read_unlock();
4739 			continue;
4740 		}
4741 		rcu_read_unlock();
4742 
4743 		/* start radar if needed */
4744 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4745 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4746 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4747 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4748 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4749 			wlcore_hw_set_cac(wl, wlvif, true);
4750 			wlvif->radar_enabled = true;
4751 		}
4752 	}
4753 
4754 	pm_runtime_mark_last_busy(wl->dev);
4755 	pm_runtime_put_autosuspend(wl->dev);
4756 out:
4757 	mutex_unlock(&wl->mutex);
4758 }
4759 
4760 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4761 					struct ieee80211_vif *vif,
4762 					struct ieee80211_chanctx_conf *ctx)
4763 {
4764 	struct wl1271 *wl = hw->priv;
4765 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4766 	int channel = ieee80211_frequency_to_channel(
4767 		ctx->def.chan->center_freq);
4768 	int ret = -EINVAL;
4769 
4770 	wl1271_debug(DEBUG_MAC80211,
4771 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4772 		     wlvif->role_id, channel,
4773 		     cfg80211_get_chandef_type(&ctx->def),
4774 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4775 
4776 	mutex_lock(&wl->mutex);
4777 
4778 	if (unlikely(wl->state != WLCORE_STATE_ON))
4779 		goto out;
4780 
4781 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4782 		goto out;
4783 
4784 	ret = pm_runtime_get_sync(wl->dev);
4785 	if (ret < 0) {
4786 		pm_runtime_put_noidle(wl->dev);
4787 		goto out;
4788 	}
4789 
4790 	wlvif->band = ctx->def.chan->band;
4791 	wlvif->channel = channel;
4792 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4793 
4794 	/* update default rates according to the band */
4795 	wl1271_set_band_rate(wl, wlvif);
4796 
4797 	if (ctx->radar_enabled &&
4798 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4799 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4800 		wlcore_hw_set_cac(wl, wlvif, true);
4801 		wlvif->radar_enabled = true;
4802 	}
4803 
4804 	pm_runtime_mark_last_busy(wl->dev);
4805 	pm_runtime_put_autosuspend(wl->dev);
4806 out:
4807 	mutex_unlock(&wl->mutex);
4808 
4809 	return 0;
4810 }
4811 
4812 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4813 					   struct ieee80211_vif *vif,
4814 					   struct ieee80211_chanctx_conf *ctx)
4815 {
4816 	struct wl1271 *wl = hw->priv;
4817 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4818 	int ret;
4819 
4820 	wl1271_debug(DEBUG_MAC80211,
4821 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4822 		     wlvif->role_id,
4823 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4824 		     cfg80211_get_chandef_type(&ctx->def));
4825 
4826 	wl1271_tx_flush(wl);
4827 
4828 	mutex_lock(&wl->mutex);
4829 
4830 	if (unlikely(wl->state != WLCORE_STATE_ON))
4831 		goto out;
4832 
4833 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4834 		goto out;
4835 
4836 	ret = pm_runtime_get_sync(wl->dev);
4837 	if (ret < 0) {
4838 		pm_runtime_put_noidle(wl->dev);
4839 		goto out;
4840 	}
4841 
4842 	if (wlvif->radar_enabled) {
4843 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4844 		wlcore_hw_set_cac(wl, wlvif, false);
4845 		wlvif->radar_enabled = false;
4846 	}
4847 
4848 	pm_runtime_mark_last_busy(wl->dev);
4849 	pm_runtime_put_autosuspend(wl->dev);
4850 out:
4851 	mutex_unlock(&wl->mutex);
4852 }
4853 
4854 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4855 				    struct wl12xx_vif *wlvif,
4856 				    struct ieee80211_chanctx_conf *new_ctx)
4857 {
4858 	int channel = ieee80211_frequency_to_channel(
4859 		new_ctx->def.chan->center_freq);
4860 
4861 	wl1271_debug(DEBUG_MAC80211,
4862 		     "switch vif (role %d) %d -> %d chan_type: %d",
4863 		     wlvif->role_id, wlvif->channel, channel,
4864 		     cfg80211_get_chandef_type(&new_ctx->def));
4865 
4866 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4867 		return 0;
4868 
4869 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4870 
4871 	if (wlvif->radar_enabled) {
4872 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4873 		wlcore_hw_set_cac(wl, wlvif, false);
4874 		wlvif->radar_enabled = false;
4875 	}
4876 
4877 	wlvif->band = new_ctx->def.chan->band;
4878 	wlvif->channel = channel;
4879 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4880 
4881 	/* start radar if needed */
4882 	if (new_ctx->radar_enabled) {
4883 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4884 		wlcore_hw_set_cac(wl, wlvif, true);
4885 		wlvif->radar_enabled = true;
4886 	}
4887 
4888 	return 0;
4889 }
4890 
4891 static int
4892 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4893 			     struct ieee80211_vif_chanctx_switch *vifs,
4894 			     int n_vifs,
4895 			     enum ieee80211_chanctx_switch_mode mode)
4896 {
4897 	struct wl1271 *wl = hw->priv;
4898 	int i, ret;
4899 
4900 	wl1271_debug(DEBUG_MAC80211,
4901 		     "mac80211 switch chanctx n_vifs %d mode %d",
4902 		     n_vifs, mode);
4903 
4904 	mutex_lock(&wl->mutex);
4905 
4906 	ret = pm_runtime_get_sync(wl->dev);
4907 	if (ret < 0) {
4908 		pm_runtime_put_noidle(wl->dev);
4909 		goto out;
4910 	}
4911 
4912 	for (i = 0; i < n_vifs; i++) {
4913 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4914 
4915 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4916 		if (ret)
4917 			goto out_sleep;
4918 	}
4919 out_sleep:
4920 	pm_runtime_mark_last_busy(wl->dev);
4921 	pm_runtime_put_autosuspend(wl->dev);
4922 out:
4923 	mutex_unlock(&wl->mutex);
4924 
4925 	return 0;
4926 }
4927 
4928 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4929 			     struct ieee80211_vif *vif, u16 queue,
4930 			     const struct ieee80211_tx_queue_params *params)
4931 {
4932 	struct wl1271 *wl = hw->priv;
4933 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4934 	u8 ps_scheme;
4935 	int ret = 0;
4936 
4937 	if (wlcore_is_p2p_mgmt(wlvif))
4938 		return 0;
4939 
4940 	mutex_lock(&wl->mutex);
4941 
4942 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4943 
4944 	if (params->uapsd)
4945 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4946 	else
4947 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4948 
4949 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4950 		goto out;
4951 
4952 	ret = pm_runtime_get_sync(wl->dev);
4953 	if (ret < 0) {
4954 		pm_runtime_put_noidle(wl->dev);
4955 		goto out;
4956 	}
4957 
4958 	/*
4959 	 * mac80211 configures the txop in units of 32us, but the
4960 	 * firmware expects microseconds, hence the << 5 below.
4961 	 */
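	/* e.g. a txop of 47 (in 32us units) becomes 47 << 5 = 1504 us */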
4962 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4963 				params->cw_min, params->cw_max,
4964 				params->aifs, params->txop << 5);
4965 	if (ret < 0)
4966 		goto out_sleep;
4967 
4968 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4969 				 CONF_CHANNEL_TYPE_EDCF,
4970 				 wl1271_tx_get_queue(queue),
4971 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4972 				 0, 0);
4973 
4974 out_sleep:
4975 	pm_runtime_mark_last_busy(wl->dev);
4976 	pm_runtime_put_autosuspend(wl->dev);
4977 
4978 out:
4979 	mutex_unlock(&wl->mutex);
4980 
4981 	return ret;
4982 }
4983 
4984 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4985 			     struct ieee80211_vif *vif)
4986 {
4987 
4988 	struct wl1271 *wl = hw->priv;
4989 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4990 	u64 mactime = ULLONG_MAX;
4991 	int ret;
4992 
4993 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4994 
4995 	mutex_lock(&wl->mutex);
4996 
4997 	if (unlikely(wl->state != WLCORE_STATE_ON))
4998 		goto out;
4999 
5000 	ret = pm_runtime_get_sync(wl->dev);
5001 	if (ret < 0) {
5002 		pm_runtime_put_noidle(wl->dev);
5003 		goto out;
5004 	}
5005 
5006 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5007 	if (ret < 0)
5008 		goto out_sleep;
5009 
5010 out_sleep:
5011 	pm_runtime_mark_last_busy(wl->dev);
5012 	pm_runtime_put_autosuspend(wl->dev);
5013 
5014 out:
5015 	mutex_unlock(&wl->mutex);
5016 	return mactime;
5017 }
5018 
5019 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5020 				struct survey_info *survey)
5021 {
5022 	struct ieee80211_conf *conf = &hw->conf;
5023 
5024 	if (idx != 0)
5025 		return -ENOENT;
5026 
5027 	survey->channel = conf->chandef.chan;
5028 	survey->filled = 0;
5029 	return 0;
5030 }
5031 
5032 static int wl1271_allocate_sta(struct wl1271 *wl,
5033 			     struct wl12xx_vif *wlvif,
5034 			     struct ieee80211_sta *sta)
5035 {
5036 	struct wl1271_station *wl_sta;
5037 	int ret;
5038 
5039 
5040 	if (wl->active_sta_count >= wl->max_ap_stations) {
5041 		wl1271_warning("could not allocate HLID - too many stations");
5042 		return -EBUSY;
5043 	}
5044 
5045 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5046 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5047 	if (ret < 0) {
5048 		wl1271_warning("could not allocate HLID - too many links");
5049 		return -EBUSY;
5050 	}
5051 
5052 	/* use the previous security seq, if this is a recovery/resume */
5053 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5054 
5055 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5056 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5057 	wl->active_sta_count++;
5058 	return 0;
5059 }
5060 
5061 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5062 {
5063 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5064 		return;
5065 
5066 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5067 	__clear_bit(hlid, &wl->ap_ps_map);
5068 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5069 
5070 	/*
5071 	 * save the last used PN in the private part of ieee80211_sta,
5072 	 * in case of recovery/suspend
5073 	 */
5074 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5075 
5076 	wl12xx_free_link(wl, wlvif, &hlid);
5077 	wl->active_sta_count--;
5078 
5079 	/*
5080 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5081 	 * chance to return STA-buffered packets before complaining.
5082 	 */
5083 	if (wl->active_sta_count == 0)
5084 		wl12xx_rearm_tx_watchdog_locked(wl);
5085 }
5086 
5087 static int wl12xx_sta_add(struct wl1271 *wl,
5088 			  struct wl12xx_vif *wlvif,
5089 			  struct ieee80211_sta *sta)
5090 {
5091 	struct wl1271_station *wl_sta;
5092 	int ret = 0;
5093 	u8 hlid;
5094 
5095 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5096 
5097 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5098 	if (ret < 0)
5099 		return ret;
5100 
5101 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5102 	hlid = wl_sta->hlid;
5103 
5104 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5105 	if (ret < 0)
5106 		wl1271_free_sta(wl, wlvif, hlid);
5107 
5108 	return ret;
5109 }
5110 
5111 static int wl12xx_sta_remove(struct wl1271 *wl,
5112 			     struct wl12xx_vif *wlvif,
5113 			     struct ieee80211_sta *sta)
5114 {
5115 	struct wl1271_station *wl_sta;
5116 	int ret = 0, id;
5117 
5118 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5119 
5120 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5121 	id = wl_sta->hlid;
5122 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5123 		return -EINVAL;
5124 
5125 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5126 	if (ret < 0)
5127 		return ret;
5128 
5129 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5130 	return ret;
5131 }
5132 
5133 static void wlcore_roc_if_possible(struct wl1271 *wl,
5134 				   struct wl12xx_vif *wlvif)
5135 {
5136 	if (find_first_bit(wl->roc_map,
5137 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5138 		return;
5139 
5140 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5141 		return;
5142 
5143 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5144 }
5145 
5146 /*
5147  * when wl_sta is NULL, we treat this call as if coming from a
5148  * pending auth reply.
5149  * wl->mutex must be taken and the FW must be awake when the call
5150  * takes place.
5151  */
5152 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5153 			      struct wl1271_station *wl_sta, bool in_conn)
5154 {
5155 	if (in_conn) {
5156 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5157 			return;
5158 
5159 		if (!wlvif->ap_pending_auth_reply &&
5160 		    !wlvif->inconn_count)
5161 			wlcore_roc_if_possible(wl, wlvif);
5162 
5163 		if (wl_sta) {
5164 			wl_sta->in_connection = true;
5165 			wlvif->inconn_count++;
5166 		} else {
5167 			wlvif->ap_pending_auth_reply = true;
5168 		}
5169 	} else {
5170 		if (wl_sta && !wl_sta->in_connection)
5171 			return;
5172 
5173 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5174 			return;
5175 
5176 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5177 			return;
5178 
5179 		if (wl_sta) {
5180 			wl_sta->in_connection = false;
5181 			wlvif->inconn_count--;
5182 		} else {
5183 			wlvif->ap_pending_auth_reply = false;
5184 		}
5185 
5186 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5187 		    test_bit(wlvif->role_id, wl->roc_map))
5188 			wl12xx_croc(wl, wlvif->role_id);
5189 	}
5190 }
5191 
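/*
 * mac80211 moves stations one step at a time along the ladder
 * NOTEXIST <-> NONE <-> AUTH <-> ASSOC <-> AUTHORIZED; each branch below
 * reacts to one specific transition (or to reaching/leaving AUTHORIZED)
 * for either the AP or the STA role.
 */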
5192 static int wl12xx_update_sta_state(struct wl1271 *wl,
5193 				   struct wl12xx_vif *wlvif,
5194 				   struct ieee80211_sta *sta,
5195 				   enum ieee80211_sta_state old_state,
5196 				   enum ieee80211_sta_state new_state)
5197 {
5198 	struct wl1271_station *wl_sta;
5199 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5200 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5201 	int ret;
5202 
5203 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5204 
5205 	/* Add station (AP mode) */
5206 	if (is_ap &&
5207 	    old_state == IEEE80211_STA_NOTEXIST &&
5208 	    new_state == IEEE80211_STA_NONE) {
5209 		ret = wl12xx_sta_add(wl, wlvif, sta);
5210 		if (ret)
5211 			return ret;
5212 
5213 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5214 	}
5215 
5216 	/* Remove station (AP mode) */
5217 	if (is_ap &&
5218 	    old_state == IEEE80211_STA_NONE &&
5219 	    new_state == IEEE80211_STA_NOTEXIST) {
5220 		/* must not fail */
5221 		wl12xx_sta_remove(wl, wlvif, sta);
5222 
5223 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5224 	}
5225 
5226 	/* Authorize station (AP mode) */
5227 	if (is_ap &&
5228 	    new_state == IEEE80211_STA_AUTHORIZED) {
5229 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5230 		if (ret < 0)
5231 			return ret;
5232 
5233 		/* reconfigure rates */
5234 		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5235 		if (ret < 0)
5236 			return ret;
5237 
5238 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5239 						     wl_sta->hlid);
5240 		if (ret)
5241 			return ret;
5242 
5243 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5244 	}
5245 
5246 	/* Authorize station */
5247 	if (is_sta &&
5248 	    new_state == IEEE80211_STA_AUTHORIZED) {
5249 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5250 		ret = wl12xx_set_authorized(wl, wlvif);
5251 		if (ret)
5252 			return ret;
5253 	}
5254 
5255 	if (is_sta &&
5256 	    old_state == IEEE80211_STA_AUTHORIZED &&
5257 	    new_state == IEEE80211_STA_ASSOC) {
5258 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5259 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5260 	}
5261 
5262 	/* save seq number on disassoc (suspend) */
5263 	if (is_sta &&
5264 	    old_state == IEEE80211_STA_ASSOC &&
5265 	    new_state == IEEE80211_STA_AUTH) {
5266 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5267 		wlvif->total_freed_pkts = 0;
5268 	}
5269 
5270 	/* restore seq number on assoc (resume) */
5271 	if (is_sta &&
5272 	    old_state == IEEE80211_STA_AUTH &&
5273 	    new_state == IEEE80211_STA_ASSOC) {
5274 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5275 	}
5276 
5277 	/* clear ROCs on failure or authorization */
5278 	if (is_sta &&
5279 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5280 	     new_state == IEEE80211_STA_NOTEXIST)) {
5281 		if (test_bit(wlvif->role_id, wl->roc_map))
5282 			wl12xx_croc(wl, wlvif->role_id);
5283 	}
5284 
5285 	if (is_sta &&
5286 	    old_state == IEEE80211_STA_NOTEXIST &&
5287 	    new_state == IEEE80211_STA_NONE) {
5288 		if (find_first_bit(wl->roc_map,
5289 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5290 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5291 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5292 				   wlvif->band, wlvif->channel);
5293 		}
5294 	}
5295 	return 0;
5296 }
5297 
5298 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5299 			       struct ieee80211_vif *vif,
5300 			       struct ieee80211_sta *sta,
5301 			       enum ieee80211_sta_state old_state,
5302 			       enum ieee80211_sta_state new_state)
5303 {
5304 	struct wl1271 *wl = hw->priv;
5305 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5306 	int ret;
5307 
5308 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5309 		     sta->aid, old_state, new_state);
5310 
5311 	mutex_lock(&wl->mutex);
5312 
5313 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5314 		ret = -EBUSY;
5315 		goto out;
5316 	}
5317 
5318 	ret = pm_runtime_get_sync(wl->dev);
5319 	if (ret < 0) {
5320 		pm_runtime_put_noidle(wl->dev);
5321 		goto out;
5322 	}
5323 
5324 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5325 
5326 	pm_runtime_mark_last_busy(wl->dev);
5327 	pm_runtime_put_autosuspend(wl->dev);
5328 out:
5329 	mutex_unlock(&wl->mutex);
5330 	if (new_state < old_state)
5331 		return 0;
5332 	return ret;
5333 }
5334 
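/*
 * Only RX BA (block ack) sessions are handled here: the per-link ba_bitmap
 * keeps one bit per TID and wl->ba_rx_session_count enforces the FW-wide
 * limit. TX aggregation is left to the firmware.
 */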
5335 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5336 				  struct ieee80211_vif *vif,
5337 				  struct ieee80211_ampdu_params *params)
5338 {
5339 	struct wl1271 *wl = hw->priv;
5340 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5341 	int ret;
5342 	u8 hlid, *ba_bitmap;
5343 	struct ieee80211_sta *sta = params->sta;
5344 	enum ieee80211_ampdu_mlme_action action = params->action;
5345 	u16 tid = params->tid;
5346 	u16 *ssn = &params->ssn;
5347 
5348 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5349 		     tid);
5350 
5351 	/* sanity check - the fields in FW are only 8bits wide */
5352 	/* sanity check - the fields in FW are only 8 bits wide */
5353 		return -ENOTSUPP;
5354 
5355 	mutex_lock(&wl->mutex);
5356 
5357 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5358 		ret = -EAGAIN;
5359 		goto out;
5360 	}
5361 
5362 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5363 		hlid = wlvif->sta.hlid;
5364 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5365 		struct wl1271_station *wl_sta;
5366 
5367 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5368 		hlid = wl_sta->hlid;
5369 	} else {
5370 		ret = -EINVAL;
5371 		goto out;
5372 	}
5373 
5374 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5375 
5376 	ret = pm_runtime_get_sync(wl->dev);
5377 	if (ret < 0) {
5378 		pm_runtime_put_noidle(wl->dev);
5379 		goto out;
5380 	}
5381 
5382 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5383 		     tid, action);
5384 
5385 	switch (action) {
5386 	case IEEE80211_AMPDU_RX_START:
5387 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5388 			ret = -ENOTSUPP;
5389 			break;
5390 		}
5391 
5392 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5393 			ret = -EBUSY;
5394 			wl1271_error("exceeded max RX BA sessions");
5395 			break;
5396 		}
5397 
5398 		if (*ba_bitmap & BIT(tid)) {
5399 			ret = -EINVAL;
5400 			wl1271_error("cannot enable RX BA session on active "
5401 				     "tid: %d", tid);
5402 			break;
5403 		}
5404 
5405 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5406 				hlid,
5407 				params->buf_size);
5408 
5409 		if (!ret) {
5410 			*ba_bitmap |= BIT(tid);
5411 			wl->ba_rx_session_count++;
5412 		}
5413 		break;
5414 
5415 	case IEEE80211_AMPDU_RX_STOP:
5416 		if (!(*ba_bitmap & BIT(tid))) {
5417 			/*
5418 			 * this happens on reconfig - so only output a debug
5419 			 * message for now, and don't fail the function.
5420 			 */
5421 			wl1271_debug(DEBUG_MAC80211,
5422 				     "no active RX BA session on tid: %d",
5423 				     tid);
5424 			ret = 0;
5425 			break;
5426 		}
5427 
5428 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5429 							 hlid, 0);
5430 		if (!ret) {
5431 			*ba_bitmap &= ~BIT(tid);
5432 			wl->ba_rx_session_count--;
5433 		}
5434 		break;
5435 
5436 	/*
5437 	 * TX BA (initiator) sessions are managed by the FW on its own,
5438 	 * so all TX AMPDU actions deliberately fall through to -EINVAL.
5439 	 */
5440 	case IEEE80211_AMPDU_TX_START:
5441 	case IEEE80211_AMPDU_TX_STOP_CONT:
5442 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5443 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5444 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5445 		ret = -EINVAL;
5446 		break;
5447 
5448 	default:
5449 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5450 		ret = -EINVAL;
5451 	}
5452 
5453 	pm_runtime_mark_last_busy(wl->dev);
5454 	pm_runtime_put_autosuspend(wl->dev);
5455 
5456 out:
5457 	mutex_unlock(&wl->mutex);
5458 
5459 	return ret;
5460 }
5461 
5462 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5463 				   struct ieee80211_vif *vif,
5464 				   const struct cfg80211_bitrate_mask *mask)
5465 {
5466 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5467 	struct wl1271 *wl = hw->priv;
5468 	int i, ret = 0;
5469 
5470 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5471 		mask->control[NL80211_BAND_2GHZ].legacy,
5472 		mask->control[NL80211_BAND_5GHZ].legacy);
5473 
5474 	mutex_lock(&wl->mutex);
5475 
5476 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5477 		wlvif->bitrate_masks[i] =
5478 			wl1271_tx_enabled_rates_get(wl,
5479 						    mask->control[i].legacy,
5480 						    i);
5481 
5482 	if (unlikely(wl->state != WLCORE_STATE_ON))
5483 		goto out;
5484 
5485 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5486 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5487 
5488 		ret = pm_runtime_get_sync(wl->dev);
5489 		if (ret < 0) {
5490 			pm_runtime_put_noidle(wl->dev);
5491 			goto out;
5492 		}
5493 
5494 		wl1271_set_band_rate(wl, wlvif);
5495 		wlvif->basic_rate =
5496 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5497 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5498 
5499 		pm_runtime_mark_last_busy(wl->dev);
5500 		pm_runtime_put_autosuspend(wl->dev);
5501 	}
5502 out:
5503 	mutex_unlock(&wl->mutex);
5504 
5505 	return ret;
5506 }
5507 
5508 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5509 				     struct ieee80211_vif *vif,
5510 				     struct ieee80211_channel_switch *ch_switch)
5511 {
5512 	struct wl1271 *wl = hw->priv;
5513 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5514 	int ret;
5515 
5516 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5517 
5518 	wl1271_tx_flush(wl);
5519 
5520 	mutex_lock(&wl->mutex);
5521 
5522 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5523 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5524 			ieee80211_chswitch_done(vif, false);
5525 		goto out;
5526 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5527 		goto out;
5528 	}
5529 
5530 	ret = pm_runtime_get_sync(wl->dev);
5531 	if (ret < 0) {
5532 		pm_runtime_put_noidle(wl->dev);
5533 		goto out;
5534 	}
5535 
5536 	/* TODO: change mac80211 to pass vif as param */
5537 
5538 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5539 		unsigned long delay_usec;
5540 
5541 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5542 		if (ret)
5543 			goto out_sleep;
5544 
5545 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5546 
5547 		/* indicate failure 5 seconds after channel switch time */
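		/* e.g. beacon_int = 100 TU (1 TU = 1024 us) and count = 10 -> ~1.02 s, plus 5 s */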
5548 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5549 			ch_switch->count;
5550 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5551 					     usecs_to_jiffies(delay_usec) +
5552 					     msecs_to_jiffies(5000));
5553 	}
5554 
5555 out_sleep:
5556 	pm_runtime_mark_last_busy(wl->dev);
5557 	pm_runtime_put_autosuspend(wl->dev);
5558 
5559 out:
5560 	mutex_unlock(&wl->mutex);
5561 }
5562 
5563 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5564 					struct wl12xx_vif *wlvif,
5565 					u8 eid)
5566 {
5567 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5568 	struct sk_buff *beacon =
5569 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5570 
5571 	if (!beacon)
5572 		return NULL;
5573 
5574 	return cfg80211_find_ie(eid,
5575 				beacon->data + ieoffset,
5576 				beacon->len - ieoffset);
5577 }
5578 
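/*
 * The Channel Switch Announcement element payload (mode, new channel
 * number, count) starts right after the two-byte IE header, hence the
 * cast from &ie[2] below.
 */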
5579 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5580 				u8 *csa_count)
5581 {
5582 	const u8 *ie;
5583 	const struct ieee80211_channel_sw_ie *ie_csa;
5584 
5585 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5586 	if (!ie)
5587 		return -EINVAL;
5588 
5589 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5590 	*csa_count = ie_csa->count;
5591 
5592 	return 0;
5593 }
5594 
5595 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5596 					    struct ieee80211_vif *vif,
5597 					    struct cfg80211_chan_def *chandef)
5598 {
5599 	struct wl1271 *wl = hw->priv;
5600 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5601 	struct ieee80211_channel_switch ch_switch = {
5602 		.block_tx = true,
5603 		.chandef = *chandef,
5604 	};
5605 	int ret;
5606 
5607 	wl1271_debug(DEBUG_MAC80211,
5608 		     "mac80211 channel switch beacon (role %d)",
5609 		     wlvif->role_id);
5610 
5611 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5612 	if (ret < 0) {
5613 		wl1271_error("error getting beacon (for CSA counter)");
5614 		return;
5615 	}
5616 
5617 	mutex_lock(&wl->mutex);
5618 
5619 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5620 		ret = -EBUSY;
5621 		goto out;
5622 	}
5623 
5624 	ret = pm_runtime_get_sync(wl->dev);
5625 	if (ret < 0) {
5626 		pm_runtime_put_noidle(wl->dev);
5627 		goto out;
5628 	}
5629 
5630 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5631 	if (ret)
5632 		goto out_sleep;
5633 
5634 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5635 
5636 out_sleep:
5637 	pm_runtime_mark_last_busy(wl->dev);
5638 	pm_runtime_put_autosuspend(wl->dev);
5639 out:
5640 	mutex_unlock(&wl->mutex);
5641 }
5642 
5643 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5644 			    u32 queues, bool drop)
5645 {
5646 	struct wl1271 *wl = hw->priv;
5647 
5648 	wl1271_tx_flush(wl);
5649 }
5650 
5651 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5652 				       struct ieee80211_vif *vif,
5653 				       struct ieee80211_channel *chan,
5654 				       int duration,
5655 				       enum ieee80211_roc_type type)
5656 {
5657 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5658 	struct wl1271 *wl = hw->priv;
5659 	int channel, active_roc, ret = 0;
5660 
5661 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5662 
5663 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5664 		     channel, wlvif->role_id);
5665 
5666 	mutex_lock(&wl->mutex);
5667 
5668 	if (unlikely(wl->state != WLCORE_STATE_ON))
5669 		goto out;
5670 
5671 	/* return EBUSY if we can't ROC right now */
5672 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5673 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5674 		wl1271_warning("active roc on role %d", active_roc);
5675 		ret = -EBUSY;
5676 		goto out;
5677 	}
5678 
5679 	ret = pm_runtime_get_sync(wl->dev);
5680 	if (ret < 0) {
5681 		pm_runtime_put_noidle(wl->dev);
5682 		goto out;
5683 	}
5684 
5685 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5686 	if (ret < 0)
5687 		goto out_sleep;
5688 
5689 	wl->roc_vif = vif;
5690 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5691 				     msecs_to_jiffies(duration));
5692 out_sleep:
5693 	pm_runtime_mark_last_busy(wl->dev);
5694 	pm_runtime_put_autosuspend(wl->dev);
5695 out:
5696 	mutex_unlock(&wl->mutex);
5697 	return ret;
5698 }
5699 
5700 static int __wlcore_roc_completed(struct wl1271 *wl)
5701 {
5702 	struct wl12xx_vif *wlvif;
5703 	int ret;
5704 
5705 	/* already completed */
5706 	if (unlikely(!wl->roc_vif))
5707 		return 0;
5708 
5709 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5710 
5711 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5712 		return -EBUSY;
5713 
5714 	ret = wl12xx_stop_dev(wl, wlvif);
5715 	if (ret < 0)
5716 		return ret;
5717 
5718 	wl->roc_vif = NULL;
5719 
5720 	return 0;
5721 }
5722 
5723 static int wlcore_roc_completed(struct wl1271 *wl)
5724 {
5725 	int ret;
5726 
5727 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5728 
5729 	mutex_lock(&wl->mutex);
5730 
5731 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5732 		ret = -EBUSY;
5733 		goto out;
5734 	}
5735 
5736 	ret = pm_runtime_get_sync(wl->dev);
5737 	if (ret < 0) {
5738 		pm_runtime_put_noidle(wl->dev);
5739 		goto out;
5740 	}
5741 
5742 	ret = __wlcore_roc_completed(wl);
5743 
5744 	pm_runtime_mark_last_busy(wl->dev);
5745 	pm_runtime_put_autosuspend(wl->dev);
5746 out:
5747 	mutex_unlock(&wl->mutex);
5748 
5749 	return ret;
5750 }
5751 
5752 static void wlcore_roc_complete_work(struct work_struct *work)
5753 {
5754 	struct delayed_work *dwork;
5755 	struct wl1271 *wl;
5756 	int ret;
5757 
5758 	dwork = to_delayed_work(work);
5759 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5760 
5761 	ret = wlcore_roc_completed(wl);
5762 	if (!ret)
5763 		ieee80211_remain_on_channel_expired(wl->hw);
5764 }
5765 
5766 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5767 {
5768 	struct wl1271 *wl = hw->priv;
5769 
5770 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5771 
5772 	/* TODO: per-vif */
5773 	wl1271_tx_flush(wl);
5774 
5775 	/*
5776 	 * we can't just flush_work here, because it might deadlock
5777 	 * (as we might get called from the same workqueue)
5778 	 */
5779 	cancel_delayed_work_sync(&wl->roc_complete_work);
5780 	wlcore_roc_completed(wl);
5781 
5782 	return 0;
5783 }
5784 
5785 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5786 				    struct ieee80211_vif *vif,
5787 				    struct ieee80211_sta *sta,
5788 				    u32 changed)
5789 {
5790 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5791 
5792 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5793 
5794 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5795 		return;
5796 
5797 	/* this callback is atomic, so defer the update to a work item */
5798 	wlvif->rc_update_bw = sta->bandwidth;
5799 	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5800 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5801 }
5802 
5803 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5804 				     struct ieee80211_vif *vif,
5805 				     struct ieee80211_sta *sta,
5806 				     struct station_info *sinfo)
5807 {
5808 	struct wl1271 *wl = hw->priv;
5809 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5810 	s8 rssi_dbm;
5811 	int ret;
5812 
5813 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5814 
5815 	mutex_lock(&wl->mutex);
5816 
5817 	if (unlikely(wl->state != WLCORE_STATE_ON))
5818 		goto out;
5819 
5820 	ret = pm_runtime_get_sync(wl->dev);
5821 	if (ret < 0) {
5822 		pm_runtime_put_noidle(wl->dev);
5823 		goto out;
5824 	}
5825 
5826 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5827 	if (ret < 0)
5828 		goto out_sleep;
5829 
5830 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5831 	sinfo->signal = rssi_dbm;
5832 
5833 out_sleep:
5834 	pm_runtime_mark_last_busy(wl->dev);
5835 	pm_runtime_put_autosuspend(wl->dev);
5836 
5837 out:
5838 	mutex_unlock(&wl->mutex);
5839 }
5840 
5841 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5842 					     struct ieee80211_sta *sta)
5843 {
5844 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5845 	struct wl1271 *wl = hw->priv;
5846 	u8 hlid = wl_sta->hlid;
5847 
5848 	/* return in units of Kbps */
5849 	return (wl->links[hlid].fw_rate_mbps * 1000);
5850 }
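/*
 * mac80211 expects expected-throughput values in kbps, hence the scaling by
 * 1000 above: a link whose fw_rate_mbps is 54, for example, is reported as
 * 54000 kbps.
 */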
5851 
5852 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5853 {
5854 	struct wl1271 *wl = hw->priv;
5855 	bool ret = false;
5856 
5857 	mutex_lock(&wl->mutex);
5858 
5859 	if (unlikely(wl->state != WLCORE_STATE_ON))
5860 		goto out;
5861 
5862 	/* packets are considered pending if in the TX queue or the FW */
5863 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5864 out:
5865 	mutex_unlock(&wl->mutex);
5866 
5867 	return ret;
5868 }
5869 
5870 /* can't be const, mac80211 writes to this */
5871 static struct ieee80211_rate wl1271_rates[] = {
5872 	{ .bitrate = 10,
5873 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5874 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5875 	{ .bitrate = 20,
5876 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5877 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5878 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5879 	{ .bitrate = 55,
5880 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5881 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5882 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5883 	{ .bitrate = 110,
5884 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5885 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5886 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5887 	{ .bitrate = 60,
5888 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5889 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5890 	{ .bitrate = 90,
5891 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5892 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5893 	{ .bitrate = 120,
5894 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5895 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5896 	{ .bitrate = 180,
5897 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5898 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5899 	{ .bitrate = 240,
5900 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5901 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5902 	{ .bitrate = 360,
5903 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5904 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5905 	{ .bitrate = 480,
5906 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5907 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5908 	{ .bitrate = 540,
5909 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5910 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5911 };
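/*
 * Note: struct ieee80211_rate.bitrate is in units of 100 kbps, so the entries
 * above read 10 == 1 Mbps, 55 == 5.5 Mbps and 540 == 54 Mbps, matching the
 * CONF_HW_BIT_RATE_* codes they are paired with.
 */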
5912 
5913 /* can't be const, mac80211 writes to this */
5914 static struct ieee80211_channel wl1271_channels[] = {
5915 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5916 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5917 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5918 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5919 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5920 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5921 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5922 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5923 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5924 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5925 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5926 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5927 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5928 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5929 };
5930 
5931 /* can't be const, mac80211 writes to this */
5932 static struct ieee80211_supported_band wl1271_band_2ghz = {
5933 	.channels = wl1271_channels,
5934 	.n_channels = ARRAY_SIZE(wl1271_channels),
5935 	.bitrates = wl1271_rates,
5936 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5937 };
5938 
5939 /* 5 GHz data rates for WL1273 */
5940 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5941 	{ .bitrate = 60,
5942 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5943 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5944 	{ .bitrate = 90,
5945 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5946 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5947 	{ .bitrate = 120,
5948 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5949 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5950 	{ .bitrate = 180,
5951 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5952 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5953 	{ .bitrate = 240,
5954 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5955 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5956 	{ .bitrate = 360,
5957 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5958 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5959 	{ .bitrate = 480,
5960 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5961 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5962 	{ .bitrate = 540,
5963 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5964 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5965 };
5966 
5967 /* 5 GHz band channels for WL1273 */
5968 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5969 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5970 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5971 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5972 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5973 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5974 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5975 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5976 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5977 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5987 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5988 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5989 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5990 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5991 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5992 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5993 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5994 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5995 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5996 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5997 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5998 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5999 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
6000 };
6001 
6002 static struct ieee80211_supported_band wl1271_band_5ghz = {
6003 	.channels = wl1271_channels_5ghz,
6004 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6005 	.bitrates = wl1271_rates_5ghz,
6006 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6007 };
6008 
6009 static const struct ieee80211_ops wl1271_ops = {
6010 	.start = wl1271_op_start,
6011 	.stop = wlcore_op_stop,
6012 	.add_interface = wl1271_op_add_interface,
6013 	.remove_interface = wl1271_op_remove_interface,
6014 	.change_interface = wl12xx_op_change_interface,
6015 #ifdef CONFIG_PM
6016 	.suspend = wl1271_op_suspend,
6017 	.resume = wl1271_op_resume,
6018 #endif
6019 	.config = wl1271_op_config,
6020 	.prepare_multicast = wl1271_op_prepare_multicast,
6021 	.configure_filter = wl1271_op_configure_filter,
6022 	.tx = wl1271_op_tx,
6023 	.set_key = wlcore_op_set_key,
6024 	.hw_scan = wl1271_op_hw_scan,
6025 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6026 	.sched_scan_start = wl1271_op_sched_scan_start,
6027 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6028 	.bss_info_changed = wl1271_op_bss_info_changed,
6029 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6030 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6031 	.conf_tx = wl1271_op_conf_tx,
6032 	.get_tsf = wl1271_op_get_tsf,
6033 	.get_survey = wl1271_op_get_survey,
6034 	.sta_state = wl12xx_op_sta_state,
6035 	.ampdu_action = wl1271_op_ampdu_action,
6036 	.tx_frames_pending = wl1271_tx_frames_pending,
6037 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6038 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6039 	.channel_switch = wl12xx_op_channel_switch,
6040 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6041 	.flush = wlcore_op_flush,
6042 	.remain_on_channel = wlcore_op_remain_on_channel,
6043 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6044 	.add_chanctx = wlcore_op_add_chanctx,
6045 	.remove_chanctx = wlcore_op_remove_chanctx,
6046 	.change_chanctx = wlcore_op_change_chanctx,
6047 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6048 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6049 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6050 	.sta_rc_update = wlcore_op_sta_rc_update,
6051 	.sta_statistics = wlcore_op_sta_statistics,
6052 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6053 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6054 };
6055 
6056 
6057 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6058 {
6059 	u8 idx;
6060 
6061 	BUG_ON(band >= 2);
6062 
6063 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6064 		wl1271_error("Illegal RX rate from HW: %d", rate);
6065 		return 0;
6066 	}
6067 
6068 	idx = wl->band_rate_to_idx[band][rate];
6069 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6070 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6071 		return 0;
6072 	}
6073 
6074 	return idx;
6075 }
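/*
 * In other words, this is a two-level lookup from the raw HW rate code to an
 * index into the band's bitrate table registered with mac80211.  Illegal or
 * unsupported codes are logged and fall back to index 0, i.e. the first
 * (lowest) rate of the band: 1 Mbps on 2.4 GHz and 6 Mbps on 5 GHz, per the
 * rate tables above.
 */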
6076 
6077 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6078 {
6079 	int i;
6080 
6081 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6082 		     oui, nic);
6083 
6084 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6085 		wl1271_warning("NIC part of the MAC address wraps around!");
6086 
6087 	for (i = 0; i < wl->num_mac_addr; i++) {
6088 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6089 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6090 		wl->addresses[i].addr[2] = (u8) oui;
6091 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6092 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6093 		wl->addresses[i].addr[5] = (u8) nic;
6094 		nic++;
6095 	}
6096 
6097 	/* we may be at most one address short */
6098 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6099 
6100 	/*
6101 	 * copy the first address into the last slot and turn on its
6102 	 * LAA bit, so it can serve as an extra address.
6103 	 */
6104 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6105 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6106 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6107 		       sizeof(wl->addresses[0]));
6108 		/* LAA bit */
6109 		wl->addresses[idx].addr[0] |= BIT(1);
6110 	}
6111 
6112 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6113 	wl->hw->wiphy->addresses = wl->addresses;
6114 }
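/*
 * A worked example with hypothetical values: oui = 0x080045, nic = 0x0000a0,
 * num_mac_addr == 2 and WLCORE_NUM_MAC_ADDRESSES == 3 would give
 *
 *	addresses[0] = 08:00:45:00:00:a0
 *	addresses[1] = 08:00:45:00:00:a1
 *
 * and, since one slot is still free, the last slot becomes a copy of
 * addresses[0] with the locally administered bit (BIT(1) of the first octet)
 * set:
 *
 *	addresses[2] = 0a:00:45:00:00:a0
 */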
6115 
6116 static int wl12xx_get_hw_info(struct wl1271 *wl)
6117 {
6118 	int ret;
6119 
6120 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6121 	if (ret < 0)
6122 		goto out;
6123 
6124 	wl->fuse_oui_addr = 0;
6125 	wl->fuse_nic_addr = 0;
6126 
6127 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6128 	if (ret < 0)
6129 		goto out;
6130 
6131 	if (wl->ops->get_mac)
6132 		ret = wl->ops->get_mac(wl);
6133 
6134 out:
6135 	return ret;
6136 }
6137 
6138 static int wl1271_register_hw(struct wl1271 *wl)
6139 {
6140 	int ret;
6141 	u32 oui_addr = 0, nic_addr = 0;
6142 	struct platform_device *pdev = wl->pdev;
6143 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6144 
6145 	if (wl->mac80211_registered)
6146 		return 0;
6147 
6148 	if (wl->nvs_len >= 12) {
6149 		/* NOTE: to simplify the casting below, we assume the
6150 		 * wl->nvs->nvs element is the first field of the
6151 		 * wl->nvs structure.
6152 		 */
6153 		u8 *nvs_ptr = (u8 *)wl->nvs;
6154 
6155 		oui_addr =
6156 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6157 		nic_addr =
6158 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6159 	}
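	/*
	 * Per the shifts above, the MAC address is stored scattered in the
	 * NVS: a device address aa:bb:cc:dd:ee:ff would sit in the NVS as
	 *
	 *	nvs[11] = aa, nvs[10] = bb, nvs[6] = cc   (OUI part)
	 *	nvs[5]  = dd, nvs[4]  = ee, nvs[3] = ff   (NIC part)
	 *
	 * since wl12xx_derive_mac_addresses() emits the OUI as octets 0-2 and
	 * the NIC as octets 3-5 of the final address.
	 */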
6160 
6161 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
6162 	if (oui_addr == 0 && nic_addr == 0) {
6163 		oui_addr = wl->fuse_oui_addr;
6164 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6165 		nic_addr = wl->fuse_nic_addr + 1;
6166 	}
6167 
6168 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6169 		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6170 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6171 			wl1271_warning("This default nvs file can be removed from the file system");
6172 		} else {
6173 			wl1271_warning("Your device performance is not optimized.");
6174 			wl1271_warning("Please use the calibrator tool to configure your device.");
6175 		}
6176 
6177 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6178 			wl1271_warning("Fuse mac address is zero. using random mac");
6179 			/* Use TI oui and a random nic */
6180 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6181 			nic_addr = get_random_int();
6182 		} else {
6183 			oui_addr = wl->fuse_oui_addr;
6184 			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6185 			nic_addr = wl->fuse_nic_addr + 1;
6186 		}
6187 	}
6188 
6189 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6190 
6191 	ret = ieee80211_register_hw(wl->hw);
6192 	if (ret < 0) {
6193 		wl1271_error("unable to register mac80211 hw: %d", ret);
6194 		goto out;
6195 	}
6196 
6197 	wl->mac80211_registered = true;
6198 
6199 	wl1271_debugfs_init(wl);
6200 
6201 	wl1271_notice("loaded");
6202 
6203 out:
6204 	return ret;
6205 }
6206 
6207 static void wl1271_unregister_hw(struct wl1271 *wl)
6208 {
6209 	if (wl->plt)
6210 		wl1271_plt_stop(wl);
6211 
6212 	ieee80211_unregister_hw(wl->hw);
6213 	wl->mac80211_registered = false;
6214 
6215 }
6216 
6217 static int wl1271_init_ieee80211(struct wl1271 *wl)
6218 {
6219 	int i;
6220 	static const u32 cipher_suites[] = {
6221 		WLAN_CIPHER_SUITE_WEP40,
6222 		WLAN_CIPHER_SUITE_WEP104,
6223 		WLAN_CIPHER_SUITE_TKIP,
6224 		WLAN_CIPHER_SUITE_CCMP,
6225 		WL1271_CIPHER_SUITE_GEM,
6226 	};
6227 
6228 	/* The tx descriptor buffer */
6229 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6230 
6231 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6232 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6233 
6234 	/* unit us */
6235 	/* FIXME: find a proper value */
6236 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6237 
6238 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6239 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6240 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6241 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6242 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6243 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6244 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6245 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6246 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6247 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6248 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6249 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6250 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6251 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6252 
6253 	wl->hw->wiphy->cipher_suites = cipher_suites;
6254 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6255 
6256 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6257 					 BIT(NL80211_IFTYPE_AP) |
6258 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6259 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6260 #ifdef CONFIG_MAC80211_MESH
6261 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6262 #endif
6263 					 BIT(NL80211_IFTYPE_P2P_GO);
6264 
6265 	wl->hw->wiphy->max_scan_ssids = 1;
6266 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6267 	wl->hw->wiphy->max_match_sets = 16;
6268 	/*
6269 	 * The maximum length of the elements in a scan probe request
6270 	 * template is the maximum possible template length, minus the
6271 	 * IEEE 802.11 header of the template.
6272 	 */
6273 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6274 			sizeof(struct ieee80211_header);
6275 
6276 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6277 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6278 		sizeof(struct ieee80211_header);
6279 
6280 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6281 
6282 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6283 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6284 				WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6285 
6286 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6287 
6288 	/* make sure all our channels fit in the scanned_ch bitmask */
6289 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6290 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6291 		     WL1271_MAX_CHANNELS);
6292 	/*
6293 	 * clear channel flags from the previous usage
6294 	 * and restore max_power & max_antenna_gain values.
6295 	 */
6296 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6297 		wl1271_band_2ghz.channels[i].flags = 0;
6298 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6299 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6300 	}
6301 
6302 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6303 		wl1271_band_5ghz.channels[i].flags = 0;
6304 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6305 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6306 	}
6307 
6308 	/*
6309 	 * We keep local copies of the band structs because we need to
6310 	 * modify them on a per-device basis.
6311 	 */
6312 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6313 	       sizeof(wl1271_band_2ghz));
6314 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6315 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6316 	       sizeof(*wl->ht_cap));
6317 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6318 	       sizeof(wl1271_band_5ghz));
6319 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6320 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6321 	       sizeof(*wl->ht_cap));
6322 
6323 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6324 		&wl->bands[NL80211_BAND_2GHZ];
6325 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6326 		&wl->bands[NL80211_BAND_5GHZ];
6327 
6328 	/*
6329 	 * allow 4 queues per mac address we support +
6330 	 * 1 cab queue per mac + one global offchannel Tx queue
6331 	 */
6332 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6333 
6334 	/* the last queue is the offchannel queue */
6335 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
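	/*
	 * For example, assuming the usual NUM_TX_QUEUES == 4 and
	 * WLCORE_NUM_MAC_ADDRESSES == 3, this yields (4 + 1) * 3 + 1 = 16
	 * hardware queues, with queue 15 being the offchannel queue.
	 */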
6336 	wl->hw->max_rates = 1;
6337 
6338 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6339 
6340 	/* the FW answers probe-requests in AP-mode */
6341 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6342 	wl->hw->wiphy->probe_resp_offload =
6343 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6344 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6345 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6346 
6347 	/* allowed interface combinations */
6348 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6349 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6350 
6351 	/* register vendor commands */
6352 	wlcore_set_vendor_commands(wl->hw->wiphy);
6353 
6354 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6355 
6356 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6357 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6358 
6359 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6360 
6361 	return 0;
6362 }
6363 
6364 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6365 				     u32 mbox_size)
6366 {
6367 	struct ieee80211_hw *hw;
6368 	struct wl1271 *wl;
6369 	int i, j, ret;
6370 	unsigned int order;
6371 
6372 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6373 	if (!hw) {
6374 		wl1271_error("could not alloc ieee80211_hw");
6375 		ret = -ENOMEM;
6376 		goto err_hw_alloc;
6377 	}
6378 
6379 	wl = hw->priv;
6380 	memset(wl, 0, sizeof(*wl));
6381 
6382 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6383 	if (!wl->priv) {
6384 		wl1271_error("could not alloc wl priv");
6385 		ret = -ENOMEM;
6386 		goto err_priv_alloc;
6387 	}
6388 
6389 	INIT_LIST_HEAD(&wl->wlvif_list);
6390 
6391 	wl->hw = hw;
6392 
6393 	/*
6394 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6395 	 * We don't allocate any additional resources here, so that's fine.
6396 	 */
6397 	for (i = 0; i < NUM_TX_QUEUES; i++)
6398 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6399 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6400 
6401 	skb_queue_head_init(&wl->deferred_rx_queue);
6402 	skb_queue_head_init(&wl->deferred_tx_queue);
6403 
6404 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6405 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6406 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6407 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6408 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6409 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6410 
6411 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6412 	if (!wl->freezable_wq) {
6413 		ret = -ENOMEM;
6414 		goto err_hw;
6415 	}
6416 
6417 	wl->channel = 0;
6418 	wl->rx_counter = 0;
6419 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6420 	wl->band = NL80211_BAND_2GHZ;
6421 	wl->channel_type = NL80211_CHAN_NO_HT;
6422 	wl->flags = 0;
6423 	wl->sg_enabled = true;
6424 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6425 	wl->recovery_count = 0;
6426 	wl->hw_pg_ver = -1;
6427 	wl->ap_ps_map = 0;
6428 	wl->ap_fw_ps_map = 0;
6429 	wl->quirks = 0;
6430 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6431 	wl->active_sta_count = 0;
6432 	wl->active_link_count = 0;
6433 	wl->fwlog_size = 0;
6434 
6435 	/* The system link is always allocated */
6436 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6437 
6438 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6439 	for (i = 0; i < wl->num_tx_desc; i++)
6440 		wl->tx_frames[i] = NULL;
6441 
6442 	spin_lock_init(&wl->wl_lock);
6443 
6444 	wl->state = WLCORE_STATE_OFF;
6445 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6446 	mutex_init(&wl->mutex);
6447 	mutex_init(&wl->flush_mutex);
6448 	init_completion(&wl->nvs_loading_complete);
6449 
6450 	order = get_order(aggr_buf_size);
6451 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6452 	if (!wl->aggr_buf) {
6453 		ret = -ENOMEM;
6454 		goto err_wq;
6455 	}
6456 	wl->aggr_buf_size = aggr_buf_size;
6457 
6458 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6459 	if (!wl->dummy_packet) {
6460 		ret = -ENOMEM;
6461 		goto err_aggr;
6462 	}
6463 
6464 	/* Allocate one page for the FW log */
6465 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6466 	if (!wl->fwlog) {
6467 		ret = -ENOMEM;
6468 		goto err_dummy_packet;
6469 	}
6470 
6471 	wl->mbox_size = mbox_size;
6472 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6473 	if (!wl->mbox) {
6474 		ret = -ENOMEM;
6475 		goto err_fwlog;
6476 	}
6477 
6478 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6479 	if (!wl->buffer_32) {
6480 		ret = -ENOMEM;
6481 		goto err_mbox;
6482 	}
6483 
6484 	return hw;
6485 
6486 err_mbox:
6487 	kfree(wl->mbox);
6488 
6489 err_fwlog:
6490 	free_page((unsigned long)wl->fwlog);
6491 
6492 err_dummy_packet:
6493 	dev_kfree_skb(wl->dummy_packet);
6494 
6495 err_aggr:
6496 	free_pages((unsigned long)wl->aggr_buf, order);
6497 
6498 err_wq:
6499 	destroy_workqueue(wl->freezable_wq);
6500 
6501 err_hw:
6502 	wl1271_debugfs_exit(wl);
6503 	kfree(wl->priv);
6504 
6505 err_priv_alloc:
6506 	ieee80211_free_hw(hw);
6507 
6508 err_hw_alloc:
6509 
6510 	return ERR_PTR(ret);
6511 }
6512 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6513 
6514 int wlcore_free_hw(struct wl1271 *wl)
6515 {
6516 	/* Unblock any fwlog readers */
6517 	mutex_lock(&wl->mutex);
6518 	wl->fwlog_size = -1;
6519 	mutex_unlock(&wl->mutex);
6520 
6521 	wlcore_sysfs_free(wl);
6522 
6523 	kfree(wl->buffer_32);
6524 	kfree(wl->mbox);
6525 	free_page((unsigned long)wl->fwlog);
6526 	dev_kfree_skb(wl->dummy_packet);
6527 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6528 
6529 	wl1271_debugfs_exit(wl);
6530 
6531 	vfree(wl->fw);
6532 	wl->fw = NULL;
6533 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6534 	kfree(wl->nvs);
6535 	wl->nvs = NULL;
6536 
6537 	kfree(wl->raw_fw_status);
6538 	kfree(wl->fw_status);
6539 	kfree(wl->tx_res_if);
6540 	destroy_workqueue(wl->freezable_wq);
6541 
6542 	kfree(wl->priv);
6543 	ieee80211_free_hw(wl->hw);
6544 
6545 	return 0;
6546 }
6547 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6548 
6549 #ifdef CONFIG_PM
6550 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6551 	.flags = WIPHY_WOWLAN_ANY,
6552 	.n_patterns = WL1271_MAX_RX_FILTERS,
6553 	.pattern_min_len = 1,
6554 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6555 };
6556 #endif
6557 
6558 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6559 {
6560 	return IRQ_WAKE_THREAD;
6561 }
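/*
 * This stub primary handler only kicks the threaded handler.  It is installed
 * by wlcore_nvs_cb() for edge-triggered interrupt lines, where keeping the
 * line masked for the whole threaded handler (as IRQF_ONESHOT would) risks
 * missing edges; level-triggered setups use IRQF_ONESHOT with the default
 * primary handler instead.
 */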
6562 
6563 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6564 {
6565 	struct wl1271 *wl = context;
6566 	struct platform_device *pdev = wl->pdev;
6567 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6568 	struct resource *res;
6569 
6570 	int ret;
6571 	irq_handler_t hardirq_fn = NULL;
6572 
6573 	if (fw) {
6574 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6575 		if (!wl->nvs) {
6576 			wl1271_error("Could not allocate nvs data");
6577 			goto out;
6578 		}
6579 		wl->nvs_len = fw->size;
6580 	} else if (pdev_data->family->nvs_name) {
6581 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6582 			     pdev_data->family->nvs_name);
6583 		wl->nvs = NULL;
6584 		wl->nvs_len = 0;
6585 	} else {
6586 		wl->nvs = NULL;
6587 		wl->nvs_len = 0;
6588 	}
6589 
6590 	ret = wl->ops->setup(wl);
6591 	if (ret < 0)
6592 		goto out_free_nvs;
6593 
6594 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6595 
6596 	/* adjust some runtime configuration parameters */
6597 	wlcore_adjust_conf(wl);
6598 
6599 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6600 	if (!res) {
6601 		wl1271_error("Could not get IRQ resource");
6602 		goto out_free_nvs;
6603 	}
6604 
6605 	wl->irq = res->start;
6606 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6607 	wl->if_ops = pdev_data->if_ops;
6608 
6609 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6610 		hardirq_fn = wlcore_hardirq;
6611 	else
6612 		wl->irq_flags |= IRQF_ONESHOT;
6613 
6614 	ret = wl12xx_set_power_on(wl);
6615 	if (ret < 0)
6616 		goto out_free_nvs;
6617 
6618 	ret = wl12xx_get_hw_info(wl);
6619 	if (ret < 0) {
6620 		wl1271_error("couldn't get hw info");
6621 		wl1271_power_off(wl);
6622 		goto out_free_nvs;
6623 	}
6624 
6625 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6626 				   wl->irq_flags, pdev->name, wl);
6627 	if (ret < 0) {
6628 		wl1271_error("interrupt configuration failed");
6629 		wl1271_power_off(wl);
6630 		goto out_free_nvs;
6631 	}
6632 
6633 #ifdef CONFIG_PM
6634 	device_init_wakeup(wl->dev, true);
6635 
6636 	ret = enable_irq_wake(wl->irq);
6637 	if (!ret) {
6638 		wl->irq_wake_enabled = true;
6639 		if (pdev_data->pwr_in_suspend)
6640 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6641 	}
6642 
6643 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6644 	if (res) {
6645 		wl->wakeirq = res->start;
6646 		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6647 		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6648 		if (ret)
6649 			wl->wakeirq = -ENODEV;
6650 	} else {
6651 		wl->wakeirq = -ENODEV;
6652 	}
6653 #endif
6654 	disable_irq(wl->irq);
6655 	wl1271_power_off(wl);
6656 
6657 	ret = wl->ops->identify_chip(wl);
6658 	if (ret < 0)
6659 		goto out_irq;
6660 
6661 	ret = wl1271_init_ieee80211(wl);
6662 	if (ret)
6663 		goto out_irq;
6664 
6665 	ret = wl1271_register_hw(wl);
6666 	if (ret)
6667 		goto out_irq;
6668 
6669 	ret = wlcore_sysfs_init(wl);
6670 	if (ret)
6671 		goto out_unreg;
6672 
6673 	wl->initialized = true;
6674 	goto out;
6675 
6676 out_unreg:
6677 	wl1271_unregister_hw(wl);
6678 
6679 out_irq:
6680 	if (wl->wakeirq >= 0)
6681 		dev_pm_clear_wake_irq(wl->dev);
6682 	device_init_wakeup(wl->dev, false);
6683 	free_irq(wl->irq, wl);
6684 
6685 out_free_nvs:
6686 	kfree(wl->nvs);
6687 
6688 out:
6689 	release_firmware(fw);
6690 	complete_all(&wl->nvs_loading_complete);
6691 }
6692 
6693 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6694 {
6695 	struct wl1271 *wl = dev_get_drvdata(dev);
6696 	struct wl12xx_vif *wlvif;
6697 	int error;
6698 
6699 	/* We do not enter elp sleep in PLT mode */
6700 	if (wl->plt)
6701 		return 0;
6702 
6703 	/* Nothing to do if no ELP mode requested */
6704 	if (wl->sleep_auth != WL1271_PSM_ELP)
6705 		return 0;
6706 
6707 	wl12xx_for_each_wlvif(wl, wlvif) {
6708 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6709 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6710 			return -EBUSY;
6711 	}
6712 
6713 	wl1271_debug(DEBUG_PSM, "chip to elp");
6714 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6715 	if (error < 0) {
6716 		wl12xx_queue_recovery_work(wl);
6717 
6718 		return error;
6719 	}
6720 
6721 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6722 
6723 	return 0;
6724 }
6725 
6726 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6727 {
6728 	struct wl1271 *wl = dev_get_drvdata(dev);
6729 	DECLARE_COMPLETION_ONSTACK(compl);
6730 	unsigned long flags;
6731 	int ret;
6732 	unsigned long start_time = jiffies;
6733 	bool pending = false;
6734 	bool recovery = false;
6735 
6736 	/* Nothing to do if no ELP mode requested */
6737 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6738 		return 0;
6739 
6740 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6741 
6742 	spin_lock_irqsave(&wl->wl_lock, flags);
6743 	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
6744 		pending = true;
6745 	else
6746 		wl->elp_compl = &compl;
6747 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6748 
6749 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6750 	if (ret < 0) {
6751 		recovery = true;
6752 		goto err;
6753 	}
6754 
6755 	if (!pending) {
6756 		ret = wait_for_completion_timeout(&compl,
6757 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6758 		if (ret == 0) {
6759 			wl1271_warning("ELP wakeup timeout!");
6760 
6761 			/* Return success to runtime PM and let the recovery work handle it */
6762 			ret = 0;
6763 			recovery = true;
6764 			goto err;
6765 		}
6766 	}
6767 
6768 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6769 
6770 	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6771 		     jiffies_to_msecs(jiffies - start_time));
6772 
6773 	return 0;
6774 
6775 err:
6776 	spin_lock_irqsave(&wl->wl_lock, flags);
6777 	wl->elp_compl = NULL;
6778 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6779 
6780 	if (recovery) {
6781 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6782 		wl12xx_queue_recovery_work(wl);
6783 	}
6784 
6785 	return ret;
6786 }
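/*
 * Summary of the wakeup handshake above: the driver writes ELPCTRL_WAKE_UP
 * and then waits up to WL1271_WAKEUP_TIMEOUT ms for the interrupt path to
 * complete wl->elp_compl.  If the IRQ is already being handled when we get
 * here (WL1271_FLAG_IRQ_RUNNING), the wait is skipped and the chip is assumed
 * awake.  On timeout we still return 0 to runtime PM and let the queued FW
 * recovery deal with the chip.
 */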
6787 
6788 static const struct dev_pm_ops wlcore_pm_ops = {
6789 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6790 			   wlcore_runtime_resume,
6791 			   NULL)
6792 };
6793 
6794 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6795 {
6796 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6797 	const char *nvs_name;
6798 	int ret = 0;
6799 
6800 	if (!wl->ops || !wl->ptable || !pdev_data)
6801 		return -EINVAL;
6802 
6803 	wl->dev = &pdev->dev;
6804 	wl->pdev = pdev;
6805 	platform_set_drvdata(pdev, wl);
6806 
6807 	if (pdev_data->family && pdev_data->family->nvs_name) {
6808 		nvs_name = pdev_data->family->nvs_name;
6809 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6810 					      nvs_name, &pdev->dev, GFP_KERNEL,
6811 					      wl, wlcore_nvs_cb);
6812 		if (ret < 0) {
6813 			wl1271_error("request_firmware_nowait failed for %s: %d",
6814 				     nvs_name, ret);
6815 			complete_all(&wl->nvs_loading_complete);
6816 		}
6817 	} else {
6818 		wlcore_nvs_cb(NULL, wl);
6819 	}
6820 
6821 	wl->dev->driver->pm = &wlcore_pm_ops;
6822 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6823 	pm_runtime_use_autosuspend(wl->dev);
6824 	pm_runtime_enable(wl->dev);
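	/*
	 * From this point the ELP handlers in wlcore_pm_ops above are driven
	 * by the pm_runtime_get_sync()/pm_runtime_put_autosuspend() pairs
	 * used throughout this file; the chip may enter ELP 50 ms after the
	 * last reference is dropped.
	 */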
6825 
6826 	return ret;
6827 }
6828 EXPORT_SYMBOL_GPL(wlcore_probe);
6829 
6830 int wlcore_remove(struct platform_device *pdev)
6831 {
6832 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6833 	struct wl1271 *wl = platform_get_drvdata(pdev);
6834 	int error;
6835 
6836 	error = pm_runtime_get_sync(wl->dev);
6837 	if (error < 0)
6838 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6839 
6840 	wl->dev->driver->pm = NULL;
6841 
6842 	if (pdev_data->family && pdev_data->family->nvs_name)
6843 		wait_for_completion(&wl->nvs_loading_complete);
6844 	if (!wl->initialized)
6845 		return 0;
6846 
6847 	if (wl->wakeirq >= 0) {
6848 		dev_pm_clear_wake_irq(wl->dev);
6849 		wl->wakeirq = -ENODEV;
6850 	}
6851 
6852 	device_init_wakeup(wl->dev, false);
6853 
6854 	if (wl->irq_wake_enabled)
6855 		disable_irq_wake(wl->irq);
6856 
6857 	wl1271_unregister_hw(wl);
6858 
6859 	pm_runtime_put_sync(wl->dev);
6860 	pm_runtime_dont_use_autosuspend(wl->dev);
6861 	pm_runtime_disable(wl->dev);
6862 
6863 	free_irq(wl->irq, wl);
6864 	wlcore_free_hw(wl);
6865 
6866 	return 0;
6867 }
6868 EXPORT_SYMBOL_GPL(wlcore_remove);
6869 
6870 u32 wl12xx_debug_level = DEBUG_NONE;
6871 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6872 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6873 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6874 
6875 module_param_named(fwlog, fwlog_param, charp, 0);
6876 MODULE_PARM_DESC(fwlog,
6877 		 "FW logger options: continuous, dbgpins or disable");
6878 
6879 module_param(fwlog_mem_blocks, int, 0600);
6880 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6881 
6882 module_param(bug_on_recovery, int, 0600);
6883 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6884 
6885 module_param(no_recovery, int, 0600);
6886 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6887 
6888 MODULE_LICENSE("GPL");
6889 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6890 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6891