xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/main.c (revision 852a53a0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
17 
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
31 
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_SUSPEND_SLEEP 100
34 #define WL1271_WAKEUP_TIMEOUT 500
35 
36 static char *fwlog_param;
37 static int fwlog_mem_blocks = -1;
38 static int bug_on_recovery = -1;
39 static int no_recovery     = -1;
40 
41 static void __wl1271_op_remove_interface(struct wl1271 *wl,
42 					 struct ieee80211_vif *vif,
43 					 bool reset_tx_queues);
44 static void wlcore_op_stop_locked(struct wl1271 *wl);
45 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
46 
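/*
 * Mark the STA link as authorized in the FW via wl12xx_cmd_set_peer_state().
 * Only valid for STA vifs; a no-op until the vif is associated, and the
 * WLVIF_FLAG_STA_STATE_SENT bit ensures the command is only sent once.
 */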
47 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
48 {
49 	int ret;
50 
51 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
52 		return -EINVAL;
53 
54 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
55 		return 0;
56 
57 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
58 		return 0;
59 
60 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
61 	if (ret < 0)
62 		return ret;
63 
64 	wl1271_info("Association completed.");
65 	return 0;
66 }
67 
68 static void wl1271_reg_notify(struct wiphy *wiphy,
69 			      struct regulatory_request *request)
70 {
71 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
72 	struct wl1271 *wl = hw->priv;
73 
74 	/* copy the current dfs region */
75 	if (request)
76 		wl->dfs_region = request->dfs_region;
77 
78 	wlcore_regdomain_config(wl);
79 }
80 
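/*
 * Enable/disable RX streaming in the FW and mirror the new state in the
 * WLVIF_FLAG_RX_STREAMING_STARTED flag.
 */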
81 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
82 				   bool enable)
83 {
84 	int ret = 0;
85 
86 	/* the caller must hold wl->mutex */
87 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
88 	if (ret < 0)
89 		goto out;
90 
91 	if (enable)
92 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
93 	else
94 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
95 out:
96 	return ret;
97 }
98 
99 /*
100  * This function is called when the rx_streaming interval
101  * has been changed or rx_streaming should be disabled.
102  */
103 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
104 {
105 	int ret = 0;
106 	int period = wl->conf.rx_streaming.interval;
107 
108 	/* don't reconfigure if rx_streaming is disabled */
109 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
110 		goto out;
111 
112 	/* reconfigure/disable according to new streaming_period */
113 	if (period &&
114 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
115 	    (wl->conf.rx_streaming.always ||
116 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
117 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
118 	else {
119 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
120 		/* don't cancel_work_sync since we might deadlock */
121 		del_timer_sync(&wlvif->rx_streaming_timer);
122 	}
123 out:
124 	return ret;
125 }
126 
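/*
 * Work item that (re-)enables RX streaming when still applicable and arms
 * rx_streaming_timer to turn it back off after the configured duration.
 */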
127 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
128 {
129 	int ret;
130 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
131 						rx_streaming_enable_work);
132 	struct wl1271 *wl = wlvif->wl;
133 
134 	mutex_lock(&wl->mutex);
135 
136 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
137 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
138 	    (!wl->conf.rx_streaming.always &&
139 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
140 		goto out;
141 
142 	if (!wl->conf.rx_streaming.interval)
143 		goto out;
144 
145 	ret = pm_runtime_get_sync(wl->dev);
146 	if (ret < 0) {
147 		pm_runtime_put_noidle(wl->dev);
148 		goto out;
149 	}
150 
151 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
152 	if (ret < 0)
153 		goto out_sleep;
154 
155 	/* stop it after some time of inactivity */
156 	mod_timer(&wlvif->rx_streaming_timer,
157 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
158 
159 out_sleep:
160 	pm_runtime_mark_last_busy(wl->dev);
161 	pm_runtime_put_autosuspend(wl->dev);
162 out:
163 	mutex_unlock(&wl->mutex);
164 }
165 
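/* Work item that disables RX streaming once the inactivity timer has fired */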
166 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
167 {
168 	int ret;
169 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
170 						rx_streaming_disable_work);
171 	struct wl1271 *wl = wlvif->wl;
172 
173 	mutex_lock(&wl->mutex);
174 
175 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
176 		goto out;
177 
178 	ret = pm_runtime_get_sync(wl->dev);
179 	if (ret < 0) {
180 		pm_runtime_put_noidle(wl->dev);
181 		goto out;
182 	}
183 
184 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
185 	if (ret)
186 		goto out_sleep;
187 
188 out_sleep:
189 	pm_runtime_mark_last_busy(wl->dev);
190 	pm_runtime_put_autosuspend(wl->dev);
191 out:
192 	mutex_unlock(&wl->mutex);
193 }
194 
195 static void wl1271_rx_streaming_timer(struct timer_list *t)
196 {
197 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
198 	struct wl1271 *wl = wlvif->wl;
199 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
200 }
201 
202 /* wl->mutex must be taken */
203 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
204 {
205 	/* if the watchdog is not armed, don't do anything */
206 	if (wl->tx_allocated_blocks == 0)
207 		return;
208 
209 	cancel_delayed_work(&wl->tx_watchdog_work);
210 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
211 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
212 }
213 
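/*
 * Propagate rate-control/HT updates to the FW: for mesh vifs via
 * wl1271_acx_set_ht_capabilities(), otherwise through the chip-specific
 * wlcore_hw_sta_rc_update() hook.
 */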
214 static void wlcore_rc_update_work(struct work_struct *work)
215 {
216 	int ret;
217 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
218 						rc_update_work);
219 	struct wl1271 *wl = wlvif->wl;
220 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
221 
222 	mutex_lock(&wl->mutex);
223 
224 	if (unlikely(wl->state != WLCORE_STATE_ON))
225 		goto out;
226 
227 	ret = pm_runtime_get_sync(wl->dev);
228 	if (ret < 0) {
229 		pm_runtime_put_noidle(wl->dev);
230 		goto out;
231 	}
232 
233 	if (ieee80211_vif_is_mesh(vif)) {
234 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
235 						     true, wlvif->sta.hlid);
236 		if (ret < 0)
237 			goto out_sleep;
238 	} else {
239 		wlcore_hw_sta_rc_update(wl, wlvif);
240 	}
241 
242 out_sleep:
243 	pm_runtime_mark_last_busy(wl->dev);
244 	pm_runtime_put_autosuspend(wl->dev);
245 out:
246 	mutex_unlock(&wl->mutex);
247 }
248 
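/*
 * Tx watchdog: if the FW freed no Tx blocks within the configured timeout
 * and there is no legitimate reason for it (ROC, scan, or an AP with
 * sleeping stations), assume the FW Tx path is stuck and start recovery.
 */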
249 static void wl12xx_tx_watchdog_work(struct work_struct *work)
250 {
251 	struct delayed_work *dwork;
252 	struct wl1271 *wl;
253 
254 	dwork = to_delayed_work(work);
255 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
256 
257 	mutex_lock(&wl->mutex);
258 
259 	if (unlikely(wl->state != WLCORE_STATE_ON))
260 		goto out;
261 
262 	/* Tx went out in the meantime - everything is ok */
263 	if (unlikely(wl->tx_allocated_blocks == 0))
264 		goto out;
265 
266 	/*
267 	 * if a ROC is in progress, we might not have any Tx for a long
268 	 * time (e.g. pending Tx on the non-ROC channels)
269 	 */
270 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
271 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
272 			     wl->conf.tx.tx_watchdog_timeout);
273 		wl12xx_rearm_tx_watchdog_locked(wl);
274 		goto out;
275 	}
276 
277 	/*
278 	 * if a scan is in progress, we might not have any Tx for a long
279 	 * time
280 	 */
281 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
282 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
283 			     wl->conf.tx.tx_watchdog_timeout);
284 		wl12xx_rearm_tx_watchdog_locked(wl);
285 		goto out;
286 	}
287 
288 	/*
289 	 * AP might cache a frame for a long time for a sleeping station,
290 	 * so rearm the timer if there's an AP interface with stations. If
291 	 * Tx is genuinely stuck, we will hopefully discover it when all
292 	 * stations are removed due to inactivity.
293 	 */
294 	if (wl->active_sta_count) {
295 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
296 			     "%d stations",
297 			     wl->conf.tx.tx_watchdog_timeout,
298 			     wl->active_sta_count);
299 		wl12xx_rearm_tx_watchdog_locked(wl);
300 		goto out;
301 	}
302 
303 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
304 		     wl->conf.tx.tx_watchdog_timeout);
305 	wl12xx_queue_recovery_work(wl);
306 
307 out:
308 	mutex_unlock(&wl->mutex);
309 }
310 
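/* apply the fwlog/recovery module parameters on top of the default conf */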
311 static void wlcore_adjust_conf(struct wl1271 *wl)
312 {
313 
314 	if (fwlog_param) {
315 		if (!strcmp(fwlog_param, "continuous")) {
316 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
317 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
318 		} else if (!strcmp(fwlog_param, "dbgpins")) {
319 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
320 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
321 		} else if (!strcmp(fwlog_param, "disable")) {
322 			wl->conf.fwlog.mem_blocks = 0;
323 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
324 		} else {
325 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
326 		}
327 	}
328 
329 	if (bug_on_recovery != -1)
330 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
331 
332 	if (no_recovery != -1)
333 		wl->conf.recovery.no_recovery = (u8) no_recovery;
334 }
335 
336 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
337 					struct wl12xx_vif *wlvif,
338 					u8 hlid, u8 tx_pkts)
339 {
340 	bool fw_ps;
341 
342 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
343 
344 	/*
345 	 * Wake up from high-level PS if the STA is asleep with too few
346 	 * packets in FW or if the STA is awake.
347 	 */
348 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
349 		wl12xx_ps_link_end(wl, wlvif, hlid);
350 
351 	/*
352 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
353 	 * Make an exception if this is the only connected link. In this
354 	 * case FW-memory congestion is less of a problem.
355 	 * Note that a single connected STA means 2*ap_count + 1 active links,
356 	 * since we must account for the global and broadcast AP links
357 	 * for each AP. The "fw_ps" check assures us the other link is a STA
358 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
359 	 */
360 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
361 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
362 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
363 }
364 
365 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
366 					   struct wl12xx_vif *wlvif,
367 					   struct wl_fw_status *status)
368 {
369 	unsigned long cur_fw_ps_map;
370 	u8 hlid;
371 
372 	cur_fw_ps_map = status->link_ps_bitmap;
373 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
374 		wl1271_debug(DEBUG_PSM,
375 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
376 			     wl->ap_fw_ps_map, cur_fw_ps_map,
377 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
378 
379 		wl->ap_fw_ps_map = cur_fw_ps_map;
380 	}
381 
382 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
383 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
384 					    wl->links[hlid].allocated_pkts);
385 }
386 
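/*
 * Read and parse the raw FW status: update the per-queue and per-link
 * freed-packet counters and the Tx block accounting (rearming or cancelling
 * the Tx watchdog), AP link PS state and the host-chipset time offset.
 */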
387 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
388 {
389 	struct wl12xx_vif *wlvif;
390 	u32 old_tx_blk_count = wl->tx_blocks_available;
391 	int avail, freed_blocks;
392 	int i;
393 	int ret;
394 	struct wl1271_link *lnk;
395 
396 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
397 				   wl->raw_fw_status,
398 				   wl->fw_status_len, false);
399 	if (ret < 0)
400 		return ret;
401 
402 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
403 
404 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
405 		     "drv_rx_counter = %d, tx_results_counter = %d)",
406 		     status->intr,
407 		     status->fw_rx_counter,
408 		     status->drv_rx_counter,
409 		     status->tx_results_counter);
410 
411 	for (i = 0; i < NUM_TX_QUEUES; i++) {
412 		/* prevent wrap-around in freed-packets counter */
413 		wl->tx_allocated_pkts[i] -=
414 				(status->counters.tx_released_pkts[i] -
415 				wl->tx_pkts_freed[i]) & 0xff;
416 
417 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
418 	}
419 
420 
421 	for_each_set_bit(i, wl->links_map, wl->num_links) {
422 		u8 diff;
423 		lnk = &wl->links[i];
424 
425 		/* prevent wrap-around in freed-packets counter */
426 		diff = (status->counters.tx_lnk_free_pkts[i] -
427 		       lnk->prev_freed_pkts) & 0xff;
428 
429 		if (diff == 0)
430 			continue;
431 
432 		lnk->allocated_pkts -= diff;
433 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
434 
435 		/* accumulate the prev_freed_pkts counter */
436 		lnk->total_freed_pkts += diff;
437 	}
438 
439 	/* prevent wrap-around in total blocks counter */
440 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
441 		freed_blocks = status->total_released_blks -
442 			       wl->tx_blocks_freed;
443 	else
444 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
445 			       status->total_released_blks;
446 
447 	wl->tx_blocks_freed = status->total_released_blks;
448 
449 	wl->tx_allocated_blocks -= freed_blocks;
450 
451 	/*
452 	 * If the FW freed some blocks:
453 	 * If we still have allocated blocks - re-arm the timer, Tx is
454 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
455 	 */
456 	if (freed_blocks) {
457 		if (wl->tx_allocated_blocks)
458 			wl12xx_rearm_tx_watchdog_locked(wl);
459 		else
460 			cancel_delayed_work(&wl->tx_watchdog_work);
461 	}
462 
463 	avail = status->tx_total - wl->tx_allocated_blocks;
464 
465 	/*
466 	 * The FW might change the total number of TX memblocks before
467 	 * we get a notification about blocks being released. Thus, the
468 	 * available blocks calculation might yield a temporary result
469 	 * which is lower than the actual available blocks. Keeping in
470 	 * mind that only blocks that were allocated can be moved from
471 	 * TX to RX, tx_blocks_available should never decrease here.
472 	 */
473 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
474 				      avail);
475 
476 	/* if more blocks are available now, tx work can be scheduled */
477 	if (wl->tx_blocks_available > old_tx_blk_count)
478 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
479 
480 	/* for AP update num of allocated TX blocks per link and ps status */
481 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
482 		wl12xx_irq_update_links_status(wl, wlvif, status);
483 	}
484 
485 	/* update the host-chipset time offset */
486 	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
487 		(s64)(status->fw_localtime);
488 
489 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
490 
491 	return 0;
492 }
493 
494 static void wl1271_flush_deferred_work(struct wl1271 *wl)
495 {
496 	struct sk_buff *skb;
497 
498 	/* Pass all received frames to the network stack */
499 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
500 		ieee80211_rx_ni(wl->hw, skb);
501 
502 	/* Return sent skbs to the network stack */
503 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
504 		ieee80211_tx_status_ni(wl->hw, skb);
505 }
506 
507 static void wl1271_netstack_work(struct work_struct *work)
508 {
509 	struct wl1271 *wl =
510 		container_of(work, struct wl1271, netstack_work);
511 
512 	do {
513 		wl1271_flush_deferred_work(wl);
514 	} while (skb_queue_len(&wl->deferred_rx_queue));
515 }
516 
517 #define WL1271_IRQ_MAX_LOOPS 256
518 
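/*
 * Main interrupt work, called with wl->mutex held: read the FW status and
 * handle data, event and watchdog interrupts, looping up to
 * WL1271_IRQ_MAX_LOOPS times (a single pass for edge-triggered IRQs).
 */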
519 static int wlcore_irq_locked(struct wl1271 *wl)
520 {
521 	int ret = 0;
522 	u32 intr;
523 	int loopcount = WL1271_IRQ_MAX_LOOPS;
524 	bool run_tx_queue = true;
525 	bool done = false;
526 	unsigned int defer_count;
527 	unsigned long flags;
528 
529 	/*
530 	 * In case an edge-triggered interrupt must be used, we cannot iterate
531 	 * more than once without introducing race conditions with the hardirq.
532 	 */
533 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
534 		loopcount = 1;
535 
536 	wl1271_debug(DEBUG_IRQ, "IRQ work");
537 
538 	if (unlikely(wl->state != WLCORE_STATE_ON))
539 		goto out;
540 
541 	ret = pm_runtime_get_sync(wl->dev);
542 	if (ret < 0) {
543 		pm_runtime_put_noidle(wl->dev);
544 		goto out;
545 	}
546 
547 	while (!done && loopcount--) {
548 		smp_mb__after_atomic();
549 
550 		ret = wlcore_fw_status(wl, wl->fw_status);
551 		if (ret < 0)
552 			goto err_ret;
553 
554 		wlcore_hw_tx_immediate_compl(wl);
555 
556 		intr = wl->fw_status->intr;
557 		intr &= WLCORE_ALL_INTR_MASK;
558 		if (!intr) {
559 			done = true;
560 			continue;
561 		}
562 
563 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
564 			wl1271_error("HW watchdog interrupt received! starting recovery.");
565 			wl->watchdog_recovery = true;
566 			ret = -EIO;
567 
568 			/* restarting the chip. ignore any other interrupt. */
569 			goto err_ret;
570 		}
571 
572 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
573 			wl1271_error("SW watchdog interrupt received! "
574 				     "starting recovery.");
575 			wl->watchdog_recovery = true;
576 			ret = -EIO;
577 
578 			/* restarting the chip. ignore any other interrupt. */
579 			goto err_ret;
580 		}
581 
582 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
583 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
584 
585 			ret = wlcore_rx(wl, wl->fw_status);
586 			if (ret < 0)
587 				goto err_ret;
588 
589 			/* Check if any tx blocks were freed */
590 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
591 				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
592 					if (!wl1271_tx_total_queue_count(wl))
593 						run_tx_queue = false;
594 					spin_unlock_irqrestore(&wl->wl_lock, flags);
595 				}
596 
597 				/*
598 				 * In order to avoid starvation of the TX path,
599 				 * call the work function directly.
600 				 */
601 				if (run_tx_queue) {
602 					ret = wlcore_tx_work_locked(wl);
603 					if (ret < 0)
604 						goto err_ret;
605 				}
606 			}
607 
608 			/* check for tx results */
609 			ret = wlcore_hw_tx_delayed_compl(wl);
610 			if (ret < 0)
611 				goto err_ret;
612 
613 			/* Make sure the deferred queues don't get too long */
614 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
615 				      skb_queue_len(&wl->deferred_rx_queue);
616 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
617 				wl1271_flush_deferred_work(wl);
618 		}
619 
620 		if (intr & WL1271_ACX_INTR_EVENT_A) {
621 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
622 			ret = wl1271_event_handle(wl, 0);
623 			if (ret < 0)
624 				goto err_ret;
625 		}
626 
627 		if (intr & WL1271_ACX_INTR_EVENT_B) {
628 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
629 			ret = wl1271_event_handle(wl, 1);
630 			if (ret < 0)
631 				goto err_ret;
632 		}
633 
634 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
635 			wl1271_debug(DEBUG_IRQ,
636 				     "WL1271_ACX_INTR_INIT_COMPLETE");
637 
638 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
639 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
640 	}
641 
642 err_ret:
643 	pm_runtime_mark_last_busy(wl->dev);
644 	pm_runtime_put_autosuspend(wl->dev);
645 
646 out:
647 	return ret;
648 }
649 
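/*
 * Threaded IRQ handler: complete a pending ELP wakeup, defer the work if the
 * device is suspended, otherwise run wlcore_irq_locked() directly and queue
 * Tx work or recovery as needed.
 */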
650 static irqreturn_t wlcore_irq(int irq, void *cookie)
651 {
652 	int ret;
653 	unsigned long flags;
654 	struct wl1271 *wl = cookie;
655 	bool queue_tx_work = true;
656 
657 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
658 
659 	/* signal the ELP wakeup completion, if anyone is waiting on it */
660 	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
661 		spin_lock_irqsave(&wl->wl_lock, flags);
662 		if (wl->elp_compl)
663 			complete(wl->elp_compl);
664 		spin_unlock_irqrestore(&wl->wl_lock, flags);
665 	}
666 
667 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
668 		/* don't enqueue work right now; mark it as pending */
669 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
670 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
671 		spin_lock_irqsave(&wl->wl_lock, flags);
672 		disable_irq_nosync(wl->irq);
673 		pm_wakeup_event(wl->dev, 0);
674 		spin_unlock_irqrestore(&wl->wl_lock, flags);
675 		goto out_handled;
676 	}
677 
678 	/* TX might be handled here, avoid redundant work */
679 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
680 	cancel_work_sync(&wl->tx_work);
681 
682 	mutex_lock(&wl->mutex);
683 
684 	ret = wlcore_irq_locked(wl);
685 	if (ret)
686 		wl12xx_queue_recovery_work(wl);
687 
688 	/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
689 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
690 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
691 		if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
692 			if (!wl1271_tx_total_queue_count(wl))
693 				queue_tx_work = false;
694 			spin_unlock_irqrestore(&wl->wl_lock, flags);
695 		}
696 		if (queue_tx_work)
697 			ieee80211_queue_work(wl->hw, &wl->tx_work);
698 	}
699 
700 	mutex_unlock(&wl->mutex);
701 
702 out_handled:
703 	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
704 
705 	return IRQ_HANDLED;
706 }
707 
708 struct vif_counter_data {
709 	u8 counter;
710 
711 	struct ieee80211_vif *cur_vif;
712 	bool cur_vif_running;
713 };
714 
715 static void wl12xx_vif_count_iter(void *data, u8 *mac,
716 				  struct ieee80211_vif *vif)
717 {
718 	struct vif_counter_data *counter = data;
719 
720 	counter->counter++;
721 	if (counter->cur_vif == vif)
722 		counter->cur_vif_running = true;
723 }
724 
725 /* caller must not hold wl->mutex, as it might deadlock */
726 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
727 			       struct ieee80211_vif *cur_vif,
728 			       struct vif_counter_data *data)
729 {
730 	memset(data, 0, sizeof(*data));
731 	data->cur_vif = cur_vif;
732 
733 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
734 					    wl12xx_vif_count_iter, data);
735 }
736 
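/*
 * Select the PLT, multi-role or single-role firmware image, load it with
 * request_firmware() and keep a vmalloc'ed copy in wl->fw. Nothing is done
 * if the requested firmware type is already loaded.
 */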
737 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
738 {
739 	const struct firmware *fw;
740 	const char *fw_name;
741 	enum wl12xx_fw_type fw_type;
742 	int ret;
743 
744 	if (plt) {
745 		fw_type = WL12XX_FW_TYPE_PLT;
746 		fw_name = wl->plt_fw_name;
747 	} else {
748 		/*
749 		 * we can't call wl12xx_get_vif_count() here because
750 		 * wl->mutex is taken, so use the cached last_vif_count value
751 		 */
752 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
753 			fw_type = WL12XX_FW_TYPE_MULTI;
754 			fw_name = wl->mr_fw_name;
755 		} else {
756 			fw_type = WL12XX_FW_TYPE_NORMAL;
757 			fw_name = wl->sr_fw_name;
758 		}
759 	}
760 
761 	if (wl->fw_type == fw_type)
762 		return 0;
763 
764 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
765 
766 	ret = request_firmware(&fw, fw_name, wl->dev);
767 
768 	if (ret < 0) {
769 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
770 		return ret;
771 	}
772 
773 	if (fw->size % 4) {
774 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
775 			     fw->size);
776 		ret = -EILSEQ;
777 		goto out;
778 	}
779 
780 	vfree(wl->fw);
781 	wl->fw_type = WL12XX_FW_TYPE_NONE;
782 	wl->fw_len = fw->size;
783 	wl->fw = vmalloc(wl->fw_len);
784 
785 	if (!wl->fw) {
786 		wl1271_error("could not allocate memory for the firmware");
787 		ret = -ENOMEM;
788 		goto out;
789 	}
790 
791 	memcpy(wl->fw, fw->data, wl->fw_len);
792 	ret = 0;
793 	wl->fw_type = fw_type;
794 out:
795 	release_firmware(fw);
796 
797 	return ret;
798 }
799 
800 void wl12xx_queue_recovery_work(struct wl1271 *wl)
801 {
802 	/* Avoid a recursive recovery */
803 	if (wl->state == WLCORE_STATE_ON) {
804 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
805 				  &wl->flags));
806 
807 		wl->state = WLCORE_STATE_RESTARTING;
808 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
809 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
810 	}
811 }
812 
813 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
814 {
815 	size_t len;
816 
817 	/* Make sure we have enough room */
818 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
819 
820 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
821 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
822 	wl->fwlog_size += len;
823 
824 	return len;
825 }
826 
827 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
828 {
829 	u32 end_of_log = 0;
830 	int error;
831 
832 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
833 		return;
834 
835 	wl1271_info("Reading FW panic log");
836 
837 	/*
838 	 * Make sure the chip is awake and the logger isn't active.
839 	 * Do not send a stop fwlog command if the fw is hung or if
840 	 * dbgpins are used (due to some fw bug).
841 	 */
842 	error = pm_runtime_get_sync(wl->dev);
843 	if (error < 0) {
844 		pm_runtime_put_noidle(wl->dev);
845 		return;
846 	}
847 	if (!wl->watchdog_recovery &&
848 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
849 		wl12xx_cmd_stop_fwlog(wl);
850 
851 	/* Traverse the memory blocks linked list */
852 	do {
853 		end_of_log = wlcore_event_fw_logger(wl);
854 		if (end_of_log == 0) {
855 			msleep(100);
856 			end_of_log = wlcore_event_fw_logger(wl);
857 		}
858 	} while (end_of_log != 0);
859 }
860 
861 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
862 				   u8 hlid, struct ieee80211_sta *sta)
863 {
864 	struct wl1271_station *wl_sta;
865 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
866 
867 	wl_sta = (void *)sta->drv_priv;
868 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
869 
870 	/*
871 	 * increment the initial seq number on recovery to account for
872 	 * transmitted packets that we haven't yet got in the FW status
873 	 */
874 	if (wlvif->encryption_type == KEY_GEM)
875 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
876 
877 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
878 		wl_sta->total_freed_pkts += sqn_recovery_padding;
879 }
880 
881 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
882 					struct wl12xx_vif *wlvif,
883 					u8 hlid, const u8 *addr)
884 {
885 	struct ieee80211_sta *sta;
886 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
887 
888 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
889 		    is_zero_ether_addr(addr)))
890 		return;
891 
892 	rcu_read_lock();
893 	sta = ieee80211_find_sta(vif, addr);
894 	if (sta)
895 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
896 	rcu_read_unlock();
897 }
898 
899 static void wlcore_print_recovery(struct wl1271 *wl)
900 {
901 	u32 pc = 0;
902 	u32 hint_sts = 0;
903 	int ret;
904 
905 	wl1271_info("Hardware recovery in progress. FW ver: %s",
906 		    wl->chip.fw_ver_str);
907 
908 	/* change partitions momentarily so we can read the FW pc */
909 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
910 	if (ret < 0)
911 		return;
912 
913 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
914 	if (ret < 0)
915 		return;
916 
917 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
918 	if (ret < 0)
919 		return;
920 
921 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
922 				pc, hint_sts, ++wl->recovery_count);
923 
924 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
925 }
926 
927 
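/*
 * Recovery work: dump the FW panic log and PC, tear down all interfaces
 * (saving the Tx freed-packets counters of associated STA links), stop the
 * device and ask mac80211 to restart the hardware.
 */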
928 static void wl1271_recovery_work(struct work_struct *work)
929 {
930 	struct wl1271 *wl =
931 		container_of(work, struct wl1271, recovery_work);
932 	struct wl12xx_vif *wlvif;
933 	struct ieee80211_vif *vif;
934 	int error;
935 
936 	mutex_lock(&wl->mutex);
937 
938 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
939 		goto out_unlock;
940 
941 	error = pm_runtime_get_sync(wl->dev);
942 	if (error < 0) {
943 		wl1271_warning("Enable for recovery failed");
944 		pm_runtime_put_noidle(wl->dev);
945 	}
946 	wlcore_disable_interrupts_nosync(wl);
947 
948 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
949 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
950 			wl12xx_read_fwlog_panic(wl);
951 		wlcore_print_recovery(wl);
952 	}
953 
954 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
955 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
956 
957 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
958 
959 	if (wl->conf.recovery.no_recovery) {
960 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
961 		goto out_unlock;
962 	}
963 
964 	/* Prevent spurious TX during FW restart */
965 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
966 
967 	/* reboot the chipset */
968 	while (!list_empty(&wl->wlvif_list)) {
969 		wlvif = list_first_entry(&wl->wlvif_list,
970 				       struct wl12xx_vif, list);
971 		vif = wl12xx_wlvif_to_vif(wlvif);
972 
973 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
974 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
975 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
976 						    vif->bss_conf.bssid);
977 		}
978 
979 		__wl1271_op_remove_interface(wl, vif, false);
980 	}
981 
982 	wlcore_op_stop_locked(wl);
983 	pm_runtime_mark_last_busy(wl->dev);
984 	pm_runtime_put_autosuspend(wl->dev);
985 
986 	ieee80211_restart_hw(wl->hw);
987 
988 	/*
989 	 * It's safe to enable TX now - the queues are stopped after a request
990 	 * to restart the HW.
991 	 */
992 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
993 
994 out_unlock:
995 	wl->watchdog_recovery = false;
996 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
997 	mutex_unlock(&wl->mutex);
998 }
999 
1000 static int wlcore_fw_wakeup(struct wl1271 *wl)
1001 {
1002 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1003 }
1004 
1005 static int wl1271_setup(struct wl1271 *wl)
1006 {
1007 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1008 	if (!wl->raw_fw_status)
1009 		goto err;
1010 
1011 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1012 	if (!wl->fw_status)
1013 		goto err;
1014 
1015 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1016 	if (!wl->tx_res_if)
1017 		goto err;
1018 
1019 	return 0;
1020 err:
1021 	kfree(wl->fw_status);
1022 	kfree(wl->raw_fw_status);
1023 	return -ENOMEM;
1024 }
1025 
1026 static int wl12xx_set_power_on(struct wl1271 *wl)
1027 {
1028 	int ret;
1029 
1030 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1031 	ret = wl1271_power_on(wl);
1032 	if (ret < 0)
1033 		goto out;
1034 	msleep(WL1271_POWER_ON_SLEEP);
1035 	wl1271_io_reset(wl);
1036 	wl1271_io_init(wl);
1037 
1038 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1039 	if (ret < 0)
1040 		goto fail;
1041 
1042 	/* ELP module wake up */
1043 	ret = wlcore_fw_wakeup(wl);
1044 	if (ret < 0)
1045 		goto fail;
1046 
1047 out:
1048 	return ret;
1049 
1050 fail:
1051 	wl1271_power_off(wl);
1052 	return ret;
1053 }
1054 
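/* power on the chip, set the bus block size and fetch the right firmware */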
1055 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1056 {
1057 	int ret = 0;
1058 
1059 	ret = wl12xx_set_power_on(wl);
1060 	if (ret < 0)
1061 		goto out;
1062 
1063 	/*
1064 	 * For wl127x based devices we could use the default block
1065 	 * size (512 bytes), but due to a bug in the sdio driver, we
1066 	 * need to set it explicitly after the chip is powered on.  To
1067 	 * simplify the code and since the performance impact is
1068 	 * negligible, we use the same block size for all different
1069 	 * chip types.
1070 	 *
1071 	 * Check if the bus supports blocksize alignment and, if it
1072 	 * doesn't, make sure we don't have the quirk.
1073 	 */
1074 	if (!wl1271_set_block_size(wl))
1075 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1076 
1077 	/* TODO: make sure the lower driver has set things up correctly */
1078 
1079 	ret = wl1271_setup(wl);
1080 	if (ret < 0)
1081 		goto out;
1082 
1083 	ret = wl12xx_fetch_firmware(wl, plt);
1084 	if (ret < 0) {
1085 		kfree(wl->fw_status);
1086 		kfree(wl->raw_fw_status);
1087 		kfree(wl->tx_res_if);
1088 	}
1089 
1090 out:
1091 	return ret;
1092 }
1093 
1094 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1095 {
1096 	int retries = WL1271_BOOT_RETRIES;
1097 	struct wiphy *wiphy = wl->hw->wiphy;
1098 
1099 	static const char* const PLT_MODE[] = {
1100 		"PLT_OFF",
1101 		"PLT_ON",
1102 		"PLT_FEM_DETECT",
1103 		"PLT_CHIP_AWAKE"
1104 	};
1105 
1106 	int ret;
1107 
1108 	mutex_lock(&wl->mutex);
1109 
1110 	wl1271_notice("power up");
1111 
1112 	if (wl->state != WLCORE_STATE_OFF) {
1113 		wl1271_error("cannot go into PLT state because not "
1114 			     "in off state: %d", wl->state);
1115 		ret = -EBUSY;
1116 		goto out;
1117 	}
1118 
1119 	/* Indicate to lower levels that we are now in PLT mode */
1120 	wl->plt = true;
1121 	wl->plt_mode = plt_mode;
1122 
1123 	while (retries) {
1124 		retries--;
1125 		ret = wl12xx_chip_wakeup(wl, true);
1126 		if (ret < 0)
1127 			goto power_off;
1128 
1129 		if (plt_mode != PLT_CHIP_AWAKE) {
1130 			ret = wl->ops->plt_init(wl);
1131 			if (ret < 0)
1132 				goto power_off;
1133 		}
1134 
1135 		wl->state = WLCORE_STATE_ON;
1136 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1137 			      PLT_MODE[plt_mode],
1138 			      wl->chip.fw_ver_str);
1139 
1140 		/* update hw/fw version info in wiphy struct */
1141 		wiphy->hw_version = wl->chip.id;
1142 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1143 			sizeof(wiphy->fw_version));
1144 
1145 		goto out;
1146 
1147 power_off:
1148 		wl1271_power_off(wl);
1149 	}
1150 
1151 	wl->plt = false;
1152 	wl->plt_mode = PLT_OFF;
1153 
1154 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1155 		     WL1271_BOOT_RETRIES);
1156 out:
1157 	mutex_unlock(&wl->mutex);
1158 
1159 	return ret;
1160 }
1161 
1162 int wl1271_plt_stop(struct wl1271 *wl)
1163 {
1164 	int ret = 0;
1165 
1166 	wl1271_notice("power down");
1167 
1168 	/*
1169 	 * Interrupts must be disabled before setting the state to OFF.
1170 	 * Otherwise, the interrupt handler might be called and exit without
1171 	 * reading the interrupt status.
1172 	 */
1173 	wlcore_disable_interrupts(wl);
1174 	mutex_lock(&wl->mutex);
1175 	if (!wl->plt) {
1176 		mutex_unlock(&wl->mutex);
1177 
1178 		/*
1179 		 * This will not necessarily enable interrupts as interrupts
1180 		 * may have been disabled when op_stop was called. It will,
1181 		 * however, balance the above call to disable_interrupts().
1182 		 */
1183 		wlcore_enable_interrupts(wl);
1184 
1185 		wl1271_error("cannot power down because not in PLT "
1186 			     "state: %d", wl->state);
1187 		ret = -EBUSY;
1188 		goto out;
1189 	}
1190 
1191 	mutex_unlock(&wl->mutex);
1192 
1193 	wl1271_flush_deferred_work(wl);
1194 	cancel_work_sync(&wl->netstack_work);
1195 	cancel_work_sync(&wl->recovery_work);
1196 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1197 
1198 	mutex_lock(&wl->mutex);
1199 	wl1271_power_off(wl);
1200 	wl->flags = 0;
1201 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1202 	wl->state = WLCORE_STATE_OFF;
1203 	wl->plt = false;
1204 	wl->plt_mode = PLT_OFF;
1205 	wl->rx_counter = 0;
1206 	mutex_unlock(&wl->mutex);
1207 
1208 out:
1209 	return ret;
1210 }
1211 
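/*
 * mac80211 Tx entry point: map the skb to a link (hlid) and AC queue, drop
 * it if the link is invalid or the queue is hard-stopped, apply the
 * per-queue high watermark, and kick the Tx work unless the FW is out of
 * blocks or the interrupt path is already handling Tx.
 */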
1212 static void wl1271_op_tx(struct ieee80211_hw *hw,
1213 			 struct ieee80211_tx_control *control,
1214 			 struct sk_buff *skb)
1215 {
1216 	struct wl1271 *wl = hw->priv;
1217 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1218 	struct ieee80211_vif *vif = info->control.vif;
1219 	struct wl12xx_vif *wlvif = NULL;
1220 	unsigned long flags;
1221 	int q, mapping;
1222 	u8 hlid;
1223 
1224 	if (!vif) {
1225 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1226 		ieee80211_free_txskb(hw, skb);
1227 		return;
1228 	}
1229 
1230 	wlvif = wl12xx_vif_to_data(vif);
1231 	mapping = skb_get_queue_mapping(skb);
1232 	q = wl1271_tx_get_queue(mapping);
1233 
1234 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1235 
1236 	spin_lock_irqsave(&wl->wl_lock, flags);
1237 
1238 	/*
1239 	 * drop the packet if the link is invalid or the queue is stopped
1240 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1241 	 * allow these packets through.
1242 	 */
1243 	if (hlid == WL12XX_INVALID_LINK_ID ||
1244 	    (!test_bit(hlid, wlvif->links_map)) ||
1245 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1246 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1247 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1248 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1249 		ieee80211_free_txskb(hw, skb);
1250 		goto out;
1251 	}
1252 
1253 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1254 		     hlid, q, skb->len);
1255 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1256 
1257 	wl->tx_queue_count[q]++;
1258 	wlvif->tx_queue_count[q]++;
1259 
1260 	/*
1261 	 * The workqueue is slow to process the tx_queue and we need to stop
1262 	 * the queue here, otherwise the queue will get too long.
1263 	 */
1264 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1265 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1266 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1267 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1268 		wlcore_stop_queue_locked(wl, wlvif, q,
1269 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1270 	}
1271 
1272 	/*
1273 	 * The chip specific setup must run before the first TX packet -
1274 	 * before that, the tx_work will not be initialized!
1275 	 */
1276 
1277 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1278 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1279 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1280 
1281 out:
1282 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1283 }
1284 
1285 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1286 {
1287 	unsigned long flags;
1288 	int q;
1289 
1290 	/* no need to queue a new dummy packet if one is already pending */
1291 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1292 		return 0;
1293 
1294 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1295 
1296 	spin_lock_irqsave(&wl->wl_lock, flags);
1297 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1298 	wl->tx_queue_count[q]++;
1299 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1300 
1301 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1302 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1303 		return wlcore_tx_work_locked(wl);
1304 
1305 	/*
1306 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1307 	 * interrupt handler function
1308 	 */
1309 	return 0;
1310 }
1311 
1312 /*
1313  * The size of the dummy packet should be at least 1400 bytes. However, in
1314  * order to minimize the number of bus transactions, aligning it to 512-byte
1315  * boundaries could be beneficial, performance-wise.
1316  */
1317 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1318 
1319 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1320 {
1321 	struct sk_buff *skb;
1322 	struct ieee80211_hdr_3addr *hdr;
1323 	unsigned int dummy_packet_size;
1324 
1325 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1326 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1327 
1328 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1329 	if (!skb) {
1330 		wl1271_warning("Failed to allocate a dummy packet skb");
1331 		return NULL;
1332 	}
1333 
1334 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1335 
1336 	hdr = skb_put_zero(skb, sizeof(*hdr));
1337 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1338 					 IEEE80211_STYPE_NULLFUNC |
1339 					 IEEE80211_FCTL_TODS);
1340 
1341 	skb_put_zero(skb, dummy_packet_size);
1342 
1343 	/* Dummy packets require the TID to be management */
1344 	skb->priority = WL1271_TID_MGMT;
1345 
1346 	/* Initialize all fields that might be used */
1347 	skb_set_queue_mapping(skb, 0);
1348 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1349 
1350 	return skb;
1351 }
1352 
1353 
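/*
 * Check that a WoWLAN pattern can be expressed within the FW RX filter
 * limits (maximum number of fields and total fields buffer size).
 */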
1354 static int
1355 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1356 {
1357 	int num_fields = 0, in_field = 0, fields_size = 0;
1358 	int i, pattern_len = 0;
1359 
1360 	if (!p->mask) {
1361 		wl1271_warning("No mask in WoWLAN pattern");
1362 		return -EINVAL;
1363 	}
1364 
1365 	/*
1366 	 * The pattern is broken up into segments of bytes at different offsets
1367 	 * that need to be checked by the FW filter. Each segment is called
1368 	 * a field in the FW API. We verify that the total number of fields
1369 	 * required for this pattern won't exceed the FW limit (8),
1370 	 * and that the total fields buffer won't exceed the FW limit.
1371 	 * Note that if there's a pattern which crosses Ethernet/IP header
1372 	 * boundary a new field is required.
1373 	 */
1374 	for (i = 0; i < p->pattern_len; i++) {
1375 		if (test_bit(i, (unsigned long *)p->mask)) {
1376 			if (!in_field) {
1377 				in_field = 1;
1378 				pattern_len = 1;
1379 			} else {
1380 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1381 					num_fields++;
1382 					fields_size += pattern_len +
1383 						RX_FILTER_FIELD_OVERHEAD;
1384 					pattern_len = 1;
1385 				} else
1386 					pattern_len++;
1387 			}
1388 		} else {
1389 			if (in_field) {
1390 				in_field = 0;
1391 				fields_size += pattern_len +
1392 					RX_FILTER_FIELD_OVERHEAD;
1393 				num_fields++;
1394 			}
1395 		}
1396 	}
1397 
1398 	if (in_field) {
1399 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1400 		num_fields++;
1401 	}
1402 
1403 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1404 		wl1271_warning("RX Filter too complex. Too many segments");
1405 		return -EINVAL;
1406 	}
1407 
1408 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1409 		wl1271_warning("RX filter pattern is too big");
1410 		return -E2BIG;
1411 	}
1412 
1413 	return 0;
1414 }
1415 
1416 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1417 {
1418 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1419 }
1420 
1421 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1422 {
1423 	int i;
1424 
1425 	if (filter == NULL)
1426 		return;
1427 
1428 	for (i = 0; i < filter->num_fields; i++)
1429 		kfree(filter->fields[i].pattern);
1430 
1431 	kfree(filter);
1432 }
1433 
1434 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1435 				 u16 offset, u8 flags,
1436 				 const u8 *pattern, u8 len)
1437 {
1438 	struct wl12xx_rx_filter_field *field;
1439 
1440 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1441 		wl1271_warning("Max fields per RX filter reached; can't alloc another");
1442 		return -EINVAL;
1443 	}
1444 
1445 	field = &filter->fields[filter->num_fields];
1446 
1447 	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1448 	if (!field->pattern) {
1449 		wl1271_warning("Failed to allocate RX filter pattern");
1450 		return -ENOMEM;
1451 	}
1452 
1453 	filter->num_fields++;
1454 
1455 	field->offset = cpu_to_le16(offset);
1456 	field->flags = flags;
1457 	field->len = len;
1458 
1459 	return 0;
1460 }
1461 
1462 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1463 {
1464 	int i, fields_size = 0;
1465 
1466 	for (i = 0; i < filter->num_fields; i++)
1467 		fields_size += filter->fields[i].len +
1468 			sizeof(struct wl12xx_rx_filter_field) -
1469 			sizeof(u8 *);
1470 
1471 	return fields_size;
1472 }
1473 
1474 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1475 				    u8 *buf)
1476 {
1477 	int i;
1478 	struct wl12xx_rx_filter_field *field;
1479 
1480 	for (i = 0; i < filter->num_fields; i++) {
1481 		field = (struct wl12xx_rx_filter_field *)buf;
1482 
1483 		field->offset = filter->fields[i].offset;
1484 		field->flags = filter->fields[i].flags;
1485 		field->len = filter->fields[i].len;
1486 
1487 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1488 		buf += sizeof(struct wl12xx_rx_filter_field) -
1489 			sizeof(u8 *) + field->len;
1490 	}
1491 }
1492 
1493 /*
1494  * Allocates an RX filter, returned through f,
1495  * which must be freed using wl1271_rx_filter_free()
1496  */
1497 static int
1498 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1499 					   struct wl12xx_rx_filter **f)
1500 {
1501 	int i, j, ret = 0;
1502 	struct wl12xx_rx_filter *filter;
1503 	u16 offset;
1504 	u8 flags, len;
1505 
1506 	filter = wl1271_rx_filter_alloc();
1507 	if (!filter) {
1508 		wl1271_warning("Failed to alloc rx filter");
1509 		ret = -ENOMEM;
1510 		goto err;
1511 	}
1512 
1513 	i = 0;
1514 	while (i < p->pattern_len) {
1515 		if (!test_bit(i, (unsigned long *)p->mask)) {
1516 			i++;
1517 			continue;
1518 		}
1519 
1520 		for (j = i; j < p->pattern_len; j++) {
1521 			if (!test_bit(j, (unsigned long *)p->mask))
1522 				break;
1523 
1524 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1525 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1526 				break;
1527 		}
1528 
1529 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1530 			offset = i;
1531 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1532 		} else {
1533 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1534 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1535 		}
1536 
1537 		len = j - i;
1538 
1539 		ret = wl1271_rx_filter_alloc_field(filter,
1540 						   offset,
1541 						   flags,
1542 						   &p->pattern[i], len);
1543 		if (ret)
1544 			goto err;
1545 
1546 		i = j;
1547 	}
1548 
1549 	filter->action = FILTER_SIGNAL;
1550 
1551 	*f = filter;
1552 	return 0;
1553 
1554 err:
1555 	wl1271_rx_filter_free(filter);
1556 	*f = NULL;
1557 
1558 	return ret;
1559 }
1560 
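/*
 * Program the FW RX filters for WoWLAN: with no patterns (or "any"), clear
 * all filters and signal every frame to the host; otherwise install one
 * filter per pattern and drop frames that match none of them.
 */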
1561 static int wl1271_configure_wowlan(struct wl1271 *wl,
1562 				   struct cfg80211_wowlan *wow)
1563 {
1564 	int i, ret;
1565 
1566 	if (!wow || wow->any || !wow->n_patterns) {
1567 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1568 							  FILTER_SIGNAL);
1569 		if (ret)
1570 			goto out;
1571 
1572 		ret = wl1271_rx_filter_clear_all(wl);
1573 		if (ret)
1574 			goto out;
1575 
1576 		return 0;
1577 	}
1578 
1579 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1580 		return -EINVAL;
1581 
1582 	/* Validate all incoming patterns before clearing current FW state */
1583 	for (i = 0; i < wow->n_patterns; i++) {
1584 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1585 		if (ret) {
1586 			wl1271_warning("Bad wowlan pattern %d", i);
1587 			return ret;
1588 		}
1589 	}
1590 
1591 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1592 	if (ret)
1593 		goto out;
1594 
1595 	ret = wl1271_rx_filter_clear_all(wl);
1596 	if (ret)
1597 		goto out;
1598 
1599 	/* Translate WoWLAN patterns into filters */
1600 	for (i = 0; i < wow->n_patterns; i++) {
1601 		struct cfg80211_pkt_pattern *p;
1602 		struct wl12xx_rx_filter *filter = NULL;
1603 
1604 		p = &wow->patterns[i];
1605 
1606 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1607 		if (ret) {
1608 			wl1271_warning("Failed to create an RX filter from "
1609 				       "wowlan pattern %d", i);
1610 			goto out;
1611 		}
1612 
1613 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1614 
1615 		wl1271_rx_filter_free(filter);
1616 		if (ret)
1617 			goto out;
1618 	}
1619 
1620 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1621 
1622 out:
1623 	return ret;
1624 }
1625 
1626 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1627 					struct wl12xx_vif *wlvif,
1628 					struct cfg80211_wowlan *wow)
1629 {
1630 	int ret = 0;
1631 
1632 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1633 		goto out;
1634 
1635 	ret = wl1271_configure_wowlan(wl, wow);
1636 	if (ret < 0)
1637 		goto out;
1638 
1639 	if ((wl->conf.conn.suspend_wake_up_event ==
1640 	     wl->conf.conn.wake_up_event) &&
1641 	    (wl->conf.conn.suspend_listen_interval ==
1642 	     wl->conf.conn.listen_interval))
1643 		goto out;
1644 
1645 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1646 				    wl->conf.conn.suspend_wake_up_event,
1647 				    wl->conf.conn.suspend_listen_interval);
1648 
1649 	if (ret < 0)
1650 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1651 out:
1652 	return ret;
1653 
1654 }
1655 
1656 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1657 					struct wl12xx_vif *wlvif,
1658 					struct cfg80211_wowlan *wow)
1659 {
1660 	int ret = 0;
1661 
1662 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1663 		goto out;
1664 
1665 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1666 	if (ret < 0)
1667 		goto out;
1668 
1669 	ret = wl1271_configure_wowlan(wl, wow);
1670 	if (ret < 0)
1671 		goto out;
1672 
1673 out:
1674 	return ret;
1675 
1676 }
1677 
1678 static int wl1271_configure_suspend(struct wl1271 *wl,
1679 				    struct wl12xx_vif *wlvif,
1680 				    struct cfg80211_wowlan *wow)
1681 {
1682 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1683 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1684 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1685 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1686 	return 0;
1687 }
1688 
1689 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1690 {
1691 	int ret = 0;
1692 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1693 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1694 
1695 	if ((!is_ap) && (!is_sta))
1696 		return;
1697 
1698 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1699 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1700 		return;
1701 
1702 	wl1271_configure_wowlan(wl, NULL);
1703 
1704 	if (is_sta) {
1705 		if ((wl->conf.conn.suspend_wake_up_event ==
1706 		     wl->conf.conn.wake_up_event) &&
1707 		    (wl->conf.conn.suspend_listen_interval ==
1708 		     wl->conf.conn.listen_interval))
1709 			return;
1710 
1711 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1712 				    wl->conf.conn.wake_up_event,
1713 				    wl->conf.conn.listen_interval);
1714 
1715 		if (ret < 0)
1716 			wl1271_error("resume: wake up conditions failed: %d",
1717 				     ret);
1718 
1719 	} else if (is_ap) {
1720 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1721 	}
1722 }
1723 
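/*
 * mac80211 suspend callback: configure per-vif WoWLAN wake-up conditions,
 * quiesce FW notifications, flush pending work and force-suspend the device.
 */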
1724 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1725 					    struct cfg80211_wowlan *wow)
1726 {
1727 	struct wl1271 *wl = hw->priv;
1728 	struct wl12xx_vif *wlvif;
1729 	unsigned long flags;
1730 	int ret;
1731 
1732 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1733 	WARN_ON(!wow);
1734 
1735 	/* we want to perform the recovery before suspending */
1736 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1737 		wl1271_warning("postponing suspend to perform recovery");
1738 		return -EBUSY;
1739 	}
1740 
1741 	wl1271_tx_flush(wl);
1742 
1743 	mutex_lock(&wl->mutex);
1744 
1745 	ret = pm_runtime_get_sync(wl->dev);
1746 	if (ret < 0) {
1747 		pm_runtime_put_noidle(wl->dev);
1748 		mutex_unlock(&wl->mutex);
1749 		return ret;
1750 	}
1751 
1752 	wl->wow_enabled = true;
1753 	wl12xx_for_each_wlvif(wl, wlvif) {
1754 		if (wlcore_is_p2p_mgmt(wlvif))
1755 			continue;
1756 
1757 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1758 		if (ret < 0) {
1759 			goto out_sleep;
1760 		}
1761 	}
1762 
1763 	/* disable fast link flow control notifications from FW */
1764 	ret = wlcore_hw_interrupt_notify(wl, false);
1765 	if (ret < 0)
1766 		goto out_sleep;
1767 
1768 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1769 	ret = wlcore_hw_rx_ba_filter(wl,
1770 				     !!wl->conf.conn.suspend_rx_ba_activity);
1771 	if (ret < 0)
1772 		goto out_sleep;
1773 
1774 out_sleep:
1775 	pm_runtime_put_noidle(wl->dev);
1776 	mutex_unlock(&wl->mutex);
1777 
1778 	if (ret < 0) {
1779 		wl1271_warning("couldn't prepare device to suspend");
1780 		return ret;
1781 	}
1782 
1783 	/* flush any remaining work */
1784 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1785 
1786 	flush_work(&wl->tx_work);
1787 
1788 	/*
1789 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1790 	 * it on resume anyway.
1791 	 */
1792 	cancel_delayed_work(&wl->tx_watchdog_work);
1793 
1794 	/*
1795 	 * set suspended flag to avoid triggering a new threaded_irq
1796 	 * work.
1797 	 */
1798 	spin_lock_irqsave(&wl->wl_lock, flags);
1799 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1800 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1801 
1802 	return pm_runtime_force_suspend(wl->dev);
1803 }
1804 
1805 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1806 {
1807 	struct wl1271 *wl = hw->priv;
1808 	struct wl12xx_vif *wlvif;
1809 	unsigned long flags;
1810 	bool run_irq_work = false, pending_recovery;
1811 	int ret;
1812 
1813 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1814 		     wl->wow_enabled);
1815 	WARN_ON(!wl->wow_enabled);
1816 
1817 	ret = pm_runtime_force_resume(wl->dev);
1818 	if (ret < 0) {
1819 		wl1271_error("ELP wakeup failure!");
1820 		goto out_sleep;
1821 	}
1822 
1823 	/*
1824 	 * re-enable irq_work enqueuing, and call irq_work directly if
1825 	 * there is a pending work.
1826 	 */
1827 	spin_lock_irqsave(&wl->wl_lock, flags);
1828 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1829 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1830 		run_irq_work = true;
1831 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1832 
1833 	mutex_lock(&wl->mutex);
1834 
1835 	/* test the recovery flag before calling any SDIO functions */
1836 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1837 				    &wl->flags);
1838 
1839 	if (run_irq_work) {
1840 		wl1271_debug(DEBUG_MAC80211,
1841 			     "run postponed irq_work directly");
1842 
1843 		/* don't talk to the HW if recovery is pending */
1844 		if (!pending_recovery) {
1845 			ret = wlcore_irq_locked(wl);
1846 			if (ret)
1847 				wl12xx_queue_recovery_work(wl);
1848 		}
1849 
1850 		wlcore_enable_interrupts(wl);
1851 	}
1852 
1853 	if (pending_recovery) {
1854 		wl1271_warning("queuing forgotten recovery on resume");
1855 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1856 		goto out_sleep;
1857 	}
1858 
1859 	ret = pm_runtime_get_sync(wl->dev);
1860 	if (ret < 0) {
1861 		pm_runtime_put_noidle(wl->dev);
1862 		goto out;
1863 	}
1864 
1865 	wl12xx_for_each_wlvif(wl, wlvif) {
1866 		if (wlcore_is_p2p_mgmt(wlvif))
1867 			continue;
1868 
1869 		wl1271_configure_resume(wl, wlvif);
1870 	}
1871 
1872 	ret = wlcore_hw_interrupt_notify(wl, true);
1873 	if (ret < 0)
1874 		goto out_sleep;
1875 
1876 	/* restore normal RX BA handling (stop dropping RX BA frames) */
1877 	ret = wlcore_hw_rx_ba_filter(wl, false);
1878 	if (ret < 0)
1879 		goto out_sleep;
1880 
1881 out_sleep:
1882 	pm_runtime_mark_last_busy(wl->dev);
1883 	pm_runtime_put_autosuspend(wl->dev);
1884 
1885 out:
1886 	wl->wow_enabled = false;
1887 
1888 	/*
1889 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1890 	 * That way we avoid possible conditions where Tx-complete interrupts
1891 	 * fail to arrive and we perform a spurious recovery.
1892 	 */
1893 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1894 	mutex_unlock(&wl->mutex);
1895 
1896 	return 0;
1897 }
1898 
1899 static int wl1271_op_start(struct ieee80211_hw *hw)
1900 {
1901 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1902 
1903 	/*
1904 	 * We have to delay the booting of the hardware because
1905 	 * we need to know the local MAC address before downloading and
1906 	 * initializing the firmware. The MAC address cannot be changed
1907 	 * after boot, and without the proper MAC address, the firmware
1908 	 * will not function properly.
1909 	 *
1910 	 * The MAC address is first known when the corresponding interface
1911 	 * is added. That is where we will initialize the hardware.
1912 	 */
1913 
1914 	return 0;
1915 }
1916 
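/*
 * Stop the device with wl->mutex held: disable interrupts, flush and cancel
 * all pending work, power off the chip and reset the driver state, keeping
 * only the always-allocated system link.
 */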
1917 static void wlcore_op_stop_locked(struct wl1271 *wl)
1918 {
1919 	int i;
1920 
1921 	if (wl->state == WLCORE_STATE_OFF) {
1922 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1923 					&wl->flags))
1924 			wlcore_enable_interrupts(wl);
1925 
1926 		return;
1927 	}
1928 
1929 	/*
1930 	 * this must be before the cancel_work calls below, so that the work
1931 	 * functions don't perform further work.
1932 	 */
1933 	wl->state = WLCORE_STATE_OFF;
1934 
1935 	/*
1936 	 * Use the nosync variant to disable interrupts, so the mutex could be
1937 	 * held while doing so without deadlocking.
1938 	 */
1939 	wlcore_disable_interrupts_nosync(wl);
1940 
1941 	mutex_unlock(&wl->mutex);
1942 
1943 	wlcore_synchronize_interrupts(wl);
1944 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1945 		cancel_work_sync(&wl->recovery_work);
1946 	wl1271_flush_deferred_work(wl);
1947 	cancel_delayed_work_sync(&wl->scan_complete_work);
1948 	cancel_work_sync(&wl->netstack_work);
1949 	cancel_work_sync(&wl->tx_work);
1950 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1951 
1952 	/* let's notify MAC80211 about the remaining pending TX frames */
1953 	mutex_lock(&wl->mutex);
1954 	wl12xx_tx_reset(wl);
1955 
1956 	wl1271_power_off(wl);
1957 	/*
1958 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1959 	 * an interrupt storm. Now that the power is down, it is safe to
1960 	 * re-enable interrupts to balance the disable depth
1961 	 */
1962 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1963 		wlcore_enable_interrupts(wl);
1964 
1965 	wl->band = NL80211_BAND_2GHZ;
1966 
1967 	wl->rx_counter = 0;
1968 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1969 	wl->channel_type = NL80211_CHAN_NO_HT;
1970 	wl->tx_blocks_available = 0;
1971 	wl->tx_allocated_blocks = 0;
1972 	wl->tx_results_count = 0;
1973 	wl->tx_packets_count = 0;
1974 	wl->time_offset = 0;
1975 	wl->ap_fw_ps_map = 0;
1976 	wl->ap_ps_map = 0;
1977 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1978 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1979 	memset(wl->links_map, 0, sizeof(wl->links_map));
1980 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1981 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1982 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1983 	wl->active_sta_count = 0;
1984 	wl->active_link_count = 0;
1985 
1986 	/* The system link is always allocated */
1987 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1988 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1989 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1990 
1991 	/*
1992 	 * this is performed after the cancel_work calls and the associated
1993 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1994 	 * get executed before all these vars have been reset.
1995 	 */
1996 	wl->flags = 0;
1997 
1998 	wl->tx_blocks_freed = 0;
1999 
2000 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2001 		wl->tx_pkts_freed[i] = 0;
2002 		wl->tx_allocated_pkts[i] = 0;
2003 	}
2004 
2005 	wl1271_debugfs_reset(wl);
2006 
2007 	kfree(wl->raw_fw_status);
2008 	wl->raw_fw_status = NULL;
2009 	kfree(wl->fw_status);
2010 	wl->fw_status = NULL;
2011 	kfree(wl->tx_res_if);
2012 	wl->tx_res_if = NULL;
2013 	kfree(wl->target_mem_map);
2014 	wl->target_mem_map = NULL;
2015 
2016 	/*
2017 	 * FW channels must be re-calibrated after recovery;
2018 	 * save the current Reg-Domain channel configuration and clear it.
2019 	 */
2020 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2021 	       sizeof(wl->reg_ch_conf_pending));
2022 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2023 }
2024 
2025 static void wlcore_op_stop(struct ieee80211_hw *hw)
2026 {
2027 	struct wl1271 *wl = hw->priv;
2028 
2029 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2030 
2031 	mutex_lock(&wl->mutex);
2032 
2033 	wlcore_op_stop_locked(wl);
2034 
2035 	mutex_unlock(&wl->mutex);
2036 }
2037 
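/*
 * Delayed work that fires when a pending channel switch has not completed
 * in time: if the CS_PROGRESS flag is still set, report the switch as
 * failed to mac80211 and tell the firmware to stop it.
 */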
2038 static void wlcore_channel_switch_work(struct work_struct *work)
2039 {
2040 	struct delayed_work *dwork;
2041 	struct wl1271 *wl;
2042 	struct ieee80211_vif *vif;
2043 	struct wl12xx_vif *wlvif;
2044 	int ret;
2045 
2046 	dwork = to_delayed_work(work);
2047 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2048 	wl = wlvif->wl;
2049 
2050 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2051 
2052 	mutex_lock(&wl->mutex);
2053 
2054 	if (unlikely(wl->state != WLCORE_STATE_ON))
2055 		goto out;
2056 
2057 	/* check the channel switch is still ongoing */
2058 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2059 		goto out;
2060 
2061 	vif = wl12xx_wlvif_to_vif(wlvif);
2062 	ieee80211_chswitch_done(vif, false);
2063 
2064 	ret = pm_runtime_get_sync(wl->dev);
2065 	if (ret < 0) {
2066 		pm_runtime_put_noidle(wl->dev);
2067 		goto out;
2068 	}
2069 
2070 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2071 
2072 	pm_runtime_mark_last_busy(wl->dev);
2073 	pm_runtime_put_autosuspend(wl->dev);
2074 out:
2075 	mutex_unlock(&wl->mutex);
2076 }
2077 
2078 static void wlcore_connection_loss_work(struct work_struct *work)
2079 {
2080 	struct delayed_work *dwork;
2081 	struct wl1271 *wl;
2082 	struct ieee80211_vif *vif;
2083 	struct wl12xx_vif *wlvif;
2084 
2085 	dwork = to_delayed_work(work);
2086 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2087 	wl = wlvif->wl;
2088 
2089 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2090 
2091 	mutex_lock(&wl->mutex);
2092 
2093 	if (unlikely(wl->state != WLCORE_STATE_ON))
2094 		goto out;
2095 
2096 	/* Call mac80211 connection loss */
2097 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2098 		goto out;
2099 
2100 	vif = wl12xx_wlvif_to_vif(wlvif);
2101 	ieee80211_connection_loss(vif);
2102 out:
2103 	mutex_unlock(&wl->mutex);
2104 }
2105 
2106 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2107 {
2108 	struct delayed_work *dwork;
2109 	struct wl1271 *wl;
2110 	struct wl12xx_vif *wlvif;
2111 	unsigned long time_spare;
2112 	int ret;
2113 
2114 	dwork = to_delayed_work(work);
2115 	wlvif = container_of(dwork, struct wl12xx_vif,
2116 			     pending_auth_complete_work);
2117 	wl = wlvif->wl;
2118 
2119 	mutex_lock(&wl->mutex);
2120 
2121 	if (unlikely(wl->state != WLCORE_STATE_ON))
2122 		goto out;
2123 
2124 	/*
2125 	 * Make sure a second really passed since the last auth reply. Maybe
2126 	 * a second auth reply arrived while we were stuck on the mutex.
2127 	 * Check for a little less than the timeout to protect from scheduler
2128 	 * irregularities.
2129 	 */
2130 	time_spare = jiffies +
2131 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2132 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2133 		goto out;
2134 
2135 	ret = pm_runtime_get_sync(wl->dev);
2136 	if (ret < 0) {
2137 		pm_runtime_put_noidle(wl->dev);
2138 		goto out;
2139 	}
2140 
2141 	/* cancel the ROC if active */
2142 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2143 
2144 	pm_runtime_mark_last_busy(wl->dev);
2145 	pm_runtime_put_autosuspend(wl->dev);
2146 out:
2147 	mutex_unlock(&wl->mutex);
2148 }
2149 
2150 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2151 {
2152 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2153 					WL12XX_MAX_RATE_POLICIES);
2154 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2155 		return -EBUSY;
2156 
2157 	__set_bit(policy, wl->rate_policies_map);
2158 	*idx = policy;
2159 	return 0;
2160 }
2161 
2162 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2163 {
2164 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2165 		return;
2166 
2167 	__clear_bit(*idx, wl->rate_policies_map);
2168 	*idx = WL12XX_MAX_RATE_POLICIES;
2169 }
2170 
2171 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2172 {
2173 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2174 					WLCORE_MAX_KLV_TEMPLATES);
2175 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2176 		return -EBUSY;
2177 
2178 	__set_bit(policy, wl->klv_templates_map);
2179 	*idx = policy;
2180 	return 0;
2181 }
2182 
2183 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2184 {
2185 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2186 		return;
2187 
2188 	__clear_bit(*idx, wl->klv_templates_map);
2189 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2190 }
2191 
2192 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2193 {
2194 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2195 
2196 	switch (wlvif->bss_type) {
2197 	case BSS_TYPE_AP_BSS:
2198 		if (wlvif->p2p)
2199 			return WL1271_ROLE_P2P_GO;
2200 		else if (ieee80211_vif_is_mesh(vif))
2201 			return WL1271_ROLE_MESH_POINT;
2202 		else
2203 			return WL1271_ROLE_AP;
2204 
2205 	case BSS_TYPE_STA_BSS:
2206 		if (wlvif->p2p)
2207 			return WL1271_ROLE_P2P_CL;
2208 		else
2209 			return WL1271_ROLE_STA;
2210 
2211 	case BSS_TYPE_IBSS:
2212 		return WL1271_ROLE_IBSS;
2213 
2214 	default:
2215 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2216 	}
2217 	return WL12XX_INVALID_ROLE_TYPE;
2218 }
2219 
2220 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2221 {
2222 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2223 	int i;
2224 
2225 	/* clear everything but the persistent data */
2226 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2227 
2228 	switch (ieee80211_vif_type_p2p(vif)) {
2229 	case NL80211_IFTYPE_P2P_CLIENT:
2230 		wlvif->p2p = 1;
2231 		/* fall-through */
2232 	case NL80211_IFTYPE_STATION:
2233 	case NL80211_IFTYPE_P2P_DEVICE:
2234 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2235 		break;
2236 	case NL80211_IFTYPE_ADHOC:
2237 		wlvif->bss_type = BSS_TYPE_IBSS;
2238 		break;
2239 	case NL80211_IFTYPE_P2P_GO:
2240 		wlvif->p2p = 1;
2241 		/* fall-through */
2242 	case NL80211_IFTYPE_AP:
2243 	case NL80211_IFTYPE_MESH_POINT:
2244 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2245 		break;
2246 	default:
2247 		wlvif->bss_type = MAX_BSS_TYPE;
2248 		return -EOPNOTSUPP;
2249 	}
2250 
2251 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2252 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2253 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2254 
2255 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2256 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2257 		/* init sta/ibss data */
2258 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2259 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2260 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2261 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2262 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2263 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2264 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2265 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2266 	} else {
2267 		/* init ap data */
2268 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2269 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2270 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2271 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2272 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2273 			wl12xx_allocate_rate_policy(wl,
2274 						&wlvif->ap.ucast_rate_idx[i]);
2275 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2276 		/*
2277 		 * TODO: check if basic_rate shouldn't be
2278 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2279 		 * instead (the same thing for STA above).
2280 		 */
2281 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2282 		/* TODO: this seems to be used only for STA, check it */
2283 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2284 	}
2285 
2286 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2287 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2288 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2289 
2290 	/*
2291 	 * mac80211 configures some values globally, while we treat them
2292 	 * per-interface. thus, on init, we have to copy them from wl
2293 	 */
2294 	wlvif->band = wl->band;
2295 	wlvif->channel = wl->channel;
2296 	wlvif->power_level = wl->power_level;
2297 	wlvif->channel_type = wl->channel_type;
2298 
2299 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2300 		  wl1271_rx_streaming_enable_work);
2301 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2302 		  wl1271_rx_streaming_disable_work);
2303 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2304 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2305 			  wlcore_channel_switch_work);
2306 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2307 			  wlcore_connection_loss_work);
2308 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2309 			  wlcore_pending_auth_complete_work);
2310 	INIT_LIST_HEAD(&wlvif->list);
2311 
2312 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2313 	return 0;
2314 }
2315 
2316 static int wl12xx_init_fw(struct wl1271 *wl)
2317 {
2318 	int retries = WL1271_BOOT_RETRIES;
2319 	bool booted = false;
2320 	struct wiphy *wiphy = wl->hw->wiphy;
2321 	int ret;
2322 
2323 	while (retries) {
2324 		retries--;
2325 		ret = wl12xx_chip_wakeup(wl, false);
2326 		if (ret < 0)
2327 			goto power_off;
2328 
2329 		ret = wl->ops->boot(wl);
2330 		if (ret < 0)
2331 			goto power_off;
2332 
2333 		ret = wl1271_hw_init(wl);
2334 		if (ret < 0)
2335 			goto irq_disable;
2336 
2337 		booted = true;
2338 		break;
2339 
2340 irq_disable:
2341 		mutex_unlock(&wl->mutex);
2342 		/* Unlocking the mutex in the middle of handling is
2343 		   inherently unsafe. In this case we deem it safe to do,
2344 		   because we need to let any possibly pending IRQ out of
2345 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2346 		   work function will not do anything.) Also, any other
2347 		   possible concurrent operations will fail due to the
2348 		   current state, hence the wl1271 struct should be safe. */
2349 		wlcore_disable_interrupts(wl);
2350 		wl1271_flush_deferred_work(wl);
2351 		cancel_work_sync(&wl->netstack_work);
2352 		mutex_lock(&wl->mutex);
2353 power_off:
2354 		wl1271_power_off(wl);
2355 	}
2356 
2357 	if (!booted) {
2358 		wl1271_error("firmware boot failed despite %d retries",
2359 			     WL1271_BOOT_RETRIES);
2360 		goto out;
2361 	}
2362 
2363 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2364 
2365 	/* update hw/fw version info in wiphy struct */
2366 	wiphy->hw_version = wl->chip.id;
2367 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2368 		sizeof(wiphy->fw_version));
2369 
2370 	/*
2371 	 * Now we know if 11a is supported (info from the NVS), so disable
2372 	 * 11a channels if not supported
2373 	 */
2374 	if (!wl->enable_11a)
2375 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2376 
2377 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2378 		     wl->enable_11a ? "" : "not ");
2379 
2380 	wl->state = WLCORE_STATE_ON;
2381 out:
2382 	return ret;
2383 }
2384 
2385 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2386 {
2387 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2388 }
2389 
2390 /*
2391  * Check whether a fw switch (i.e. moving from one loaded
2392  * fw to another) is needed. This function is also responsible
2393  * for updating wl->last_vif_count, so it must be called before
2394  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2395  * will be used).
2396  */
2397 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2398 				  struct vif_counter_data vif_counter_data,
2399 				  bool add)
2400 {
2401 	enum wl12xx_fw_type current_fw = wl->fw_type;
2402 	u8 vif_count = vif_counter_data.counter;
2403 
2404 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2405 		return false;
2406 
2407 	/* increase the vif count if this is a new vif */
2408 	if (add && !vif_counter_data.cur_vif_running)
2409 		vif_count++;
2410 
2411 	wl->last_vif_count = vif_count;
2412 
2413 	/* no need for fw change if the device is OFF */
2414 	if (wl->state == WLCORE_STATE_OFF)
2415 		return false;
2416 
2417 	/* no need for fw change if a single fw is used */
2418 	if (!wl->mr_fw_name)
2419 		return false;
2420 
2421 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2422 		return true;
2423 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2424 		return true;
2425 
2426 	return false;
2427 }
2428 
2429 /*
2430  * Enter "forced psm". Make sure the sta is in psm against the ap,
2431  * to make the fw switch a bit more disconnection-persistent.
2432  */
2433 static void wl12xx_force_active_psm(struct wl1271 *wl)
2434 {
2435 	struct wl12xx_vif *wlvif;
2436 
2437 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2438 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2439 	}
2440 }
2441 
2442 struct wlcore_hw_queue_iter_data {
2443 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2444 	/* current vif */
2445 	struct ieee80211_vif *vif;
2446 	/* is the current vif among those iterated */
2447 	bool cur_running;
2448 };
2449 
2450 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2451 				 struct ieee80211_vif *vif)
2452 {
2453 	struct wlcore_hw_queue_iter_data *iter_data = data;
2454 
2455 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2456 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2457 		return;
2458 
2459 	if (iter_data->cur_running || vif == iter_data->vif) {
2460 		iter_data->cur_running = true;
2461 		return;
2462 	}
2463 
2464 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2465 }
2466 
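/*
 * Each interface gets a contiguous block of NUM_TX_QUEUES mac80211 hw
 * queues starting at hw_queue_base.  The iterator above marks blocks
 * already taken by running interfaces, and the allocator below picks the
 * first free block; cab (content-after-beacon) queues for AP interfaces
 * live past all the regular blocks.  Illustrative example (assuming
 * NUM_TX_QUEUES is 4 and WLCORE_NUM_MAC_ADDRESSES is 3): the interface
 * taking the second block gets hw queues 4-7 and, if it is an AP,
 * cab queue 4 * 3 + 1 = 13.
 */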
2467 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2468 					 struct wl12xx_vif *wlvif)
2469 {
2470 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2471 	struct wlcore_hw_queue_iter_data iter_data = {};
2472 	int i, q_base;
2473 
2474 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2475 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2476 		return 0;
2477 	}
2478 
2479 	iter_data.vif = vif;
2480 
2481 	/* mark all bits taken by active interfaces */
2482 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2483 					IEEE80211_IFACE_ITER_RESUME_ALL,
2484 					wlcore_hw_queue_iter, &iter_data);
2485 
2486 	/* the current vif is already running in mac80211 (resume/recovery) */
2487 	if (iter_data.cur_running) {
2488 		wlvif->hw_queue_base = vif->hw_queue[0];
2489 		wl1271_debug(DEBUG_MAC80211,
2490 			     "using pre-allocated hw queue base %d",
2491 			     wlvif->hw_queue_base);
2492 
2493 		/* the interface might have changed type */
2494 		goto adjust_cab_queue;
2495 	}
2496 
2497 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2498 				     WLCORE_NUM_MAC_ADDRESSES);
2499 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2500 		return -EBUSY;
2501 
2502 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2503 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2504 		     wlvif->hw_queue_base);
2505 
2506 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2507 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2508 		/* register hw queues in mac80211 */
2509 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2510 	}
2511 
2512 adjust_cab_queue:
2513 	/* the last places are reserved for cab queues per interface */
2514 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2515 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2516 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2517 	else
2518 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2519 
2520 	return 0;
2521 }
2522 
2523 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2524 				   struct ieee80211_vif *vif)
2525 {
2526 	struct wl1271 *wl = hw->priv;
2527 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2528 	struct vif_counter_data vif_count;
2529 	int ret = 0;
2530 	u8 role_type;
2531 
2532 	if (wl->plt) {
2533 		wl1271_error("Adding Interface not allowed while in PLT mode");
2534 		return -EBUSY;
2535 	}
2536 
2537 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2538 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2539 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2540 
2541 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2542 		     ieee80211_vif_type_p2p(vif), vif->addr);
2543 
2544 	wl12xx_get_vif_count(hw, vif, &vif_count);
2545 
2546 	mutex_lock(&wl->mutex);
2547 
2548 	/*
2549 	 * in some rare HW recovery corner cases it's possible to
2550 	 * get here before __wl1271_op_remove_interface is complete, so
2551 	 * opt out if that is the case.
2552 	 */
2553 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2554 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2555 		ret = -EBUSY;
2556 		goto out;
2557 	}
2558 
2559 
2560 	ret = wl12xx_init_vif_data(wl, vif);
2561 	if (ret < 0)
2562 		goto out;
2563 
2564 	wlvif->wl = wl;
2565 	role_type = wl12xx_get_role_type(wl, wlvif);
2566 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2567 		ret = -EINVAL;
2568 		goto out;
2569 	}
2570 
2571 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2572 	if (ret < 0)
2573 		goto out;
2574 
2575 	/*
2576 	 * TODO: once the nvs issue is solved, move this block
2577 	 * to start(), and make sure the driver is ON here.
2578 	 */
2579 	if (wl->state == WLCORE_STATE_OFF) {
2580 		/*
2581 		 * we still need this in order to configure the fw
2582 		 * while uploading the nvs
2583 		 */
2584 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2585 
2586 		ret = wl12xx_init_fw(wl);
2587 		if (ret < 0)
2588 			goto out;
2589 	}
2590 
2591 	/*
2592 	 * Call runtime PM only after possible wl12xx_init_fw() above
2593 	 * is done. Otherwise we do not have interrupts enabled.
2594 	 */
2595 	ret = pm_runtime_get_sync(wl->dev);
2596 	if (ret < 0) {
2597 		pm_runtime_put_noidle(wl->dev);
2598 		goto out_unlock;
2599 	}
2600 
2601 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2602 		wl12xx_force_active_psm(wl);
2603 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2604 		mutex_unlock(&wl->mutex);
2605 		wl1271_recovery_work(&wl->recovery_work);
2606 		return 0;
2607 	}
2608 
2609 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2610 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2611 					     role_type, &wlvif->role_id);
2612 		if (ret < 0)
2613 			goto out;
2614 
2615 		ret = wl1271_init_vif_specific(wl, vif);
2616 		if (ret < 0)
2617 			goto out;
2618 
2619 	} else {
2620 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2621 					     &wlvif->dev_role_id);
2622 		if (ret < 0)
2623 			goto out;
2624 
2625 		/* needed mainly for configuring rate policies */
2626 		ret = wl1271_sta_hw_init(wl, wlvif);
2627 		if (ret < 0)
2628 			goto out;
2629 	}
2630 
2631 	list_add(&wlvif->list, &wl->wlvif_list);
2632 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2633 
2634 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2635 		wl->ap_count++;
2636 	else
2637 		wl->sta_count++;
2638 out:
2639 	pm_runtime_mark_last_busy(wl->dev);
2640 	pm_runtime_put_autosuspend(wl->dev);
2641 out_unlock:
2642 	mutex_unlock(&wl->mutex);
2643 
2644 	return ret;
2645 }
2646 
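/*
 * Called with wl->mutex held.  Note that the mutex is dropped and
 * re-acquired near the end, around the timer/work cancellation calls,
 * so that per-vif work items that take the mutex can complete.
 */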
2647 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2648 					 struct ieee80211_vif *vif,
2649 					 bool reset_tx_queues)
2650 {
2651 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2652 	int i, ret;
2653 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2654 
2655 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2656 
2657 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2658 		return;
2659 
2660 	/* because of hardware recovery, we may get here twice */
2661 	if (wl->state == WLCORE_STATE_OFF)
2662 		return;
2663 
2664 	wl1271_info("down");
2665 
2666 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2667 	    wl->scan_wlvif == wlvif) {
2668 		struct cfg80211_scan_info info = {
2669 			.aborted = true,
2670 		};
2671 
2672 		/*
2673 		 * Rearm the tx watchdog just before idling scan. This
2674 		 * prevents just-finished scans from triggering the watchdog
2675 		 */
2676 		wl12xx_rearm_tx_watchdog_locked(wl);
2677 
2678 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2679 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2680 		wl->scan_wlvif = NULL;
2681 		wl->scan.req = NULL;
2682 		ieee80211_scan_completed(wl->hw, &info);
2683 	}
2684 
2685 	if (wl->sched_vif == wlvif)
2686 		wl->sched_vif = NULL;
2687 
2688 	if (wl->roc_vif == vif) {
2689 		wl->roc_vif = NULL;
2690 		ieee80211_remain_on_channel_expired(wl->hw);
2691 	}
2692 
2693 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2694 		/* disable active roles */
2695 		ret = pm_runtime_get_sync(wl->dev);
2696 		if (ret < 0) {
2697 			pm_runtime_put_noidle(wl->dev);
2698 			goto deinit;
2699 		}
2700 
2701 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2702 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2703 			if (wl12xx_dev_role_started(wlvif))
2704 				wl12xx_stop_dev(wl, wlvif);
2705 		}
2706 
2707 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2708 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2709 			if (ret < 0) {
2710 				pm_runtime_put_noidle(wl->dev);
2711 				goto deinit;
2712 			}
2713 		} else {
2714 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2715 			if (ret < 0) {
2716 				pm_runtime_put_noidle(wl->dev);
2717 				goto deinit;
2718 			}
2719 		}
2720 
2721 		pm_runtime_mark_last_busy(wl->dev);
2722 		pm_runtime_put_autosuspend(wl->dev);
2723 	}
2724 deinit:
2725 	wl12xx_tx_reset_wlvif(wl, wlvif);
2726 
2727 	/* clear all hlids (except system_hlid) */
2728 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2729 
2730 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2731 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2732 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2733 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2734 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2735 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2736 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2737 	} else {
2738 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2739 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2740 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2741 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2742 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2743 			wl12xx_free_rate_policy(wl,
2744 						&wlvif->ap.ucast_rate_idx[i]);
2745 		wl1271_free_ap_keys(wl, wlvif);
2746 	}
2747 
2748 	dev_kfree_skb(wlvif->probereq);
2749 	wlvif->probereq = NULL;
2750 	if (wl->last_wlvif == wlvif)
2751 		wl->last_wlvif = NULL;
2752 	list_del(&wlvif->list);
2753 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2754 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2755 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2756 
2757 	if (is_ap)
2758 		wl->ap_count--;
2759 	else
2760 		wl->sta_count--;
2761 
2762 	/*
2763 	 * Last AP is gone but stations remain: configure sleep auth according to STA.
2764 	 * Don't do this on unintended recovery.
2765 	 */
2766 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2767 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2768 		goto unlock;
2769 
2770 	if (wl->ap_count == 0 && is_ap) {
2771 		/* mask ap events */
2772 		wl->event_mask &= ~wl->ap_event_mask;
2773 		wl1271_event_unmask(wl);
2774 	}
2775 
2776 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2777 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2778 		/* Configure for power according to debugfs */
2779 		if (sta_auth != WL1271_PSM_ILLEGAL)
2780 			wl1271_acx_sleep_auth(wl, sta_auth);
2781 		/* Configure for ELP power saving */
2782 		else
2783 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2784 	}
2785 
2786 unlock:
2787 	mutex_unlock(&wl->mutex);
2788 
2789 	del_timer_sync(&wlvif->rx_streaming_timer);
2790 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2791 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2792 	cancel_work_sync(&wlvif->rc_update_work);
2793 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2794 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2795 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2796 
2797 	mutex_lock(&wl->mutex);
2798 }
2799 
2800 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2801 				       struct ieee80211_vif *vif)
2802 {
2803 	struct wl1271 *wl = hw->priv;
2804 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2805 	struct wl12xx_vif *iter;
2806 	struct vif_counter_data vif_count;
2807 
2808 	wl12xx_get_vif_count(hw, vif, &vif_count);
2809 	mutex_lock(&wl->mutex);
2810 
2811 	if (wl->state == WLCORE_STATE_OFF ||
2812 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2813 		goto out;
2814 
2815 	/*
2816 	 * wl->vif can be null here if someone shuts down the interface
2817 	 * just when hardware recovery has been started.
2818 	 */
2819 	wl12xx_for_each_wlvif(wl, iter) {
2820 		if (iter != wlvif)
2821 			continue;
2822 
2823 		__wl1271_op_remove_interface(wl, vif, true);
2824 		break;
2825 	}
2826 	WARN_ON(iter != wlvif);
2827 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2828 		wl12xx_force_active_psm(wl);
2829 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2830 		wl12xx_queue_recovery_work(wl);
2831 	}
2832 out:
2833 	mutex_unlock(&wl->mutex);
2834 }
2835 
2836 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2837 				      struct ieee80211_vif *vif,
2838 				      enum nl80211_iftype new_type, bool p2p)
2839 {
2840 	struct wl1271 *wl = hw->priv;
2841 	int ret;
2842 
2843 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2844 	wl1271_op_remove_interface(hw, vif);
2845 
2846 	vif->type = new_type;
2847 	vif->p2p = p2p;
2848 	ret = wl1271_op_add_interface(hw, vif);
2849 
2850 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2851 	return ret;
2852 }
2853 
2854 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2855 {
2856 	int ret;
2857 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2858 
2859 	/*
2860 	 * One of the side effects of the JOIN command is that it clears
2861 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2862 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2863 	 * Currently the only valid scenario for JOIN during association
2864 	 * is on roaming, in which case we will also be given new keys.
2865 	 * Keep the below message for now, unless it starts bothering
2866 	 * users who really like to roam a lot :)
2867 	 */
2868 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2869 		wl1271_info("JOIN while associated.");
2870 
2871 	/* clear encryption type */
2872 	wlvif->encryption_type = KEY_NONE;
2873 
2874 	if (is_ibss)
2875 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2876 	else {
2877 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2878 			/*
2879 			 * TODO: this is an ugly workaround for wl12xx fw
2880 			 * bug - we are not able to tx/rx after the first
2881 			 * start_sta, so make dummy start+stop calls,
2882 			 * and then call start_sta again.
2883 			 * this should be fixed in the fw.
2884 			 */
2885 			wl12xx_cmd_role_start_sta(wl, wlvif);
2886 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2887 		}
2888 
2889 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2890 	}
2891 
2892 	return ret;
2893 }
2894 
2895 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2896 			    int offset)
2897 {
2898 	u8 ssid_len;
2899 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2900 					 skb->len - offset);
2901 
2902 	if (!ptr) {
2903 		wl1271_error("No SSID in IEs!");
2904 		return -ENOENT;
2905 	}
2906 
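	/* IE layout: ptr[0] is the EID, ptr[1] the length, SSID bytes start at ptr + 2 */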
2907 	ssid_len = ptr[1];
2908 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2909 		wl1271_error("SSID is too long!");
2910 		return -EINVAL;
2911 	}
2912 
2913 	wlvif->ssid_len = ssid_len;
2914 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2915 	return 0;
2916 }
2917 
2918 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2919 {
2920 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2921 	struct sk_buff *skb;
2922 	int ieoffset;
2923 
2924 	/* we currently only support setting the ssid from the ap probe req */
2925 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2926 		return -EINVAL;
2927 
2928 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2929 	if (!skb)
2930 		return -EINVAL;
2931 
2932 	ieoffset = offsetof(struct ieee80211_mgmt,
2933 			    u.probe_req.variable);
2934 	wl1271_ssid_set(wlvif, skb, ieoffset);
2935 	dev_kfree_skb(skb);
2936 
2937 	return 0;
2938 }
2939 
2940 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2941 			    struct ieee80211_bss_conf *bss_conf,
2942 			    u32 sta_rate_set)
2943 {
2944 	int ieoffset;
2945 	int ret;
2946 
2947 	wlvif->aid = bss_conf->aid;
2948 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2949 	wlvif->beacon_int = bss_conf->beacon_int;
2950 	wlvif->wmm_enabled = bss_conf->qos;
2951 
2952 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2953 
2954 	/*
2955 	 * with wl1271, we don't need to update the
2956 	 * beacon_int and dtim_period, because the firmware
2957 	 * updates them by itself when the first beacon is
2958 	 * received after a join.
2959 	 */
2960 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2961 	if (ret < 0)
2962 		return ret;
2963 
2964 	/*
2965 	 * Get a template for hardware connection maintenance
2966 	 */
2967 	dev_kfree_skb(wlvif->probereq);
2968 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2969 							wlvif,
2970 							NULL);
2971 	ieoffset = offsetof(struct ieee80211_mgmt,
2972 			    u.probe_req.variable);
2973 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2974 
2975 	/* enable the connection monitoring feature */
2976 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2977 	if (ret < 0)
2978 		return ret;
2979 
2980 	/*
2981 	 * The join command disables keep-alive mode, shuts down its process,
2982 	 * and also clears the template config, so we need to reset it all after
2983 	 * the join. The acx_aid starts the keep-alive process, and the order
2984 	 * of the commands below is relevant.
2985 	 */
2986 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2987 	if (ret < 0)
2988 		return ret;
2989 
2990 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2991 	if (ret < 0)
2992 		return ret;
2993 
2994 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2995 	if (ret < 0)
2996 		return ret;
2997 
2998 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2999 					   wlvif->sta.klv_template_id,
3000 					   ACX_KEEP_ALIVE_TPL_VALID);
3001 	if (ret < 0)
3002 		return ret;
3003 
3004 	/*
3005 	 * The default fw psm configuration is AUTO, while mac80211 default
3006 	 * setting is off (ACTIVE), so sync the fw with the correct value.
3007 	 */
3008 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3009 	if (ret < 0)
3010 		return ret;
3011 
3012 	if (sta_rate_set) {
3013 		wlvif->rate_set =
3014 			wl1271_tx_enabled_rates_get(wl,
3015 						    sta_rate_set,
3016 						    wlvif->band);
3017 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3018 		if (ret < 0)
3019 			return ret;
3020 	}
3021 
3022 	return ret;
3023 }
3024 
3025 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3026 {
3027 	int ret;
3028 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3029 
3030 	/* make sure we are associated (sta) */
3031 	if (sta &&
3032 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3033 		return false;
3034 
3035 	/* make sure we are joined (ibss) */
3036 	if (!sta &&
3037 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3038 		return false;
3039 
3040 	if (sta) {
3041 		/* use defaults when not associated */
3042 		wlvif->aid = 0;
3043 
3044 		/* free probe-request template */
3045 		dev_kfree_skb(wlvif->probereq);
3046 		wlvif->probereq = NULL;
3047 
3048 		/* disable connection monitor features */
3049 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3050 		if (ret < 0)
3051 			return ret;
3052 
3053 		/* Disable the keep-alive feature */
3054 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3055 		if (ret < 0)
3056 			return ret;
3057 
3058 		/* disable beacon filtering */
3059 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3060 		if (ret < 0)
3061 			return ret;
3062 	}
3063 
3064 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3065 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3066 
3067 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3068 		ieee80211_chswitch_done(vif, false);
3069 		cancel_delayed_work(&wlvif->channel_switch_work);
3070 	}
3071 
3072 	/* invalidate keep-alive template */
3073 	wl1271_acx_keep_alive_config(wl, wlvif,
3074 				     wlvif->sta.klv_template_id,
3075 				     ACX_KEEP_ALIVE_TPL_INVALID);
3076 
3077 	return 0;
3078 }
3079 
3080 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3081 {
3082 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3083 	wlvif->rate_set = wlvif->basic_rate_set;
3084 }
3085 
3086 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3087 				   bool idle)
3088 {
3089 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3090 
3091 	if (idle == cur_idle)
3092 		return;
3093 
3094 	if (idle) {
3095 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3096 	} else {
3097 		/* The current firmware only supports sched_scan in idle */
3098 		if (wl->sched_vif == wlvif)
3099 			wl->ops->sched_scan_stop(wl, wlvif);
3100 
3101 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3102 	}
3103 }
3104 
3105 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3106 			     struct ieee80211_conf *conf, u32 changed)
3107 {
3108 	int ret;
3109 
3110 	if (wlcore_is_p2p_mgmt(wlvif))
3111 		return 0;
3112 
3113 	if (conf->power_level != wlvif->power_level) {
3114 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3115 		if (ret < 0)
3116 			return ret;
3117 
3118 		wlvif->power_level = conf->power_level;
3119 	}
3120 
3121 	return 0;
3122 }
3123 
3124 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3125 {
3126 	struct wl1271 *wl = hw->priv;
3127 	struct wl12xx_vif *wlvif;
3128 	struct ieee80211_conf *conf = &hw->conf;
3129 	int ret = 0;
3130 
3131 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3132 		     " changed 0x%x",
3133 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3134 		     conf->power_level,
3135 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3136 			 changed);
3137 
3138 	mutex_lock(&wl->mutex);
3139 
3140 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3141 		wl->power_level = conf->power_level;
3142 
3143 	if (unlikely(wl->state != WLCORE_STATE_ON))
3144 		goto out;
3145 
3146 	ret = pm_runtime_get_sync(wl->dev);
3147 	if (ret < 0) {
3148 		pm_runtime_put_noidle(wl->dev);
3149 		goto out;
3150 	}
3151 
3152 	/* configure each interface */
3153 	wl12xx_for_each_wlvif(wl, wlvif) {
3154 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3155 		if (ret < 0)
3156 			goto out_sleep;
3157 	}
3158 
3159 out_sleep:
3160 	pm_runtime_mark_last_busy(wl->dev);
3161 	pm_runtime_put_autosuspend(wl->dev);
3162 
3163 out:
3164 	mutex_unlock(&wl->mutex);
3165 
3166 	return ret;
3167 }
3168 
3169 struct wl1271_filter_params {
3170 	bool enabled;
3171 	int mc_list_length;
3172 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3173 };
3174 
3175 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3176 				       struct netdev_hw_addr_list *mc_list)
3177 {
3178 	struct wl1271_filter_params *fp;
3179 	struct netdev_hw_addr *ha;
3180 
3181 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3182 	if (!fp) {
3183 		wl1271_error("Out of memory setting filters.");
3184 		return 0;
3185 	}
3186 
3187 	/* update multicast filtering parameters */
3188 	fp->mc_list_length = 0;
3189 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3190 		fp->enabled = false;
3191 	} else {
3192 		fp->enabled = true;
3193 		netdev_hw_addr_list_for_each(ha, mc_list) {
3194 			memcpy(fp->mc_list[fp->mc_list_length],
3195 					ha->addr, ETH_ALEN);
3196 			fp->mc_list_length++;
3197 		}
3198 	}
3199 
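	/*
	 * mac80211 treats the return value as an opaque cookie; it is handed
	 * back unchanged as the 'multicast' argument of configure_filter(),
	 * where it is unpacked and freed.
	 */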
3200 	return (u64)(unsigned long)fp;
3201 }
3202 
3203 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3204 				  FIF_FCSFAIL | \
3205 				  FIF_BCN_PRBRESP_PROMISC | \
3206 				  FIF_CONTROL | \
3207 				  FIF_OTHER_BSS)
3208 
3209 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3210 				       unsigned int changed,
3211 				       unsigned int *total, u64 multicast)
3212 {
3213 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3214 	struct wl1271 *wl = hw->priv;
3215 	struct wl12xx_vif *wlvif;
3216 
3217 	int ret;
3218 
3219 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3220 		     " total %x", changed, *total);
3221 
3222 	mutex_lock(&wl->mutex);
3223 
3224 	*total &= WL1271_SUPPORTED_FILTERS;
3225 	changed &= WL1271_SUPPORTED_FILTERS;
3226 
3227 	if (unlikely(wl->state != WLCORE_STATE_ON))
3228 		goto out;
3229 
3230 	ret = pm_runtime_get_sync(wl->dev);
3231 	if (ret < 0) {
3232 		pm_runtime_put_noidle(wl->dev);
3233 		goto out;
3234 	}
3235 
3236 	wl12xx_for_each_wlvif(wl, wlvif) {
3237 		if (wlcore_is_p2p_mgmt(wlvif))
3238 			continue;
3239 
3240 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3241 			if (*total & FIF_ALLMULTI)
3242 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3243 								   false,
3244 								   NULL, 0);
3245 			else if (fp)
3246 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3247 							fp->enabled,
3248 							fp->mc_list,
3249 							fp->mc_list_length);
3250 			if (ret < 0)
3251 				goto out_sleep;
3252 		}
3253 
3254 		/*
3255 		 * If the interface is in AP mode and was created with allmulticast,
3256 		 * disable the firmware filters so that all multicast packets are
3257 		 * passed. This is mandatory for mDNS-based discovery protocols.
3258 		 */
3259 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3260 			if (*total & FIF_ALLMULTI) {
3261 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3262 							false,
3263 							NULL, 0);
3264 				if (ret < 0)
3265 					goto out_sleep;
3266 			}
3267 		}
3268 	}
3269 
3270 	/*
3271 	 * the fw doesn't provide an api to configure the filters. instead,
3272 	 * the filters configuration is based on the active roles / ROC
3273 	 * state.
3274 	 */
3275 
3276 out_sleep:
3277 	pm_runtime_mark_last_busy(wl->dev);
3278 	pm_runtime_put_autosuspend(wl->dev);
3279 
3280 out:
3281 	mutex_unlock(&wl->mutex);
3282 	kfree(fp);
3283 }
3284 
3285 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3286 				u8 id, u8 key_type, u8 key_size,
3287 				const u8 *key, u8 hlid, u32 tx_seq_32,
3288 				u16 tx_seq_16, bool is_pairwise)
3289 {
3290 	struct wl1271_ap_key *ap_key;
3291 	int i;
3292 
3293 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3294 
3295 	if (key_size > MAX_KEY_SIZE)
3296 		return -EINVAL;
3297 
3298 	/*
3299 	 * Find next free entry in ap_keys. Also check we are not replacing
3300 	 * an existing key.
3301 	 */
3302 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3303 		if (wlvif->ap.recorded_keys[i] == NULL)
3304 			break;
3305 
3306 		if (wlvif->ap.recorded_keys[i]->id == id) {
3307 			wl1271_warning("trying to record key replacement");
3308 			return -EINVAL;
3309 		}
3310 	}
3311 
3312 	if (i == MAX_NUM_KEYS)
3313 		return -EBUSY;
3314 
3315 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3316 	if (!ap_key)
3317 		return -ENOMEM;
3318 
3319 	ap_key->id = id;
3320 	ap_key->key_type = key_type;
3321 	ap_key->key_size = key_size;
3322 	memcpy(ap_key->key, key, key_size);
3323 	ap_key->hlid = hlid;
3324 	ap_key->tx_seq_32 = tx_seq_32;
3325 	ap_key->tx_seq_16 = tx_seq_16;
3326 	ap_key->is_pairwise = is_pairwise;
3327 
3328 	wlvif->ap.recorded_keys[i] = ap_key;
3329 	return 0;
3330 }
3331 
3332 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3333 {
3334 	int i;
3335 
3336 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3337 		kfree(wlvif->ap.recorded_keys[i]);
3338 		wlvif->ap.recorded_keys[i] = NULL;
3339 	}
3340 }
3341 
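/*
 * Replay the keys recorded by wl1271_record_ap_key() now that the AP role
 * (and its broadcast hlid) exists.  If any WEP key was programmed, the
 * default WEP key is also configured for the broadcast link.
 */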
3342 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3343 {
3344 	int i, ret = 0;
3345 	struct wl1271_ap_key *key;
3346 	bool wep_key_added = false;
3347 
3348 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3349 		u8 hlid;
3350 		if (wlvif->ap.recorded_keys[i] == NULL)
3351 			break;
3352 
3353 		key = wlvif->ap.recorded_keys[i];
3354 		hlid = key->hlid;
3355 		if (hlid == WL12XX_INVALID_LINK_ID)
3356 			hlid = wlvif->ap.bcast_hlid;
3357 
3358 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3359 					    key->id, key->key_type,
3360 					    key->key_size, key->key,
3361 					    hlid, key->tx_seq_32,
3362 					    key->tx_seq_16, key->is_pairwise);
3363 		if (ret < 0)
3364 			goto out;
3365 
3366 		if (key->key_type == KEY_WEP)
3367 			wep_key_added = true;
3368 	}
3369 
3370 	if (wep_key_added) {
3371 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3372 						     wlvif->ap.bcast_hlid);
3373 		if (ret < 0)
3374 			goto out;
3375 	}
3376 
3377 out:
3378 	wl1271_free_ap_keys(wl, wlvif);
3379 	return ret;
3380 }
3381 
3382 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3383 		       u16 action, u8 id, u8 key_type,
3384 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3385 		       u16 tx_seq_16, struct ieee80211_sta *sta,
3386 		       bool is_pairwise)
3387 {
3388 	int ret;
3389 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3390 
3391 	if (is_ap) {
3392 		struct wl1271_station *wl_sta;
3393 		u8 hlid;
3394 
3395 		if (sta) {
3396 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3397 			hlid = wl_sta->hlid;
3398 		} else {
3399 			hlid = wlvif->ap.bcast_hlid;
3400 		}
3401 
3402 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3403 			/*
3404 			 * We do not support removing keys after AP shutdown.
3405 			 * Pretend we do to make mac80211 happy.
3406 			 */
3407 			if (action != KEY_ADD_OR_REPLACE)
3408 				return 0;
3409 
3410 			ret = wl1271_record_ap_key(wl, wlvif, id,
3411 					     key_type, key_size,
3412 					     key, hlid, tx_seq_32,
3413 					     tx_seq_16, is_pairwise);
3414 		} else {
3415 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3416 					     id, key_type, key_size,
3417 					     key, hlid, tx_seq_32,
3418 					     tx_seq_16, is_pairwise);
3419 		}
3420 
3421 		if (ret < 0)
3422 			return ret;
3423 	} else {
3424 		const u8 *addr;
3425 		static const u8 bcast_addr[ETH_ALEN] = {
3426 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3427 		};
3428 
3429 		addr = sta ? sta->addr : bcast_addr;
3430 
3431 		if (is_zero_ether_addr(addr)) {
3432 			/* We don't support TX-only encryption */
3433 			return -EOPNOTSUPP;
3434 		}
3435 
3436 		/* The wl1271 does not allow removing unicast keys - they
3437 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3438 		   request silently, as we don't want mac80211 to emit
3439 		   an error message. */
3440 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3441 			return 0;
3442 
3443 		/* don't remove key if hlid was already deleted */
3444 		if (action == KEY_REMOVE &&
3445 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3446 			return 0;
3447 
3448 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3449 					     id, key_type, key_size,
3450 					     key, addr, tx_seq_32,
3451 					     tx_seq_16);
3452 		if (ret < 0)
3453 			return ret;
3454 
3455 	}
3456 
3457 	return 0;
3458 }
3459 
3460 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3461 			     struct ieee80211_vif *vif,
3462 			     struct ieee80211_sta *sta,
3463 			     struct ieee80211_key_conf *key_conf)
3464 {
3465 	struct wl1271 *wl = hw->priv;
3466 	int ret;
3467 	bool might_change_spare =
3468 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3469 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3470 
3471 	if (might_change_spare) {
3472 		/*
3473 		 * stop the queues and flush to ensure the next packets are
3474 		 * in sync with FW spare block accounting
3475 		 */
3476 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3477 		wl1271_tx_flush(wl);
3478 	}
3479 
3480 	mutex_lock(&wl->mutex);
3481 
3482 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3483 		ret = -EAGAIN;
3484 		goto out_wake_queues;
3485 	}
3486 
3487 	ret = pm_runtime_get_sync(wl->dev);
3488 	if (ret < 0) {
3489 		pm_runtime_put_noidle(wl->dev);
3490 		goto out_wake_queues;
3491 	}
3492 
3493 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3494 
3495 	pm_runtime_mark_last_busy(wl->dev);
3496 	pm_runtime_put_autosuspend(wl->dev);
3497 
3498 out_wake_queues:
3499 	if (might_change_spare)
3500 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3501 
3502 	mutex_unlock(&wl->mutex);
3503 
3504 	return ret;
3505 }
3506 
3507 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3508 		   struct ieee80211_vif *vif,
3509 		   struct ieee80211_sta *sta,
3510 		   struct ieee80211_key_conf *key_conf)
3511 {
3512 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3513 	int ret;
3514 	u32 tx_seq_32 = 0;
3515 	u16 tx_seq_16 = 0;
3516 	u8 key_type;
3517 	u8 hlid;
3518 	bool is_pairwise;
3519 
3520 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3521 
3522 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3523 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3524 		     key_conf->cipher, key_conf->keyidx,
3525 		     key_conf->keylen, key_conf->flags);
3526 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3527 
3528 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3529 		if (sta) {
3530 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3531 			hlid = wl_sta->hlid;
3532 		} else {
3533 			hlid = wlvif->ap.bcast_hlid;
3534 		}
3535 	else
3536 		hlid = wlvif->sta.hlid;
3537 
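	/*
	 * Seed the key's initial TX sequence counter from the link's
	 * total_freed_pkts, split into a 32-bit high part and a 16-bit low
	 * part, so that security sequence numbers keep increasing across
	 * rekeying and recovery (assumption: that is why the counter is
	 * kept per-link rather than restarted from zero).
	 */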
3538 	if (hlid != WL12XX_INVALID_LINK_ID) {
3539 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3540 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3541 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3542 	}
3543 
3544 	switch (key_conf->cipher) {
3545 	case WLAN_CIPHER_SUITE_WEP40:
3546 	case WLAN_CIPHER_SUITE_WEP104:
3547 		key_type = KEY_WEP;
3548 
3549 		key_conf->hw_key_idx = key_conf->keyidx;
3550 		break;
3551 	case WLAN_CIPHER_SUITE_TKIP:
3552 		key_type = KEY_TKIP;
3553 		key_conf->hw_key_idx = key_conf->keyidx;
3554 		break;
3555 	case WLAN_CIPHER_SUITE_CCMP:
3556 		key_type = KEY_AES;
3557 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3558 		break;
3559 	case WL1271_CIPHER_SUITE_GEM:
3560 		key_type = KEY_GEM;
3561 		break;
3562 	case WLAN_CIPHER_SUITE_AES_CMAC:
3563 		key_type = KEY_IGTK;
3564 		break;
3565 	default:
3566 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3567 
3568 		return -EOPNOTSUPP;
3569 	}
3570 
3571 	is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3572 
3573 	switch (cmd) {
3574 	case SET_KEY:
3575 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3576 				 key_conf->keyidx, key_type,
3577 				 key_conf->keylen, key_conf->key,
3578 				 tx_seq_32, tx_seq_16, sta, is_pairwise);
3579 		if (ret < 0) {
3580 			wl1271_error("Could not add or replace key");
3581 			return ret;
3582 		}
3583 
3584 		/*
3585 		 * reconfigure the arp response if the unicast (or common)
3586 		 * encryption key type was changed
3587 		 */
3588 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3589 		    (sta || key_type == KEY_WEP) &&
3590 		    wlvif->encryption_type != key_type) {
3591 			wlvif->encryption_type = key_type;
3592 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3593 			if (ret < 0) {
3594 				wl1271_warning("build arp rsp failed: %d", ret);
3595 				return ret;
3596 			}
3597 		}
3598 		break;
3599 
3600 	case DISABLE_KEY:
3601 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3602 				     key_conf->keyidx, key_type,
3603 				     key_conf->keylen, key_conf->key,
3604 				     0, 0, sta, is_pairwise);
3605 		if (ret < 0) {
3606 			wl1271_error("Could not remove key");
3607 			return ret;
3608 		}
3609 		break;
3610 
3611 	default:
3612 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3613 		return -EOPNOTSUPP;
3614 	}
3615 
3616 	return ret;
3617 }
3618 EXPORT_SYMBOL_GPL(wlcore_set_key);
3619 
3620 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3621 					  struct ieee80211_vif *vif,
3622 					  int key_idx)
3623 {
3624 	struct wl1271 *wl = hw->priv;
3625 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3626 	int ret;
3627 
3628 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3629 		     key_idx);
3630 
3631 	/* we don't handle unsetting of default key */
3632 	if (key_idx == -1)
3633 		return;
3634 
3635 	mutex_lock(&wl->mutex);
3636 
3637 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3638 		ret = -EAGAIN;
3639 		goto out_unlock;
3640 	}
3641 
3642 	ret = pm_runtime_get_sync(wl->dev);
3643 	if (ret < 0) {
3644 		pm_runtime_put_noidle(wl->dev);
3645 		goto out_unlock;
3646 	}
3647 
3648 	wlvif->default_key = key_idx;
3649 
3650 	/* the default WEP key needs to be configured at least once */
3651 	if (wlvif->encryption_type == KEY_WEP) {
3652 		ret = wl12xx_cmd_set_default_wep_key(wl,
3653 				key_idx,
3654 				wlvif->sta.hlid);
3655 		if (ret < 0)
3656 			goto out_sleep;
3657 	}
3658 
3659 out_sleep:
3660 	pm_runtime_mark_last_busy(wl->dev);
3661 	pm_runtime_put_autosuspend(wl->dev);
3662 
3663 out_unlock:
3664 	mutex_unlock(&wl->mutex);
3665 }
3666 
3667 void wlcore_regdomain_config(struct wl1271 *wl)
3668 {
3669 	int ret;
3670 
3671 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3672 		return;
3673 
3674 	mutex_lock(&wl->mutex);
3675 
3676 	if (unlikely(wl->state != WLCORE_STATE_ON))
3677 		goto out;
3678 
3679 	ret = pm_runtime_get_sync(wl->dev);
3680 	if (ret < 0) {
3681 		pm_runtime_put_autosuspend(wl->dev);
3682 		goto out;
3683 	}
3684 
3685 	ret = wlcore_cmd_regdomain_config_locked(wl);
3686 	if (ret < 0) {
3687 		wl12xx_queue_recovery_work(wl);
3688 		goto out;
3689 	}
3690 
3691 	pm_runtime_mark_last_busy(wl->dev);
3692 	pm_runtime_put_autosuspend(wl->dev);
3693 out:
3694 	mutex_unlock(&wl->mutex);
3695 }
3696 
3697 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3698 			     struct ieee80211_vif *vif,
3699 			     struct ieee80211_scan_request *hw_req)
3700 {
3701 	struct cfg80211_scan_request *req = &hw_req->req;
3702 	struct wl1271 *wl = hw->priv;
3703 	int ret;
3704 	u8 *ssid = NULL;
3705 	size_t len = 0;
3706 
3707 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3708 
3709 	if (req->n_ssids) {
3710 		ssid = req->ssids[0].ssid;
3711 		len = req->ssids[0].ssid_len;
3712 	}
3713 
3714 	mutex_lock(&wl->mutex);
3715 
3716 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3717 		/*
3718 		 * We cannot return -EBUSY here because cfg80211 will expect
3719 		 * a call to ieee80211_scan_completed if we do - in this case
3720 		 * there won't be any call.
3721 		 */
3722 		ret = -EAGAIN;
3723 		goto out;
3724 	}
3725 
3726 	ret = pm_runtime_get_sync(wl->dev);
3727 	if (ret < 0) {
3728 		pm_runtime_put_noidle(wl->dev);
3729 		goto out;
3730 	}
3731 
3732 	/* fail if there is any role in ROC */
3733 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3734 		/* don't allow scanning right now */
3735 		ret = -EBUSY;
3736 		goto out_sleep;
3737 	}
3738 
3739 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3740 out_sleep:
3741 	pm_runtime_mark_last_busy(wl->dev);
3742 	pm_runtime_put_autosuspend(wl->dev);
3743 out:
3744 	mutex_unlock(&wl->mutex);
3745 
3746 	return ret;
3747 }
3748 
3749 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3750 				     struct ieee80211_vif *vif)
3751 {
3752 	struct wl1271 *wl = hw->priv;
3753 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3754 	struct cfg80211_scan_info info = {
3755 		.aborted = true,
3756 	};
3757 	int ret;
3758 
3759 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3760 
3761 	mutex_lock(&wl->mutex);
3762 
3763 	if (unlikely(wl->state != WLCORE_STATE_ON))
3764 		goto out;
3765 
3766 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3767 		goto out;
3768 
3769 	ret = pm_runtime_get_sync(wl->dev);
3770 	if (ret < 0) {
3771 		pm_runtime_put_noidle(wl->dev);
3772 		goto out;
3773 	}
3774 
3775 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3776 		ret = wl->ops->scan_stop(wl, wlvif);
3777 		if (ret < 0)
3778 			goto out_sleep;
3779 	}
3780 
3781 	/*
3782 	 * Rearm the tx watchdog just before idling scan. This
3783 	 * prevents just-finished scans from triggering the watchdog
3784 	 */
3785 	wl12xx_rearm_tx_watchdog_locked(wl);
3786 
3787 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3788 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3789 	wl->scan_wlvif = NULL;
3790 	wl->scan.req = NULL;
3791 	ieee80211_scan_completed(wl->hw, &info);
3792 
3793 out_sleep:
3794 	pm_runtime_mark_last_busy(wl->dev);
3795 	pm_runtime_put_autosuspend(wl->dev);
3796 out:
3797 	mutex_unlock(&wl->mutex);
3798 
3799 	cancel_delayed_work_sync(&wl->scan_complete_work);
3800 }
3801 
3802 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3803 				      struct ieee80211_vif *vif,
3804 				      struct cfg80211_sched_scan_request *req,
3805 				      struct ieee80211_scan_ies *ies)
3806 {
3807 	struct wl1271 *wl = hw->priv;
3808 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3809 	int ret;
3810 
3811 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3812 
3813 	mutex_lock(&wl->mutex);
3814 
3815 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3816 		ret = -EAGAIN;
3817 		goto out;
3818 	}
3819 
3820 	ret = pm_runtime_get_sync(wl->dev);
3821 	if (ret < 0) {
3822 		pm_runtime_put_noidle(wl->dev);
3823 		goto out;
3824 	}
3825 
3826 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3827 	if (ret < 0)
3828 		goto out_sleep;
3829 
3830 	wl->sched_vif = wlvif;
3831 
3832 out_sleep:
3833 	pm_runtime_mark_last_busy(wl->dev);
3834 	pm_runtime_put_autosuspend(wl->dev);
3835 out:
3836 	mutex_unlock(&wl->mutex);
3837 	return ret;
3838 }
3839 
3840 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3841 				     struct ieee80211_vif *vif)
3842 {
3843 	struct wl1271 *wl = hw->priv;
3844 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3845 	int ret;
3846 
3847 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3848 
3849 	mutex_lock(&wl->mutex);
3850 
3851 	if (unlikely(wl->state != WLCORE_STATE_ON))
3852 		goto out;
3853 
3854 	ret = pm_runtime_get_sync(wl->dev);
3855 	if (ret < 0) {
3856 		pm_runtime_put_noidle(wl->dev);
3857 		goto out;
3858 	}
3859 
3860 	wl->ops->sched_scan_stop(wl, wlvif);
3861 
3862 	pm_runtime_mark_last_busy(wl->dev);
3863 	pm_runtime_put_autosuspend(wl->dev);
3864 out:
3865 	mutex_unlock(&wl->mutex);
3866 
3867 	return 0;
3868 }
3869 
3870 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3871 {
3872 	struct wl1271 *wl = hw->priv;
3873 	int ret = 0;
3874 
3875 	mutex_lock(&wl->mutex);
3876 
3877 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3878 		ret = -EAGAIN;
3879 		goto out;
3880 	}
3881 
3882 	ret = pm_runtime_get_sync(wl->dev);
3883 	if (ret < 0) {
3884 		pm_runtime_put_noidle(wl->dev);
3885 		goto out;
3886 	}
3887 
3888 	ret = wl1271_acx_frag_threshold(wl, value);
3889 	if (ret < 0)
3890 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3891 
3892 	pm_runtime_mark_last_busy(wl->dev);
3893 	pm_runtime_put_autosuspend(wl->dev);
3894 
3895 out:
3896 	mutex_unlock(&wl->mutex);
3897 
3898 	return ret;
3899 }
3900 
3901 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3902 {
3903 	struct wl1271 *wl = hw->priv;
3904 	struct wl12xx_vif *wlvif;
3905 	int ret = 0;
3906 
3907 	mutex_lock(&wl->mutex);
3908 
3909 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3910 		ret = -EAGAIN;
3911 		goto out;
3912 	}
3913 
3914 	ret = pm_runtime_get_sync(wl->dev);
3915 	if (ret < 0) {
3916 		pm_runtime_put_noidle(wl->dev);
3917 		goto out;
3918 	}
3919 
3920 	wl12xx_for_each_wlvif(wl, wlvif) {
3921 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3922 		if (ret < 0)
3923 			wl1271_warning("set rts threshold failed: %d", ret);
3924 	}
3925 	pm_runtime_mark_last_busy(wl->dev);
3926 	pm_runtime_put_autosuspend(wl->dev);
3927 
3928 out:
3929 	mutex_unlock(&wl->mutex);
3930 
3931 	return ret;
3932 }
3933 
3934 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3935 {
3936 	int len;
3937 	const u8 *next, *end = skb->data + skb->len;
3938 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3939 					skb->len - ieoffset);
3940 	if (!ie)
3941 		return;
3942 	len = ie[1] + 2;
3943 	next = ie + len;
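	/* shift the trailing IEs over the removed one and trim the freed tail */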
3944 	memmove(ie, next, end - next);
3945 	skb_trim(skb, skb->len - len);
3946 }
3947 
3948 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3949 					    unsigned int oui, u8 oui_type,
3950 					    int ieoffset)
3951 {
3952 	int len;
3953 	const u8 *next, *end = skb->data + skb->len;
3954 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3955 					       skb->data + ieoffset,
3956 					       skb->len - ieoffset);
3957 	if (!ie)
3958 		return;
3959 	len = ie[1] + 2;
3960 	next = ie + len;
3961 	memmove(ie, next, end - next);
3962 	skb_trim(skb, skb->len - len);
3963 }
3964 
3965 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3966 					 struct ieee80211_vif *vif)
3967 {
3968 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3969 	struct sk_buff *skb;
3970 	int ret;
3971 
3972 	skb = ieee80211_proberesp_get(wl->hw, vif);
3973 	if (!skb)
3974 		return -EOPNOTSUPP;
3975 
3976 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3977 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3978 				      skb->data,
3979 				      skb->len, 0,
3980 				      rates);
3981 	dev_kfree_skb(skb);
3982 
3983 	if (ret < 0)
3984 		goto out;
3985 
3986 	wl1271_debug(DEBUG_AP, "probe response updated");
3987 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3988 
3989 out:
3990 	return ret;
3991 }
3992 
3993 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3994 					     struct ieee80211_vif *vif,
3995 					     u8 *probe_rsp_data,
3996 					     size_t probe_rsp_len,
3997 					     u32 rates)
3998 {
3999 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4000 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
4001 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
4002 	int ssid_ie_offset, ie_offset, templ_len;
4003 	const u8 *ptr;
4004 
4005 	/* no need to change probe response if the SSID is set correctly */
4006 	if (wlvif->ssid_len > 0)
4007 		return wl1271_cmd_template_set(wl, wlvif->role_id,
4008 					       CMD_TEMPL_AP_PROBE_RESPONSE,
4009 					       probe_rsp_data,
4010 					       probe_rsp_len, 0,
4011 					       rates);
4012 
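	/*
	 * Otherwise wlvif has no SSID set (e.g. a hidden SSID); rebuild the
	 * template and insert the real SSID from bss_conf instead.
	 */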
4013 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4014 		wl1271_error("probe_rsp template too big");
4015 		return -EINVAL;
4016 	}
4017 
4018 	/* start searching from IE offset */
4019 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4020 
4021 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4022 			       probe_rsp_len - ie_offset);
4023 	if (!ptr) {
4024 		wl1271_error("No SSID in beacon!");
4025 		return -EINVAL;
4026 	}
4027 
4028 	ssid_ie_offset = ptr - probe_rsp_data;
4029 	ptr += (ptr[1] + 2);
4030 
4031 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4032 
4033 	/* insert SSID from bss_conf */
4034 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4035 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4036 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4037 	       bss_conf->ssid, bss_conf->ssid_len);
4038 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4039 
4040 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4041 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4042 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4043 
4044 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4045 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4046 				       probe_rsp_templ,
4047 				       templ_len, 0,
4048 				       rates);
4049 }
4050 
4051 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4052 				       struct ieee80211_vif *vif,
4053 				       struct ieee80211_bss_conf *bss_conf,
4054 				       u32 changed)
4055 {
4056 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4057 	int ret = 0;
4058 
4059 	if (changed & BSS_CHANGED_ERP_SLOT) {
4060 		if (bss_conf->use_short_slot)
4061 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4062 		else
4063 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4064 		if (ret < 0) {
4065 			wl1271_warning("Set slot time failed %d", ret);
4066 			goto out;
4067 		}
4068 	}
4069 
4070 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4071 		if (bss_conf->use_short_preamble)
4072 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4073 		else
4074 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4075 	}
4076 
4077 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4078 		if (bss_conf->use_cts_prot)
4079 			ret = wl1271_acx_cts_protect(wl, wlvif,
4080 						     CTSPROTECT_ENABLE);
4081 		else
4082 			ret = wl1271_acx_cts_protect(wl, wlvif,
4083 						     CTSPROTECT_DISABLE);
4084 		if (ret < 0) {
4085 			wl1271_warning("Set ctsprotect failed %d", ret);
4086 			goto out;
4087 		}
4088 	}
4089 
4090 out:
4091 	return ret;
4092 }
4093 
4094 static int wlcore_set_beacon_template(struct wl1271 *wl,
4095 				      struct ieee80211_vif *vif,
4096 				      bool is_ap)
4097 {
4098 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4099 	struct ieee80211_hdr *hdr;
4100 	u32 min_rate;
4101 	int ret;
4102 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4103 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4104 	u16 tmpl_id;
4105 
4106 	if (!beacon) {
4107 		ret = -EINVAL;
4108 		goto out;
4109 	}
4110 
4111 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4112 
4113 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4114 	if (ret < 0) {
4115 		dev_kfree_skb(beacon);
4116 		goto out;
4117 	}
4118 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4119 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4120 		CMD_TEMPL_BEACON;
4121 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4122 				      beacon->data,
4123 				      beacon->len, 0,
4124 				      min_rate);
4125 	if (ret < 0) {
4126 		dev_kfree_skb(beacon);
4127 		goto out;
4128 	}
4129 
4130 	wlvif->wmm_enabled =
4131 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4132 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4133 					beacon->data + ieoffset,
4134 					beacon->len - ieoffset);
4135 
4136 	/*
4137 	 * In case a probe-response template was already set explicitly
4138 	 * by userspace, don't derive one from the beacon data.
4139 	 */
4140 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4141 		goto end_bcn;
4142 
4143 	/* remove TIM ie from probe response */
4144 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4145 
4146 	/*
4147 	 * remove p2p ie from probe response.
4148 	 * the fw responds to probe requests that don't include
4149 	 * the p2p ie. probe requests with a p2p ie will be passed up,
4150 	 * and will be responded to by the supplicant (the spec
4151 	 * forbids including the p2p ie when responding to probe
4152 	 * requests that didn't include it).
4153 	 */
4154 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4155 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4156 
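	/* reuse the stripped beacon as the probe-response template by
	 * rewriting its frame control field */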
4157 	hdr = (struct ieee80211_hdr *) beacon->data;
4158 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4159 					 IEEE80211_STYPE_PROBE_RESP);
4160 	if (is_ap)
4161 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4162 							   beacon->data,
4163 							   beacon->len,
4164 							   min_rate);
4165 	else
4166 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4167 					      CMD_TEMPL_PROBE_RESPONSE,
4168 					      beacon->data,
4169 					      beacon->len, 0,
4170 					      min_rate);
4171 end_bcn:
4172 	dev_kfree_skb(beacon);
4173 	if (ret < 0)
4174 		goto out;
4175 
4176 out:
4177 	return ret;
4178 }
4179 
4180 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4181 					  struct ieee80211_vif *vif,
4182 					  struct ieee80211_bss_conf *bss_conf,
4183 					  u32 changed)
4184 {
4185 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4186 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4187 	int ret = 0;
4188 
4189 	if (changed & BSS_CHANGED_BEACON_INT) {
4190 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4191 			bss_conf->beacon_int);
4192 
4193 		wlvif->beacon_int = bss_conf->beacon_int;
4194 	}
4195 
4196 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4197 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4198 
4199 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4200 	}
4201 
4202 	if (changed & BSS_CHANGED_BEACON) {
4203 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4204 		if (ret < 0)
4205 			goto out;
4206 
4207 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4208 				       &wlvif->flags)) {
4209 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4210 			if (ret < 0)
4211 				goto out;
4212 		}
4213 	}
4214 out:
4215 	if (ret != 0)
4216 		wl1271_error("beacon info change failed: %d", ret);
4217 	return ret;
4218 }
4219 
4220 /* AP mode changes */
4221 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4222 				       struct ieee80211_vif *vif,
4223 				       struct ieee80211_bss_conf *bss_conf,
4224 				       u32 changed)
4225 {
4226 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4227 	int ret = 0;
4228 
4229 	if (changed & BSS_CHANGED_BASIC_RATES) {
4230 		u32 rates = bss_conf->basic_rates;
4231 
4232 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4233 								 wlvif->band);
4234 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4235 							wlvif->basic_rate_set);
4236 
4237 		ret = wl1271_init_ap_rates(wl, wlvif);
4238 		if (ret < 0) {
4239 			wl1271_error("AP rate policy change failed %d", ret);
4240 			goto out;
4241 		}
4242 
4243 		ret = wl1271_ap_init_templates(wl, vif);
4244 		if (ret < 0)
4245 			goto out;
4246 
4247 		/* No need to set probe resp template for mesh */
4248 		if (!ieee80211_vif_is_mesh(vif)) {
4249 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4250 							    wlvif->basic_rate,
4251 							    vif);
4252 			if (ret < 0)
4253 				goto out;
4254 		}
4255 
4256 		ret = wlcore_set_beacon_template(wl, vif, true);
4257 		if (ret < 0)
4258 			goto out;
4259 	}
4260 
4261 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4262 	if (ret < 0)
4263 		goto out;
4264 
4265 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4266 		if (bss_conf->enable_beacon) {
4267 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4268 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4269 				if (ret < 0)
4270 					goto out;
4271 
4272 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4273 				if (ret < 0)
4274 					goto out;
4275 
4276 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4277 				wl1271_debug(DEBUG_AP, "started AP");
4278 			}
4279 		} else {
4280 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4281 				/*
4282 				 * AP might be in ROC in case we have just
4283 				 * sent an auth reply. Handle it.
4284 				 */
4285 				if (test_bit(wlvif->role_id, wl->roc_map))
4286 					wl12xx_croc(wl, wlvif->role_id);
4287 
4288 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4289 				if (ret < 0)
4290 					goto out;
4291 
4292 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4293 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4294 					  &wlvif->flags);
4295 				wl1271_debug(DEBUG_AP, "stopped AP");
4296 			}
4297 		}
4298 	}
4299 
4300 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4301 	if (ret < 0)
4302 		goto out;
4303 
4304 	/* Handle HT information change */
4305 	if ((changed & BSS_CHANGED_HT) &&
4306 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4307 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4308 					bss_conf->ht_operation_mode);
4309 		if (ret < 0) {
4310 			wl1271_warning("Set ht information failed %d", ret);
4311 			goto out;
4312 		}
4313 	}
4314 
4315 out:
4316 	return;
4317 }
4318 
4319 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4320 			    struct ieee80211_bss_conf *bss_conf,
4321 			    u32 sta_rate_set)
4322 {
4323 	u32 rates;
4324 	int ret;
4325 
4326 	wl1271_debug(DEBUG_MAC80211,
4327 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4328 	     bss_conf->bssid, bss_conf->aid,
4329 	     bss_conf->beacon_int,
4330 	     bss_conf->basic_rates, sta_rate_set);
4331 
4332 	wlvif->beacon_int = bss_conf->beacon_int;
4333 	rates = bss_conf->basic_rates;
4334 	wlvif->basic_rate_set =
4335 		wl1271_tx_enabled_rates_get(wl, rates,
4336 					    wlvif->band);
4337 	wlvif->basic_rate =
4338 		wl1271_tx_min_rate_get(wl,
4339 				       wlvif->basic_rate_set);
4340 
4341 	if (sta_rate_set)
4342 		wlvif->rate_set =
4343 			wl1271_tx_enabled_rates_get(wl,
4344 						sta_rate_set,
4345 						wlvif->band);
4346 
4347 	/* we only support sched_scan while not connected */
4348 	if (wl->sched_vif == wlvif)
4349 		wl->ops->sched_scan_stop(wl, wlvif);
4350 
4351 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4352 	if (ret < 0)
4353 		return ret;
4354 
4355 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4356 	if (ret < 0)
4357 		return ret;
4358 
4359 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4360 	if (ret < 0)
4361 		return ret;
4362 
4363 	wlcore_set_ssid(wl, wlvif);
4364 
4365 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4366 
4367 	return 0;
4368 }
4369 
4370 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4371 {
4372 	int ret;
4373 
4374 	/* revert back to minimum rates for the current band */
4375 	wl1271_set_band_rate(wl, wlvif);
4376 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4377 
4378 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4379 	if (ret < 0)
4380 		return ret;
4381 
4382 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4383 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4384 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4385 		if (ret < 0)
4386 			return ret;
4387 	}
4388 
4389 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4390 	return 0;
4391 }
4392 /* STA/IBSS mode changes */
4393 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4394 					struct ieee80211_vif *vif,
4395 					struct ieee80211_bss_conf *bss_conf,
4396 					u32 changed)
4397 {
4398 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4399 	bool do_join = false;
4400 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4401 	bool ibss_joined = false;
4402 	u32 sta_rate_set = 0;
4403 	int ret;
4404 	struct ieee80211_sta *sta;
4405 	bool sta_exists = false;
4406 	struct ieee80211_sta_ht_cap sta_ht_cap;
4407 
4408 	if (is_ibss) {
4409 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4410 						     changed);
4411 		if (ret < 0)
4412 			goto out;
4413 	}
4414 
4415 	if (changed & BSS_CHANGED_IBSS) {
4416 		if (bss_conf->ibss_joined) {
4417 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4418 			ibss_joined = true;
4419 		} else {
4420 			wlcore_unset_assoc(wl, wlvif);
4421 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4422 		}
4423 	}
4424 
4425 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4426 		do_join = true;
4427 
4428 	/* Need to update the SSID (for filtering etc) */
4429 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4430 		do_join = true;
4431 
4432 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4433 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4434 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4435 
4436 		do_join = true;
4437 	}
4438 
4439 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4440 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4441 
4442 	if (changed & BSS_CHANGED_CQM) {
4443 		bool enable = false;
4444 		if (bss_conf->cqm_rssi_thold)
4445 			enable = true;
4446 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4447 						  bss_conf->cqm_rssi_thold,
4448 						  bss_conf->cqm_rssi_hyst);
4449 		if (ret < 0)
4450 			goto out;
4451 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4452 	}
4453 
4454 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4455 		       BSS_CHANGED_ASSOC)) {
4456 		rcu_read_lock();
4457 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4458 		if (sta) {
4459 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4460 
4461 			/* save the supp_rates of the ap */
4462 			sta_rate_set = sta->supp_rates[wlvif->band];
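			/* fold in HT MCS rates: rx_mask[0] = MCS 0-7,
			 * rx_mask[1] = MCS 8-15 (MIMO) */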
4463 			if (sta->ht_cap.ht_supported)
4464 				sta_rate_set |=
4465 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4466 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4467 			sta_ht_cap = sta->ht_cap;
4468 			sta_exists = true;
4469 		}
4470 
4471 		rcu_read_unlock();
4472 	}
4473 
4474 	if (changed & BSS_CHANGED_BSSID) {
4475 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4476 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4477 					       sta_rate_set);
4478 			if (ret < 0)
4479 				goto out;
4480 
4481 			/* Need to update the BSSID (for filtering etc) */
4482 			do_join = true;
4483 		} else {
4484 			ret = wlcore_clear_bssid(wl, wlvif);
4485 			if (ret < 0)
4486 				goto out;
4487 		}
4488 	}
4489 
4490 	if (changed & BSS_CHANGED_IBSS) {
4491 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4492 			     bss_conf->ibss_joined);
4493 
4494 		if (bss_conf->ibss_joined) {
4495 			u32 rates = bss_conf->basic_rates;
4496 			wlvif->basic_rate_set =
4497 				wl1271_tx_enabled_rates_get(wl, rates,
4498 							    wlvif->band);
4499 			wlvif->basic_rate =
4500 				wl1271_tx_min_rate_get(wl,
4501 						       wlvif->basic_rate_set);
4502 
4503 			/* by default, use 11b + OFDM rates */
4504 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4505 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4506 			if (ret < 0)
4507 				goto out;
4508 		}
4509 	}
4510 
4511 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4512 		/* enable beacon filtering */
4513 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4514 		if (ret < 0)
4515 			goto out;
4516 	}
4517 
4518 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4519 	if (ret < 0)
4520 		goto out;
4521 
4522 	if (do_join) {
4523 		ret = wlcore_join(wl, wlvif);
4524 		if (ret < 0) {
4525 			wl1271_warning("cmd join failed %d", ret);
4526 			goto out;
4527 		}
4528 	}
4529 
4530 	if (changed & BSS_CHANGED_ASSOC) {
4531 		if (bss_conf->assoc) {
4532 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4533 					       sta_rate_set);
4534 			if (ret < 0)
4535 				goto out;
4536 
4537 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4538 				wl12xx_set_authorized(wl, wlvif);
4539 		} else {
4540 			wlcore_unset_assoc(wl, wlvif);
4541 		}
4542 	}
4543 
4544 	if (changed & BSS_CHANGED_PS) {
4545 		if ((bss_conf->ps) &&
4546 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4547 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4548 			int ps_mode;
4549 			char *ps_mode_str;
4550 
4551 			if (wl->conf.conn.forced_ps) {
4552 				ps_mode = STATION_POWER_SAVE_MODE;
4553 				ps_mode_str = "forced";
4554 			} else {
4555 				ps_mode = STATION_AUTO_PS_MODE;
4556 				ps_mode_str = "auto";
4557 			}
4558 
4559 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4560 
4561 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4562 			if (ret < 0)
4563 				wl1271_warning("enter %s ps failed %d",
4564 					       ps_mode_str, ret);
4565 		} else if (!bss_conf->ps &&
4566 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4567 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4568 
4569 			ret = wl1271_ps_set_mode(wl, wlvif,
4570 						 STATION_ACTIVE_MODE);
4571 			if (ret < 0)
4572 				wl1271_warning("exit auto ps failed %d", ret);
4573 		}
4574 	}
4575 
4576 	/* Handle new association with HT. Do this after join. */
4577 	if (sta_exists) {
4578 		bool enabled =
4579 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4580 
4581 		ret = wlcore_hw_set_peer_cap(wl,
4582 					     &sta_ht_cap,
4583 					     enabled,
4584 					     wlvif->rate_set,
4585 					     wlvif->sta.hlid);
4586 		if (ret < 0) {
4587 			wl1271_warning("Set ht cap failed %d", ret);
4588 			goto out;
4589 
4590 		}
4591 
4592 		if (enabled) {
4593 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4594 						bss_conf->ht_operation_mode);
4595 			if (ret < 0) {
4596 				wl1271_warning("Set ht information failed %d",
4597 					       ret);
4598 				goto out;
4599 			}
4600 		}
4601 	}
4602 
4603 	/* Handle arp filtering. Done after join. */
4604 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4605 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4606 		__be32 addr = bss_conf->arp_addr_list[0];
4607 		wlvif->sta.qos = bss_conf->qos;
4608 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4609 
4610 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4611 			wlvif->ip_addr = addr;
4612 			/*
4613 			 * The template should have been configured only upon
4614 			 * association. However, it seems that the correct IP
4615 			 * isn't being set (when sending), so we have to
4616 			 * reconfigure the template upon every IP change.
4617 			 */
4618 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4619 			if (ret < 0) {
4620 				wl1271_warning("build arp rsp failed: %d", ret);
4621 				goto out;
4622 			}
4623 
4624 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4625 				(ACX_ARP_FILTER_ARP_FILTERING |
4626 				 ACX_ARP_FILTER_AUTO_ARP),
4627 				addr);
4628 		} else {
4629 			wlvif->ip_addr = 0;
4630 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4631 		}
4632 
4633 		if (ret < 0)
4634 			goto out;
4635 	}
4636 
4637 out:
4638 	return;
4639 }
4640 
4641 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4642 				       struct ieee80211_vif *vif,
4643 				       struct ieee80211_bss_conf *bss_conf,
4644 				       u32 changed)
4645 {
4646 	struct wl1271 *wl = hw->priv;
4647 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4648 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4649 	int ret;
4650 
4651 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4652 		     wlvif->role_id, (int)changed);
4653 
4654 	/*
4655 	 * make sure to cancel pending disconnections if our association
4656 	 * state changed
4657 	 */
4658 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4659 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4660 
4661 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4662 	    !bss_conf->enable_beacon)
4663 		wl1271_tx_flush(wl);
4664 
4665 	mutex_lock(&wl->mutex);
4666 
4667 	if (unlikely(wl->state != WLCORE_STATE_ON))
4668 		goto out;
4669 
4670 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4671 		goto out;
4672 
4673 	ret = pm_runtime_get_sync(wl->dev);
4674 	if (ret < 0) {
4675 		pm_runtime_put_noidle(wl->dev);
4676 		goto out;
4677 	}
4678 
4679 	if ((changed & BSS_CHANGED_TXPOWER) &&
4680 	    bss_conf->txpower != wlvif->power_level) {
4681 
4682 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4683 		if (ret < 0)
4684 			goto out;
4685 
4686 		wlvif->power_level = bss_conf->txpower;
4687 	}
4688 
4689 	if (is_ap)
4690 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4691 	else
4692 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4693 
4694 	pm_runtime_mark_last_busy(wl->dev);
4695 	pm_runtime_put_autosuspend(wl->dev);
4696 
4697 out:
4698 	mutex_unlock(&wl->mutex);
4699 }
4700 
4701 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4702 				 struct ieee80211_chanctx_conf *ctx)
4703 {
4704 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4705 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4706 		     cfg80211_get_chandef_type(&ctx->def));
4707 	return 0;
4708 }
4709 
4710 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4711 				     struct ieee80211_chanctx_conf *ctx)
4712 {
4713 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4714 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4715 		     cfg80211_get_chandef_type(&ctx->def));
4716 }
4717 
4718 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4719 				     struct ieee80211_chanctx_conf *ctx,
4720 				     u32 changed)
4721 {
4722 	struct wl1271 *wl = hw->priv;
4723 	struct wl12xx_vif *wlvif;
4724 	int ret;
4725 	int channel = ieee80211_frequency_to_channel(
4726 		ctx->def.chan->center_freq);
4727 
4728 	wl1271_debug(DEBUG_MAC80211,
4729 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4730 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4731 
4732 	mutex_lock(&wl->mutex);
4733 
4734 	ret = pm_runtime_get_sync(wl->dev);
4735 	if (ret < 0) {
4736 		pm_runtime_put_noidle(wl->dev);
4737 		goto out;
4738 	}
4739 
4740 	wl12xx_for_each_wlvif(wl, wlvif) {
4741 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4742 
4743 		rcu_read_lock();
4744 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4745 			rcu_read_unlock();
4746 			continue;
4747 		}
4748 		rcu_read_unlock();
4749 
4750 		/* start radar if needed */
4751 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4752 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4753 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4754 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4755 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4756 			wlcore_hw_set_cac(wl, wlvif, true);
4757 			wlvif->radar_enabled = true;
4758 		}
4759 	}
4760 
4761 	pm_runtime_mark_last_busy(wl->dev);
4762 	pm_runtime_put_autosuspend(wl->dev);
4763 out:
4764 	mutex_unlock(&wl->mutex);
4765 }
4766 
4767 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4768 					struct ieee80211_vif *vif,
4769 					struct ieee80211_chanctx_conf *ctx)
4770 {
4771 	struct wl1271 *wl = hw->priv;
4772 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4773 	int channel = ieee80211_frequency_to_channel(
4774 		ctx->def.chan->center_freq);
4775 	int ret = -EINVAL;
4776 
4777 	wl1271_debug(DEBUG_MAC80211,
4778 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4779 		     wlvif->role_id, channel,
4780 		     cfg80211_get_chandef_type(&ctx->def),
4781 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4782 
4783 	mutex_lock(&wl->mutex);
4784 
4785 	if (unlikely(wl->state != WLCORE_STATE_ON))
4786 		goto out;
4787 
4788 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4789 		goto out;
4790 
4791 	ret = pm_runtime_get_sync(wl->dev);
4792 	if (ret < 0) {
4793 		pm_runtime_put_noidle(wl->dev);
4794 		goto out;
4795 	}
4796 
4797 	wlvif->band = ctx->def.chan->band;
4798 	wlvif->channel = channel;
4799 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4800 
4801 	/* update default rates according to the band */
4802 	wl1271_set_band_rate(wl, wlvif);
4803 
4804 	if (ctx->radar_enabled &&
4805 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4806 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4807 		wlcore_hw_set_cac(wl, wlvif, true);
4808 		wlvif->radar_enabled = true;
4809 	}
4810 
4811 	pm_runtime_mark_last_busy(wl->dev);
4812 	pm_runtime_put_autosuspend(wl->dev);
4813 out:
4814 	mutex_unlock(&wl->mutex);
4815 
4816 	return 0;
4817 }
4818 
4819 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4820 					   struct ieee80211_vif *vif,
4821 					   struct ieee80211_chanctx_conf *ctx)
4822 {
4823 	struct wl1271 *wl = hw->priv;
4824 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4825 	int ret;
4826 
4827 	wl1271_debug(DEBUG_MAC80211,
4828 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4829 		     wlvif->role_id,
4830 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4831 		     cfg80211_get_chandef_type(&ctx->def));
4832 
4833 	wl1271_tx_flush(wl);
4834 
4835 	mutex_lock(&wl->mutex);
4836 
4837 	if (unlikely(wl->state != WLCORE_STATE_ON))
4838 		goto out;
4839 
4840 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4841 		goto out;
4842 
4843 	ret = pm_runtime_get_sync(wl->dev);
4844 	if (ret < 0) {
4845 		pm_runtime_put_noidle(wl->dev);
4846 		goto out;
4847 	}
4848 
4849 	if (wlvif->radar_enabled) {
4850 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4851 		wlcore_hw_set_cac(wl, wlvif, false);
4852 		wlvif->radar_enabled = false;
4853 	}
4854 
4855 	pm_runtime_mark_last_busy(wl->dev);
4856 	pm_runtime_put_autosuspend(wl->dev);
4857 out:
4858 	mutex_unlock(&wl->mutex);
4859 }
4860 
4861 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4862 				    struct wl12xx_vif *wlvif,
4863 				    struct ieee80211_chanctx_conf *new_ctx)
4864 {
4865 	int channel = ieee80211_frequency_to_channel(
4866 		new_ctx->def.chan->center_freq);
4867 
4868 	wl1271_debug(DEBUG_MAC80211,
4869 		     "switch vif (role %d) %d -> %d chan_type: %d",
4870 		     wlvif->role_id, wlvif->channel, channel,
4871 		     cfg80211_get_chandef_type(&new_ctx->def));
4872 
4873 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4874 		return 0;
4875 
4876 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4877 
4878 	if (wlvif->radar_enabled) {
4879 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4880 		wlcore_hw_set_cac(wl, wlvif, false);
4881 		wlvif->radar_enabled = false;
4882 	}
4883 
4884 	wlvif->band = new_ctx->def.chan->band;
4885 	wlvif->channel = channel;
4886 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4887 
4888 	/* start radar if needed */
4889 	if (new_ctx->radar_enabled) {
4890 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4891 		wlcore_hw_set_cac(wl, wlvif, true);
4892 		wlvif->radar_enabled = true;
4893 	}
4894 
4895 	return 0;
4896 }
4897 
4898 static int
4899 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4900 			     struct ieee80211_vif_chanctx_switch *vifs,
4901 			     int n_vifs,
4902 			     enum ieee80211_chanctx_switch_mode mode)
4903 {
4904 	struct wl1271 *wl = hw->priv;
4905 	int i, ret;
4906 
4907 	wl1271_debug(DEBUG_MAC80211,
4908 		     "mac80211 switch chanctx n_vifs %d mode %d",
4909 		     n_vifs, mode);
4910 
4911 	mutex_lock(&wl->mutex);
4912 
4913 	ret = pm_runtime_get_sync(wl->dev);
4914 	if (ret < 0) {
4915 		pm_runtime_put_noidle(wl->dev);
4916 		goto out;
4917 	}
4918 
4919 	for (i = 0; i < n_vifs; i++) {
4920 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4921 
4922 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4923 		if (ret)
4924 			goto out_sleep;
4925 	}
4926 out_sleep:
4927 	pm_runtime_mark_last_busy(wl->dev);
4928 	pm_runtime_put_autosuspend(wl->dev);
4929 out:
4930 	mutex_unlock(&wl->mutex);
4931 
4932 	return 0;
4933 }
4934 
4935 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4936 			     struct ieee80211_vif *vif, u16 queue,
4937 			     const struct ieee80211_tx_queue_params *params)
4938 {
4939 	struct wl1271 *wl = hw->priv;
4940 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4941 	u8 ps_scheme;
4942 	int ret = 0;
4943 
4944 	if (wlcore_is_p2p_mgmt(wlvif))
4945 		return 0;
4946 
4947 	mutex_lock(&wl->mutex);
4948 
4949 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4950 
4951 	if (params->uapsd)
4952 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4953 	else
4954 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4955 
4956 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4957 		goto out;
4958 
4959 	ret = pm_runtime_get_sync(wl->dev);
4960 	if (ret < 0) {
4961 		pm_runtime_put_noidle(wl->dev);
4962 		goto out;
4963 	}
4964 
4965 	/*
4966 	 * mac80211 configures the txop in units of 32us,
4967 	 * but the firmware expects microseconds
4968 	 */
4969 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4970 				params->cw_min, params->cw_max,
4971 				params->aifs, params->txop << 5);
4972 	if (ret < 0)
4973 		goto out_sleep;
4974 
4975 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4976 				 CONF_CHANNEL_TYPE_EDCF,
4977 				 wl1271_tx_get_queue(queue),
4978 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4979 				 0, 0);
4980 
4981 out_sleep:
4982 	pm_runtime_mark_last_busy(wl->dev);
4983 	pm_runtime_put_autosuspend(wl->dev);
4984 
4985 out:
4986 	mutex_unlock(&wl->mutex);
4987 
4988 	return ret;
4989 }
4990 
4991 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4992 			     struct ieee80211_vif *vif)
4993 {
4994 
4995 	struct wl1271 *wl = hw->priv;
4996 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4997 	u64 mactime = ULLONG_MAX;
4998 	int ret;
4999 
5000 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
5001 
5002 	mutex_lock(&wl->mutex);
5003 
5004 	if (unlikely(wl->state != WLCORE_STATE_ON))
5005 		goto out;
5006 
5007 	ret = pm_runtime_get_sync(wl->dev);
5008 	if (ret < 0) {
5009 		pm_runtime_put_noidle(wl->dev);
5010 		goto out;
5011 	}
5012 
5013 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5014 	if (ret < 0)
5015 		goto out_sleep;
5016 
5017 out_sleep:
5018 	pm_runtime_mark_last_busy(wl->dev);
5019 	pm_runtime_put_autosuspend(wl->dev);
5020 
5021 out:
5022 	mutex_unlock(&wl->mutex);
5023 	return mactime;
5024 }
5025 
5026 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5027 				struct survey_info *survey)
5028 {
5029 	struct ieee80211_conf *conf = &hw->conf;
5030 
5031 	if (idx != 0)
5032 		return -ENOENT;
5033 
5034 	survey->channel = conf->chandef.chan;
5035 	survey->filled = 0;
5036 	return 0;
5037 }
5038 
5039 static int wl1271_allocate_sta(struct wl1271 *wl,
5040 			     struct wl12xx_vif *wlvif,
5041 			     struct ieee80211_sta *sta)
5042 {
5043 	struct wl1271_station *wl_sta;
5044 	int ret;
5045 
5046 
5047 	if (wl->active_sta_count >= wl->max_ap_stations) {
5048 		wl1271_warning("could not allocate HLID - too many stations");
5049 		return -EBUSY;
5050 	}
5051 
5052 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5053 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5054 	if (ret < 0) {
5055 		wl1271_warning("could not allocate HLID - too many links");
5056 		return -EBUSY;
5057 	}
5058 
5059 	/* use the previous security seq, if this is a recovery/resume */
5060 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5061 
5062 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5063 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5064 	wl->active_sta_count++;
5065 	return 0;
5066 }
5067 
5068 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5069 {
5070 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5071 		return;
5072 
5073 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5074 	__clear_bit(hlid, &wl->ap_ps_map);
5075 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5076 
5077 	/*
5078 	 * save the last used PN in the private part of ieee80211_sta,
5079 	 * in case of recovery/suspend
5080 	 */
5081 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5082 
5083 	wl12xx_free_link(wl, wlvif, &hlid);
5084 	wl->active_sta_count--;
5085 
5086 	/*
5087 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5088 	 * chance to return STA-buffered packets before complaining.
5089 	 */
5090 	if (wl->active_sta_count == 0)
5091 		wl12xx_rearm_tx_watchdog_locked(wl);
5092 }
5093 
5094 static int wl12xx_sta_add(struct wl1271 *wl,
5095 			  struct wl12xx_vif *wlvif,
5096 			  struct ieee80211_sta *sta)
5097 {
5098 	struct wl1271_station *wl_sta;
5099 	int ret = 0;
5100 	u8 hlid;
5101 
5102 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5103 
5104 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5105 	if (ret < 0)
5106 		return ret;
5107 
5108 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5109 	hlid = wl_sta->hlid;
5110 
5111 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5112 	if (ret < 0)
5113 		wl1271_free_sta(wl, wlvif, hlid);
5114 
5115 	return ret;
5116 }
5117 
5118 static int wl12xx_sta_remove(struct wl1271 *wl,
5119 			     struct wl12xx_vif *wlvif,
5120 			     struct ieee80211_sta *sta)
5121 {
5122 	struct wl1271_station *wl_sta;
5123 	int ret = 0, id;
5124 
5125 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5126 
5127 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5128 	id = wl_sta->hlid;
5129 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5130 		return -EINVAL;
5131 
5132 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5133 	if (ret < 0)
5134 		return ret;
5135 
5136 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5137 	return ret;
5138 }
5139 
5140 static void wlcore_roc_if_possible(struct wl1271 *wl,
5141 				   struct wl12xx_vif *wlvif)
5142 {
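	/* only one ROC may be active; bail out if any role already holds it */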
5143 	if (find_first_bit(wl->roc_map,
5144 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5145 		return;
5146 
5147 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5148 		return;
5149 
5150 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5151 }
5152 
5153 /*
5154  * when wl_sta is NULL, we treat this call as if coming from a
5155  * pending auth reply.
5156  * wl->mutex must be taken and the FW must be awake when the call
5157  * takes place.
5158  */
5159 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5160 			      struct wl1271_station *wl_sta, bool in_conn)
5161 {
5162 	if (in_conn) {
5163 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5164 			return;
5165 
5166 		if (!wlvif->ap_pending_auth_reply &&
5167 		    !wlvif->inconn_count)
5168 			wlcore_roc_if_possible(wl, wlvif);
5169 
5170 		if (wl_sta) {
5171 			wl_sta->in_connection = true;
5172 			wlvif->inconn_count++;
5173 		} else {
5174 			wlvif->ap_pending_auth_reply = true;
5175 		}
5176 	} else {
5177 		if (wl_sta && !wl_sta->in_connection)
5178 			return;
5179 
5180 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5181 			return;
5182 
5183 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5184 			return;
5185 
5186 		if (wl_sta) {
5187 			wl_sta->in_connection = false;
5188 			wlvif->inconn_count--;
5189 		} else {
5190 			wlvif->ap_pending_auth_reply = false;
5191 		}
5192 
5193 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5194 		    test_bit(wlvif->role_id, wl->roc_map))
5195 			wl12xx_croc(wl, wlvif->role_id);
5196 	}
5197 }
5198 
5199 static int wl12xx_update_sta_state(struct wl1271 *wl,
5200 				   struct wl12xx_vif *wlvif,
5201 				   struct ieee80211_sta *sta,
5202 				   enum ieee80211_sta_state old_state,
5203 				   enum ieee80211_sta_state new_state)
5204 {
5205 	struct wl1271_station *wl_sta;
5206 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5207 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5208 	int ret;
5209 
5210 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5211 
5212 	/* Add station (AP mode) */
5213 	if (is_ap &&
5214 	    old_state == IEEE80211_STA_NOTEXIST &&
5215 	    new_state == IEEE80211_STA_NONE) {
5216 		ret = wl12xx_sta_add(wl, wlvif, sta);
5217 		if (ret)
5218 			return ret;
5219 
5220 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5221 	}
5222 
5223 	/* Remove station (AP mode) */
5224 	if (is_ap &&
5225 	    old_state == IEEE80211_STA_NONE &&
5226 	    new_state == IEEE80211_STA_NOTEXIST) {
5227 		/* must not fail */
5228 		wl12xx_sta_remove(wl, wlvif, sta);
5229 
5230 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5231 	}
5232 
5233 	/* Authorize station (AP mode) */
5234 	if (is_ap &&
5235 	    new_state == IEEE80211_STA_AUTHORIZED) {
5236 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5237 		if (ret < 0)
5238 			return ret;
5239 
5240 		/* reconfigure rates */
5241 		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5242 		if (ret < 0)
5243 			return ret;
5244 
5245 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5246 						     wl_sta->hlid);
5247 		if (ret)
5248 			return ret;
5249 
5250 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5251 	}
5252 
5253 	/* Authorize station */
5254 	if (is_sta &&
5255 	    new_state == IEEE80211_STA_AUTHORIZED) {
5256 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5257 		ret = wl12xx_set_authorized(wl, wlvif);
5258 		if (ret)
5259 			return ret;
5260 	}
5261 
5262 	if (is_sta &&
5263 	    old_state == IEEE80211_STA_AUTHORIZED &&
5264 	    new_state == IEEE80211_STA_ASSOC) {
5265 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5266 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5267 	}
5268 
5269 	/* save seq number on disassoc (suspend) */
5270 	if (is_sta &&
5271 	    old_state == IEEE80211_STA_ASSOC &&
5272 	    new_state == IEEE80211_STA_AUTH) {
5273 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5274 		wlvif->total_freed_pkts = 0;
5275 	}
5276 
5277 	/* restore seq number on assoc (resume) */
5278 	if (is_sta &&
5279 	    old_state == IEEE80211_STA_AUTH &&
5280 	    new_state == IEEE80211_STA_ASSOC) {
5281 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5282 	}
5283 
5284 	/* clear ROCs on failure or authorization */
5285 	if (is_sta &&
5286 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5287 	     new_state == IEEE80211_STA_NOTEXIST)) {
5288 		if (test_bit(wlvif->role_id, wl->roc_map))
5289 			wl12xx_croc(wl, wlvif->role_id);
5290 	}
5291 
5292 	if (is_sta &&
5293 	    old_state == IEEE80211_STA_NOTEXIST &&
5294 	    new_state == IEEE80211_STA_NONE) {
5295 		if (find_first_bit(wl->roc_map,
5296 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5297 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5298 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5299 				   wlvif->band, wlvif->channel);
5300 		}
5301 	}
5302 	return 0;
5303 }
5304 
5305 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5306 			       struct ieee80211_vif *vif,
5307 			       struct ieee80211_sta *sta,
5308 			       enum ieee80211_sta_state old_state,
5309 			       enum ieee80211_sta_state new_state)
5310 {
5311 	struct wl1271 *wl = hw->priv;
5312 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5313 	int ret;
5314 
5315 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5316 		     sta->aid, old_state, new_state);
5317 
5318 	mutex_lock(&wl->mutex);
5319 
5320 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5321 		ret = -EBUSY;
5322 		goto out;
5323 	}
5324 
5325 	ret = pm_runtime_get_sync(wl->dev);
5326 	if (ret < 0) {
5327 		pm_runtime_put_noidle(wl->dev);
5328 		goto out;
5329 	}
5330 
5331 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5332 
5333 	pm_runtime_mark_last_busy(wl->dev);
5334 	pm_runtime_put_autosuspend(wl->dev);
5335 out:
5336 	mutex_unlock(&wl->mutex);
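	/*
	 * Errors on downward (teardown) transitions are not propagated,
	 * since mac80211 cannot undo a teardown that has already started.
	 */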
5337 	if (new_state < old_state)
5338 		return 0;
5339 	return ret;
5340 }
5341 
5342 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5343 				  struct ieee80211_vif *vif,
5344 				  struct ieee80211_ampdu_params *params)
5345 {
5346 	struct wl1271 *wl = hw->priv;
5347 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5348 	int ret;
5349 	u8 hlid, *ba_bitmap;
5350 	struct ieee80211_sta *sta = params->sta;
5351 	enum ieee80211_ampdu_mlme_action action = params->action;
5352 	u16 tid = params->tid;
5353 	u16 *ssn = &params->ssn;
5354 
5355 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5356 		     tid);
5357 
5358 	/* sanity check - the fields in FW are only 8bits wide */
5359 	/* sanity check - the fields in FW are only 8 bits wide */
5360 		return -ENOTSUPP;
5361 
5362 	mutex_lock(&wl->mutex);
5363 
5364 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5365 		ret = -EAGAIN;
5366 		goto out;
5367 	}
5368 
5369 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5370 		hlid = wlvif->sta.hlid;
5371 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5372 		struct wl1271_station *wl_sta;
5373 
5374 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5375 		hlid = wl_sta->hlid;
5376 	} else {
5377 		ret = -EINVAL;
5378 		goto out;
5379 	}
5380 
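	/* per-link bitmap of TIDs that currently have an RX BA session */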
5381 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5382 
5383 	ret = pm_runtime_get_sync(wl->dev);
5384 	if (ret < 0) {
5385 		pm_runtime_put_noidle(wl->dev);
5386 		goto out;
5387 	}
5388 
5389 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5390 		     tid, action);
5391 
5392 	switch (action) {
5393 	case IEEE80211_AMPDU_RX_START:
5394 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5395 			ret = -ENOTSUPP;
5396 			break;
5397 		}
5398 
5399 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5400 			ret = -EBUSY;
5401 			wl1271_error("exceeded max RX BA sessions");
5402 			break;
5403 		}
5404 
5405 		if (*ba_bitmap & BIT(tid)) {
5406 			ret = -EINVAL;
5407 			wl1271_error("cannot enable RX BA session on active "
5408 				     "tid: %d", tid);
5409 			break;
5410 		}
5411 
5412 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5413 				hlid,
5414 				params->buf_size);
5415 
5416 		if (!ret) {
5417 			*ba_bitmap |= BIT(tid);
5418 			wl->ba_rx_session_count++;
5419 		}
5420 		break;
5421 
5422 	case IEEE80211_AMPDU_RX_STOP:
5423 		if (!(*ba_bitmap & BIT(tid))) {
5424 			/*
5425 			 * this happens on reconfig - so only output a debug
5426 			 * message for now, and don't fail the function.
5427 			 */
5428 			wl1271_debug(DEBUG_MAC80211,
5429 				     "no active RX BA session on tid: %d",
5430 				     tid);
5431 			ret = 0;
5432 			break;
5433 		}
5434 
5435 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5436 							 hlid, 0);
5437 		if (!ret) {
5438 			*ba_bitmap &= ~BIT(tid);
5439 			wl->ba_rx_session_count--;
5440 		}
5441 		break;
5442 
5443 	/*
5444 	 * The BA initiator session is managed by the FW independently.
5445 	 * Falling through here on purpose for all TX AMPDU actions.
5446 	 */
5447 	case IEEE80211_AMPDU_TX_START:
5448 	case IEEE80211_AMPDU_TX_STOP_CONT:
5449 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5450 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5451 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5452 		ret = -EINVAL;
5453 		break;
5454 
5455 	default:
5456 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5457 		ret = -EINVAL;
5458 	}
5459 
5460 	pm_runtime_mark_last_busy(wl->dev);
5461 	pm_runtime_put_autosuspend(wl->dev);
5462 
5463 out:
5464 	mutex_unlock(&wl->mutex);
5465 
5466 	return ret;
5467 }
5468 
5469 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5470 				   struct ieee80211_vif *vif,
5471 				   const struct cfg80211_bitrate_mask *mask)
5472 {
5473 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5474 	struct wl1271 *wl = hw->priv;
5475 	int i, ret = 0;
5476 
5477 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5478 		mask->control[NL80211_BAND_2GHZ].legacy,
5479 		mask->control[NL80211_BAND_5GHZ].legacy);
5480 
5481 	mutex_lock(&wl->mutex);
5482 
5483 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5484 		wlvif->bitrate_masks[i] =
5485 			wl1271_tx_enabled_rates_get(wl,
5486 						    mask->control[i].legacy,
5487 						    i);
5488 
5489 	if (unlikely(wl->state != WLCORE_STATE_ON))
5490 		goto out;
5491 
5492 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5493 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5494 
5495 		ret = pm_runtime_get_sync(wl->dev);
5496 		if (ret < 0) {
5497 			pm_runtime_put_noidle(wl->dev);
5498 			goto out;
5499 		}
5500 
5501 		wl1271_set_band_rate(wl, wlvif);
5502 		wlvif->basic_rate =
5503 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5504 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5505 
5506 		pm_runtime_mark_last_busy(wl->dev);
5507 		pm_runtime_put_autosuspend(wl->dev);
5508 	}
5509 out:
5510 	mutex_unlock(&wl->mutex);
5511 
5512 	return ret;
5513 }
5514 
5515 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5516 				     struct ieee80211_vif *vif,
5517 				     struct ieee80211_channel_switch *ch_switch)
5518 {
5519 	struct wl1271 *wl = hw->priv;
5520 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5521 	int ret;
5522 
5523 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5524 
5525 	wl1271_tx_flush(wl);
5526 
5527 	mutex_lock(&wl->mutex);
5528 
5529 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5530 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5531 			ieee80211_chswitch_done(vif, false);
5532 		goto out;
5533 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5534 		goto out;
5535 	}
5536 
5537 	ret = pm_runtime_get_sync(wl->dev);
5538 	if (ret < 0) {
5539 		pm_runtime_put_noidle(wl->dev);
5540 		goto out;
5541 	}
5542 
5543 	/* TODO: change mac80211 to pass vif as param */
5544 
5545 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5546 		unsigned long delay_usec;
5547 
5548 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5549 		if (ret)
5550 			goto out_sleep;
5551 
5552 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5553 
5554 		/* indicate failure 5 seconds after channel switch time */
5555 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5556 			ch_switch->count;
5557 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5558 					     usecs_to_jiffies(delay_usec) +
5559 					     msecs_to_jiffies(5000));
5560 	}
5561 
5562 out_sleep:
5563 	pm_runtime_mark_last_busy(wl->dev);
5564 	pm_runtime_put_autosuspend(wl->dev);
5565 
5566 out:
5567 	mutex_unlock(&wl->mutex);
5568 }
5569 
5570 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5571 					struct wl12xx_vif *wlvif,
5572 					u8 eid)
5573 {
5574 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5575 	struct sk_buff *beacon =
5576 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5577 
5578 	if (!beacon)
5579 		return NULL;
5580 
5581 	return cfg80211_find_ie(eid,
5582 				beacon->data + ieoffset,
5583 				beacon->len - ieoffset);
5584 }
5585 
5586 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5587 				u8 *csa_count)
5588 {
5589 	const u8 *ie;
5590 	const struct ieee80211_channel_sw_ie *ie_csa;
5591 
5592 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5593 	if (!ie)
5594 		return -EINVAL;
5595 
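	/* skip the 2-byte IE header (EID + length) to get the CSA payload */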
5596 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5597 	*csa_count = ie_csa->count;
5598 
5599 	return 0;
5600 }
5601 
5602 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5603 					    struct ieee80211_vif *vif,
5604 					    struct cfg80211_chan_def *chandef)
5605 {
5606 	struct wl1271 *wl = hw->priv;
5607 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5608 	struct ieee80211_channel_switch ch_switch = {
5609 		.block_tx = true,
5610 		.chandef = *chandef,
5611 	};
5612 	int ret;
5613 
5614 	wl1271_debug(DEBUG_MAC80211,
5615 		     "mac80211 channel switch beacon (role %d)",
5616 		     wlvif->role_id);
5617 
5618 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5619 	if (ret < 0) {
5620 		wl1271_error("error getting beacon (for CSA counter)");
5621 		return;
5622 	}
5623 
5624 	mutex_lock(&wl->mutex);
5625 
5626 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5627 		ret = -EBUSY;
5628 		goto out;
5629 	}
5630 
5631 	ret = pm_runtime_get_sync(wl->dev);
5632 	if (ret < 0) {
5633 		pm_runtime_put_noidle(wl->dev);
5634 		goto out;
5635 	}
5636 
5637 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5638 	if (ret)
5639 		goto out_sleep;
5640 
5641 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5642 
5643 out_sleep:
5644 	pm_runtime_mark_last_busy(wl->dev);
5645 	pm_runtime_put_autosuspend(wl->dev);
5646 out:
5647 	mutex_unlock(&wl->mutex);
5648 }
5649 
5650 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5651 			    u32 queues, bool drop)
5652 {
5653 	struct wl1271 *wl = hw->priv;
5654 
5655 	wl1271_tx_flush(wl);
5656 }
5657 
5658 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5659 				       struct ieee80211_vif *vif,
5660 				       struct ieee80211_channel *chan,
5661 				       int duration,
5662 				       enum ieee80211_roc_type type)
5663 {
5664 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5665 	struct wl1271 *wl = hw->priv;
5666 	int channel, active_roc, ret = 0;
5667 
5668 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5669 
5670 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5671 		     channel, wlvif->role_id);
5672 
5673 	mutex_lock(&wl->mutex);
5674 
5675 	if (unlikely(wl->state != WLCORE_STATE_ON))
5676 		goto out;
5677 
5678 	/* return EBUSY if we can't ROC right now */
5679 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5680 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5681 		wl1271_warning("active roc on role %d", active_roc);
5682 		ret = -EBUSY;
5683 		goto out;
5684 	}
5685 
5686 	ret = pm_runtime_get_sync(wl->dev);
5687 	if (ret < 0) {
5688 		pm_runtime_put_noidle(wl->dev);
5689 		goto out;
5690 	}
5691 
5692 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5693 	if (ret < 0)
5694 		goto out_sleep;
5695 
5696 	wl->roc_vif = vif;
5697 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5698 				     msecs_to_jiffies(duration));
5699 out_sleep:
5700 	pm_runtime_mark_last_busy(wl->dev);
5701 	pm_runtime_put_autosuspend(wl->dev);
5702 out:
5703 	mutex_unlock(&wl->mutex);
5704 	return ret;
5705 }
5706 
5707 static int __wlcore_roc_completed(struct wl1271 *wl)
5708 {
5709 	struct wl12xx_vif *wlvif;
5710 	int ret;
5711 
5712 	/* already completed */
5713 	if (unlikely(!wl->roc_vif))
5714 		return 0;
5715 
5716 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5717 
5718 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5719 		return -EBUSY;
5720 
5721 	ret = wl12xx_stop_dev(wl, wlvif);
5722 	if (ret < 0)
5723 		return ret;
5724 
5725 	wl->roc_vif = NULL;
5726 
5727 	return 0;
5728 }
5729 
5730 static int wlcore_roc_completed(struct wl1271 *wl)
5731 {
5732 	int ret;
5733 
5734 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5735 
5736 	mutex_lock(&wl->mutex);
5737 
5738 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5739 		ret = -EBUSY;
5740 		goto out;
5741 	}
5742 
5743 	ret = pm_runtime_get_sync(wl->dev);
5744 	if (ret < 0) {
5745 		pm_runtime_put_noidle(wl->dev);
5746 		goto out;
5747 	}
5748 
5749 	ret = __wlcore_roc_completed(wl);
5750 
5751 	pm_runtime_mark_last_busy(wl->dev);
5752 	pm_runtime_put_autosuspend(wl->dev);
5753 out:
5754 	mutex_unlock(&wl->mutex);
5755 
5756 	return ret;
5757 }
5758 
5759 static void wlcore_roc_complete_work(struct work_struct *work)
5760 {
5761 	struct delayed_work *dwork;
5762 	struct wl1271 *wl;
5763 	int ret;
5764 
5765 	dwork = to_delayed_work(work);
5766 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5767 
5768 	ret = wlcore_roc_completed(wl);
5769 	if (!ret)
5770 		ieee80211_remain_on_channel_expired(wl->hw);
5771 }
5772 
5773 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5774 					      struct ieee80211_vif *vif)
5775 {
5776 	struct wl1271 *wl = hw->priv;
5777 
5778 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5779 
5780 	/* TODO: per-vif */
5781 	wl1271_tx_flush(wl);
5782 
5783 	/*
5784 	 * we can't just flush_work here, because it might deadlock
5785 	 * (as we might get called from the same workqueue)
5786 	 */
5787 	cancel_delayed_work_sync(&wl->roc_complete_work);
5788 	wlcore_roc_completed(wl);
5789 
5790 	return 0;
5791 }
5792 
5793 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5794 				    struct ieee80211_vif *vif,
5795 				    struct ieee80211_sta *sta,
5796 				    u32 changed)
5797 {
5798 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5799 
5800 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5801 
5802 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5803 		return;
5804 
5805 	/* this callback is atomic, so schedule a new work item */
5806 	wlvif->rc_update_bw = sta->bandwidth;
5807 	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5808 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5809 }
5810 
5811 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5812 				     struct ieee80211_vif *vif,
5813 				     struct ieee80211_sta *sta,
5814 				     struct station_info *sinfo)
5815 {
5816 	struct wl1271 *wl = hw->priv;
5817 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5818 	s8 rssi_dbm;
5819 	int ret;
5820 
5821 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5822 
5823 	mutex_lock(&wl->mutex);
5824 
5825 	if (unlikely(wl->state != WLCORE_STATE_ON))
5826 		goto out;
5827 
5828 	ret = pm_runtime_get_sync(wl->dev);
5829 	if (ret < 0) {
5830 		pm_runtime_put_noidle(wl->dev);
5831 		goto out_sleep;
5832 	}
5833 
5834 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5835 	if (ret < 0)
5836 		goto out_sleep;
5837 
5838 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5839 	sinfo->signal = rssi_dbm;
5840 
5841 out_sleep:
5842 	pm_runtime_mark_last_busy(wl->dev);
5843 	pm_runtime_put_autosuspend(wl->dev);
5844 
5845 out:
5846 	mutex_unlock(&wl->mutex);
5847 }
5848 
5849 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5850 					     struct ieee80211_sta *sta)
5851 {
5852 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5853 	struct wl1271 *wl = hw->priv;
5854 	u8 hlid = wl_sta->hlid;
5855 
5856 	/* return in units of Kbps */
5857 	return (wl->links[hlid].fw_rate_mbps * 1000);
5858 }
5859 
5860 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5861 {
5862 	struct wl1271 *wl = hw->priv;
5863 	bool ret = false;
5864 
5865 	mutex_lock(&wl->mutex);
5866 
5867 	if (unlikely(wl->state != WLCORE_STATE_ON))
5868 		goto out;
5869 
5870 	/* packets are considered pending if in the TX queue or the FW */
5871 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5872 out:
5873 	mutex_unlock(&wl->mutex);
5874 
5875 	return ret;
5876 }
5877 
5878 /* can't be const, mac80211 writes to this */
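/*
 * Note: mac80211 expresses .bitrate in units of 100 kbit/s, so 10 below
 * means 1 Mbit/s and 540 means 54 Mbit/s.
 */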
5879 static struct ieee80211_rate wl1271_rates[] = {
5880 	{ .bitrate = 10,
5881 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5882 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5883 	{ .bitrate = 20,
5884 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5885 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5886 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5887 	{ .bitrate = 55,
5888 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5889 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5890 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5891 	{ .bitrate = 110,
5892 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5893 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5894 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5895 	{ .bitrate = 60,
5896 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5897 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5898 	{ .bitrate = 90,
5899 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5900 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5901 	{ .bitrate = 120,
5902 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5903 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5904 	{ .bitrate = 180,
5905 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5906 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5907 	{ .bitrate = 240,
5908 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5909 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5910 	{ .bitrate = 360,
5911 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5912 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5913 	{ .bitrate = 480,
5914 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5915 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5916 	{ .bitrate = 540,
5917 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5918 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5919 };
5920 
5921 /* can't be const, mac80211 writes to this */
5922 static struct ieee80211_channel wl1271_channels[] = {
5923 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5924 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5925 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5926 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5927 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5928 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5929 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5930 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5931 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5932 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5933 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5934 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5935 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5936 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5937 };
5938 
5939 /* can't be const, mac80211 writes to this */
5940 static struct ieee80211_supported_band wl1271_band_2ghz = {
5941 	.channels = wl1271_channels,
5942 	.n_channels = ARRAY_SIZE(wl1271_channels),
5943 	.bitrates = wl1271_rates,
5944 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5945 };
5946 
5947 /* 5 GHz data rates for WL1273 */
5948 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5949 	{ .bitrate = 60,
5950 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5951 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5952 	{ .bitrate = 90,
5953 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5954 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5955 	{ .bitrate = 120,
5956 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5957 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5958 	{ .bitrate = 180,
5959 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5960 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5961 	{ .bitrate = 240,
5962 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5963 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5964 	{ .bitrate = 360,
5965 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5966 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5967 	{ .bitrate = 480,
5968 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5969 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5970 	{ .bitrate = 540,
5971 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5972 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5973 };
5974 
5975 /* 5 GHz band channels for WL1273 */
5976 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5977 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5987 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5988 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5989 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5990 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5991 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5992 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5993 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5994 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5995 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5996 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5997 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5998 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5999 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
6000 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
6001 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
6002 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
6003 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
6004 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
6005 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
6006 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
6007 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
6008 };
6009 
6010 static struct ieee80211_supported_band wl1271_band_5ghz = {
6011 	.channels = wl1271_channels_5ghz,
6012 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6013 	.bitrates = wl1271_rates_5ghz,
6014 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6015 };
6016 
6017 static const struct ieee80211_ops wl1271_ops = {
6018 	.start = wl1271_op_start,
6019 	.stop = wlcore_op_stop,
6020 	.add_interface = wl1271_op_add_interface,
6021 	.remove_interface = wl1271_op_remove_interface,
6022 	.change_interface = wl12xx_op_change_interface,
6023 #ifdef CONFIG_PM
6024 	.suspend = wl1271_op_suspend,
6025 	.resume = wl1271_op_resume,
6026 #endif
6027 	.config = wl1271_op_config,
6028 	.prepare_multicast = wl1271_op_prepare_multicast,
6029 	.configure_filter = wl1271_op_configure_filter,
6030 	.tx = wl1271_op_tx,
6031 	.set_key = wlcore_op_set_key,
6032 	.hw_scan = wl1271_op_hw_scan,
6033 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6034 	.sched_scan_start = wl1271_op_sched_scan_start,
6035 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6036 	.bss_info_changed = wl1271_op_bss_info_changed,
6037 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6038 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6039 	.conf_tx = wl1271_op_conf_tx,
6040 	.get_tsf = wl1271_op_get_tsf,
6041 	.get_survey = wl1271_op_get_survey,
6042 	.sta_state = wl12xx_op_sta_state,
6043 	.ampdu_action = wl1271_op_ampdu_action,
6044 	.tx_frames_pending = wl1271_tx_frames_pending,
6045 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6046 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6047 	.channel_switch = wl12xx_op_channel_switch,
6048 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6049 	.flush = wlcore_op_flush,
6050 	.remain_on_channel = wlcore_op_remain_on_channel,
6051 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6052 	.add_chanctx = wlcore_op_add_chanctx,
6053 	.remove_chanctx = wlcore_op_remove_chanctx,
6054 	.change_chanctx = wlcore_op_change_chanctx,
6055 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6056 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6057 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6058 	.sta_rc_update = wlcore_op_sta_rc_update,
6059 	.sta_statistics = wlcore_op_sta_statistics,
6060 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6061 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6062 };
6063 
6064 
6065 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6066 {
6067 	u8 idx;
6068 
6069 	BUG_ON(band >= 2);
6070 
6071 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6072 		wl1271_error("Illegal RX rate from HW: %d", rate);
6073 		return 0;
6074 	}
6075 
6076 	idx = wl->band_rate_to_idx[band][rate];
6077 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6078 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6079 		return 0;
6080 	}
6081 
6082 	return idx;
6083 }
6084 
6085 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6086 {
6087 	int i;
6088 
6089 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6090 		     oui, nic);
6091 
6092 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6093 		wl1271_warning("NIC part of the MAC address wraps around!");
6094 
6095 	for (i = 0; i < wl->num_mac_addr; i++) {
6096 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6097 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6098 		wl->addresses[i].addr[2] = (u8) oui;
6099 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6100 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6101 		wl->addresses[i].addr[5] = (u8) nic;
6102 		nic++;
6103 	}
6104 
6105 	/* we may be at most one address short */
6106 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6107 
6108 	/*
6109 	 * turn on the LAA bit in the first address and use it as
6110 	 * the last address.
6111 	 */
6112 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6113 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6114 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6115 		       sizeof(wl->addresses[0]));
6116 		/* LAA bit */
6117 		wl->addresses[idx].addr[0] |= BIT(1);
6118 	}
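	/*
	 * Worked example (illustrative values, assuming
	 * WLCORE_NUM_MAC_ADDRESSES == 3): oui 0x080028, nic 0x000001 and
	 * num_mac_addr == 2 yield 08:00:28:00:00:01 and 08:00:28:00:00:02
	 * from the loop above, plus 0a:00:28:00:00:01 (the first address
	 * with the locally-administered bit set) in the last slot.
	 */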
6119 
6120 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6121 	wl->hw->wiphy->addresses = wl->addresses;
6122 }
6123 
6124 static int wl12xx_get_hw_info(struct wl1271 *wl)
6125 {
6126 	int ret;
6127 
6128 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6129 	if (ret < 0)
6130 		goto out;
6131 
6132 	wl->fuse_oui_addr = 0;
6133 	wl->fuse_nic_addr = 0;
6134 
6135 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6136 	if (ret < 0)
6137 		goto out;
6138 
6139 	if (wl->ops->get_mac)
6140 		ret = wl->ops->get_mac(wl);
6141 
6142 out:
6143 	return ret;
6144 }
6145 
6146 static int wl1271_register_hw(struct wl1271 *wl)
6147 {
6148 	int ret;
6149 	u32 oui_addr = 0, nic_addr = 0;
6150 	struct platform_device *pdev = wl->pdev;
6151 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6152 
6153 	if (wl->mac80211_registered)
6154 		return 0;
6155 
6156 	if (wl->nvs_len >= 12) {
6157 		/* NOTE: The wl->nvs->nvs element must come first; to
6158 		 * simplify the casting, we assume it is at the
6159 		 * beginning of the wl->nvs structure.
6160 		 */
6161 		u8 *nvs_ptr = (u8 *)wl->nvs;
6162 
6163 		oui_addr =
6164 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6165 		nic_addr =
6166 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6167 	}
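	/*
	 * For example, a default (unconfigured) NVS is expected to decode
	 * to oui_addr 0xdeadbe / nic_addr 0xef0000 here (i.e.
	 * nvs[11..10,6] = de ad be and nvs[5..3] = ef 00 00), which is
	 * caught by the check further down.
	 */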
6168 
6169 	/* if the MAC address is zeroed in the NVS, derive it from fuse */
6170 	if (oui_addr == 0 && nic_addr == 0) {
6171 		oui_addr = wl->fuse_oui_addr;
6172 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6173 		nic_addr = wl->fuse_nic_addr + 1;
6174 	}
6175 
6176 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6177 		wl1271_warning("Detected unconfigured mac address in nvs, deriving from fuse instead.");
6178 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6179 			wl1271_warning("This default nvs file can be removed from the file system");
6180 		} else {
6181 			wl1271_warning("Your device performance is not optimized.");
6182 			wl1271_warning("Please use the calibrator tool to configure your device.");
6183 		}
6184 
6185 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6186 			wl1271_warning("Fuse mac address is zero, using a random mac");
6187 			/* Use TI oui and a random nic */
6188 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6189 			nic_addr = get_random_int();
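			/*
			 * Only the low 24 bits of the random value are
			 * used: wl12xx_derive_mac_addresses() truncates
			 * nic to three bytes via its (u8) casts.
			 */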
6190 		} else {
6191 			oui_addr = wl->fuse_oui_addr;
6192 			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6193 			nic_addr = wl->fuse_nic_addr + 1;
6194 		}
6195 	}
6196 
6197 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6198 
6199 	ret = ieee80211_register_hw(wl->hw);
6200 	if (ret < 0) {
6201 		wl1271_error("unable to register mac80211 hw: %d", ret);
6202 		goto out;
6203 	}
6204 
6205 	wl->mac80211_registered = true;
6206 
6207 	wl1271_debugfs_init(wl);
6208 
6209 	wl1271_notice("loaded");
6210 
6211 out:
6212 	return ret;
6213 }
6214 
6215 static void wl1271_unregister_hw(struct wl1271 *wl)
6216 {
6217 	if (wl->plt)
6218 		wl1271_plt_stop(wl);
6219 
6220 	ieee80211_unregister_hw(wl->hw);
6221 	wl->mac80211_registered = false;
6222 
6223 }
6224 
6225 static int wl1271_init_ieee80211(struct wl1271 *wl)
6226 {
6227 	int i;
6228 	static const u32 cipher_suites[] = {
6229 		WLAN_CIPHER_SUITE_WEP40,
6230 		WLAN_CIPHER_SUITE_WEP104,
6231 		WLAN_CIPHER_SUITE_TKIP,
6232 		WLAN_CIPHER_SUITE_CCMP,
6233 		WL1271_CIPHER_SUITE_GEM,
6234 		WLAN_CIPHER_SUITE_AES_CMAC,
6235 	};
6236 
6237 	/* The tx descriptor buffer */
6238 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6239 
6240 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6241 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6242 
6243 	/* unit us */
6244 	/* FIXME: find a proper value */
6245 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6246 
6247 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6248 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6249 	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6250 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6251 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6252 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6253 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6254 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6255 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6256 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6257 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6258 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6259 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6260 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6261 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6262 
6263 	wl->hw->wiphy->cipher_suites = cipher_suites;
6264 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6265 
6266 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6267 					 BIT(NL80211_IFTYPE_AP) |
6268 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6269 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6270 #ifdef CONFIG_MAC80211_MESH
6271 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6272 #endif
6273 					 BIT(NL80211_IFTYPE_P2P_GO);
6274 
6275 	wl->hw->wiphy->max_scan_ssids = 1;
6276 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6277 	wl->hw->wiphy->max_match_sets = 16;
6278 	/*
6279 	 * Maximum length of elements in scanning probe request templates
6280 	 * should be the maximum length possible for a template, without
6281 	 * the IEEE80211 header of the template
6282 	 */
6283 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6284 			sizeof(struct ieee80211_header);
6285 
6286 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6287 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6288 		sizeof(struct ieee80211_header);
6289 
6290 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6291 
6292 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6293 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6294 				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6295 				WIPHY_FLAG_IBSS_RSN;
6296 
6297 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6298 
6299 	/* make sure all our channels fit in the scanned_ch bitmask */
6300 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6301 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6302 		     WL1271_MAX_CHANNELS);
6303 	/*
6304 	 * clear channel flags from the previous usage
6305 	 * and restore max_power & max_antenna_gain values.
6306 	 */
6307 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6308 		wl1271_band_2ghz.channels[i].flags = 0;
6309 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6310 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6311 	}
6312 
6313 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6314 		wl1271_band_5ghz.channels[i].flags = 0;
6315 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6316 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6317 	}
6318 
6319 	/*
6320 	 * We keep local copies of the band structs because we need to
6321 	 * modify them on a per-device basis.
6322 	 */
6323 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6324 	       sizeof(wl1271_band_2ghz));
6325 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6326 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6327 	       sizeof(*wl->ht_cap));
6328 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6329 	       sizeof(wl1271_band_5ghz));
6330 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6331 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6332 	       sizeof(*wl->ht_cap));
6333 
6334 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6335 		&wl->bands[NL80211_BAND_2GHZ];
6336 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6337 		&wl->bands[NL80211_BAND_5GHZ];
6338 
6339 	/*
6340 	 * allow 4 queues per mac address we support +
6341 	 * 1 cab queue per mac + one global offchannel Tx queue
6342 	 */
6343 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
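	/*
	 * For example, assuming NUM_TX_QUEUES == 4 and
	 * WLCORE_NUM_MAC_ADDRESSES == 3 as defined elsewhere in wlcore,
	 * this works out to (4 + 1) * 3 + 1 = 16 hw queues, with index 15
	 * used as the offchannel queue below.
	 */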
6344 
6345 	/* the last queue is the offchannel queue */
6346 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6347 	wl->hw->max_rates = 1;
6348 
6349 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6350 
6351 	/* the FW answers probe-requests in AP-mode */
6352 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6353 	wl->hw->wiphy->probe_resp_offload =
6354 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6355 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6356 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6357 
6358 	/* allowed interface combinations */
6359 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6360 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6361 
6362 	/* register vendor commands */
6363 	wlcore_set_vendor_commands(wl->hw->wiphy);
6364 
6365 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6366 
6367 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6368 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6369 
6370 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6371 
6372 	return 0;
6373 }
6374 
6375 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6376 				     u32 mbox_size)
6377 {
6378 	struct ieee80211_hw *hw;
6379 	struct wl1271 *wl;
6380 	int i, j, ret;
6381 	unsigned int order;
6382 
6383 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6384 	if (!hw) {
6385 		wl1271_error("could not alloc ieee80211_hw");
6386 		ret = -ENOMEM;
6387 		goto err_hw_alloc;
6388 	}
6389 
6390 	wl = hw->priv;
6391 	memset(wl, 0, sizeof(*wl));
6392 
6393 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6394 	if (!wl->priv) {
6395 		wl1271_error("could not alloc wl priv");
6396 		ret = -ENOMEM;
6397 		goto err_priv_alloc;
6398 	}
6399 
6400 	INIT_LIST_HEAD(&wl->wlvif_list);
6401 
6402 	wl->hw = hw;
6403 
6404 	/*
6405 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6406 	 * We don't allocate any additional resources here, so that's fine.
6407 	 */
6408 	for (i = 0; i < NUM_TX_QUEUES; i++)
6409 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6410 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6411 
6412 	skb_queue_head_init(&wl->deferred_rx_queue);
6413 	skb_queue_head_init(&wl->deferred_tx_queue);
6414 
6415 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6416 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6417 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6418 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6419 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6420 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6421 
6422 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6423 	if (!wl->freezable_wq) {
6424 		ret = -ENOMEM;
6425 		goto err_hw;
6426 	}
6427 
6428 	wl->channel = 0;
6429 	wl->rx_counter = 0;
6430 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6431 	wl->band = NL80211_BAND_2GHZ;
6432 	wl->channel_type = NL80211_CHAN_NO_HT;
6433 	wl->flags = 0;
6434 	wl->sg_enabled = true;
6435 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6436 	wl->recovery_count = 0;
6437 	wl->hw_pg_ver = -1;
6438 	wl->ap_ps_map = 0;
6439 	wl->ap_fw_ps_map = 0;
6440 	wl->quirks = 0;
6441 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6442 	wl->active_sta_count = 0;
6443 	wl->active_link_count = 0;
6444 	wl->fwlog_size = 0;
6445 
6446 	/* The system link is always allocated */
6447 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6448 
6449 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6450 	for (i = 0; i < wl->num_tx_desc; i++)
6451 		wl->tx_frames[i] = NULL;
6452 
6453 	spin_lock_init(&wl->wl_lock);
6454 
6455 	wl->state = WLCORE_STATE_OFF;
6456 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6457 	mutex_init(&wl->mutex);
6458 	mutex_init(&wl->flush_mutex);
6459 	init_completion(&wl->nvs_loading_complete);
6460 
6461 	order = get_order(aggr_buf_size);
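	/*
	 * get_order() rounds the buffer up to a power-of-two number of
	 * pages; e.g. with 4 KiB pages a 300 KiB aggregation buffer would
	 * become order 7, i.e. 128 pages (512 KiB).
	 */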
6462 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6463 	if (!wl->aggr_buf) {
6464 		ret = -ENOMEM;
6465 		goto err_wq;
6466 	}
6467 	wl->aggr_buf_size = aggr_buf_size;
6468 
6469 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6470 	if (!wl->dummy_packet) {
6471 		ret = -ENOMEM;
6472 		goto err_aggr;
6473 	}
6474 
6475 	/* Allocate one page for the FW log */
6476 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6477 	if (!wl->fwlog) {
6478 		ret = -ENOMEM;
6479 		goto err_dummy_packet;
6480 	}
6481 
6482 	wl->mbox_size = mbox_size;
6483 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6484 	if (!wl->mbox) {
6485 		ret = -ENOMEM;
6486 		goto err_fwlog;
6487 	}
6488 
6489 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6490 	if (!wl->buffer_32) {
6491 		ret = -ENOMEM;
6492 		goto err_mbox;
6493 	}
6494 
6495 	return hw;
6496 
6497 err_mbox:
6498 	kfree(wl->mbox);
6499 
6500 err_fwlog:
6501 	free_page((unsigned long)wl->fwlog);
6502 
6503 err_dummy_packet:
6504 	dev_kfree_skb(wl->dummy_packet);
6505 
6506 err_aggr:
6507 	free_pages((unsigned long)wl->aggr_buf, order);
6508 
6509 err_wq:
6510 	destroy_workqueue(wl->freezable_wq);
6511 
6512 err_hw:
6513 	wl1271_debugfs_exit(wl);
6514 	kfree(wl->priv);
6515 
6516 err_priv_alloc:
6517 	ieee80211_free_hw(hw);
6518 
6519 err_hw_alloc:
6520 
6521 	return ERR_PTR(ret);
6522 }
6523 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6524 
6525 int wlcore_free_hw(struct wl1271 *wl)
6526 {
6527 	/* Unblock any fwlog readers */
6528 	mutex_lock(&wl->mutex);
6529 	wl->fwlog_size = -1;
6530 	mutex_unlock(&wl->mutex);
6531 
6532 	wlcore_sysfs_free(wl);
6533 
6534 	kfree(wl->buffer_32);
6535 	kfree(wl->mbox);
6536 	free_page((unsigned long)wl->fwlog);
6537 	dev_kfree_skb(wl->dummy_packet);
6538 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6539 
6540 	wl1271_debugfs_exit(wl);
6541 
6542 	vfree(wl->fw);
6543 	wl->fw = NULL;
6544 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6545 	kfree(wl->nvs);
6546 	wl->nvs = NULL;
6547 
6548 	kfree(wl->raw_fw_status);
6549 	kfree(wl->fw_status);
6550 	kfree(wl->tx_res_if);
6551 	destroy_workqueue(wl->freezable_wq);
6552 
6553 	kfree(wl->priv);
6554 	ieee80211_free_hw(wl->hw);
6555 
6556 	return 0;
6557 }
6558 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6559 
6560 #ifdef CONFIG_PM
6561 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6562 	.flags = WIPHY_WOWLAN_ANY,
6563 	.n_patterns = WL1271_MAX_RX_FILTERS,
6564 	.pattern_min_len = 1,
6565 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6566 };
6567 #endif
6568 
6569 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6570 {
6571 	return IRQ_WAKE_THREAD;
6572 }
6573 
6574 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6575 {
6576 	struct wl1271 *wl = context;
6577 	struct platform_device *pdev = wl->pdev;
6578 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6579 	struct resource *res;
6580 
6581 	int ret;
6582 	irq_handler_t hardirq_fn = NULL;
6583 
6584 	if (fw) {
6585 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6586 		if (!wl->nvs) {
6587 			wl1271_error("Could not allocate nvs data");
6588 			goto out;
6589 		}
6590 		wl->nvs_len = fw->size;
6591 	} else if (pdev_data->family->nvs_name) {
6592 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6593 			     pdev_data->family->nvs_name);
6594 		wl->nvs = NULL;
6595 		wl->nvs_len = 0;
6596 	} else {
6597 		wl->nvs = NULL;
6598 		wl->nvs_len = 0;
6599 	}
6600 
6601 	ret = wl->ops->setup(wl);
6602 	if (ret < 0)
6603 		goto out_free_nvs;
6604 
6605 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6606 
6607 	/* adjust some runtime configuration parameters */
6608 	wlcore_adjust_conf(wl);
6609 
6610 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6611 	if (!res) {
6612 		wl1271_error("Could not get IRQ resource");
6613 		goto out_free_nvs;
6614 	}
6615 
6616 	wl->irq = res->start;
6617 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6618 	wl->if_ops = pdev_data->if_ops;
6619 
6620 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6621 		hardirq_fn = wlcore_hardirq;
6622 	else
6623 		wl->irq_flags |= IRQF_ONESHOT;
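	/*
	 * For edge-triggered interrupts a trivial hard handler
	 * (wlcore_hardirq() above) just wakes the IRQ thread so the edge
	 * is not lost; for level-triggered lines no hard handler is used,
	 * and IRQF_ONESHOT keeps the line masked until the threaded
	 * handler wlcore_irq() has run.
	 */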
6624 
6625 	ret = wl12xx_set_power_on(wl);
6626 	if (ret < 0)
6627 		goto out_free_nvs;
6628 
6629 	ret = wl12xx_get_hw_info(wl);
6630 	if (ret < 0) {
6631 		wl1271_error("couldn't get hw info");
6632 		wl1271_power_off(wl);
6633 		goto out_free_nvs;
6634 	}
6635 
6636 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6637 				   wl->irq_flags, pdev->name, wl);
6638 	if (ret < 0) {
6639 		wl1271_error("interrupt configuration failed");
6640 		wl1271_power_off(wl);
6641 		goto out_free_nvs;
6642 	}
6643 
6644 #ifdef CONFIG_PM
6645 	device_init_wakeup(wl->dev, true);
6646 
6647 	ret = enable_irq_wake(wl->irq);
6648 	if (!ret) {
6649 		wl->irq_wake_enabled = true;
6650 		if (pdev_data->pwr_in_suspend)
6651 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6652 	}
6653 
6654 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6655 	if (res) {
6656 		wl->wakeirq = res->start;
6657 		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6658 		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6659 		if (ret)
6660 			wl->wakeirq = -ENODEV;
6661 	} else {
6662 		wl->wakeirq = -ENODEV;
6663 	}
6664 #endif
6665 	disable_irq(wl->irq);
6666 	wl1271_power_off(wl);
6667 
6668 	ret = wl->ops->identify_chip(wl);
6669 	if (ret < 0)
6670 		goto out_irq;
6671 
6672 	ret = wl1271_init_ieee80211(wl);
6673 	if (ret)
6674 		goto out_irq;
6675 
6676 	ret = wl1271_register_hw(wl);
6677 	if (ret)
6678 		goto out_irq;
6679 
6680 	ret = wlcore_sysfs_init(wl);
6681 	if (ret)
6682 		goto out_unreg;
6683 
6684 	wl->initialized = true;
6685 	goto out;
6686 
6687 out_unreg:
6688 	wl1271_unregister_hw(wl);
6689 
6690 out_irq:
6691 	if (wl->wakeirq >= 0)
6692 		dev_pm_clear_wake_irq(wl->dev);
6693 	device_init_wakeup(wl->dev, false);
6694 	free_irq(wl->irq, wl);
6695 
6696 out_free_nvs:
6697 	kfree(wl->nvs);
6698 
6699 out:
6700 	release_firmware(fw);
6701 	complete_all(&wl->nvs_loading_complete);
6702 }
6703 
6704 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6705 {
6706 	struct wl1271 *wl = dev_get_drvdata(dev);
6707 	struct wl12xx_vif *wlvif;
6708 	int error;
6709 
6710 	/* We do not enter elp sleep in PLT mode */
6711 	if (wl->plt)
6712 		return 0;
6713 
6714 	/* Nothing to do if no ELP mode requested */
6715 	if (wl->sleep_auth != WL1271_PSM_ELP)
6716 		return 0;
6717 
6718 	wl12xx_for_each_wlvif(wl, wlvif) {
6719 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6720 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6721 			return -EBUSY;
6722 	}
6723 
6724 	wl1271_debug(DEBUG_PSM, "chip to elp");
6725 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6726 	if (error < 0) {
6727 		wl12xx_queue_recovery_work(wl);
6728 
6729 		return error;
6730 	}
6731 
6732 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6733 
6734 	return 0;
6735 }
6736 
6737 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6738 {
6739 	struct wl1271 *wl = dev_get_drvdata(dev);
6740 	DECLARE_COMPLETION_ONSTACK(compl);
6741 	unsigned long flags;
6742 	int ret;
6743 	unsigned long start_time = jiffies;
6744 	bool recovery = false;
6745 
6746 	/* Nothing to do if no ELP mode requested */
6747 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6748 		return 0;
6749 
6750 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6751 
6752 	spin_lock_irqsave(&wl->wl_lock, flags);
6753 	wl->elp_compl = &compl;
6754 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6755 
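	/*
	 * The write below asks the firmware to wake up; the completion is
	 * expected to be signalled from the interrupt path elsewhere in
	 * the driver. If the IRQ thread is already running
	 * (WL1271_FLAG_IRQ_RUNNING), the wait is skipped.
	 */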
6756 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6757 	if (ret < 0) {
6758 		recovery = true;
6759 	} else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
6760 		ret = wait_for_completion_timeout(&compl,
6761 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6762 		if (ret == 0) {
6763 			wl1271_warning("ELP wakeup timeout!");
6764 			recovery = true;
6765 		}
6766 	}
6767 
6768 	spin_lock_irqsave(&wl->wl_lock, flags);
6769 	wl->elp_compl = NULL;
6770 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6771 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6772 
6773 	if (recovery) {
6774 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6775 		wl12xx_queue_recovery_work(wl);
6776 	} else {
6777 		wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6778 			     jiffies_to_msecs(jiffies - start_time));
6779 	}
6780 
6781 	return 0;
6782 }
6783 
6784 static const struct dev_pm_ops wlcore_pm_ops = {
6785 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6786 			   wlcore_runtime_resume,
6787 			   NULL)
6788 };
6789 
6790 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6791 {
6792 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6793 	const char *nvs_name;
6794 	int ret = 0;
6795 
6796 	if (!wl->ops || !wl->ptable || !pdev_data)
6797 		return -EINVAL;
6798 
6799 	wl->dev = &pdev->dev;
6800 	wl->pdev = pdev;
6801 	platform_set_drvdata(pdev, wl);
6802 
6803 	if (pdev_data->family && pdev_data->family->nvs_name) {
6804 		nvs_name = pdev_data->family->nvs_name;
6805 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6806 					      nvs_name, &pdev->dev, GFP_KERNEL,
6807 					      wl, wlcore_nvs_cb);
6808 		if (ret < 0) {
6809 			wl1271_error("request_firmware_nowait failed for %s: %d",
6810 				     nvs_name, ret);
6811 			complete_all(&wl->nvs_loading_complete);
6812 		}
6813 	} else {
6814 		wlcore_nvs_cb(NULL, wl);
6815 	}
6816 
6817 	wl->dev->driver->pm = &wlcore_pm_ops;
6818 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6819 	pm_runtime_use_autosuspend(wl->dev);
6820 	pm_runtime_enable(wl->dev);
6821 
6822 	return ret;
6823 }
6824 EXPORT_SYMBOL_GPL(wlcore_probe);
6825 
6826 int wlcore_remove(struct platform_device *pdev)
6827 {
6828 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6829 	struct wl1271 *wl = platform_get_drvdata(pdev);
6830 	int error;
6831 
6832 	error = pm_runtime_get_sync(wl->dev);
6833 	if (error < 0)
6834 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6835 
6836 	wl->dev->driver->pm = NULL;
6837 
6838 	if (pdev_data->family && pdev_data->family->nvs_name)
6839 		wait_for_completion(&wl->nvs_loading_complete);
6840 	if (!wl->initialized)
6841 		return 0;
6842 
6843 	if (wl->wakeirq >= 0) {
6844 		dev_pm_clear_wake_irq(wl->dev);
6845 		wl->wakeirq = -ENODEV;
6846 	}
6847 
6848 	device_init_wakeup(wl->dev, false);
6849 
6850 	if (wl->irq_wake_enabled)
6851 		disable_irq_wake(wl->irq);
6852 
6853 	wl1271_unregister_hw(wl);
6854 
6855 	pm_runtime_put_sync(wl->dev);
6856 	pm_runtime_dont_use_autosuspend(wl->dev);
6857 	pm_runtime_disable(wl->dev);
6858 
6859 	free_irq(wl->irq, wl);
6860 	wlcore_free_hw(wl);
6861 
6862 	return 0;
6863 }
6864 EXPORT_SYMBOL_GPL(wlcore_remove);
6865 
6866 u32 wl12xx_debug_level = DEBUG_NONE;
6867 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6868 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6869 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6870 
6871 module_param_named(fwlog, fwlog_param, charp, 0);
6872 MODULE_PARM_DESC(fwlog,
6873 		 "FW logger options: continuous, dbgpins or disable");
6874 
6875 module_param(fwlog_mem_blocks, int, 0600);
6876 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6877 
6878 module_param(bug_on_recovery, int, 0600);
6879 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6880 
6881 module_param(no_recovery, int, 0600);
6882 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6883 
6884 MODULE_LICENSE("GPL");
6885 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6886 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6887