xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/main.c (revision d2999e1b)
1 
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * version 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20  * 02110-1301 USA
21  *
22  */
23 
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
30 
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43 
44 #define WL1271_BOOT_RETRIES 3
45 
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery     = -1;
50 
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 					 struct ieee80211_vif *vif,
53 					 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
56 
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
58 {
59 	int ret;
60 
61 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
62 		return -EINVAL;
63 
64 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
65 		return 0;
66 
67 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
68 		return 0;
69 
70 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
71 	if (ret < 0)
72 		return ret;
73 
74 	wl1271_info("Association completed.");
75 	return 0;
76 }
77 
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 			      struct regulatory_request *request)
80 {
81 	struct ieee80211_supported_band *band;
82 	struct ieee80211_channel *ch;
83 	int i;
84 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 	struct wl1271 *wl = hw->priv;
86 
87 	band = wiphy->bands[IEEE80211_BAND_5GHZ];
88 	for (i = 0; i < band->n_channels; i++) {
89 		ch = &band->channels[i];
90 		if (ch->flags & IEEE80211_CHAN_DISABLED)
91 			continue;
92 
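		/*
		 * Radar (DFS) channels must not be used for locally
		 * initiated transmissions, so mark them as no-IR.
		 */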
93 		if (ch->flags & IEEE80211_CHAN_RADAR)
94 			ch->flags |= IEEE80211_CHAN_NO_IR;
95 
96 	}
97 
98 	wlcore_regdomain_config(wl);
99 }
100 
101 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
102 				   bool enable)
103 {
104 	int ret = 0;
105 
106 	/* we should hold wl->mutex */
107 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
108 	if (ret < 0)
109 		goto out;
110 
111 	if (enable)
112 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
113 	else
114 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
115 out:
116 	return ret;
117 }
118 
119 /*
120  * this function is called when the rx_streaming interval
121  * has been changed or rx_streaming should be disabled
122  */
123 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
124 {
125 	int ret = 0;
126 	int period = wl->conf.rx_streaming.interval;
127 
128 	/* don't reconfigure if rx_streaming is disabled */
129 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
130 		goto out;
131 
132 	/* reconfigure/disable according to new streaming_period */
133 	if (period &&
134 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
135 	    (wl->conf.rx_streaming.always ||
136 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
137 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
138 	else {
139 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
140 		/* don't cancel_work_sync since we might deadlock */
141 		del_timer_sync(&wlvif->rx_streaming_timer);
142 	}
143 out:
144 	return ret;
145 }
146 
147 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
148 {
149 	int ret;
150 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
151 						rx_streaming_enable_work);
152 	struct wl1271 *wl = wlvif->wl;
153 
154 	mutex_lock(&wl->mutex);
155 
156 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
157 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
158 	    (!wl->conf.rx_streaming.always &&
159 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
160 		goto out;
161 
162 	if (!wl->conf.rx_streaming.interval)
163 		goto out;
164 
165 	ret = wl1271_ps_elp_wakeup(wl);
166 	if (ret < 0)
167 		goto out;
168 
169 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
170 	if (ret < 0)
171 		goto out_sleep;
172 
173 	/* stop it after some time of inactivity */
174 	mod_timer(&wlvif->rx_streaming_timer,
175 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
176 
177 out_sleep:
178 	wl1271_ps_elp_sleep(wl);
179 out:
180 	mutex_unlock(&wl->mutex);
181 }
182 
183 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
184 {
185 	int ret;
186 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
187 						rx_streaming_disable_work);
188 	struct wl1271 *wl = wlvif->wl;
189 
190 	mutex_lock(&wl->mutex);
191 
192 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
193 		goto out;
194 
195 	ret = wl1271_ps_elp_wakeup(wl);
196 	if (ret < 0)
197 		goto out;
198 
199 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
200 	if (ret)
201 		goto out_sleep;
202 
203 out_sleep:
204 	wl1271_ps_elp_sleep(wl);
205 out:
206 	mutex_unlock(&wl->mutex);
207 }
208 
209 static void wl1271_rx_streaming_timer(unsigned long data)
210 {
211 	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
212 	struct wl1271 *wl = wlvif->wl;
213 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
214 }
215 
216 /* wl->mutex must be taken */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
218 {
219 	/* if the watchdog is not armed, don't do anything */
220 	if (wl->tx_allocated_blocks == 0)
221 		return;
222 
223 	cancel_delayed_work(&wl->tx_watchdog_work);
224 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
226 }
227 
228 static void wl12xx_tx_watchdog_work(struct work_struct *work)
229 {
230 	struct delayed_work *dwork;
231 	struct wl1271 *wl;
232 
233 	dwork = container_of(work, struct delayed_work, work);
234 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
235 
236 	mutex_lock(&wl->mutex);
237 
238 	if (unlikely(wl->state != WLCORE_STATE_ON))
239 		goto out;
240 
241 	/* Tx went out in the meantime - everything is ok */
242 	if (unlikely(wl->tx_allocated_blocks == 0))
243 		goto out;
244 
245 	/*
246 	 * if a ROC is in progress, we might not have any Tx for a long
247 	 * time (e.g. pending Tx on the non-ROC channels)
248 	 */
249 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
250 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
251 			     wl->conf.tx.tx_watchdog_timeout);
252 		wl12xx_rearm_tx_watchdog_locked(wl);
253 		goto out;
254 	}
255 
256 	/*
257 	 * if a scan is in progress, we might not have any Tx for a long
258 	 * time
259 	 */
260 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
261 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
262 			     wl->conf.tx.tx_watchdog_timeout);
263 		wl12xx_rearm_tx_watchdog_locked(wl);
264 		goto out;
265 	}
266 
267 	/*
268 	 * An AP might cache a frame for a long time for a sleeping station,
269 	 * so rearm the timer if there's an AP interface with stations. If
270 	 * Tx is genuinely stuck we will hopefully discover it when all
271 	 * stations are removed due to inactivity.
272 	 */
273 	if (wl->active_sta_count) {
274 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
275 			     "%d stations",
276 			      wl->conf.tx.tx_watchdog_timeout,
277 			      wl->active_sta_count);
278 		wl12xx_rearm_tx_watchdog_locked(wl);
279 		goto out;
280 	}
281 
282 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
283 		     wl->conf.tx.tx_watchdog_timeout);
284 	wl12xx_queue_recovery_work(wl);
285 
286 out:
287 	mutex_unlock(&wl->mutex);
288 }
289 
290 static void wlcore_adjust_conf(struct wl1271 *wl)
291 {
292 	/* Adjust settings according to optional module parameters */
293 
294 	/* Firmware Logger params */
295 	if (fwlog_mem_blocks != -1) {
296 		if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
297 		    fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
298 			wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
299 		} else {
300 			wl1271_error(
301 				"Illegal fwlog_mem_blocks=%d using default %d",
302 				fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
303 		}
304 	}
305 
306 	if (fwlog_param) {
307 		if (!strcmp(fwlog_param, "continuous")) {
308 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 		} else if (!strcmp(fwlog_param, "ondemand")) {
310 			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 		} else if (!strcmp(fwlog_param, "dbgpins")) {
312 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 		} else if (!strcmp(fwlog_param, "disable")) {
315 			wl->conf.fwlog.mem_blocks = 0;
316 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
317 		} else {
318 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
319 		}
320 	}
321 
322 	if (bug_on_recovery != -1)
323 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
324 
325 	if (no_recovery != -1)
326 		wl->conf.recovery.no_recovery = (u8) no_recovery;
327 }
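/*
 * Example (illustrative, assuming the module_param names defined later in
 * this file match the variables above): the defaults can be overridden at
 * module load time, e.g.:
 *
 *	modprobe wlcore fwlog=continuous fwlog_mem_blocks=2 no_recovery=1
 */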
328 
329 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
330 					struct wl12xx_vif *wlvif,
331 					u8 hlid, u8 tx_pkts)
332 {
333 	bool fw_ps;
334 
335 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
336 
337 	/*
338 	 * Wake up from high-level PS if the STA is asleep with too few
339 	 * packets in FW or if the STA is awake.
340 	 */
341 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
342 		wl12xx_ps_link_end(wl, wlvif, hlid);
343 
344 	/*
345 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
346 	 * Make an exception if this is the only connected link. In this
347 	 * case FW-memory congestion is less of a problem.
348 	 * Note that a single connected STA means 2*ap_count + 1 active links,
349 	 * since we must account for the global and broadcast AP links
350 	 * for each AP. The "fw_ps" check assures us the other link is a STA
351 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
352 	 */
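	/*
	 * (Illustrative: with a single AP role, ap_count = 1, so the global
	 * and broadcast links plus one connected STA give 3 active links;
	 * high-level PS is therefore only started once a second peer link
	 * exists.)
	 */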
353 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
354 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
355 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
356 }
357 
358 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 					   struct wl12xx_vif *wlvif,
360 					   struct wl_fw_status *status)
361 {
362 	u32 cur_fw_ps_map;
363 	u8 hlid;
364 
365 	cur_fw_ps_map = status->link_ps_bitmap;
366 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
367 		wl1271_debug(DEBUG_PSM,
368 			     "link ps prev 0x%x cur 0x%x changed 0x%x",
369 			     wl->ap_fw_ps_map, cur_fw_ps_map,
370 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
371 
372 		wl->ap_fw_ps_map = cur_fw_ps_map;
373 	}
374 
375 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
376 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 					    wl->links[hlid].allocated_pkts);
378 }
379 
380 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
381 {
382 	struct wl12xx_vif *wlvif;
383 	struct timespec ts;
384 	u32 old_tx_blk_count = wl->tx_blocks_available;
385 	int avail, freed_blocks;
386 	int i;
387 	int ret;
388 	struct wl1271_link *lnk;
389 
390 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
391 				   wl->raw_fw_status,
392 				   wl->fw_status_len, false);
393 	if (ret < 0)
394 		return ret;
395 
396 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
397 
398 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
399 		     "drv_rx_counter = %d, tx_results_counter = %d)",
400 		     status->intr,
401 		     status->fw_rx_counter,
402 		     status->drv_rx_counter,
403 		     status->tx_results_counter);
404 
405 	for (i = 0; i < NUM_TX_QUEUES; i++) {
406 		/* prevent wrap-around in freed-packets counter */
407 		wl->tx_allocated_pkts[i] -=
408 				(status->counters.tx_released_pkts[i] -
409 				wl->tx_pkts_freed[i]) & 0xff;
410 
411 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
412 	}
413 
414 
415 	for_each_set_bit(i, wl->links_map, wl->num_links) {
416 		u8 diff;
417 		lnk = &wl->links[i];
418 
419 		/* prevent wrap-around in freed-packets counter */
420 		diff = (status->counters.tx_lnk_free_pkts[i] -
421 		       lnk->prev_freed_pkts) & 0xff;
422 
423 		if (diff == 0)
424 			continue;
425 
426 		lnk->allocated_pkts -= diff;
427 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
428 
429 		/* accumulate the prev_freed_pkts counter */
430 		lnk->total_freed_pkts += diff;
431 	}
432 
433 	/* prevent wrap-around in total blocks counter */
434 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
435 		freed_blocks = status->total_released_blks -
436 			       wl->tx_blocks_freed;
437 	else
438 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
439 			       status->total_released_blks;
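	/*
	 * (Illustrative: if tx_blocks_freed was 0xfffffff0 and the FW now
	 * reports total_released_blks = 0x10, the 32-bit counter wrapped and
	 * freed_blocks = 0x100000000 - 0xfffffff0 + 0x10 = 0x20.)
	 */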
440 
441 	wl->tx_blocks_freed = status->total_released_blks;
442 
443 	wl->tx_allocated_blocks -= freed_blocks;
444 
445 	/*
446 	 * If the FW freed some blocks: if we still have allocated blocks,
447 	 * re-arm the timer - Tx is not stuck. Otherwise, cancel the timer
448 	 * (no Tx currently).
449 	 */
450 	if (freed_blocks) {
451 		if (wl->tx_allocated_blocks)
452 			wl12xx_rearm_tx_watchdog_locked(wl);
453 		else
454 			cancel_delayed_work(&wl->tx_watchdog_work);
455 	}
456 
457 	avail = status->tx_total - wl->tx_allocated_blocks;
458 
459 	/*
460 	 * The FW might change the total number of TX memblocks before
461 	 * we get a notification about blocks being released. Thus, the
462 	 * available blocks calculation might yield a temporary result
463 	 * which is lower than the actual available blocks. Keeping in
464 	 * mind that only blocks that were allocated can be moved from
465 	 * TX to RX, tx_blocks_available should never decrease here.
466 	 */
467 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
468 				      avail);
469 
470 	/* if more blocks are available now, tx work can be scheduled */
471 	if (wl->tx_blocks_available > old_tx_blk_count)
472 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
473 
474 	/* for AP update num of allocated TX blocks per link and ps status */
475 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
476 		wl12xx_irq_update_links_status(wl, wlvif, status);
477 	}
478 
479 	/* update the host-chipset time offset */
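	/* note: ">> 10" divides ns by 1024, roughly converting to usec */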
480 	getnstimeofday(&ts);
481 	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
482 		(s64)(status->fw_localtime);
483 
484 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
485 
486 	return 0;
487 }
488 
489 static void wl1271_flush_deferred_work(struct wl1271 *wl)
490 {
491 	struct sk_buff *skb;
492 
493 	/* Pass all received frames to the network stack */
494 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
495 		ieee80211_rx_ni(wl->hw, skb);
496 
497 	/* Return sent skbs to the network stack */
498 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
499 		ieee80211_tx_status_ni(wl->hw, skb);
500 }
501 
502 static void wl1271_netstack_work(struct work_struct *work)
503 {
504 	struct wl1271 *wl =
505 		container_of(work, struct wl1271, netstack_work);
506 
507 	do {
508 		wl1271_flush_deferred_work(wl);
509 	} while (skb_queue_len(&wl->deferred_rx_queue));
510 }
511 
512 #define WL1271_IRQ_MAX_LOOPS 256
513 
514 static int wlcore_irq_locked(struct wl1271 *wl)
515 {
516 	int ret = 0;
517 	u32 intr;
518 	int loopcount = WL1271_IRQ_MAX_LOOPS;
519 	bool done = false;
520 	unsigned int defer_count;
521 	unsigned long flags;
522 
523 	/*
524 	 * In case an edge-triggered interrupt must be used, we cannot iterate
525 	 * more than once without introducing race conditions with the hardirq.
526 	 */
527 	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
528 		loopcount = 1;
529 
530 	wl1271_debug(DEBUG_IRQ, "IRQ work");
531 
532 	if (unlikely(wl->state != WLCORE_STATE_ON))
533 		goto out;
534 
535 	ret = wl1271_ps_elp_wakeup(wl);
536 	if (ret < 0)
537 		goto out;
538 
539 	while (!done && loopcount--) {
540 		/*
541 		 * In order to avoid a race with the hardirq, clear the flag
542 		 * before acknowledging the chip. Since the mutex is held,
543 		 * wl1271_ps_elp_wakeup cannot be called concurrently.
544 		 */
545 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
546 		smp_mb__after_atomic();
547 
548 		ret = wlcore_fw_status(wl, wl->fw_status);
549 		if (ret < 0)
550 			goto out;
551 
552 		wlcore_hw_tx_immediate_compl(wl);
553 
554 		intr = wl->fw_status->intr;
555 		intr &= WLCORE_ALL_INTR_MASK;
556 		if (!intr) {
557 			done = true;
558 			continue;
559 		}
560 
561 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
562 			wl1271_error("HW watchdog interrupt received! starting recovery.");
563 			wl->watchdog_recovery = true;
564 			ret = -EIO;
565 
566 			/* restarting the chip. ignore any other interrupt. */
567 			goto out;
568 		}
569 
570 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
571 			wl1271_error("SW watchdog interrupt received! "
572 				     "starting recovery.");
573 			wl->watchdog_recovery = true;
574 			ret = -EIO;
575 
576 			/* restarting the chip. ignore any other interrupt. */
577 			goto out;
578 		}
579 
580 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
581 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
582 
583 			ret = wlcore_rx(wl, wl->fw_status);
584 			if (ret < 0)
585 				goto out;
586 
587 			/* Check if any tx blocks were freed */
588 			spin_lock_irqsave(&wl->wl_lock, flags);
589 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
590 			    wl1271_tx_total_queue_count(wl) > 0) {
591 				spin_unlock_irqrestore(&wl->wl_lock, flags);
592 				/*
593 				 * In order to avoid starvation of the TX path,
594 				 * call the work function directly.
595 				 */
596 				ret = wlcore_tx_work_locked(wl);
597 				if (ret < 0)
598 					goto out;
599 			} else {
600 				spin_unlock_irqrestore(&wl->wl_lock, flags);
601 			}
602 
603 			/* check for tx results */
604 			ret = wlcore_hw_tx_delayed_compl(wl);
605 			if (ret < 0)
606 				goto out;
607 
608 			/* Make sure the deferred queues don't get too long */
609 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
610 				      skb_queue_len(&wl->deferred_rx_queue);
611 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
612 				wl1271_flush_deferred_work(wl);
613 		}
614 
615 		if (intr & WL1271_ACX_INTR_EVENT_A) {
616 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
617 			ret = wl1271_event_handle(wl, 0);
618 			if (ret < 0)
619 				goto out;
620 		}
621 
622 		if (intr & WL1271_ACX_INTR_EVENT_B) {
623 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
624 			ret = wl1271_event_handle(wl, 1);
625 			if (ret < 0)
626 				goto out;
627 		}
628 
629 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
630 			wl1271_debug(DEBUG_IRQ,
631 				     "WL1271_ACX_INTR_INIT_COMPLETE");
632 
633 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
634 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
635 	}
636 
637 	wl1271_ps_elp_sleep(wl);
638 
639 out:
640 	return ret;
641 }
642 
643 static irqreturn_t wlcore_irq(int irq, void *cookie)
644 {
645 	int ret;
646 	unsigned long flags;
647 	struct wl1271 *wl = cookie;
648 
649 	/* complete the ELP completion */
650 	spin_lock_irqsave(&wl->wl_lock, flags);
651 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
652 	if (wl->elp_compl) {
653 		complete(wl->elp_compl);
654 		wl->elp_compl = NULL;
655 	}
656 
657 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
658 		/* don't enqueue work right now. mark it as pending */
659 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
660 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
661 		disable_irq_nosync(wl->irq);
662 		pm_wakeup_event(wl->dev, 0);
663 		spin_unlock_irqrestore(&wl->wl_lock, flags);
664 		return IRQ_HANDLED;
665 	}
666 	spin_unlock_irqrestore(&wl->wl_lock, flags);
667 
668 	/* TX might be handled here, avoid redundant work */
669 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
670 	cancel_work_sync(&wl->tx_work);
671 
672 	mutex_lock(&wl->mutex);
673 
674 	ret = wlcore_irq_locked(wl);
675 	if (ret)
676 		wl12xx_queue_recovery_work(wl);
677 
678 	spin_lock_irqsave(&wl->wl_lock, flags);
679 	/* In case TX was not handled here, queue TX work */
680 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
681 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
682 	    wl1271_tx_total_queue_count(wl) > 0)
683 		ieee80211_queue_work(wl->hw, &wl->tx_work);
684 	spin_unlock_irqrestore(&wl->wl_lock, flags);
685 
686 	mutex_unlock(&wl->mutex);
687 
688 	return IRQ_HANDLED;
689 }
690 
691 struct vif_counter_data {
692 	u8 counter;
693 
694 	struct ieee80211_vif *cur_vif;
695 	bool cur_vif_running;
696 };
697 
698 static void wl12xx_vif_count_iter(void *data, u8 *mac,
699 				  struct ieee80211_vif *vif)
700 {
701 	struct vif_counter_data *counter = data;
702 
703 	counter->counter++;
704 	if (counter->cur_vif == vif)
705 		counter->cur_vif_running = true;
706 }
707 
708 /* caller must not hold wl->mutex, as it might deadlock */
709 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
710 			       struct ieee80211_vif *cur_vif,
711 			       struct vif_counter_data *data)
712 {
713 	memset(data, 0, sizeof(*data));
714 	data->cur_vif = cur_vif;
715 
716 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
717 					    wl12xx_vif_count_iter, data);
718 }
719 
720 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
721 {
722 	const struct firmware *fw;
723 	const char *fw_name;
724 	enum wl12xx_fw_type fw_type;
725 	int ret;
726 
727 	if (plt) {
728 		fw_type = WL12XX_FW_TYPE_PLT;
729 		fw_name = wl->plt_fw_name;
730 	} else {
731 		/*
732 		 * we can't call wl12xx_get_vif_count() here because
733 		 * wl->mutex is taken, so use the cached last_vif_count value
734 		 */
735 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
736 			fw_type = WL12XX_FW_TYPE_MULTI;
737 			fw_name = wl->mr_fw_name;
738 		} else {
739 			fw_type = WL12XX_FW_TYPE_NORMAL;
740 			fw_name = wl->sr_fw_name;
741 		}
742 	}
743 
744 	if (wl->fw_type == fw_type)
745 		return 0;
746 
747 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
748 
749 	ret = request_firmware(&fw, fw_name, wl->dev);
750 
751 	if (ret < 0) {
752 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
753 		return ret;
754 	}
755 
756 	if (fw->size % 4) {
757 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
758 			     fw->size);
759 		ret = -EILSEQ;
760 		goto out;
761 	}
762 
763 	vfree(wl->fw);
764 	wl->fw_type = WL12XX_FW_TYPE_NONE;
765 	wl->fw_len = fw->size;
766 	wl->fw = vmalloc(wl->fw_len);
767 
768 	if (!wl->fw) {
769 		wl1271_error("could not allocate memory for the firmware");
770 		ret = -ENOMEM;
771 		goto out;
772 	}
773 
774 	memcpy(wl->fw, fw->data, wl->fw_len);
775 	ret = 0;
776 	wl->fw_type = fw_type;
777 out:
778 	release_firmware(fw);
779 
780 	return ret;
781 }
782 
783 void wl12xx_queue_recovery_work(struct wl1271 *wl)
784 {
785 	/* Avoid a recursive recovery */
786 	if (wl->state == WLCORE_STATE_ON) {
787 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
788 				  &wl->flags));
789 
790 		wl->state = WLCORE_STATE_RESTARTING;
791 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
792 		wl1271_ps_elp_wakeup(wl);
793 		wlcore_disable_interrupts_nosync(wl);
794 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
795 	}
796 }
797 
798 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
799 {
800 	size_t len;
801 
802 	/* Make sure we have enough room */
803 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
804 
805 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
806 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
807 	wl->fwlog_size += len;
808 
809 	return len;
810 }
811 
812 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
813 {
814 	struct wlcore_partition_set part, old_part;
815 	u32 addr;
816 	u32 offset;
817 	u32 end_of_log;
818 	u8 *block;
819 	int ret;
820 
821 	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
822 	    (wl->conf.fwlog.mem_blocks == 0))
823 		return;
824 
825 	wl1271_info("Reading FW panic log");
826 
827 	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
828 	if (!block)
829 		return;
830 
831 	/*
832 	 * Make sure the chip is awake and the logger isn't active.
833 	 * Do not send a stop fwlog command if the fw has hung or if
834 	 * dbgpins are used (due to some fw bug).
835 	 */
836 	if (wl1271_ps_elp_wakeup(wl))
837 		goto out;
838 	if (!wl->watchdog_recovery &&
839 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
840 		wl12xx_cmd_stop_fwlog(wl);
841 
842 	/* Read the first memory block address */
843 	ret = wlcore_fw_status(wl, wl->fw_status);
844 	if (ret < 0)
845 		goto out;
846 
847 	addr = wl->fw_status->log_start_addr;
848 	if (!addr)
849 		goto out;
850 
851 	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
852 		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
853 		end_of_log = wl->fwlog_end;
854 	} else {
855 		offset = sizeof(addr);
856 		end_of_log = addr;
857 	}
858 
859 	old_part = wl->curr_part;
860 	memset(&part, 0, sizeof(part));
861 
862 	/* Traverse the memory blocks linked list */
863 	do {
864 		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
865 		part.mem.size  = PAGE_SIZE;
866 
867 		ret = wlcore_set_partition(wl, &part);
868 		if (ret < 0) {
869 			wl1271_error("%s: set_partition start=0x%X size=%d",
870 				__func__, part.mem.start, part.mem.size);
871 			goto out;
872 		}
873 
874 		memset(block, 0, wl->fw_mem_block_size);
875 		ret = wlcore_read_hwaddr(wl, addr, block,
876 					wl->fw_mem_block_size, false);
877 
878 		if (ret < 0)
879 			goto out;
880 
881 		/*
882 		 * Memory blocks are linked to one another. The first 4 bytes
883 		 * of each memory block hold the hardware address of the next
884 		 * one. The last memory block points to the first one in
885 		 * on demand mode and is equal to 0x2000000 in continuous mode.
886 		 */
887 		addr = le32_to_cpup((__le32 *)block);
888 
889 		if (!wl12xx_copy_fwlog(wl, block + offset,
890 					wl->fw_mem_block_size - offset))
891 			break;
892 	} while (addr && (addr != end_of_log));
893 
894 	wake_up_interruptible(&wl->fwlog_waitq);
895 
896 out:
897 	kfree(block);
898 	wlcore_set_partition(wl, &old_part);
899 }
900 
901 static void wlcore_print_recovery(struct wl1271 *wl)
902 {
903 	u32 pc = 0;
904 	u32 hint_sts = 0;
905 	int ret;
906 
907 	wl1271_info("Hardware recovery in progress. FW ver: %s",
908 		    wl->chip.fw_ver_str);
909 
910 	/* change partitions momentarily so we can read the FW pc */
911 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
912 	if (ret < 0)
913 		return;
914 
915 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
916 	if (ret < 0)
917 		return;
918 
919 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
920 	if (ret < 0)
921 		return;
922 
923 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
924 				pc, hint_sts, ++wl->recovery_count);
925 
926 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
927 }
928 
929 
930 static void wl1271_recovery_work(struct work_struct *work)
931 {
932 	struct wl1271 *wl =
933 		container_of(work, struct wl1271, recovery_work);
934 	struct wl12xx_vif *wlvif;
935 	struct ieee80211_vif *vif;
936 
937 	mutex_lock(&wl->mutex);
938 
939 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
940 		goto out_unlock;
941 
942 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
943 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
944 			wl12xx_read_fwlog_panic(wl);
945 		wlcore_print_recovery(wl);
946 	}
947 
948 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
949 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
950 
951 	if (wl->conf.recovery.no_recovery) {
952 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
953 		goto out_unlock;
954 	}
955 
956 	/* Prevent spurious TX during FW restart */
957 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
958 
959 	/* reboot the chipset */
960 	while (!list_empty(&wl->wlvif_list)) {
961 		wlvif = list_first_entry(&wl->wlvif_list,
962 				       struct wl12xx_vif, list);
963 		vif = wl12xx_wlvif_to_vif(wlvif);
964 		__wl1271_op_remove_interface(wl, vif, false);
965 	}
966 
967 	wlcore_op_stop_locked(wl);
968 
969 	ieee80211_restart_hw(wl->hw);
970 
971 	/*
972 	 * It's safe to enable TX now - the queues are stopped after a request
973 	 * to restart the HW.
974 	 */
975 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
976 
977 out_unlock:
978 	wl->watchdog_recovery = false;
979 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
980 	mutex_unlock(&wl->mutex);
981 }
982 
983 static int wlcore_fw_wakeup(struct wl1271 *wl)
984 {
985 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
986 }
987 
988 static int wl1271_setup(struct wl1271 *wl)
989 {
990 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
991 	if (!wl->raw_fw_status)
992 		goto err;
993 
994 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
995 	if (!wl->fw_status)
996 		goto err;
997 
998 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
999 	if (!wl->tx_res_if)
1000 		goto err;
1001 
1002 	return 0;
1003 err:
1004 	kfree(wl->fw_status);
1005 	kfree(wl->raw_fw_status);
1006 	return -ENOMEM;
1007 }
1008 
1009 static int wl12xx_set_power_on(struct wl1271 *wl)
1010 {
1011 	int ret;
1012 
1013 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1014 	ret = wl1271_power_on(wl);
1015 	if (ret < 0)
1016 		goto out;
1017 	msleep(WL1271_POWER_ON_SLEEP);
1018 	wl1271_io_reset(wl);
1019 	wl1271_io_init(wl);
1020 
1021 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1022 	if (ret < 0)
1023 		goto fail;
1024 
1025 	/* ELP module wake up */
1026 	ret = wlcore_fw_wakeup(wl);
1027 	if (ret < 0)
1028 		goto fail;
1029 
1030 out:
1031 	return ret;
1032 
1033 fail:
1034 	wl1271_power_off(wl);
1035 	return ret;
1036 }
1037 
1038 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1039 {
1040 	int ret = 0;
1041 
1042 	ret = wl12xx_set_power_on(wl);
1043 	if (ret < 0)
1044 		goto out;
1045 
1046 	/*
1047 	 * For wl127x based devices we could use the default block
1048 	 * size (512 bytes), but due to a bug in the sdio driver, we
1049 	 * need to set it explicitly after the chip is powered on.  To
1050 	 * simplify the code and since the performance impact is
1051 	 * negligible, we use the same block size for all different
1052 	 * chip types.
1053 	 *
1054 	 * Check if the bus supports blocksize alignment and, if it
1055 	 * doesn't, make sure we don't have the quirk.
1056 	 */
1057 	if (!wl1271_set_block_size(wl))
1058 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1059 
1060 	/* TODO: make sure the lower driver has set things up correctly */
1061 
1062 	ret = wl1271_setup(wl);
1063 	if (ret < 0)
1064 		goto out;
1065 
1066 	ret = wl12xx_fetch_firmware(wl, plt);
1067 	if (ret < 0)
1068 		goto out;
1069 
1070 out:
1071 	return ret;
1072 }
1073 
1074 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1075 {
1076 	int retries = WL1271_BOOT_RETRIES;
1077 	struct wiphy *wiphy = wl->hw->wiphy;
1078 
1079 	static const char* const PLT_MODE[] = {
1080 		"PLT_OFF",
1081 		"PLT_ON",
1082 		"PLT_FEM_DETECT",
1083 		"PLT_CHIP_AWAKE"
1084 	};
1085 
1086 	int ret;
1087 
1088 	mutex_lock(&wl->mutex);
1089 
1090 	wl1271_notice("power up");
1091 
1092 	if (wl->state != WLCORE_STATE_OFF) {
1093 		wl1271_error("cannot go into PLT state because not "
1094 			     "in off state: %d", wl->state);
1095 		ret = -EBUSY;
1096 		goto out;
1097 	}
1098 
1099 	/* Indicate to lower levels that we are now in PLT mode */
1100 	wl->plt = true;
1101 	wl->plt_mode = plt_mode;
1102 
1103 	while (retries) {
1104 		retries--;
1105 		ret = wl12xx_chip_wakeup(wl, true);
1106 		if (ret < 0)
1107 			goto power_off;
1108 
1109 		if (plt_mode != PLT_CHIP_AWAKE) {
1110 			ret = wl->ops->plt_init(wl);
1111 			if (ret < 0)
1112 				goto power_off;
1113 		}
1114 
1115 		wl->state = WLCORE_STATE_ON;
1116 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1117 			      PLT_MODE[plt_mode],
1118 			      wl->chip.fw_ver_str);
1119 
1120 		/* update hw/fw version info in wiphy struct */
1121 		wiphy->hw_version = wl->chip.id;
1122 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1123 			sizeof(wiphy->fw_version));
1124 
1125 		goto out;
1126 
1127 power_off:
1128 		wl1271_power_off(wl);
1129 	}
1130 
1131 	wl->plt = false;
1132 	wl->plt_mode = PLT_OFF;
1133 
1134 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1135 		     WL1271_BOOT_RETRIES);
1136 out:
1137 	mutex_unlock(&wl->mutex);
1138 
1139 	return ret;
1140 }
1141 
1142 int wl1271_plt_stop(struct wl1271 *wl)
1143 {
1144 	int ret = 0;
1145 
1146 	wl1271_notice("power down");
1147 
1148 	/*
1149 	 * Interrupts must be disabled before setting the state to OFF.
1150 	 * Otherwise, the interrupt handler might be called and exit without
1151 	 * reading the interrupt status.
1152 	 */
1153 	wlcore_disable_interrupts(wl);
1154 	mutex_lock(&wl->mutex);
1155 	if (!wl->plt) {
1156 		mutex_unlock(&wl->mutex);
1157 
1158 		/*
1159 		 * This will not necessarily enable interrupts as interrupts
1160 		 * may have been disabled when op_stop was called. It will,
1161 		 * however, balance the above call to disable_interrupts().
1162 		 */
1163 		wlcore_enable_interrupts(wl);
1164 
1165 		wl1271_error("cannot power down because not in PLT "
1166 			     "state: %d", wl->state);
1167 		ret = -EBUSY;
1168 		goto out;
1169 	}
1170 
1171 	mutex_unlock(&wl->mutex);
1172 
1173 	wl1271_flush_deferred_work(wl);
1174 	cancel_work_sync(&wl->netstack_work);
1175 	cancel_work_sync(&wl->recovery_work);
1176 	cancel_delayed_work_sync(&wl->elp_work);
1177 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1178 
1179 	mutex_lock(&wl->mutex);
1180 	wl1271_power_off(wl);
1181 	wl->flags = 0;
1182 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1183 	wl->state = WLCORE_STATE_OFF;
1184 	wl->plt = false;
1185 	wl->plt_mode = PLT_OFF;
1186 	wl->rx_counter = 0;
1187 	mutex_unlock(&wl->mutex);
1188 
1189 out:
1190 	return ret;
1191 }
1192 
1193 static void wl1271_op_tx(struct ieee80211_hw *hw,
1194 			 struct ieee80211_tx_control *control,
1195 			 struct sk_buff *skb)
1196 {
1197 	struct wl1271 *wl = hw->priv;
1198 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1199 	struct ieee80211_vif *vif = info->control.vif;
1200 	struct wl12xx_vif *wlvif = NULL;
1201 	unsigned long flags;
1202 	int q, mapping;
1203 	u8 hlid;
1204 
1205 	if (!vif) {
1206 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1207 		ieee80211_free_txskb(hw, skb);
1208 		return;
1209 	}
1210 
1211 	wlvif = wl12xx_vif_to_data(vif);
1212 	mapping = skb_get_queue_mapping(skb);
1213 	q = wl1271_tx_get_queue(mapping);
1214 
1215 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1216 
1217 	spin_lock_irqsave(&wl->wl_lock, flags);
1218 
1219 	/*
1220 	 * drop the packet if the link is invalid or the queue is stopped
1221 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1222 	 * allow these packets through.
1223 	 */
1224 	if (hlid == WL12XX_INVALID_LINK_ID ||
1225 	    (!test_bit(hlid, wlvif->links_map)) ||
1226 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1227 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1228 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1229 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1230 		ieee80211_free_txskb(hw, skb);
1231 		goto out;
1232 	}
1233 
1234 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1235 		     hlid, q, skb->len);
1236 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1237 
1238 	wl->tx_queue_count[q]++;
1239 	wlvif->tx_queue_count[q]++;
1240 
1241 	/*
1242 	 * The workqueue is slow to process the tx_queue and we need to stop
1243 	 * the queue here, otherwise the queue will get too long.
1244 	 */
1245 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1246 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1247 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1248 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1249 		wlcore_stop_queue_locked(wl, wlvif, q,
1250 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1251 	}
1252 
1253 	/*
1254 	 * The chip specific setup must run before the first TX packet -
1255 	 * before that, the tx_work will not be initialized!
1256 	 */
1257 
1258 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1259 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1260 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1261 
1262 out:
1263 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1264 }
1265 
1266 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1267 {
1268 	unsigned long flags;
1269 	int q;
1270 
1271 	/* no need to queue a new dummy packet if one is already pending */
1272 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1273 		return 0;
1274 
1275 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1276 
1277 	spin_lock_irqsave(&wl->wl_lock, flags);
1278 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1279 	wl->tx_queue_count[q]++;
1280 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1281 
1282 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1283 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1284 		return wlcore_tx_work_locked(wl);
1285 
1286 	/*
1287 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1288 	 * interrupt handler function
1289 	 */
1290 	return 0;
1291 }
1292 
1293 /*
1294  * The size of the dummy packet should be at least 1400 bytes. However, in
1295  * order to minimize the number of bus transactions, aligning it to 512-byte
1296  * boundaries could be beneficial, performance-wise.
1297  */
1298 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
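/* ALIGN(1400, 512) rounds up to the next 512-byte boundary, i.e. 1536 bytes */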
1299 
1300 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1301 {
1302 	struct sk_buff *skb;
1303 	struct ieee80211_hdr_3addr *hdr;
1304 	unsigned int dummy_packet_size;
1305 
1306 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1307 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1308 
1309 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1310 	if (!skb) {
1311 		wl1271_warning("Failed to allocate a dummy packet skb");
1312 		return NULL;
1313 	}
1314 
1315 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1316 
1317 	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1318 	memset(hdr, 0, sizeof(*hdr));
1319 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1320 					 IEEE80211_STYPE_NULLFUNC |
1321 					 IEEE80211_FCTL_TODS);
1322 
1323 	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1324 
1325 	/* Dummy packets require the TID to be management */
1326 	skb->priority = WL1271_TID_MGMT;
1327 
1328 	/* Initialize all fields that might be used */
1329 	skb_set_queue_mapping(skb, 0);
1330 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1331 
1332 	return skb;
1333 }
1334 
1335 
1336 #ifdef CONFIG_PM
1337 static int
1338 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1339 {
1340 	int num_fields = 0, in_field = 0, fields_size = 0;
1341 	int i, pattern_len = 0;
1342 
1343 	if (!p->mask) {
1344 		wl1271_warning("No mask in WoWLAN pattern");
1345 		return -EINVAL;
1346 	}
1347 
1348 	/*
1349 	 * The pattern is broken up into segments of bytes at different offsets
1350 	 * that need to be checked by the FW filter. Each segment is called
1351 	 * a field in the FW API. We verify that the total number of fields
1352 	 * required for this pattern won't exceed the FW limit (8), and that
1353 	 * the total fields buffer won't exceed the FW limit either.
1354 	 * Note that a pattern which crosses the Ethernet/IP header boundary
1355 	 * requires an extra field.
1356 	 */
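	/*
	 * (Illustrative: a mask covering bytes on both sides of the
	 * Ethernet/IP header boundary - e.g. the last two bytes of the
	 * Ethernet header and the first two bytes of the IP header - is
	 * counted as two fields even though the bytes are contiguous.)
	 */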
1357 	for (i = 0; i < p->pattern_len; i++) {
1358 		if (test_bit(i, (unsigned long *)p->mask)) {
1359 			if (!in_field) {
1360 				in_field = 1;
1361 				pattern_len = 1;
1362 			} else {
1363 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1364 					num_fields++;
1365 					fields_size += pattern_len +
1366 						RX_FILTER_FIELD_OVERHEAD;
1367 					pattern_len = 1;
1368 				} else
1369 					pattern_len++;
1370 			}
1371 		} else {
1372 			if (in_field) {
1373 				in_field = 0;
1374 				fields_size += pattern_len +
1375 					RX_FILTER_FIELD_OVERHEAD;
1376 				num_fields++;
1377 			}
1378 		}
1379 	}
1380 
1381 	if (in_field) {
1382 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1383 		num_fields++;
1384 	}
1385 
1386 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1387 		wl1271_warning("RX Filter too complex. Too many segments");
1388 		return -EINVAL;
1389 	}
1390 
1391 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1392 		wl1271_warning("RX filter pattern is too big");
1393 		return -E2BIG;
1394 	}
1395 
1396 	return 0;
1397 }
1398 
1399 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1400 {
1401 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1402 }
1403 
1404 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1405 {
1406 	int i;
1407 
1408 	if (filter == NULL)
1409 		return;
1410 
1411 	for (i = 0; i < filter->num_fields; i++)
1412 		kfree(filter->fields[i].pattern);
1413 
1414 	kfree(filter);
1415 }
1416 
1417 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1418 				 u16 offset, u8 flags,
1419 				 const u8 *pattern, u8 len)
1420 {
1421 	struct wl12xx_rx_filter_field *field;
1422 
1423 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1424 		wl1271_warning("Max fields per RX filter. can't alloc another");
1425 		return -EINVAL;
1426 	}
1427 
1428 	field = &filter->fields[filter->num_fields];
1429 
1430 	field->pattern = kzalloc(len, GFP_KERNEL);
1431 	if (!field->pattern) {
1432 		wl1271_warning("Failed to allocate RX filter pattern");
1433 		return -ENOMEM;
1434 	}
1435 
1436 	filter->num_fields++;
1437 
1438 	field->offset = cpu_to_le16(offset);
1439 	field->flags = flags;
1440 	field->len = len;
1441 	memcpy(field->pattern, pattern, len);
1442 
1443 	return 0;
1444 }
1445 
1446 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1447 {
1448 	int i, fields_size = 0;
1449 
1450 	for (i = 0; i < filter->num_fields; i++)
1451 		fields_size += filter->fields[i].len +
1452 			sizeof(struct wl12xx_rx_filter_field) -
1453 			sizeof(u8 *);
1454 
1455 	return fields_size;
1456 }
1457 
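/*
 * Flattening serializes each field as its fixed-size header (the struct
 * minus the pattern pointer) immediately followed by the pattern bytes,
 * so the resulting buffer size matches wl1271_rx_filter_get_fields_size().
 */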
1458 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1459 				    u8 *buf)
1460 {
1461 	int i;
1462 	struct wl12xx_rx_filter_field *field;
1463 
1464 	for (i = 0; i < filter->num_fields; i++) {
1465 		field = (struct wl12xx_rx_filter_field *)buf;
1466 
1467 		field->offset = filter->fields[i].offset;
1468 		field->flags = filter->fields[i].flags;
1469 		field->len = filter->fields[i].len;
1470 
1471 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1472 		buf += sizeof(struct wl12xx_rx_filter_field) -
1473 			sizeof(u8 *) + field->len;
1474 	}
1475 }
1476 
1477 /*
1478  * Allocates an RX filter and returns it through @f;
1479  * the caller must free it with wl1271_rx_filter_free()
1480  */
1481 static int
1482 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1483 					   struct wl12xx_rx_filter **f)
1484 {
1485 	int i, j, ret = 0;
1486 	struct wl12xx_rx_filter *filter;
1487 	u16 offset;
1488 	u8 flags, len;
1489 
1490 	filter = wl1271_rx_filter_alloc();
1491 	if (!filter) {
1492 		wl1271_warning("Failed to alloc rx filter");
1493 		ret = -ENOMEM;
1494 		goto err;
1495 	}
1496 
1497 	i = 0;
1498 	while (i < p->pattern_len) {
1499 		if (!test_bit(i, (unsigned long *)p->mask)) {
1500 			i++;
1501 			continue;
1502 		}
1503 
1504 		for (j = i; j < p->pattern_len; j++) {
1505 			if (!test_bit(j, (unsigned long *)p->mask))
1506 				break;
1507 
1508 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1509 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1510 				break;
1511 		}
1512 
1513 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1514 			offset = i;
1515 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1516 		} else {
1517 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1518 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1519 		}
1520 
1521 		len = j - i;
1522 
1523 		ret = wl1271_rx_filter_alloc_field(filter,
1524 						   offset,
1525 						   flags,
1526 						   &p->pattern[i], len);
1527 		if (ret)
1528 			goto err;
1529 
1530 		i = j;
1531 	}
1532 
1533 	filter->action = FILTER_SIGNAL;
1534 
1535 	*f = filter;
1536 	return 0;
1537 
1538 err:
1539 	wl1271_rx_filter_free(filter);
1540 	*f = NULL;
1541 
1542 	return ret;
1543 }
1544 
1545 static int wl1271_configure_wowlan(struct wl1271 *wl,
1546 				   struct cfg80211_wowlan *wow)
1547 {
1548 	int i, ret;
1549 
1550 	if (!wow || wow->any || !wow->n_patterns) {
1551 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1552 							  FILTER_SIGNAL);
1553 		if (ret)
1554 			goto out;
1555 
1556 		ret = wl1271_rx_filter_clear_all(wl);
1557 		if (ret)
1558 			goto out;
1559 
1560 		return 0;
1561 	}
1562 
1563 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1564 		return -EINVAL;
1565 
1566 	/* Validate all incoming patterns before clearing current FW state */
1567 	for (i = 0; i < wow->n_patterns; i++) {
1568 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1569 		if (ret) {
1570 			wl1271_warning("Bad wowlan pattern %d", i);
1571 			return ret;
1572 		}
1573 	}
1574 
1575 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1576 	if (ret)
1577 		goto out;
1578 
1579 	ret = wl1271_rx_filter_clear_all(wl);
1580 	if (ret)
1581 		goto out;
1582 
1583 	/* Translate WoWLAN patterns into filters */
1584 	for (i = 0; i < wow->n_patterns; i++) {
1585 		struct cfg80211_pkt_pattern *p;
1586 		struct wl12xx_rx_filter *filter = NULL;
1587 
1588 		p = &wow->patterns[i];
1589 
1590 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1591 		if (ret) {
1592 			wl1271_warning("Failed to create an RX filter from "
1593 				       "wowlan pattern %d", i);
1594 			goto out;
1595 		}
1596 
1597 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1598 
1599 		wl1271_rx_filter_free(filter);
1600 		if (ret)
1601 			goto out;
1602 	}
1603 
1604 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1605 
1606 out:
1607 	return ret;
1608 }
1609 
1610 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1611 					struct wl12xx_vif *wlvif,
1612 					struct cfg80211_wowlan *wow)
1613 {
1614 	int ret = 0;
1615 
1616 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1617 		goto out;
1618 
1619 	ret = wl1271_ps_elp_wakeup(wl);
1620 	if (ret < 0)
1621 		goto out;
1622 
1623 	ret = wl1271_configure_wowlan(wl, wow);
1624 	if (ret < 0)
1625 		goto out_sleep;
1626 
1627 	if ((wl->conf.conn.suspend_wake_up_event ==
1628 	     wl->conf.conn.wake_up_event) &&
1629 	    (wl->conf.conn.suspend_listen_interval ==
1630 	     wl->conf.conn.listen_interval))
1631 		goto out_sleep;
1632 
1633 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1634 				    wl->conf.conn.suspend_wake_up_event,
1635 				    wl->conf.conn.suspend_listen_interval);
1636 
1637 	if (ret < 0)
1638 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1639 
1640 out_sleep:
1641 	wl1271_ps_elp_sleep(wl);
1642 out:
1643 	return ret;
1644 
1645 }
1646 
1647 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1648 				       struct wl12xx_vif *wlvif)
1649 {
1650 	int ret = 0;
1651 
1652 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1653 		goto out;
1654 
1655 	ret = wl1271_ps_elp_wakeup(wl);
1656 	if (ret < 0)
1657 		goto out;
1658 
1659 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1660 
1661 	wl1271_ps_elp_sleep(wl);
1662 out:
1663 	return ret;
1664 
1665 }
1666 
1667 static int wl1271_configure_suspend(struct wl1271 *wl,
1668 				    struct wl12xx_vif *wlvif,
1669 				    struct cfg80211_wowlan *wow)
1670 {
1671 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1672 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1673 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1674 		return wl1271_configure_suspend_ap(wl, wlvif);
1675 	return 0;
1676 }
1677 
1678 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1679 {
1680 	int ret = 0;
1681 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1682 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1683 
1684 	if ((!is_ap) && (!is_sta))
1685 		return;
1686 
1687 	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1688 		return;
1689 
1690 	ret = wl1271_ps_elp_wakeup(wl);
1691 	if (ret < 0)
1692 		return;
1693 
1694 	if (is_sta) {
1695 		wl1271_configure_wowlan(wl, NULL);
1696 
1697 		if ((wl->conf.conn.suspend_wake_up_event ==
1698 		     wl->conf.conn.wake_up_event) &&
1699 		    (wl->conf.conn.suspend_listen_interval ==
1700 		     wl->conf.conn.listen_interval))
1701 			goto out_sleep;
1702 
1703 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1704 				    wl->conf.conn.wake_up_event,
1705 				    wl->conf.conn.listen_interval);
1706 
1707 		if (ret < 0)
1708 			wl1271_error("resume: wake up conditions failed: %d",
1709 				     ret);
1710 
1711 	} else if (is_ap) {
1712 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1713 	}
1714 
1715 out_sleep:
1716 	wl1271_ps_elp_sleep(wl);
1717 }
1718 
1719 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1720 			    struct cfg80211_wowlan *wow)
1721 {
1722 	struct wl1271 *wl = hw->priv;
1723 	struct wl12xx_vif *wlvif;
1724 	int ret;
1725 
1726 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1727 	WARN_ON(!wow);
1728 
1729 	/* we want to perform the recovery before suspending */
1730 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1731 		wl1271_warning("postponing suspend to perform recovery");
1732 		return -EBUSY;
1733 	}
1734 
1735 	wl1271_tx_flush(wl);
1736 
1737 	mutex_lock(&wl->mutex);
1738 	wl->wow_enabled = true;
1739 	wl12xx_for_each_wlvif(wl, wlvif) {
1740 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1741 		if (ret < 0) {
1742 			mutex_unlock(&wl->mutex);
1743 			wl1271_warning("couldn't prepare device to suspend");
1744 			return ret;
1745 		}
1746 	}
1747 	mutex_unlock(&wl->mutex);
1748 	/* flush any remaining work */
1749 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1750 
1751 	/*
1752 	 * disable and re-enable interrupts in order to flush
1753 	 * the threaded_irq
1754 	 */
1755 	wlcore_disable_interrupts(wl);
1756 
1757 	/*
1758 	 * set suspended flag to avoid triggering a new threaded_irq
1759 	 * work. no need for spinlock as interrupts are disabled.
1760 	 */
1761 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1762 
1763 	wlcore_enable_interrupts(wl);
1764 	flush_work(&wl->tx_work);
1765 	flush_delayed_work(&wl->elp_work);
1766 
1767 	/*
1768 	 * Cancel the watchdog even if the above tx_flush failed. We will detect
1769 	 * it on resume anyway.
1770 	 */
1771 	cancel_delayed_work(&wl->tx_watchdog_work);
1772 
1773 	return 0;
1774 }
1775 
1776 static int wl1271_op_resume(struct ieee80211_hw *hw)
1777 {
1778 	struct wl1271 *wl = hw->priv;
1779 	struct wl12xx_vif *wlvif;
1780 	unsigned long flags;
1781 	bool run_irq_work = false, pending_recovery;
1782 	int ret;
1783 
1784 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1785 		     wl->wow_enabled);
1786 	WARN_ON(!wl->wow_enabled);
1787 
1788 	/*
1789 	 * re-enable irq_work enqueuing, and call irq_work directly if
1790 	 * there is a pending work.
1791 	 * there is pending work.
1792 	spin_lock_irqsave(&wl->wl_lock, flags);
1793 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1794 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1795 		run_irq_work = true;
1796 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1797 
1798 	mutex_lock(&wl->mutex);
1799 
1800 	/* test the recovery flag before calling any SDIO functions */
1801 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1802 				    &wl->flags);
1803 
1804 	if (run_irq_work) {
1805 		wl1271_debug(DEBUG_MAC80211,
1806 			     "run postponed irq_work directly");
1807 
1808 		/* don't talk to the HW if recovery is pending */
1809 		if (!pending_recovery) {
1810 			ret = wlcore_irq_locked(wl);
1811 			if (ret)
1812 				wl12xx_queue_recovery_work(wl);
1813 		}
1814 
1815 		wlcore_enable_interrupts(wl);
1816 	}
1817 
1818 	if (pending_recovery) {
1819 		wl1271_warning("queuing forgotten recovery on resume");
1820 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1821 		goto out;
1822 	}
1823 
1824 	wl12xx_for_each_wlvif(wl, wlvif) {
1825 		wl1271_configure_resume(wl, wlvif);
1826 	}
1827 
1828 out:
1829 	wl->wow_enabled = false;
1830 
1831 	/*
1832 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1833 	 * That way we avoid possible conditions where Tx-complete interrupts
1834 	 * fail to arrive and we perform a spurious recovery.
1835 	 */
1836 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1837 	mutex_unlock(&wl->mutex);
1838 
1839 	return 0;
1840 }
1841 #endif
1842 
1843 static int wl1271_op_start(struct ieee80211_hw *hw)
1844 {
1845 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1846 
1847 	/*
1848 	 * We have to delay the booting of the hardware because
1849 	 * we need to know the local MAC address before downloading and
1850 	 * initializing the firmware. The MAC address cannot be changed
1851 	 * after boot, and without the proper MAC address, the firmware
1852 	 * will not function properly.
1853 	 *
1854 	 * The MAC address is first known when the corresponding interface
1855 	 * is added. That is where we will initialize the hardware.
1856 	 */
1857 
1858 	return 0;
1859 }
1860 
1861 static void wlcore_op_stop_locked(struct wl1271 *wl)
1862 {
1863 	int i;
1864 
1865 	if (wl->state == WLCORE_STATE_OFF) {
1866 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1867 					&wl->flags))
1868 			wlcore_enable_interrupts(wl);
1869 
1870 		return;
1871 	}
1872 
1873 	/*
1874 	 * this must be before the cancel_work calls below, so that the work
1875 	 * functions don't perform further work.
1876 	 */
1877 	wl->state = WLCORE_STATE_OFF;
1878 
1879 	/*
1880 	 * Use the nosync variant to disable interrupts, so the mutex can be
1881 	 * held while doing so without deadlocking.
1882 	 */
1883 	wlcore_disable_interrupts_nosync(wl);
1884 
1885 	mutex_unlock(&wl->mutex);
1886 
1887 	wlcore_synchronize_interrupts(wl);
1888 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1889 		cancel_work_sync(&wl->recovery_work);
1890 	wl1271_flush_deferred_work(wl);
1891 	cancel_delayed_work_sync(&wl->scan_complete_work);
1892 	cancel_work_sync(&wl->netstack_work);
1893 	cancel_work_sync(&wl->tx_work);
1894 	cancel_delayed_work_sync(&wl->elp_work);
1895 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1896 
1897 	/* let's notify MAC80211 about the remaining pending TX frames */
1898 	mutex_lock(&wl->mutex);
1899 	wl12xx_tx_reset(wl);
1900 
1901 	wl1271_power_off(wl);
1902 	/*
1903 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1904 	 * an interrupt storm. Now that the power is down, it is safe to
1905 	 * re-enable interrupts to balance the disable depth
1906 	 */
1907 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1908 		wlcore_enable_interrupts(wl);
1909 
1910 	wl->band = IEEE80211_BAND_2GHZ;
1911 
1912 	wl->rx_counter = 0;
1913 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1914 	wl->channel_type = NL80211_CHAN_NO_HT;
1915 	wl->tx_blocks_available = 0;
1916 	wl->tx_allocated_blocks = 0;
1917 	wl->tx_results_count = 0;
1918 	wl->tx_packets_count = 0;
1919 	wl->time_offset = 0;
1920 	wl->ap_fw_ps_map = 0;
1921 	wl->ap_ps_map = 0;
1922 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1923 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1924 	memset(wl->links_map, 0, sizeof(wl->links_map));
1925 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1926 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1927 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1928 	wl->active_sta_count = 0;
1929 	wl->active_link_count = 0;
1930 
1931 	/* The system link is always allocated */
1932 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1933 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1934 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1935 
1936 	/*
1937 	 * this is performed after the cancel_work calls and the associated
1938 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1939 	 * get executed before all these vars have been reset.
1940 	 */
1941 	wl->flags = 0;
1942 
1943 	wl->tx_blocks_freed = 0;
1944 
1945 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1946 		wl->tx_pkts_freed[i] = 0;
1947 		wl->tx_allocated_pkts[i] = 0;
1948 	}
1949 
1950 	wl1271_debugfs_reset(wl);
1951 
1952 	kfree(wl->raw_fw_status);
1953 	wl->raw_fw_status = NULL;
1954 	kfree(wl->fw_status);
1955 	wl->fw_status = NULL;
1956 	kfree(wl->tx_res_if);
1957 	wl->tx_res_if = NULL;
1958 	kfree(wl->target_mem_map);
1959 	wl->target_mem_map = NULL;
1960 
1961 	/*
1962 	 * FW channels must be re-calibrated after recovery,
1963 	 * save current Reg-Domain channel configuration and clear it.
1964 	 */
1965 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
1966 	       sizeof(wl->reg_ch_conf_pending));
1967 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1968 }
1969 
1970 static void wlcore_op_stop(struct ieee80211_hw *hw)
1971 {
1972 	struct wl1271 *wl = hw->priv;
1973 
1974 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1975 
1976 	mutex_lock(&wl->mutex);
1977 
1978 	wlcore_op_stop_locked(wl);
1979 
1980 	mutex_unlock(&wl->mutex);
1981 }
1982 
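/*
 * Delayed work that runs if a started channel switch did not complete in
 * time: report the failure to mac80211 and tell the fw to stop the switch.
 */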
1983 static void wlcore_channel_switch_work(struct work_struct *work)
1984 {
1985 	struct delayed_work *dwork;
1986 	struct wl1271 *wl;
1987 	struct ieee80211_vif *vif;
1988 	struct wl12xx_vif *wlvif;
1989 	int ret;
1990 
1991 	dwork = container_of(work, struct delayed_work, work);
1992 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1993 	wl = wlvif->wl;
1994 
1995 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1996 
1997 	mutex_lock(&wl->mutex);
1998 
1999 	if (unlikely(wl->state != WLCORE_STATE_ON))
2000 		goto out;
2001 
2002 	/* check the channel switch is still ongoing */
2003 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2004 		goto out;
2005 
2006 	vif = wl12xx_wlvif_to_vif(wlvif);
2007 	ieee80211_chswitch_done(vif, false);
2008 
2009 	ret = wl1271_ps_elp_wakeup(wl);
2010 	if (ret < 0)
2011 		goto out;
2012 
2013 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2014 
2015 	wl1271_ps_elp_sleep(wl);
2016 out:
2017 	mutex_unlock(&wl->mutex);
2018 }
2019 
2020 static void wlcore_connection_loss_work(struct work_struct *work)
2021 {
2022 	struct delayed_work *dwork;
2023 	struct wl1271 *wl;
2024 	struct ieee80211_vif *vif;
2025 	struct wl12xx_vif *wlvif;
2026 
2027 	dwork = container_of(work, struct delayed_work, work);
2028 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2029 	wl = wlvif->wl;
2030 
2031 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2032 
2033 	mutex_lock(&wl->mutex);
2034 
2035 	if (unlikely(wl->state != WLCORE_STATE_ON))
2036 		goto out;
2037 
2038 	/* Call mac80211 connection loss */
2039 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2040 		goto out;
2041 
2042 	vif = wl12xx_wlvif_to_vif(wlvif);
2043 	ieee80211_connection_loss(vif);
2044 out:
2045 	mutex_unlock(&wl->mutex);
2046 }
2047 
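/*
 * Delayed work that, once WLCORE_PEND_AUTH_ROC_TIMEOUT has really elapsed
 * since the last auth reply, cancels the ROC kept for the authenticating
 * station.
 */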
2048 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2049 {
2050 	struct delayed_work *dwork;
2051 	struct wl1271 *wl;
2052 	struct wl12xx_vif *wlvif;
2053 	unsigned long time_spare;
2054 	int ret;
2055 
2056 	dwork = container_of(work, struct delayed_work, work);
2057 	wlvif = container_of(dwork, struct wl12xx_vif,
2058 			     pending_auth_complete_work);
2059 	wl = wlvif->wl;
2060 
2061 	mutex_lock(&wl->mutex);
2062 
2063 	if (unlikely(wl->state != WLCORE_STATE_ON))
2064 		goto out;
2065 
2066 	/*
2067 	 * Make sure a second really passed since the last auth reply. Maybe
2068 	 * a second auth reply arrived while we were stuck on the mutex.
2069 	 * Check for a little less than the timeout to protect from scheduler
2070 	 * irregularities.
2071 	 */
2072 	time_spare = jiffies +
2073 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2074 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2075 		goto out;
2076 
2077 	ret = wl1271_ps_elp_wakeup(wl);
2078 	if (ret < 0)
2079 		goto out;
2080 
2081 	/* cancel the ROC if active */
2082 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2083 
2084 	wl1271_ps_elp_sleep(wl);
2085 out:
2086 	mutex_unlock(&wl->mutex);
2087 }
2088 
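/*
 * Allocate a free rate policy index from the rate_policies_map bitmap.
 * Returns -EBUSY when all WL12XX_MAX_RATE_POLICIES entries are in use.
 */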
2089 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2090 {
2091 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2092 					WL12XX_MAX_RATE_POLICIES);
2093 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2094 		return -EBUSY;
2095 
2096 	__set_bit(policy, wl->rate_policies_map);
2097 	*idx = policy;
2098 	return 0;
2099 }
2100 
2101 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2102 {
2103 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2104 		return;
2105 
2106 	__clear_bit(*idx, wl->rate_policies_map);
2107 	*idx = WL12XX_MAX_RATE_POLICIES;
2108 }
2109 
2110 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2111 {
2112 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2113 					WLCORE_MAX_KLV_TEMPLATES);
2114 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2115 		return -EBUSY;
2116 
2117 	__set_bit(policy, wl->klv_templates_map);
2118 	*idx = policy;
2119 	return 0;
2120 }
2121 
2122 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2123 {
2124 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2125 		return;
2126 
2127 	__clear_bit(*idx, wl->klv_templates_map);
2128 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2129 }
2130 
2131 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2132 {
2133 	switch (wlvif->bss_type) {
2134 	case BSS_TYPE_AP_BSS:
2135 		if (wlvif->p2p)
2136 			return WL1271_ROLE_P2P_GO;
2137 		else
2138 			return WL1271_ROLE_AP;
2139 
2140 	case BSS_TYPE_STA_BSS:
2141 		if (wlvif->p2p)
2142 			return WL1271_ROLE_P2P_CL;
2143 		else
2144 			return WL1271_ROLE_STA;
2145 
2146 	case BSS_TYPE_IBSS:
2147 		return WL1271_ROLE_IBSS;
2148 
2149 	default:
2150 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2151 	}
2152 	return WL12XX_INVALID_ROLE_TYPE;
2153 }
2154 
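/*
 * Initialize the per-vif driver data according to the interface type:
 * reset the non-persistent fields, allocate rate policies (and a KLV
 * template for STA/IBSS), set default rates and set up the per-vif
 * work items.
 */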
2155 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2156 {
2157 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2158 	int i;
2159 
2160 	/* clear everything but the persistent data */
2161 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2162 
2163 	switch (ieee80211_vif_type_p2p(vif)) {
2164 	case NL80211_IFTYPE_P2P_CLIENT:
2165 		wlvif->p2p = 1;
2166 		/* fall-through */
2167 	case NL80211_IFTYPE_STATION:
2168 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2169 		break;
2170 	case NL80211_IFTYPE_ADHOC:
2171 		wlvif->bss_type = BSS_TYPE_IBSS;
2172 		break;
2173 	case NL80211_IFTYPE_P2P_GO:
2174 		wlvif->p2p = 1;
2175 		/* fall-through */
2176 	case NL80211_IFTYPE_AP:
2177 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2178 		break;
2179 	default:
2180 		wlvif->bss_type = MAX_BSS_TYPE;
2181 		return -EOPNOTSUPP;
2182 	}
2183 
2184 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2185 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2186 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2187 
2188 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2189 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2190 		/* init sta/ibss data */
2191 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2192 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2193 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2194 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2195 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2196 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2197 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2198 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2199 	} else {
2200 		/* init ap data */
2201 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2202 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2203 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2204 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2205 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2206 			wl12xx_allocate_rate_policy(wl,
2207 						&wlvif->ap.ucast_rate_idx[i]);
2208 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2209 		/*
2210 		 * TODO: check if basic_rate shouldn't be
2211 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2212 		 * instead (the same thing for STA above).
2213 		 */
2214 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2215 		/* TODO: this seems to be used only for STA, check it */
2216 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2217 	}
2218 
2219 	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2220 	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2221 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2222 
2223 	/*
2224 	 * mac80211 configures some values globally, while we treat them
2225 	 * per-interface. thus, on init, we have to copy them from wl
2226 	 */
2227 	wlvif->band = wl->band;
2228 	wlvif->channel = wl->channel;
2229 	wlvif->power_level = wl->power_level;
2230 	wlvif->channel_type = wl->channel_type;
2231 
2232 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2233 		  wl1271_rx_streaming_enable_work);
2234 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2235 		  wl1271_rx_streaming_disable_work);
2236 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2237 			  wlcore_channel_switch_work);
2238 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2239 			  wlcore_connection_loss_work);
2240 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2241 			  wlcore_pending_auth_complete_work);
2242 	INIT_LIST_HEAD(&wlvif->list);
2243 
2244 	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2245 		    (unsigned long) wlvif);
2246 	return 0;
2247 }
2248 
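/*
 * Power on the chip, boot the firmware and run the initial HW
 * configuration, retrying up to WL1271_BOOT_RETRIES times before
 * giving up.
 */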
2249 static int wl12xx_init_fw(struct wl1271 *wl)
2250 {
2251 	int retries = WL1271_BOOT_RETRIES;
2252 	bool booted = false;
2253 	struct wiphy *wiphy = wl->hw->wiphy;
2254 	int ret;
2255 
2256 	while (retries) {
2257 		retries--;
2258 		ret = wl12xx_chip_wakeup(wl, false);
2259 		if (ret < 0)
2260 			goto power_off;
2261 
2262 		ret = wl->ops->boot(wl);
2263 		if (ret < 0)
2264 			goto power_off;
2265 
2266 		ret = wl1271_hw_init(wl);
2267 		if (ret < 0)
2268 			goto irq_disable;
2269 
2270 		booted = true;
2271 		break;
2272 
2273 irq_disable:
2274 		mutex_unlock(&wl->mutex);
2275 		/* Unlocking the mutex in the middle of handling is
2276 		   inherently unsafe. In this case we deem it safe to do,
2277 		   because we need to let any possibly pending IRQ out of
2278 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2279 		   work function will not do anything.) Also, any other
2280 		   possible concurrent operations will fail due to the
2281 		   current state, hence the wl1271 struct should be safe. */
2282 		wlcore_disable_interrupts(wl);
2283 		wl1271_flush_deferred_work(wl);
2284 		cancel_work_sync(&wl->netstack_work);
2285 		mutex_lock(&wl->mutex);
2286 power_off:
2287 		wl1271_power_off(wl);
2288 	}
2289 
2290 	if (!booted) {
2291 		wl1271_error("firmware boot failed despite %d retries",
2292 			     WL1271_BOOT_RETRIES);
2293 		goto out;
2294 	}
2295 
2296 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2297 
2298 	/* update hw/fw version info in wiphy struct */
2299 	wiphy->hw_version = wl->chip.id;
2300 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2301 		sizeof(wiphy->fw_version));
2302 
2303 	/*
2304 	 * Now we know if 11a is supported (info from the NVS), so disable
2305 	 * 11a channels if not supported
2306 	 */
2307 	if (!wl->enable_11a)
2308 		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2309 
2310 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2311 		     wl->enable_11a ? "" : "not ");
2312 
2313 	wl->state = WLCORE_STATE_ON;
2314 out:
2315 	return ret;
2316 }
2317 
2318 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2319 {
2320 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2321 }
2322 
2323 /*
2324  * Check whether a fw switch (i.e. moving from one loaded
2325  * fw to another) is needed. This function is also responsible
2326  * for updating wl->last_vif_count, so it must be called before
2327  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2328  * will be used).
2329  */
2330 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2331 				  struct vif_counter_data vif_counter_data,
2332 				  bool add)
2333 {
2334 	enum wl12xx_fw_type current_fw = wl->fw_type;
2335 	u8 vif_count = vif_counter_data.counter;
2336 
2337 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2338 		return false;
2339 
2340 	/* increase the vif count if this is a new vif */
2341 	if (add && !vif_counter_data.cur_vif_running)
2342 		vif_count++;
2343 
2344 	wl->last_vif_count = vif_count;
2345 
2346 	/* no need for fw change if the device is OFF */
2347 	if (wl->state == WLCORE_STATE_OFF)
2348 		return false;
2349 
2350 	/* no need for fw change if a single fw is used */
2351 	if (!wl->mr_fw_name)
2352 		return false;
2353 
2354 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2355 		return true;
2356 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2357 		return true;
2358 
2359 	return false;
2360 }
2361 
2362 /*
2363  * Enter "forced psm". Make sure the sta is in psm against the ap,
2364  * to make the fw switch a bit more disconnection-persistent.
2365  */
2366 static void wl12xx_force_active_psm(struct wl1271 *wl)
2367 {
2368 	struct wl12xx_vif *wlvif;
2369 
2370 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2371 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2372 	}
2373 }
2374 
2375 struct wlcore_hw_queue_iter_data {
2376 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2377 	/* current vif */
2378 	struct ieee80211_vif *vif;
2379 	/* is the current vif among those iterated */
2380 	bool cur_running;
2381 };
2382 
2383 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2384 				 struct ieee80211_vif *vif)
2385 {
2386 	struct wlcore_hw_queue_iter_data *iter_data = data;
2387 
2388 	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2389 		return;
2390 
2391 	if (iter_data->cur_running || vif == iter_data->vif) {
2392 		iter_data->cur_running = true;
2393 		return;
2394 	}
2395 
2396 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2397 }
2398 
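/*
 * Pick a hw queue base for this vif: every vif owns NUM_TX_QUEUES
 * consecutive mac80211 hw queues, and the iterator above marks the bases
 * already taken by running interfaces.
 */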
2399 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2400 					 struct wl12xx_vif *wlvif)
2401 {
2402 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2403 	struct wlcore_hw_queue_iter_data iter_data = {};
2404 	int i, q_base;
2405 
2406 	iter_data.vif = vif;
2407 
2408 	/* mark all bits taken by active interfaces */
2409 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2410 					IEEE80211_IFACE_ITER_RESUME_ALL,
2411 					wlcore_hw_queue_iter, &iter_data);
2412 
2413 	/* the current vif is already running in mac80211 (resume/recovery) */
2414 	if (iter_data.cur_running) {
2415 		wlvif->hw_queue_base = vif->hw_queue[0];
2416 		wl1271_debug(DEBUG_MAC80211,
2417 			     "using pre-allocated hw queue base %d",
2418 			     wlvif->hw_queue_base);
2419 
2420 		/* the interface might have changed type */
2421 		goto adjust_cab_queue;
2422 	}
2423 
2424 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2425 				     WLCORE_NUM_MAC_ADDRESSES);
2426 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2427 		return -EBUSY;
2428 
2429 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2430 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2431 		     wlvif->hw_queue_base);
2432 
2433 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2434 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2435 		/* register hw queues in mac80211 */
2436 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2437 	}
2438 
2439 adjust_cab_queue:
2440 	/* the last places are reserved for cab queues per interface */
2441 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2442 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2443 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2444 	else
2445 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2446 
2447 	return 0;
2448 }
2449 
2450 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2451 				   struct ieee80211_vif *vif)
2452 {
2453 	struct wl1271 *wl = hw->priv;
2454 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2455 	struct vif_counter_data vif_count;
2456 	int ret = 0;
2457 	u8 role_type;
2458 
2459 	if (wl->plt) {
2460 		wl1271_error("Adding Interface not allowed while in PLT mode");
2461 		return -EBUSY;
2462 	}
2463 
2464 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2465 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2466 
2467 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2468 		     ieee80211_vif_type_p2p(vif), vif->addr);
2469 
2470 	wl12xx_get_vif_count(hw, vif, &vif_count);
2471 
2472 	mutex_lock(&wl->mutex);
2473 	ret = wl1271_ps_elp_wakeup(wl);
2474 	if (ret < 0)
2475 		goto out_unlock;
2476 
2477 	/*
2478 	 * in some rare corner-case HW recovery scenarios it's possible to
2479 	 * get here before __wl1271_op_remove_interface has completed, so
2480 	 * opt out if that is the case.
2481 	 */
2482 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2483 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2484 		ret = -EBUSY;
2485 		goto out;
2486 	}
2487 
2488 
2489 	ret = wl12xx_init_vif_data(wl, vif);
2490 	if (ret < 0)
2491 		goto out;
2492 
2493 	wlvif->wl = wl;
2494 	role_type = wl12xx_get_role_type(wl, wlvif);
2495 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2496 		ret = -EINVAL;
2497 		goto out;
2498 	}
2499 
2500 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2501 	if (ret < 0)
2502 		goto out;
2503 
2504 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2505 		wl12xx_force_active_psm(wl);
2506 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2507 		mutex_unlock(&wl->mutex);
2508 		wl1271_recovery_work(&wl->recovery_work);
2509 		return 0;
2510 	}
2511 
2512 	/*
2513 	 * TODO: once the nvs issue is solved, move this block
2514 	 * to start(), and make sure the driver is ON here.
2515 	 */
2516 	if (wl->state == WLCORE_STATE_OFF) {
2517 		/*
2518 		 * we still need this in order to configure the fw
2519 		 * while uploading the nvs
2520 		 */
2521 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2522 
2523 		ret = wl12xx_init_fw(wl);
2524 		if (ret < 0)
2525 			goto out;
2526 	}
2527 
2528 	ret = wl12xx_cmd_role_enable(wl, vif->addr,
2529 				     role_type, &wlvif->role_id);
2530 	if (ret < 0)
2531 		goto out;
2532 
2533 	ret = wl1271_init_vif_specific(wl, vif);
2534 	if (ret < 0)
2535 		goto out;
2536 
2537 	list_add(&wlvif->list, &wl->wlvif_list);
2538 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2539 
2540 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2541 		wl->ap_count++;
2542 	else
2543 		wl->sta_count++;
2544 out:
2545 	wl1271_ps_elp_sleep(wl);
2546 out_unlock:
2547 	mutex_unlock(&wl->mutex);
2548 
2549 	return ret;
2550 }
2551 
2552 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2553 					 struct ieee80211_vif *vif,
2554 					 bool reset_tx_queues)
2555 {
2556 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2557 	int i, ret;
2558 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2559 
2560 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2561 
2562 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2563 		return;
2564 
2565 	/* because of hardware recovery, we may get here twice */
2566 	if (wl->state == WLCORE_STATE_OFF)
2567 		return;
2568 
2569 	wl1271_info("down");
2570 
2571 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2572 	    wl->scan_wlvif == wlvif) {
2573 		/*
2574 		 * Rearm the tx watchdog just before idling scan. This
2575 		 * prevents just-finished scans from triggering the watchdog
2576 		 */
2577 		wl12xx_rearm_tx_watchdog_locked(wl);
2578 
2579 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2580 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2581 		wl->scan_wlvif = NULL;
2582 		wl->scan.req = NULL;
2583 		ieee80211_scan_completed(wl->hw, true);
2584 	}
2585 
2586 	if (wl->sched_vif == wlvif)
2587 		wl->sched_vif = NULL;
2588 
2589 	if (wl->roc_vif == vif) {
2590 		wl->roc_vif = NULL;
2591 		ieee80211_remain_on_channel_expired(wl->hw);
2592 	}
2593 
2594 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2595 		/* disable active roles */
2596 		ret = wl1271_ps_elp_wakeup(wl);
2597 		if (ret < 0)
2598 			goto deinit;
2599 
2600 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2601 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2602 			if (wl12xx_dev_role_started(wlvif))
2603 				wl12xx_stop_dev(wl, wlvif);
2604 		}
2605 
2606 		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2607 		if (ret < 0)
2608 			goto deinit;
2609 
2610 		wl1271_ps_elp_sleep(wl);
2611 	}
2612 deinit:
2613 	wl12xx_tx_reset_wlvif(wl, wlvif);
2614 
2615 	/* clear all hlids (except system_hlid) */
2616 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2617 
2618 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2619 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2620 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2621 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2622 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2623 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2624 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2625 	} else {
2626 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2627 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2628 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2629 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2630 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2631 			wl12xx_free_rate_policy(wl,
2632 						&wlvif->ap.ucast_rate_idx[i]);
2633 		wl1271_free_ap_keys(wl, wlvif);
2634 	}
2635 
2636 	dev_kfree_skb(wlvif->probereq);
2637 	wlvif->probereq = NULL;
2638 	if (wl->last_wlvif == wlvif)
2639 		wl->last_wlvif = NULL;
2640 	list_del(&wlvif->list);
2641 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2642 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2643 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2644 
2645 	if (is_ap)
2646 		wl->ap_count--;
2647 	else
2648 		wl->sta_count--;
2649 
2650 	/*
2651 	 * Last AP removed, but stations remain. Configure sleep auth according
2652 	 * to the STA. Don't do this on unintended recovery.
2653 	 */
2654 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2655 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2656 		goto unlock;
2657 
2658 	if (wl->ap_count == 0 && is_ap) {
2659 		/* mask ap events */
2660 		wl->event_mask &= ~wl->ap_event_mask;
2661 		wl1271_event_unmask(wl);
2662 	}
2663 
2664 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2665 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2666 		/* Configure for power according to debugfs */
2667 		if (sta_auth != WL1271_PSM_ILLEGAL)
2668 			wl1271_acx_sleep_auth(wl, sta_auth);
2669 		/* Configure for ELP power saving */
2670 		else
2671 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2672 	}
2673 
2674 unlock:
2675 	mutex_unlock(&wl->mutex);
2676 
2677 	del_timer_sync(&wlvif->rx_streaming_timer);
2678 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2679 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2680 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2681 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2682 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2683 
2684 	mutex_lock(&wl->mutex);
2685 }
2686 
2687 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2688 				       struct ieee80211_vif *vif)
2689 {
2690 	struct wl1271 *wl = hw->priv;
2691 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2692 	struct wl12xx_vif *iter;
2693 	struct vif_counter_data vif_count;
2694 
2695 	wl12xx_get_vif_count(hw, vif, &vif_count);
2696 	mutex_lock(&wl->mutex);
2697 
2698 	if (wl->state == WLCORE_STATE_OFF ||
2699 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2700 		goto out;
2701 
2702 	/*
2703 	 * wl->vif can be null here if someone shuts down the interface
2704 	 * just when hardware recovery has been started.
2705 	 */
2706 	wl12xx_for_each_wlvif(wl, iter) {
2707 		if (iter != wlvif)
2708 			continue;
2709 
2710 		__wl1271_op_remove_interface(wl, vif, true);
2711 		break;
2712 	}
2713 	WARN_ON(iter != wlvif);
2714 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2715 		wl12xx_force_active_psm(wl);
2716 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2717 		wl12xx_queue_recovery_work(wl);
2718 	}
2719 out:
2720 	mutex_unlock(&wl->mutex);
2721 }
2722 
2723 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2724 				      struct ieee80211_vif *vif,
2725 				      enum nl80211_iftype new_type, bool p2p)
2726 {
2727 	struct wl1271 *wl = hw->priv;
2728 	int ret;
2729 
2730 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2731 	wl1271_op_remove_interface(hw, vif);
2732 
2733 	vif->type = new_type;
2734 	vif->p2p = p2p;
2735 	ret = wl1271_op_add_interface(hw, vif);
2736 
2737 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2738 	return ret;
2739 }
2740 
2741 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2742 {
2743 	int ret;
2744 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2745 
2746 	/*
2747 	 * One of the side effects of the JOIN command is that it clears
2748 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2749 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2750 	 * Currently the only valid scenario for JOIN during association
2751 	 * is on roaming, in which case we will also be given new keys.
2752 	 * Keep the below message for now, unless it starts bothering
2753 	 * users who really like to roam a lot :)
2754 	 */
2755 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2756 		wl1271_info("JOIN while associated.");
2757 
2758 	/* clear encryption type */
2759 	wlvif->encryption_type = KEY_NONE;
2760 
2761 	if (is_ibss)
2762 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2763 	else {
2764 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2765 			/*
2766 			 * TODO: this is an ugly workaround for wl12xx fw
2767 			 * bug - we are not able to tx/rx after the first
2768 			 * start_sta, so make dummy start+stop calls,
2769 			 * and then call start_sta again.
2770 			 * this should be fixed in the fw.
2771 			 */
2772 			wl12xx_cmd_role_start_sta(wl, wlvif);
2773 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2774 		}
2775 
2776 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2777 	}
2778 
2779 	return ret;
2780 }
2781 
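/* Copy the SSID IE found at @offset in the given frame into the vif data. */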
2782 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2783 			    int offset)
2784 {
2785 	u8 ssid_len;
2786 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2787 					 skb->len - offset);
2788 
2789 	if (!ptr) {
2790 		wl1271_error("No SSID in IEs!");
2791 		return -ENOENT;
2792 	}
2793 
2794 	ssid_len = ptr[1];
2795 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2796 		wl1271_error("SSID is too long!");
2797 		return -EINVAL;
2798 	}
2799 
2800 	wlvif->ssid_len = ssid_len;
2801 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2802 	return 0;
2803 }
2804 
2805 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2806 {
2807 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2808 	struct sk_buff *skb;
2809 	int ieoffset;
2810 
2811 	/* we currently only support setting the ssid from the ap probe req */
2812 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2813 		return -EINVAL;
2814 
2815 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2816 	if (!skb)
2817 		return -EINVAL;
2818 
2819 	ieoffset = offsetof(struct ieee80211_mgmt,
2820 			    u.probe_req.variable);
2821 	wl1271_ssid_set(wlvif, skb, ieoffset);
2822 	dev_kfree_skb(skb);
2823 
2824 	return 0;
2825 }
2826 
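/*
 * Apply the association state reported by mac80211: record the AID and
 * beacon parameters, rebuild the ps-poll/probe-req/keep-alive templates
 * and enable connection monitoring in the fw.
 */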
2827 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2828 			    struct ieee80211_bss_conf *bss_conf,
2829 			    u32 sta_rate_set)
2830 {
2831 	int ieoffset;
2832 	int ret;
2833 
2834 	wlvif->aid = bss_conf->aid;
2835 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2836 	wlvif->beacon_int = bss_conf->beacon_int;
2837 	wlvif->wmm_enabled = bss_conf->qos;
2838 
2839 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2840 
2841 	/*
2842 	 * with wl1271, we don't need to update the
2843 	 * beacon_int and dtim_period, because the firmware
2844 	 * updates them by itself when the first beacon is
2845 	 * received after a join.
2846 	 */
2847 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2848 	if (ret < 0)
2849 		return ret;
2850 
2851 	/*
2852 	 * Get a template for hardware connection maintenance
2853 	 */
2854 	dev_kfree_skb(wlvif->probereq);
2855 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2856 							wlvif,
2857 							NULL);
2858 	ieoffset = offsetof(struct ieee80211_mgmt,
2859 			    u.probe_req.variable);
2860 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2861 
2862 	/* enable the connection monitoring feature */
2863 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2864 	if (ret < 0)
2865 		return ret;
2866 
2867 	/*
2868 	 * The join command disables keep-alive mode, shuts down its process,
2869 	 * and also clears the template config, so we need to reset it all after
2870 	 * the join. The acx_aid starts the keep-alive process, and the order
2871 	 * of the commands below is relevant.
2872 	 */
2873 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2874 	if (ret < 0)
2875 		return ret;
2876 
2877 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2878 	if (ret < 0)
2879 		return ret;
2880 
2881 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2882 	if (ret < 0)
2883 		return ret;
2884 
2885 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2886 					   wlvif->sta.klv_template_id,
2887 					   ACX_KEEP_ALIVE_TPL_VALID);
2888 	if (ret < 0)
2889 		return ret;
2890 
2891 	/*
2892 	 * The default fw psm configuration is AUTO, while mac80211 default
2893 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2894 	 */
2895 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2896 	if (ret < 0)
2897 		return ret;
2898 
2899 	if (sta_rate_set) {
2900 		wlvif->rate_set =
2901 			wl1271_tx_enabled_rates_get(wl,
2902 						    sta_rate_set,
2903 						    wlvif->band);
2904 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2905 		if (ret < 0)
2906 			return ret;
2907 	}
2908 
2909 	return ret;
2910 }
2911 
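/*
 * Undo the association-time configuration: drop the AID, free the
 * probe-request template, and disable connection monitoring, keep-alive
 * and beacon filtering in the fw.
 */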
2912 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2913 {
2914 	int ret;
2915 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2916 
2917 	/* make sure we are associated (sta) */
2918 	if (sta &&
2919 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2920 		return false;
2921 
2922 	/* make sure we are joined (ibss) */
2923 	if (!sta &&
2924 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2925 		return false;
2926 
2927 	if (sta) {
2928 		/* use defaults when not associated */
2929 		wlvif->aid = 0;
2930 
2931 		/* free probe-request template */
2932 		dev_kfree_skb(wlvif->probereq);
2933 		wlvif->probereq = NULL;
2934 
2935 		/* disable connection monitor features */
2936 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2937 		if (ret < 0)
2938 			return ret;
2939 
2940 		/* Disable the keep-alive feature */
2941 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2942 		if (ret < 0)
2943 			return ret;
2944 
2945 		/* disable beacon filtering */
2946 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
2947 		if (ret < 0)
2948 			return ret;
2949 	}
2950 
2951 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2952 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2953 
2954 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
2955 		ieee80211_chswitch_done(vif, false);
2956 		cancel_delayed_work(&wlvif->channel_switch_work);
2957 	}
2958 
2959 	/* invalidate keep-alive template */
2960 	wl1271_acx_keep_alive_config(wl, wlvif,
2961 				     wlvif->sta.klv_template_id,
2962 				     ACX_KEEP_ALIVE_TPL_INVALID);
2963 
2964 	return 0;
2965 }
2966 
2967 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2968 {
2969 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2970 	wlvif->rate_set = wlvif->basic_rate_set;
2971 }
2972 
2973 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2974 				   bool idle)
2975 {
2976 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2977 
2978 	if (idle == cur_idle)
2979 		return;
2980 
2981 	if (idle) {
2982 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2983 	} else {
2984 		/* The current firmware only supports sched_scan in idle */
2985 		if (wl->sched_vif == wlvif)
2986 			wl->ops->sched_scan_stop(wl, wlvif);
2987 
2988 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2989 	}
2990 }
2991 
2992 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2993 			     struct ieee80211_conf *conf, u32 changed)
2994 {
2995 	int ret;
2996 
2997 	if (conf->power_level != wlvif->power_level) {
2998 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2999 		if (ret < 0)
3000 			return ret;
3001 
3002 		wlvif->power_level = conf->power_level;
3003 	}
3004 
3005 	return 0;
3006 }
3007 
3008 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3009 {
3010 	struct wl1271 *wl = hw->priv;
3011 	struct wl12xx_vif *wlvif;
3012 	struct ieee80211_conf *conf = &hw->conf;
3013 	int ret = 0;
3014 
3015 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3016 		     " changed 0x%x",
3017 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3018 		     conf->power_level,
3019 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3020 			 changed);
3021 
3022 	mutex_lock(&wl->mutex);
3023 
3024 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3025 		wl->power_level = conf->power_level;
3026 
3027 	if (unlikely(wl->state != WLCORE_STATE_ON))
3028 		goto out;
3029 
3030 	ret = wl1271_ps_elp_wakeup(wl);
3031 	if (ret < 0)
3032 		goto out;
3033 
3034 	/* configure each interface */
3035 	wl12xx_for_each_wlvif(wl, wlvif) {
3036 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3037 		if (ret < 0)
3038 			goto out_sleep;
3039 	}
3040 
3041 out_sleep:
3042 	wl1271_ps_elp_sleep(wl);
3043 
3044 out:
3045 	mutex_unlock(&wl->mutex);
3046 
3047 	return ret;
3048 }
3049 
3050 struct wl1271_filter_params {
3051 	bool enabled;
3052 	int mc_list_length;
3053 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3054 };
3055 
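/*
 * Snapshot the multicast address list into a heap-allocated filter-params
 * struct; the pointer is handed back to the configure_filter callback
 * through the u64 multicast cookie.
 */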
3056 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3057 				       struct netdev_hw_addr_list *mc_list)
3058 {
3059 	struct wl1271_filter_params *fp;
3060 	struct netdev_hw_addr *ha;
3061 
3062 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3063 	if (!fp) {
3064 		wl1271_error("Out of memory setting filters.");
3065 		return 0;
3066 	}
3067 
3068 	/* update multicast filtering parameters */
3069 	fp->mc_list_length = 0;
3070 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3071 		fp->enabled = false;
3072 	} else {
3073 		fp->enabled = true;
3074 		netdev_hw_addr_list_for_each(ha, mc_list) {
3075 			memcpy(fp->mc_list[fp->mc_list_length],
3076 					ha->addr, ETH_ALEN);
3077 			fp->mc_list_length++;
3078 		}
3079 	}
3080 
3081 	return (u64)(unsigned long)fp;
3082 }
3083 
3084 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3085 				  FIF_ALLMULTI | \
3086 				  FIF_FCSFAIL | \
3087 				  FIF_BCN_PRBRESP_PROMISC | \
3088 				  FIF_CONTROL | \
3089 				  FIF_OTHER_BSS)
3090 
3091 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3092 				       unsigned int changed,
3093 				       unsigned int *total, u64 multicast)
3094 {
3095 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3096 	struct wl1271 *wl = hw->priv;
3097 	struct wl12xx_vif *wlvif;
3098 
3099 	int ret;
3100 
3101 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3102 		     " total %x", changed, *total);
3103 
3104 	mutex_lock(&wl->mutex);
3105 
3106 	*total &= WL1271_SUPPORTED_FILTERS;
3107 	changed &= WL1271_SUPPORTED_FILTERS;
3108 
3109 	if (unlikely(wl->state != WLCORE_STATE_ON))
3110 		goto out;
3111 
3112 	ret = wl1271_ps_elp_wakeup(wl);
3113 	if (ret < 0)
3114 		goto out;
3115 
3116 	wl12xx_for_each_wlvif(wl, wlvif) {
3117 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3118 			if (*total & FIF_ALLMULTI)
3119 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3120 								   false,
3121 								   NULL, 0);
3122 			else if (fp)
3123 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3124 							fp->enabled,
3125 							fp->mc_list,
3126 							fp->mc_list_length);
3127 			if (ret < 0)
3128 				goto out_sleep;
3129 		}
3130 	}
3131 
3132 	/*
3133 	 * the fw doesn't provide an api to configure the filters. instead,
3134 	 * the filter configuration is based on the active roles / ROC
3135 	 * state.
3136 	 */
3137 
3138 out_sleep:
3139 	wl1271_ps_elp_sleep(wl);
3140 
3141 out:
3142 	mutex_unlock(&wl->mutex);
3143 	kfree(fp);
3144 }
3145 
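/*
 * In AP mode, keys given to us before the AP role has started cannot be
 * programmed yet; record them here so wl1271_ap_init_hwenc() can set them
 * once the AP is up.
 */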
3146 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3147 				u8 id, u8 key_type, u8 key_size,
3148 				const u8 *key, u8 hlid, u32 tx_seq_32,
3149 				u16 tx_seq_16)
3150 {
3151 	struct wl1271_ap_key *ap_key;
3152 	int i;
3153 
3154 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3155 
3156 	if (key_size > MAX_KEY_SIZE)
3157 		return -EINVAL;
3158 
3159 	/*
3160 	 * Find next free entry in ap_keys. Also check we are not replacing
3161 	 * an existing key.
3162 	 */
3163 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3164 		if (wlvif->ap.recorded_keys[i] == NULL)
3165 			break;
3166 
3167 		if (wlvif->ap.recorded_keys[i]->id == id) {
3168 			wl1271_warning("trying to record key replacement");
3169 			return -EINVAL;
3170 		}
3171 	}
3172 
3173 	if (i == MAX_NUM_KEYS)
3174 		return -EBUSY;
3175 
3176 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3177 	if (!ap_key)
3178 		return -ENOMEM;
3179 
3180 	ap_key->id = id;
3181 	ap_key->key_type = key_type;
3182 	ap_key->key_size = key_size;
3183 	memcpy(ap_key->key, key, key_size);
3184 	ap_key->hlid = hlid;
3185 	ap_key->tx_seq_32 = tx_seq_32;
3186 	ap_key->tx_seq_16 = tx_seq_16;
3187 
3188 	wlvif->ap.recorded_keys[i] = ap_key;
3189 	return 0;
3190 }
3191 
3192 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3193 {
3194 	int i;
3195 
3196 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3197 		kfree(wlvif->ap.recorded_keys[i]);
3198 		wlvif->ap.recorded_keys[i] = NULL;
3199 	}
3200 }
3201 
3202 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3203 {
3204 	int i, ret = 0;
3205 	struct wl1271_ap_key *key;
3206 	bool wep_key_added = false;
3207 
3208 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3209 		u8 hlid;
3210 		if (wlvif->ap.recorded_keys[i] == NULL)
3211 			break;
3212 
3213 		key = wlvif->ap.recorded_keys[i];
3214 		hlid = key->hlid;
3215 		if (hlid == WL12XX_INVALID_LINK_ID)
3216 			hlid = wlvif->ap.bcast_hlid;
3217 
3218 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3219 					    key->id, key->key_type,
3220 					    key->key_size, key->key,
3221 					    hlid, key->tx_seq_32,
3222 					    key->tx_seq_16);
3223 		if (ret < 0)
3224 			goto out;
3225 
3226 		if (key->key_type == KEY_WEP)
3227 			wep_key_added = true;
3228 	}
3229 
3230 	if (wep_key_added) {
3231 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3232 						     wlvif->ap.bcast_hlid);
3233 		if (ret < 0)
3234 			goto out;
3235 	}
3236 
3237 out:
3238 	wl1271_free_ap_keys(wl, wlvif);
3239 	return ret;
3240 }
3241 
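/*
 * Program a single key into the fw, handling the AP (per-hlid) and
 * STA (per-address) cases separately.
 */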
3242 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3243 		       u16 action, u8 id, u8 key_type,
3244 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3245 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3246 {
3247 	int ret;
3248 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3249 
3250 	if (is_ap) {
3251 		struct wl1271_station *wl_sta;
3252 		u8 hlid;
3253 
3254 		if (sta) {
3255 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3256 			hlid = wl_sta->hlid;
3257 		} else {
3258 			hlid = wlvif->ap.bcast_hlid;
3259 		}
3260 
3261 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3262 			/*
3263 			 * We do not support removing keys after AP shutdown.
3264 			 * Pretend we do to make mac80211 happy.
3265 			 */
3266 			if (action != KEY_ADD_OR_REPLACE)
3267 				return 0;
3268 
3269 			ret = wl1271_record_ap_key(wl, wlvif, id,
3270 					     key_type, key_size,
3271 					     key, hlid, tx_seq_32,
3272 					     tx_seq_16);
3273 		} else {
3274 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3275 					     id, key_type, key_size,
3276 					     key, hlid, tx_seq_32,
3277 					     tx_seq_16);
3278 		}
3279 
3280 		if (ret < 0)
3281 			return ret;
3282 	} else {
3283 		const u8 *addr;
3284 		static const u8 bcast_addr[ETH_ALEN] = {
3285 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3286 		};
3287 
3288 		addr = sta ? sta->addr : bcast_addr;
3289 
3290 		if (is_zero_ether_addr(addr)) {
3291 			/* We don't support TX-only encryption */
3292 			return -EOPNOTSUPP;
3293 		}
3294 
3295 		/* The wl1271 does not allow removing unicast keys - they
3296 		   will be cleared automatically on the next CMD_JOIN. Ignore
3297 		   the request silently, as we don't want mac80211 to emit
3298 		   an error message. */
3299 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3300 			return 0;
3301 
3302 		/* don't remove key if hlid was already deleted */
3303 		if (action == KEY_REMOVE &&
3304 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3305 			return 0;
3306 
3307 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3308 					     id, key_type, key_size,
3309 					     key, addr, tx_seq_32,
3310 					     tx_seq_16);
3311 		if (ret < 0)
3312 			return ret;
3313 
3314 	}
3315 
3316 	return 0;
3317 }
3318 
3319 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3320 			     struct ieee80211_vif *vif,
3321 			     struct ieee80211_sta *sta,
3322 			     struct ieee80211_key_conf *key_conf)
3323 {
3324 	struct wl1271 *wl = hw->priv;
3325 	int ret;
3326 	bool might_change_spare =
3327 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3328 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3329 
3330 	if (might_change_spare) {
3331 		/*
3332 		 * stop the queues and flush to ensure the next packets are
3333 		 * in sync with FW spare block accounting
3334 		 */
3335 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3336 		wl1271_tx_flush(wl);
3337 	}
3338 
3339 	mutex_lock(&wl->mutex);
3340 
3341 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3342 		ret = -EAGAIN;
3343 		goto out_wake_queues;
3344 	}
3345 
3346 	ret = wl1271_ps_elp_wakeup(wl);
3347 	if (ret < 0)
3348 		goto out_wake_queues;
3349 
3350 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3351 
3352 	wl1271_ps_elp_sleep(wl);
3353 
3354 out_wake_queues:
3355 	if (might_change_spare)
3356 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3357 
3358 	mutex_unlock(&wl->mutex);
3359 
3360 	return ret;
3361 }
3362 
3363 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3364 		   struct ieee80211_vif *vif,
3365 		   struct ieee80211_sta *sta,
3366 		   struct ieee80211_key_conf *key_conf)
3367 {
3368 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3369 	int ret;
3370 	u32 tx_seq_32 = 0;
3371 	u16 tx_seq_16 = 0;
3372 	u8 key_type;
3373 	u8 hlid;
3374 
3375 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3376 
3377 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3378 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3379 		     key_conf->cipher, key_conf->keyidx,
3380 		     key_conf->keylen, key_conf->flags);
3381 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3382 
3383 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3384 		if (sta) {
3385 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3386 			hlid = wl_sta->hlid;
3387 		} else {
3388 			hlid = wlvif->ap.bcast_hlid;
3389 		}
3390 	else
3391 		hlid = wlvif->sta.hlid;
3392 
3393 	if (hlid != WL12XX_INVALID_LINK_ID) {
3394 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3395 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3396 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3397 	}
3398 
3399 	switch (key_conf->cipher) {
3400 	case WLAN_CIPHER_SUITE_WEP40:
3401 	case WLAN_CIPHER_SUITE_WEP104:
3402 		key_type = KEY_WEP;
3403 
3404 		key_conf->hw_key_idx = key_conf->keyidx;
3405 		break;
3406 	case WLAN_CIPHER_SUITE_TKIP:
3407 		key_type = KEY_TKIP;
3408 		key_conf->hw_key_idx = key_conf->keyidx;
3409 		break;
3410 	case WLAN_CIPHER_SUITE_CCMP:
3411 		key_type = KEY_AES;
3412 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3413 		break;
3414 	case WL1271_CIPHER_SUITE_GEM:
3415 		key_type = KEY_GEM;
3416 		break;
3417 	default:
3418 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3419 
3420 		return -EOPNOTSUPP;
3421 	}
3422 
3423 	switch (cmd) {
3424 	case SET_KEY:
3425 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3426 				 key_conf->keyidx, key_type,
3427 				 key_conf->keylen, key_conf->key,
3428 				 tx_seq_32, tx_seq_16, sta);
3429 		if (ret < 0) {
3430 			wl1271_error("Could not add or replace key");
3431 			return ret;
3432 		}
3433 
3434 		/*
3435 		 * reconfigure the arp response if the unicast (or common)
3436 		 * encryption key type was changed
3437 		 */
3438 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3439 		    (sta || key_type == KEY_WEP) &&
3440 		    wlvif->encryption_type != key_type) {
3441 			wlvif->encryption_type = key_type;
3442 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3443 			if (ret < 0) {
3444 				wl1271_warning("build arp rsp failed: %d", ret);
3445 				return ret;
3446 			}
3447 		}
3448 		break;
3449 
3450 	case DISABLE_KEY:
3451 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3452 				     key_conf->keyidx, key_type,
3453 				     key_conf->keylen, key_conf->key,
3454 				     0, 0, sta);
3455 		if (ret < 0) {
3456 			wl1271_error("Could not remove key");
3457 			return ret;
3458 		}
3459 		break;
3460 
3461 	default:
3462 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3463 		return -EOPNOTSUPP;
3464 	}
3465 
3466 	return ret;
3467 }
3468 EXPORT_SYMBOL_GPL(wlcore_set_key);
3469 
3470 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3471 					  struct ieee80211_vif *vif,
3472 					  int key_idx)
3473 {
3474 	struct wl1271 *wl = hw->priv;
3475 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3476 	int ret;
3477 
3478 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3479 		     key_idx);
3480 
3481 	/* we don't handle unsetting of default key */
3482 	if (key_idx == -1)
3483 		return;
3484 
3485 	mutex_lock(&wl->mutex);
3486 
3487 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3488 		ret = -EAGAIN;
3489 		goto out_unlock;
3490 	}
3491 
3492 	ret = wl1271_ps_elp_wakeup(wl);
3493 	if (ret < 0)
3494 		goto out_unlock;
3495 
3496 	wlvif->default_key = key_idx;
3497 
3498 	/* the default WEP key needs to be configured at least once */
3499 	if (wlvif->encryption_type == KEY_WEP) {
3500 		ret = wl12xx_cmd_set_default_wep_key(wl,
3501 				key_idx,
3502 				wlvif->sta.hlid);
3503 		if (ret < 0)
3504 			goto out_sleep;
3505 	}
3506 
3507 out_sleep:
3508 	wl1271_ps_elp_sleep(wl);
3509 
3510 out_unlock:
3511 	mutex_unlock(&wl->mutex);
3512 }
3513 
3514 void wlcore_regdomain_config(struct wl1271 *wl)
3515 {
3516 	int ret;
3517 
3518 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3519 		return;
3520 
3521 	mutex_lock(&wl->mutex);
3522 
3523 	if (unlikely(wl->state != WLCORE_STATE_ON))
3524 		goto out;
3525 
3526 	ret = wl1271_ps_elp_wakeup(wl);
3527 	if (ret < 0)
3528 		goto out;
3529 
3530 	ret = wlcore_cmd_regdomain_config_locked(wl);
3531 	if (ret < 0) {
3532 		wl12xx_queue_recovery_work(wl);
3533 		goto out;
3534 	}
3535 
3536 	wl1271_ps_elp_sleep(wl);
3537 out:
3538 	mutex_unlock(&wl->mutex);
3539 }
3540 
3541 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3542 			     struct ieee80211_vif *vif,
3543 			     struct cfg80211_scan_request *req)
3544 {
3545 	struct wl1271 *wl = hw->priv;
3546 	int ret;
3547 	u8 *ssid = NULL;
3548 	size_t len = 0;
3549 
3550 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3551 
3552 	if (req->n_ssids) {
3553 		ssid = req->ssids[0].ssid;
3554 		len = req->ssids[0].ssid_len;
3555 	}
3556 
3557 	mutex_lock(&wl->mutex);
3558 
3559 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3560 		/*
3561 		 * We cannot return -EBUSY here because cfg80211 will expect
3562 		 * a call to ieee80211_scan_completed if we do - in this case
3563 		 * there won't be any call.
3564 		 */
3565 		ret = -EAGAIN;
3566 		goto out;
3567 	}
3568 
3569 	ret = wl1271_ps_elp_wakeup(wl);
3570 	if (ret < 0)
3571 		goto out;
3572 
3573 	/* fail if there is any role in ROC */
3574 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3575 		/* don't allow scanning right now */
3576 		ret = -EBUSY;
3577 		goto out_sleep;
3578 	}
3579 
3580 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3581 out_sleep:
3582 	wl1271_ps_elp_sleep(wl);
3583 out:
3584 	mutex_unlock(&wl->mutex);
3585 
3586 	return ret;
3587 }
3588 
3589 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3590 				     struct ieee80211_vif *vif)
3591 {
3592 	struct wl1271 *wl = hw->priv;
3593 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3594 	int ret;
3595 
3596 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3597 
3598 	mutex_lock(&wl->mutex);
3599 
3600 	if (unlikely(wl->state != WLCORE_STATE_ON))
3601 		goto out;
3602 
3603 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3604 		goto out;
3605 
3606 	ret = wl1271_ps_elp_wakeup(wl);
3607 	if (ret < 0)
3608 		goto out;
3609 
3610 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3611 		ret = wl->ops->scan_stop(wl, wlvif);
3612 		if (ret < 0)
3613 			goto out_sleep;
3614 	}
3615 
3616 	/*
3617 	 * Rearm the tx watchdog just before idling scan. This
3618 	 * prevents just-finished scans from triggering the watchdog
3619 	 */
3620 	wl12xx_rearm_tx_watchdog_locked(wl);
3621 
3622 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3623 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3624 	wl->scan_wlvif = NULL;
3625 	wl->scan.req = NULL;
3626 	ieee80211_scan_completed(wl->hw, true);
3627 
3628 out_sleep:
3629 	wl1271_ps_elp_sleep(wl);
3630 out:
3631 	mutex_unlock(&wl->mutex);
3632 
3633 	cancel_delayed_work_sync(&wl->scan_complete_work);
3634 }
3635 
3636 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3637 				      struct ieee80211_vif *vif,
3638 				      struct cfg80211_sched_scan_request *req,
3639 				      struct ieee80211_sched_scan_ies *ies)
3640 {
3641 	struct wl1271 *wl = hw->priv;
3642 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3643 	int ret;
3644 
3645 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3646 
3647 	mutex_lock(&wl->mutex);
3648 
3649 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3650 		ret = -EAGAIN;
3651 		goto out;
3652 	}
3653 
3654 	ret = wl1271_ps_elp_wakeup(wl);
3655 	if (ret < 0)
3656 		goto out;
3657 
3658 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3659 	if (ret < 0)
3660 		goto out_sleep;
3661 
3662 	wl->sched_vif = wlvif;
3663 
3664 out_sleep:
3665 	wl1271_ps_elp_sleep(wl);
3666 out:
3667 	mutex_unlock(&wl->mutex);
3668 	return ret;
3669 }
3670 
3671 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3672 				     struct ieee80211_vif *vif)
3673 {
3674 	struct wl1271 *wl = hw->priv;
3675 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3676 	int ret;
3677 
3678 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3679 
3680 	mutex_lock(&wl->mutex);
3681 
3682 	if (unlikely(wl->state != WLCORE_STATE_ON))
3683 		goto out;
3684 
3685 	ret = wl1271_ps_elp_wakeup(wl);
3686 	if (ret < 0)
3687 		goto out;
3688 
3689 	wl->ops->sched_scan_stop(wl, wlvif);
3690 
3691 	wl1271_ps_elp_sleep(wl);
3692 out:
3693 	mutex_unlock(&wl->mutex);
3694 
3695 	return 0;
3696 }
3697 
3698 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3699 {
3700 	struct wl1271 *wl = hw->priv;
3701 	int ret = 0;
3702 
3703 	mutex_lock(&wl->mutex);
3704 
3705 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3706 		ret = -EAGAIN;
3707 		goto out;
3708 	}
3709 
3710 	ret = wl1271_ps_elp_wakeup(wl);
3711 	if (ret < 0)
3712 		goto out;
3713 
3714 	ret = wl1271_acx_frag_threshold(wl, value);
3715 	if (ret < 0)
3716 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3717 
3718 	wl1271_ps_elp_sleep(wl);
3719 
3720 out:
3721 	mutex_unlock(&wl->mutex);
3722 
3723 	return ret;
3724 }
3725 
3726 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3727 {
3728 	struct wl1271 *wl = hw->priv;
3729 	struct wl12xx_vif *wlvif;
3730 	int ret = 0;
3731 
3732 	mutex_lock(&wl->mutex);
3733 
3734 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3735 		ret = -EAGAIN;
3736 		goto out;
3737 	}
3738 
3739 	ret = wl1271_ps_elp_wakeup(wl);
3740 	if (ret < 0)
3741 		goto out;
3742 
3743 	wl12xx_for_each_wlvif(wl, wlvif) {
3744 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3745 		if (ret < 0)
3746 			wl1271_warning("set rts threshold failed: %d", ret);
3747 	}
3748 	wl1271_ps_elp_sleep(wl);
3749 
3750 out:
3751 	mutex_unlock(&wl->mutex);
3752 
3753 	return ret;
3754 }
3755 
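/* Remove the first IE with the given EID from the skb, in place. */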
3756 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3757 {
3758 	int len;
3759 	const u8 *next, *end = skb->data + skb->len;
3760 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3761 					skb->len - ieoffset);
3762 	if (!ie)
3763 		return;
3764 	len = ie[1] + 2;
3765 	next = ie + len;
3766 	memmove(ie, next, end - next);
3767 	skb_trim(skb, skb->len - len);
3768 }
3769 
3770 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3771 					    unsigned int oui, u8 oui_type,
3772 					    int ieoffset)
3773 {
3774 	int len;
3775 	const u8 *next, *end = skb->data + skb->len;
3776 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3777 					       skb->data + ieoffset,
3778 					       skb->len - ieoffset);
3779 	if (!ie)
3780 		return;
3781 	len = ie[1] + 2;
3782 	next = ie + len;
3783 	memmove(ie, next, end - next);
3784 	skb_trim(skb, skb->len - len);
3785 }
3786 
3787 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3788 					 struct ieee80211_vif *vif)
3789 {
3790 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3791 	struct sk_buff *skb;
3792 	int ret;
3793 
3794 	skb = ieee80211_proberesp_get(wl->hw, vif);
3795 	if (!skb)
3796 		return -EOPNOTSUPP;
3797 
3798 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3799 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3800 				      skb->data,
3801 				      skb->len, 0,
3802 				      rates);
3803 	dev_kfree_skb(skb);
3804 
3805 	if (ret < 0)
3806 		goto out;
3807 
3808 	wl1271_debug(DEBUG_AP, "probe response updated");
3809 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3810 
3811 out:
3812 	return ret;
3813 }
3814 
3815 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3816 					     struct ieee80211_vif *vif,
3817 					     u8 *probe_rsp_data,
3818 					     size_t probe_rsp_len,
3819 					     u32 rates)
3820 {
3821 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3822 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3823 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3824 	int ssid_ie_offset, ie_offset, templ_len;
3825 	const u8 *ptr;
3826 
3827 	/* no need to change probe response if the SSID is set correctly */
3828 	if (wlvif->ssid_len > 0)
3829 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3830 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3831 					       probe_rsp_data,
3832 					       probe_rsp_len, 0,
3833 					       rates);
3834 
3835 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3836 		wl1271_error("probe_rsp template too big");
3837 		return -EINVAL;
3838 	}
3839 
3840 	/* start searching from IE offset */
3841 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3842 
3843 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3844 			       probe_rsp_len - ie_offset);
3845 	if (!ptr) {
3846 		wl1271_error("No SSID in beacon!");
3847 		return -EINVAL;
3848 	}
3849 
3850 	ssid_ie_offset = ptr - probe_rsp_data;
3851 	ptr += (ptr[1] + 2);
3852 
3853 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3854 
3855 	/* insert SSID from bss_conf */
3856 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3857 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3858 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3859 	       bss_conf->ssid, bss_conf->ssid_len);
3860 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3861 
3862 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3863 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3864 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3865 
3866 	return wl1271_cmd_template_set(wl, wlvif->role_id,
3867 				       CMD_TEMPL_AP_PROBE_RESPONSE,
3868 				       probe_rsp_templ,
3869 				       templ_len, 0,
3870 				       rates);
3871 }
3872 
3873 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3874 				       struct ieee80211_vif *vif,
3875 				       struct ieee80211_bss_conf *bss_conf,
3876 				       u32 changed)
3877 {
3878 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3879 	int ret = 0;
3880 
3881 	if (changed & BSS_CHANGED_ERP_SLOT) {
3882 		if (bss_conf->use_short_slot)
3883 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3884 		else
3885 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3886 		if (ret < 0) {
3887 			wl1271_warning("Set slot time failed %d", ret);
3888 			goto out;
3889 		}
3890 	}
3891 
3892 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3893 		if (bss_conf->use_short_preamble)
3894 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3895 		else
3896 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3897 	}
3898 
3899 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3900 		if (bss_conf->use_cts_prot)
3901 			ret = wl1271_acx_cts_protect(wl, wlvif,
3902 						     CTSPROTECT_ENABLE);
3903 		else
3904 			ret = wl1271_acx_cts_protect(wl, wlvif,
3905 						     CTSPROTECT_DISABLE);
3906 		if (ret < 0) {
3907 			wl1271_warning("Set ctsprotect failed %d", ret);
3908 			goto out;
3909 		}
3910 	}
3911 
3912 out:
3913 	return ret;
3914 }
3915 
3916 static int wlcore_set_beacon_template(struct wl1271 *wl,
3917 				      struct ieee80211_vif *vif,
3918 				      bool is_ap)
3919 {
3920 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3921 	struct ieee80211_hdr *hdr;
3922 	u32 min_rate;
3923 	int ret;
3924 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3925 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3926 	u16 tmpl_id;
3927 
3928 	if (!beacon) {
3929 		ret = -EINVAL;
3930 		goto out;
3931 	}
3932 
3933 	wl1271_debug(DEBUG_MASTER, "beacon updated");
3934 
3935 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3936 	if (ret < 0) {
3937 		dev_kfree_skb(beacon);
3938 		goto out;
3939 	}
3940 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3941 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3942 		CMD_TEMPL_BEACON;
3943 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3944 				      beacon->data,
3945 				      beacon->len, 0,
3946 				      min_rate);
3947 	if (ret < 0) {
3948 		dev_kfree_skb(beacon);
3949 		goto out;
3950 	}
3951 
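	/* the vif is considered WMM-enabled iff the beacon carries
	 * the Microsoft WMM vendor IE */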
3952 	wlvif->wmm_enabled =
3953 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3954 					WLAN_OUI_TYPE_MICROSOFT_WMM,
3955 					beacon->data + ieoffset,
3956 					beacon->len - ieoffset);
3957 
3958 	/*
3959 	 * In case userspace has already set a probe-response template
3960 	 * explicitly, don't override it with the beacon data.
3961 	 */
3962 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3963 		goto end_bcn;
3964 
3965 	/* remove TIM ie from probe response */
3966 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3967 
3968 	/*
3969 	 * remove p2p ie from probe response.
3970 	 * the fw responds to probe requests that don't include
3971 	 * the p2p ie. probe requests with a p2p ie will be passed up,
3972 	 * and will be answered by the supplicant (the spec
3973 	 * forbids including the p2p ie when responding to probe
3974 	 * requests that didn't include it).
3975 	 */
3976 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3977 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3978 
3979 	hdr = (struct ieee80211_hdr *) beacon->data;
3980 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3981 					 IEEE80211_STYPE_PROBE_RESP);
3982 	if (is_ap)
3983 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3984 							   beacon->data,
3985 							   beacon->len,
3986 							   min_rate);
3987 	else
3988 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3989 					      CMD_TEMPL_PROBE_RESPONSE,
3990 					      beacon->data,
3991 					      beacon->len, 0,
3992 					      min_rate);
3993 end_bcn:
3994 	dev_kfree_skb(beacon);
3995 	if (ret < 0)
3996 		goto out;
3997 
3998 out:
3999 	return ret;
4000 }
4001 
4002 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4003 					  struct ieee80211_vif *vif,
4004 					  struct ieee80211_bss_conf *bss_conf,
4005 					  u32 changed)
4006 {
4007 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4008 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4009 	int ret = 0;
4010 
4011 	if (changed & BSS_CHANGED_BEACON_INT) {
4012 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4013 			bss_conf->beacon_int);
4014 
4015 		wlvif->beacon_int = bss_conf->beacon_int;
4016 	}
4017 
4018 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4019 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4020 
4021 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4022 	}
4023 
4024 	if (changed & BSS_CHANGED_BEACON) {
4025 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4026 		if (ret < 0)
4027 			goto out;
4028 	}
4029 
4030 out:
4031 	if (ret != 0)
4032 		wl1271_error("beacon info change failed: %d", ret);
4033 	return ret;
4034 }
4035 
4036 /* AP mode changes */
4037 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4038 				       struct ieee80211_vif *vif,
4039 				       struct ieee80211_bss_conf *bss_conf,
4040 				       u32 changed)
4041 {
4042 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4043 	int ret = 0;
4044 
4045 	if (changed & BSS_CHANGED_BASIC_RATES) {
4046 		u32 rates = bss_conf->basic_rates;
4047 
4048 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4049 								 wlvif->band);
4050 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4051 							wlvif->basic_rate_set);
4052 
4053 		ret = wl1271_init_ap_rates(wl, wlvif);
4054 		if (ret < 0) {
4055 			wl1271_error("AP rate policy change failed %d", ret);
4056 			goto out;
4057 		}
4058 
4059 		ret = wl1271_ap_init_templates(wl, vif);
4060 		if (ret < 0)
4061 			goto out;
4062 
4063 		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4064 		if (ret < 0)
4065 			goto out;
4066 
4067 		ret = wlcore_set_beacon_template(wl, vif, true);
4068 		if (ret < 0)
4069 			goto out;
4070 	}
4071 
4072 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4073 	if (ret < 0)
4074 		goto out;
4075 
4076 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4077 		if (bss_conf->enable_beacon) {
4078 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4079 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4080 				if (ret < 0)
4081 					goto out;
4082 
4083 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4084 				if (ret < 0)
4085 					goto out;
4086 
4087 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4088 				wl1271_debug(DEBUG_AP, "started AP");
4089 			}
4090 		} else {
4091 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4092 				/*
4093 				 * The AP might be in ROC if we have just
4094 				 * sent an auth reply; handle it.
4095 				 */
4096 				if (test_bit(wlvif->role_id, wl->roc_map))
4097 					wl12xx_croc(wl, wlvif->role_id);
4098 
4099 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4100 				if (ret < 0)
4101 					goto out;
4102 
4103 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4104 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4105 					  &wlvif->flags);
4106 				wl1271_debug(DEBUG_AP, "stopped AP");
4107 			}
4108 		}
4109 	}
4110 
4111 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4112 	if (ret < 0)
4113 		goto out;
4114 
4115 	/* Handle HT information change */
4116 	if ((changed & BSS_CHANGED_HT) &&
4117 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4118 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4119 					bss_conf->ht_operation_mode);
4120 		if (ret < 0) {
4121 			wl1271_warning("Set ht information failed %d", ret);
4122 			goto out;
4123 		}
4124 	}
4125 
4126 out:
4127 	return;
4128 }
4129 
4130 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4131 			    struct ieee80211_bss_conf *bss_conf,
4132 			    u32 sta_rate_set)
4133 {
4134 	u32 rates;
4135 	int ret;
4136 
4137 	wl1271_debug(DEBUG_MAC80211,
4138 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4139 	     bss_conf->bssid, bss_conf->aid,
4140 	     bss_conf->beacon_int,
4141 	     bss_conf->basic_rates, sta_rate_set);
4142 
4143 	wlvif->beacon_int = bss_conf->beacon_int;
4144 	rates = bss_conf->basic_rates;
4145 	wlvif->basic_rate_set =
4146 		wl1271_tx_enabled_rates_get(wl, rates,
4147 					    wlvif->band);
4148 	wlvif->basic_rate =
4149 		wl1271_tx_min_rate_get(wl,
4150 				       wlvif->basic_rate_set);
4151 
4152 	if (sta_rate_set)
4153 		wlvif->rate_set =
4154 			wl1271_tx_enabled_rates_get(wl,
4155 						sta_rate_set,
4156 						wlvif->band);
4157 
4158 	/* we only support sched_scan while not connected */
4159 	if (wl->sched_vif == wlvif)
4160 		wl->ops->sched_scan_stop(wl, wlvif);
4161 
4162 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4163 	if (ret < 0)
4164 		return ret;
4165 
4166 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4167 	if (ret < 0)
4168 		return ret;
4169 
4170 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4171 	if (ret < 0)
4172 		return ret;
4173 
4174 	wlcore_set_ssid(wl, wlvif);
4175 
4176 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4177 
4178 	return 0;
4179 }
4180 
4181 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4182 {
4183 	int ret;
4184 
4185 	/* revert to minimum rates for the current band */
4186 	wl1271_set_band_rate(wl, wlvif);
4187 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4188 
4189 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4190 	if (ret < 0)
4191 		return ret;
4192 
4193 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4194 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4195 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4196 		if (ret < 0)
4197 			return ret;
4198 	}
4199 
4200 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4201 	return 0;
4202 }
4203 /* STA/IBSS mode changes */
4204 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4205 					struct ieee80211_vif *vif,
4206 					struct ieee80211_bss_conf *bss_conf,
4207 					u32 changed)
4208 {
4209 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4210 	bool do_join = false;
4211 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4212 	bool ibss_joined = false;
4213 	u32 sta_rate_set = 0;
4214 	int ret;
4215 	struct ieee80211_sta *sta;
4216 	bool sta_exists = false;
4217 	struct ieee80211_sta_ht_cap sta_ht_cap;
4218 
4219 	if (is_ibss) {
4220 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4221 						     changed);
4222 		if (ret < 0)
4223 			goto out;
4224 	}
4225 
4226 	if (changed & BSS_CHANGED_IBSS) {
4227 		if (bss_conf->ibss_joined) {
4228 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4229 			ibss_joined = true;
4230 		} else {
4231 			wlcore_unset_assoc(wl, wlvif);
4232 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4233 		}
4234 	}
4235 
4236 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4237 		do_join = true;
4238 
4239 	/* Need to update the SSID (for filtering etc) */
4240 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4241 		do_join = true;
4242 
4243 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4244 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4245 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4246 
4247 		do_join = true;
4248 	}
4249 
4250 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4251 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4252 
4253 	if (changed & BSS_CHANGED_CQM) {
4254 		bool enable = false;
4255 		if (bss_conf->cqm_rssi_thold)
4256 			enable = true;
4257 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4258 						  bss_conf->cqm_rssi_thold,
4259 						  bss_conf->cqm_rssi_hyst);
4260 		if (ret < 0)
4261 			goto out;
4262 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4263 	}
4264 
4265 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4266 		       BSS_CHANGED_ASSOC)) {
4267 		rcu_read_lock();
4268 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4269 		if (sta) {
4270 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4271 
4272 			/* save the supp_rates of the ap */
4273 			sta_rate_set = sta->supp_rates[wlvif->band];
4274 			if (sta->ht_cap.ht_supported)
4275 				sta_rate_set |=
4276 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4277 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4278 			sta_ht_cap = sta->ht_cap;
4279 			sta_exists = true;
4280 		}
4281 
4282 		rcu_read_unlock();
4283 	}
4284 
4285 	if (changed & BSS_CHANGED_BSSID) {
4286 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4287 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4288 					       sta_rate_set);
4289 			if (ret < 0)
4290 				goto out;
4291 
4292 			/* Need to update the BSSID (for filtering etc) */
4293 			do_join = true;
4294 		} else {
4295 			ret = wlcore_clear_bssid(wl, wlvif);
4296 			if (ret < 0)
4297 				goto out;
4298 		}
4299 	}
4300 
4301 	if (changed & BSS_CHANGED_IBSS) {
4302 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4303 			     bss_conf->ibss_joined);
4304 
4305 		if (bss_conf->ibss_joined) {
4306 			u32 rates = bss_conf->basic_rates;
4307 			wlvif->basic_rate_set =
4308 				wl1271_tx_enabled_rates_get(wl, rates,
4309 							    wlvif->band);
4310 			wlvif->basic_rate =
4311 				wl1271_tx_min_rate_get(wl,
4312 						       wlvif->basic_rate_set);
4313 
4314 			/* by default, use 11b + OFDM rates */
4315 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4316 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4317 			if (ret < 0)
4318 				goto out;
4319 		}
4320 	}
4321 
4322 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4323 		/* enable beacon filtering */
4324 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4325 		if (ret < 0)
4326 			goto out;
4327 	}
4328 
4329 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4330 	if (ret < 0)
4331 		goto out;
4332 
4333 	if (do_join) {
4334 		ret = wlcore_join(wl, wlvif);
4335 		if (ret < 0) {
4336 			wl1271_warning("cmd join failed %d", ret);
4337 			goto out;
4338 		}
4339 	}
4340 
4341 	if (changed & BSS_CHANGED_ASSOC) {
4342 		if (bss_conf->assoc) {
4343 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4344 					       sta_rate_set);
4345 			if (ret < 0)
4346 				goto out;
4347 
4348 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4349 				wl12xx_set_authorized(wl, wlvif);
4350 		} else {
4351 			wlcore_unset_assoc(wl, wlvif);
4352 		}
4353 	}
4354 
4355 	if (changed & BSS_CHANGED_PS) {
4356 		if ((bss_conf->ps) &&
4357 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4358 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4359 			int ps_mode;
4360 			char *ps_mode_str;
4361 
4362 			if (wl->conf.conn.forced_ps) {
4363 				ps_mode = STATION_POWER_SAVE_MODE;
4364 				ps_mode_str = "forced";
4365 			} else {
4366 				ps_mode = STATION_AUTO_PS_MODE;
4367 				ps_mode_str = "auto";
4368 			}
4369 
4370 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4371 
4372 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4373 			if (ret < 0)
4374 				wl1271_warning("enter %s ps failed %d",
4375 					       ps_mode_str, ret);
4376 		} else if (!bss_conf->ps &&
4377 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4378 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4379 
4380 			ret = wl1271_ps_set_mode(wl, wlvif,
4381 						 STATION_ACTIVE_MODE);
4382 			if (ret < 0)
4383 				wl1271_warning("exit auto ps failed %d", ret);
4384 		}
4385 	}
4386 
4387 	/* Handle new association with HT. Do this after join. */
4388 	if (sta_exists) {
4389 		bool enabled =
4390 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4391 
4392 		ret = wlcore_hw_set_peer_cap(wl,
4393 					     &sta_ht_cap,
4394 					     enabled,
4395 					     wlvif->rate_set,
4396 					     wlvif->sta.hlid);
4397 		if (ret < 0) {
4398 			wl1271_warning("Set ht cap failed %d", ret);
4399 			goto out;
4401 		}
4402 
4403 		if (enabled) {
4404 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4405 						bss_conf->ht_operation_mode);
4406 			if (ret < 0) {
4407 				wl1271_warning("Set ht information failed %d",
4408 					       ret);
4409 				goto out;
4410 			}
4411 		}
4412 	}
4413 
4414 	/* Handle arp filtering. Done after join. */
4415 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4416 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4417 		__be32 addr = bss_conf->arp_addr_list[0];
4418 		wlvif->sta.qos = bss_conf->qos;
4419 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4420 
4421 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4422 			wlvif->ip_addr = addr;
4423 			/*
4424 			 * The template should have been configured only upon
4425 			 * association. However, it seems that the correct IP
4426 			 * isn't being set (when sending), so we have to
4427 			 * reconfigure the template upon every IP change.
4428 			 */
4429 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4430 			if (ret < 0) {
4431 				wl1271_warning("build arp rsp failed: %d", ret);
4432 				goto out;
4433 			}
4434 
4435 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4436 				(ACX_ARP_FILTER_ARP_FILTERING |
4437 				 ACX_ARP_FILTER_AUTO_ARP),
4438 				addr);
4439 		} else {
4440 			wlvif->ip_addr = 0;
4441 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4442 		}
4443 
4444 		if (ret < 0)
4445 			goto out;
4446 	}
4447 
4448 out:
4449 	return;
4450 }
4451 
4452 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4453 				       struct ieee80211_vif *vif,
4454 				       struct ieee80211_bss_conf *bss_conf,
4455 				       u32 changed)
4456 {
4457 	struct wl1271 *wl = hw->priv;
4458 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4459 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4460 	int ret;
4461 
4462 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4463 		     wlvif->role_id, (int)changed);
4464 
4465 	/*
4466 	 * make sure to cancel pending disconnections if our association
4467 	 * state changed
4468 	 */
4469 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4470 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4471 
4472 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4473 	    !bss_conf->enable_beacon)
4474 		wl1271_tx_flush(wl);
4475 
4476 	mutex_lock(&wl->mutex);
4477 
4478 	if (unlikely(wl->state != WLCORE_STATE_ON))
4479 		goto out;
4480 
4481 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4482 		goto out;
4483 
4484 	ret = wl1271_ps_elp_wakeup(wl);
4485 	if (ret < 0)
4486 		goto out;
4487 
4488 	if ((changed & BSS_CHANGED_TXPOWER) &&
4489 	    bss_conf->txpower != wlvif->power_level) {
4490 
4491 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4492 		if (ret < 0)
4493 			goto out;
4494 
4495 		wlvif->power_level = bss_conf->txpower;
4496 	}
4497 
4498 	if (is_ap)
4499 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4500 	else
4501 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4502 
4503 	wl1271_ps_elp_sleep(wl);
4504 
4505 out:
4506 	mutex_unlock(&wl->mutex);
4507 }
4508 
4509 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4510 				 struct ieee80211_chanctx_conf *ctx)
4511 {
4512 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4513 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4514 		     cfg80211_get_chandef_type(&ctx->def));
4515 	return 0;
4516 }
4517 
4518 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4519 				     struct ieee80211_chanctx_conf *ctx)
4520 {
4521 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4522 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4523 		     cfg80211_get_chandef_type(&ctx->def));
4524 }
4525 
4526 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4527 				     struct ieee80211_chanctx_conf *ctx,
4528 				     u32 changed)
4529 {
4530 	wl1271_debug(DEBUG_MAC80211,
4531 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4532 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4533 		     cfg80211_get_chandef_type(&ctx->def), changed);
4534 }
4535 
4536 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4537 					struct ieee80211_vif *vif,
4538 					struct ieee80211_chanctx_conf *ctx)
4539 {
4540 	struct wl1271 *wl = hw->priv;
4541 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4542 	int channel = ieee80211_frequency_to_channel(
4543 		ctx->def.chan->center_freq);
4544 
4545 	wl1271_debug(DEBUG_MAC80211,
4546 		     "mac80211 assign chanctx (role %d) %d (type %d)",
4547 		     wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4548 
4549 	mutex_lock(&wl->mutex);
4550 
4551 	wlvif->band = ctx->def.chan->band;
4552 	wlvif->channel = channel;
4553 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4554 
4555 	/* update default rates according to the band */
4556 	wl1271_set_band_rate(wl, wlvif);
4557 
4558 	mutex_unlock(&wl->mutex);
4559 
4560 	return 0;
4561 }
4562 
4563 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4564 					   struct ieee80211_vif *vif,
4565 					   struct ieee80211_chanctx_conf *ctx)
4566 {
4567 	struct wl1271 *wl = hw->priv;
4568 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4569 
4570 	wl1271_debug(DEBUG_MAC80211,
4571 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4572 		     wlvif->role_id,
4573 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4574 		     cfg80211_get_chandef_type(&ctx->def));
4575 
4576 	wl1271_tx_flush(wl);
4577 }
4578 
4579 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4580 			     struct ieee80211_vif *vif, u16 queue,
4581 			     const struct ieee80211_tx_queue_params *params)
4582 {
4583 	struct wl1271 *wl = hw->priv;
4584 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4585 	u8 ps_scheme;
4586 	int ret = 0;
4587 
4588 	mutex_lock(&wl->mutex);
4589 
4590 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4591 
4592 	if (params->uapsd)
4593 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4594 	else
4595 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4596 
4597 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4598 		goto out;
4599 
4600 	ret = wl1271_ps_elp_wakeup(wl);
4601 	if (ret < 0)
4602 		goto out;
4603 
4604 	/*
4605 	 * mac80211 configures the txop in units of 32us, but the
4606 	 * firmware expects microseconds, hence the << 5 (x32) below
4607 	 */
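	/* e.g. a txop limit of 94 (in 32us units) becomes 94 << 5 = 3008 us */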
4608 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4609 				params->cw_min, params->cw_max,
4610 				params->aifs, params->txop << 5);
4611 	if (ret < 0)
4612 		goto out_sleep;
4613 
4614 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4615 				 CONF_CHANNEL_TYPE_EDCF,
4616 				 wl1271_tx_get_queue(queue),
4617 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4618 				 0, 0);
4619 
4620 out_sleep:
4621 	wl1271_ps_elp_sleep(wl);
4622 
4623 out:
4624 	mutex_unlock(&wl->mutex);
4625 
4626 	return ret;
4627 }
4628 
4629 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4630 			     struct ieee80211_vif *vif)
4631 {
4632 
4633 	struct wl1271 *wl = hw->priv;
4634 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4635 	u64 mactime = ULLONG_MAX;
4636 	int ret;
4637 
4638 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4639 
4640 	mutex_lock(&wl->mutex);
4641 
4642 	if (unlikely(wl->state != WLCORE_STATE_ON))
4643 		goto out;
4644 
4645 	ret = wl1271_ps_elp_wakeup(wl);
4646 	if (ret < 0)
4647 		goto out;
4648 
4649 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4650 	if (ret < 0)
4651 		goto out_sleep;
4652 
4653 out_sleep:
4654 	wl1271_ps_elp_sleep(wl);
4655 
4656 out:
4657 	mutex_unlock(&wl->mutex);
4658 	return mactime;
4659 }
4660 
4661 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4662 				struct survey_info *survey)
4663 {
4664 	struct ieee80211_conf *conf = &hw->conf;
4665 
4666 	if (idx != 0)
4667 		return -ENOENT;
4668 
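	/* no survey statistics are collected; just report the current channel */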
4669 	survey->channel = conf->chandef.chan;
4670 	survey->filled = 0;
4671 	return 0;
4672 }
4673 
4674 static int wl1271_allocate_sta(struct wl1271 *wl,
4675 			     struct wl12xx_vif *wlvif,
4676 			     struct ieee80211_sta *sta)
4677 {
4678 	struct wl1271_station *wl_sta;
4679 	int ret;
4680 
4682 	if (wl->active_sta_count >= wl->max_ap_stations) {
4683 		wl1271_warning("could not allocate HLID - too many stations");
4684 		return -EBUSY;
4685 	}
4686 
4687 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4688 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4689 	if (ret < 0) {
4690 		wl1271_warning("could not allocate HLID - too many links");
4691 		return -EBUSY;
4692 	}
4693 
4694 	/* use the previous security seq, if this is a recovery/resume */
4695 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4696 
4697 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4698 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4699 	wl->active_sta_count++;
4700 	return 0;
4701 }
4702 
4703 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4704 {
4705 	struct wl1271_station *wl_sta;
4706 	struct ieee80211_sta *sta;
4707 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4708 
4709 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4710 		return;
4711 
4712 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4713 	__clear_bit(hlid, &wl->ap_ps_map);
4714 	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4715 
4716 	/*
4717 	 * save the last used PN in the private part of ieee80211_sta,
4718 	 * in case of recovery/suspend
4719 	 */
4720 	rcu_read_lock();
4721 	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
4722 	if (sta) {
4723 		wl_sta = (void *)sta->drv_priv;
4724 		wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
4725 
4726 		/*
4727 		 * increment the initial seq number on recovery to account for
4728 		 * transmitted packets that we haven't yet got in the FW status
4729 		 */
4730 		if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
4731 			wl_sta->total_freed_pkts +=
4732 					WL1271_TX_SQN_POST_RECOVERY_PADDING;
4733 	}
4734 	rcu_read_unlock();
4735 
4736 	wl12xx_free_link(wl, wlvif, &hlid);
4737 	wl->active_sta_count--;
4738 
4739 	/*
4740 	 * rearm the tx watchdog when the last STA is freed - give the FW a
4741 	 * chance to return STA-buffered packets before complaining.
4742 	 */
4743 	if (wl->active_sta_count == 0)
4744 		wl12xx_rearm_tx_watchdog_locked(wl);
4745 }
4746 
4747 static int wl12xx_sta_add(struct wl1271 *wl,
4748 			  struct wl12xx_vif *wlvif,
4749 			  struct ieee80211_sta *sta)
4750 {
4751 	struct wl1271_station *wl_sta;
4752 	int ret = 0;
4753 	u8 hlid;
4754 
4755 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4756 
4757 	ret = wl1271_allocate_sta(wl, wlvif, sta);
4758 	if (ret < 0)
4759 		return ret;
4760 
4761 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4762 	hlid = wl_sta->hlid;
4763 
4764 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4765 	if (ret < 0)
4766 		wl1271_free_sta(wl, wlvif, hlid);
4767 
4768 	return ret;
4769 }
4770 
4771 static int wl12xx_sta_remove(struct wl1271 *wl,
4772 			     struct wl12xx_vif *wlvif,
4773 			     struct ieee80211_sta *sta)
4774 {
4775 	struct wl1271_station *wl_sta;
4776 	int ret = 0, id;
4777 
4778 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4779 
4780 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4781 	id = wl_sta->hlid;
4782 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4783 		return -EINVAL;
4784 
4785 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
4786 	if (ret < 0)
4787 		return ret;
4788 
4789 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4790 	return ret;
4791 }
4792 
4793 static void wlcore_roc_if_possible(struct wl1271 *wl,
4794 				   struct wl12xx_vif *wlvif)
4795 {
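	/* bail out if some role is already on-channel (ROC) */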
4796 	if (find_first_bit(wl->roc_map,
4797 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4798 		return;
4799 
4800 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4801 		return;
4802 
4803 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4804 }
4805 
4806 /*
4807  * when wl_sta is NULL, we treat this call as if coming from a
4808  * pending auth reply.
4809  * wl->mutex must be taken and the FW must be awake when the call
4810  * takes place.
4811  */
4812 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4813 			      struct wl1271_station *wl_sta, bool in_conn)
4814 {
4815 	if (in_conn) {
4816 		if (WARN_ON(wl_sta && wl_sta->in_connection))
4817 			return;
4818 
4819 		if (!wlvif->ap_pending_auth_reply &&
4820 		    !wlvif->inconn_count)
4821 			wlcore_roc_if_possible(wl, wlvif);
4822 
4823 		if (wl_sta) {
4824 			wl_sta->in_connection = true;
4825 			wlvif->inconn_count++;
4826 		} else {
4827 			wlvif->ap_pending_auth_reply = true;
4828 		}
4829 	} else {
4830 		if (wl_sta && !wl_sta->in_connection)
4831 			return;
4832 
4833 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
4834 			return;
4835 
4836 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
4837 			return;
4838 
4839 		if (wl_sta) {
4840 			wl_sta->in_connection = false;
4841 			wlvif->inconn_count--;
4842 		} else {
4843 			wlvif->ap_pending_auth_reply = false;
4844 		}
4845 
4846 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4847 		    test_bit(wlvif->role_id, wl->roc_map))
4848 			wl12xx_croc(wl, wlvif->role_id);
4849 	}
4850 }
4851 
4852 static int wl12xx_update_sta_state(struct wl1271 *wl,
4853 				   struct wl12xx_vif *wlvif,
4854 				   struct ieee80211_sta *sta,
4855 				   enum ieee80211_sta_state old_state,
4856 				   enum ieee80211_sta_state new_state)
4857 {
4858 	struct wl1271_station *wl_sta;
4859 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4860 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4861 	int ret;
4862 
4863 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4864 
4865 	/* Add station (AP mode) */
4866 	if (is_ap &&
4867 	    old_state == IEEE80211_STA_NOTEXIST &&
4868 	    new_state == IEEE80211_STA_NONE) {
4869 		ret = wl12xx_sta_add(wl, wlvif, sta);
4870 		if (ret)
4871 			return ret;
4872 
4873 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4874 	}
4875 
4876 	/* Remove station (AP mode) */
4877 	if (is_ap &&
4878 	    old_state == IEEE80211_STA_NONE &&
4879 	    new_state == IEEE80211_STA_NOTEXIST) {
4880 		/* must not fail */
4881 		wl12xx_sta_remove(wl, wlvif, sta);
4882 
4883 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4884 	}
4885 
4886 	/* Authorize station (AP mode) */
4887 	if (is_ap &&
4888 	    new_state == IEEE80211_STA_AUTHORIZED) {
4889 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4890 		if (ret < 0)
4891 			return ret;
4892 
4893 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4894 						     wl_sta->hlid);
4895 		if (ret)
4896 			return ret;
4897 
4898 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4899 	}
4900 
4901 	/* Authorize station */
4902 	if (is_sta &&
4903 	    new_state == IEEE80211_STA_AUTHORIZED) {
4904 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4905 		ret = wl12xx_set_authorized(wl, wlvif);
4906 		if (ret)
4907 			return ret;
4908 	}
4909 
4910 	if (is_sta &&
4911 	    old_state == IEEE80211_STA_AUTHORIZED &&
4912 	    new_state == IEEE80211_STA_ASSOC) {
4913 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4914 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4915 	}
4916 
4917 	/* clear ROCs on failure or authorization */
4918 	if (is_sta &&
4919 	    (new_state == IEEE80211_STA_AUTHORIZED ||
4920 	     new_state == IEEE80211_STA_NOTEXIST)) {
4921 		if (test_bit(wlvif->role_id, wl->roc_map))
4922 			wl12xx_croc(wl, wlvif->role_id);
4923 	}
4924 
4925 	if (is_sta &&
4926 	    old_state == IEEE80211_STA_NOTEXIST &&
4927 	    new_state == IEEE80211_STA_NONE) {
4928 		if (find_first_bit(wl->roc_map,
4929 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4930 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4931 			wl12xx_roc(wl, wlvif, wlvif->role_id,
4932 				   wlvif->band, wlvif->channel);
4933 		}
4934 	}
4935 	return 0;
4936 }
4937 
4938 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4939 			       struct ieee80211_vif *vif,
4940 			       struct ieee80211_sta *sta,
4941 			       enum ieee80211_sta_state old_state,
4942 			       enum ieee80211_sta_state new_state)
4943 {
4944 	struct wl1271 *wl = hw->priv;
4945 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4946 	int ret;
4947 
4948 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4949 		     sta->aid, old_state, new_state);
4950 
4951 	mutex_lock(&wl->mutex);
4952 
4953 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
4954 		ret = -EBUSY;
4955 		goto out;
4956 	}
4957 
4958 	ret = wl1271_ps_elp_wakeup(wl);
4959 	if (ret < 0)
4960 		goto out;
4961 
4962 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4963 
4964 	wl1271_ps_elp_sleep(wl);
4965 out:
4966 	mutex_unlock(&wl->mutex);
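	/* downward sta state transitions are not allowed to fail, so report success */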
4967 	if (new_state < old_state)
4968 		return 0;
4969 	return ret;
4970 }
4971 
4972 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4973 				  struct ieee80211_vif *vif,
4974 				  enum ieee80211_ampdu_mlme_action action,
4975 				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4976 				  u8 buf_size)
4977 {
4978 	struct wl1271 *wl = hw->priv;
4979 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4980 	int ret;
4981 	u8 hlid, *ba_bitmap;
4982 
4983 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4984 		     tid);
4985 
4986 	/* sanity check - the fields in FW are only 8 bits wide */
4987 	if (WARN_ON(tid > 0xFF))
4988 		return -ENOTSUPP;
4989 
4990 	mutex_lock(&wl->mutex);
4991 
4992 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
4993 		ret = -EAGAIN;
4994 		goto out;
4995 	}
4996 
4997 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4998 		hlid = wlvif->sta.hlid;
4999 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5000 		struct wl1271_station *wl_sta;
5001 
5002 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5003 		hlid = wl_sta->hlid;
5004 	} else {
5005 		ret = -EINVAL;
5006 		goto out;
5007 	}
5008 
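	/* per-link bitmap of TIDs that have an active RX BA session */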
5009 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5010 
5011 	ret = wl1271_ps_elp_wakeup(wl);
5012 	if (ret < 0)
5013 		goto out;
5014 
5015 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5016 		     tid, action);
5017 
5018 	switch (action) {
5019 	case IEEE80211_AMPDU_RX_START:
5020 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5021 			ret = -ENOTSUPP;
5022 			break;
5023 		}
5024 
5025 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5026 			ret = -EBUSY;
5027 			wl1271_error("exceeded max RX BA sessions");
5028 			break;
5029 		}
5030 
5031 		if (*ba_bitmap & BIT(tid)) {
5032 			ret = -EINVAL;
5033 			wl1271_error("cannot enable RX BA session on active "
5034 				     "tid: %d", tid);
5035 			break;
5036 		}
5037 
5038 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5039 							 hlid);
5040 		if (!ret) {
5041 			*ba_bitmap |= BIT(tid);
5042 			wl->ba_rx_session_count++;
5043 		}
5044 		break;
5045 
5046 	case IEEE80211_AMPDU_RX_STOP:
5047 		if (!(*ba_bitmap & BIT(tid))) {
5048 			/*
5049 			 * this happens on reconfig - so only output a debug
5050 			 * message for now, and don't fail the function.
5051 			 */
5052 			wl1271_debug(DEBUG_MAC80211,
5053 				     "no active RX BA session on tid: %d",
5054 				     tid);
5055 			ret = 0;
5056 			break;
5057 		}
5058 
5059 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5060 							 hlid);
5061 		if (!ret) {
5062 			*ba_bitmap &= ~BIT(tid);
5063 			wl->ba_rx_session_count--;
5064 		}
5065 		break;
5066 
5067 	/*
5068 	 * The BA initiator session is managed by the FW independently.
5069 	 * Fall through here on purpose for all TX AMPDU actions.
5070 	 */
5071 	case IEEE80211_AMPDU_TX_START:
5072 	case IEEE80211_AMPDU_TX_STOP_CONT:
5073 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5074 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5075 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5076 		ret = -EINVAL;
5077 		break;
5078 
5079 	default:
5080 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5081 		ret = -EINVAL;
5082 	}
5083 
5084 	wl1271_ps_elp_sleep(wl);
5085 
5086 out:
5087 	mutex_unlock(&wl->mutex);
5088 
5089 	return ret;
5090 }
5091 
5092 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5093 				   struct ieee80211_vif *vif,
5094 				   const struct cfg80211_bitrate_mask *mask)
5095 {
5096 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5097 	struct wl1271 *wl = hw->priv;
5098 	int i, ret = 0;
5099 
5100 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5101 		mask->control[NL80211_BAND_2GHZ].legacy,
5102 		mask->control[NL80211_BAND_5GHZ].legacy);
5103 
5104 	mutex_lock(&wl->mutex);
5105 
5106 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5107 		wlvif->bitrate_masks[i] =
5108 			wl1271_tx_enabled_rates_get(wl,
5109 						    mask->control[i].legacy,
5110 						    i);
5111 
5112 	if (unlikely(wl->state != WLCORE_STATE_ON))
5113 		goto out;
5114 
5115 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5116 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5117 
5118 		ret = wl1271_ps_elp_wakeup(wl);
5119 		if (ret < 0)
5120 			goto out;
5121 
5122 		wl1271_set_band_rate(wl, wlvif);
5123 		wlvif->basic_rate =
5124 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5125 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5126 
5127 		wl1271_ps_elp_sleep(wl);
5128 	}
5129 out:
5130 	mutex_unlock(&wl->mutex);
5131 
5132 	return ret;
5133 }
5134 
5135 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5136 				     struct ieee80211_channel_switch *ch_switch)
5137 {
5138 	struct wl1271 *wl = hw->priv;
5139 	struct wl12xx_vif *wlvif;
5140 	int ret;
5141 
5142 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5143 
5144 	wl1271_tx_flush(wl);
5145 
5146 	mutex_lock(&wl->mutex);
5147 
5148 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5149 		wl12xx_for_each_wlvif_sta(wl, wlvif) {
5150 			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
5151 			ieee80211_chswitch_done(vif, false);
5152 		}
5153 		goto out;
5154 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5155 		goto out;
5156 	}
5157 
5158 	ret = wl1271_ps_elp_wakeup(wl);
5159 	if (ret < 0)
5160 		goto out;
5161 
5162 	/* TODO: change mac80211 to pass vif as param */
5163 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
5164 		unsigned long delay_usec;
5165 
5166 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5167 		if (ret)
5168 			goto out_sleep;
5169 
5170 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5171 
5172 		/* indicate failure 5 seconds after channel switch time */
5173 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5174 			     ch_switch->count;
5175 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5176 				usecs_to_jiffies(delay_usec) +
5177 				msecs_to_jiffies(5000));
5178 	}
5179 
5180 out_sleep:
5181 	wl1271_ps_elp_sleep(wl);
5182 
5183 out:
5184 	mutex_unlock(&wl->mutex);
5185 }
5186 
5187 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5188 			    u32 queues, bool drop)
5189 {
5190 	struct wl1271 *wl = hw->priv;
5191 
5192 	wl1271_tx_flush(wl);
5193 }
5194 
5195 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5196 				       struct ieee80211_vif *vif,
5197 				       struct ieee80211_channel *chan,
5198 				       int duration,
5199 				       enum ieee80211_roc_type type)
5200 {
5201 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5202 	struct wl1271 *wl = hw->priv;
5203 	int channel, ret = 0;
5204 
5205 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5206 
5207 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5208 		     channel, wlvif->role_id);
5209 
5210 	mutex_lock(&wl->mutex);
5211 
5212 	if (unlikely(wl->state != WLCORE_STATE_ON))
5213 		goto out;
5214 
5215 	/* return EBUSY if we can't ROC right now */
5216 	if (WARN_ON(wl->roc_vif ||
5217 		    find_first_bit(wl->roc_map,
5218 				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5219 		ret = -EBUSY;
5220 		goto out;
5221 	}
5222 
5223 	ret = wl1271_ps_elp_wakeup(wl);
5224 	if (ret < 0)
5225 		goto out;
5226 
5227 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5228 	if (ret < 0)
5229 		goto out_sleep;
5230 
5231 	wl->roc_vif = vif;
5232 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5233 				     msecs_to_jiffies(duration));
5234 out_sleep:
5235 	wl1271_ps_elp_sleep(wl);
5236 out:
5237 	mutex_unlock(&wl->mutex);
5238 	return ret;
5239 }
5240 
5241 static int __wlcore_roc_completed(struct wl1271 *wl)
5242 {
5243 	struct wl12xx_vif *wlvif;
5244 	int ret;
5245 
5246 	/* already completed */
5247 	if (unlikely(!wl->roc_vif))
5248 		return 0;
5249 
5250 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5251 
5252 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5253 		return -EBUSY;
5254 
5255 	ret = wl12xx_stop_dev(wl, wlvif);
5256 	if (ret < 0)
5257 		return ret;
5258 
5259 	wl->roc_vif = NULL;
5260 
5261 	return 0;
5262 }
5263 
5264 static int wlcore_roc_completed(struct wl1271 *wl)
5265 {
5266 	int ret;
5267 
5268 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5269 
5270 	mutex_lock(&wl->mutex);
5271 
5272 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5273 		ret = -EBUSY;
5274 		goto out;
5275 	}
5276 
5277 	ret = wl1271_ps_elp_wakeup(wl);
5278 	if (ret < 0)
5279 		goto out;
5280 
5281 	ret = __wlcore_roc_completed(wl);
5282 
5283 	wl1271_ps_elp_sleep(wl);
5284 out:
5285 	mutex_unlock(&wl->mutex);
5286 
5287 	return ret;
5288 }
5289 
5290 static void wlcore_roc_complete_work(struct work_struct *work)
5291 {
5292 	struct delayed_work *dwork;
5293 	struct wl1271 *wl;
5294 	int ret;
5295 
5296 	dwork = container_of(work, struct delayed_work, work);
5297 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5298 
5299 	ret = wlcore_roc_completed(wl);
5300 	if (!ret)
5301 		ieee80211_remain_on_channel_expired(wl->hw);
5302 }
5303 
5304 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5305 {
5306 	struct wl1271 *wl = hw->priv;
5307 
5308 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5309 
5310 	/* TODO: per-vif */
5311 	wl1271_tx_flush(wl);
5312 
5313 	/*
5314 	 * we can't just flush_work here, because it might deadlock
5315 	 * (as we might get called from the same workqueue)
5316 	 */
5317 	cancel_delayed_work_sync(&wl->roc_complete_work);
5318 	wlcore_roc_completed(wl);
5319 
5320 	return 0;
5321 }
5322 
5323 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5324 				    struct ieee80211_vif *vif,
5325 				    struct ieee80211_sta *sta,
5326 				    u32 changed)
5327 {
5328 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5329 	struct wl1271 *wl = hw->priv;
5330 
5331 	wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5332 }
5333 
5334 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5335 			       struct ieee80211_vif *vif,
5336 			       struct ieee80211_sta *sta,
5337 			       s8 *rssi_dbm)
5338 {
5339 	struct wl1271 *wl = hw->priv;
5340 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5341 	int ret = 0;
5342 
5343 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5344 
5345 	mutex_lock(&wl->mutex);
5346 
5347 	if (unlikely(wl->state != WLCORE_STATE_ON))
5348 		goto out;
5349 
5350 	ret = wl1271_ps_elp_wakeup(wl);
5351 	if (ret < 0)
5352 		goto out_sleep;
5353 
5354 	ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5355 	if (ret < 0)
5356 		goto out_sleep;
5357 
5358 out_sleep:
5359 	wl1271_ps_elp_sleep(wl);
5360 
5361 out:
5362 	mutex_unlock(&wl->mutex);
5363 
5364 	return ret;
5365 }
5366 
5367 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5368 {
5369 	struct wl1271 *wl = hw->priv;
5370 	bool ret = false;
5371 
5372 	mutex_lock(&wl->mutex);
5373 
5374 	if (unlikely(wl->state != WLCORE_STATE_ON))
5375 		goto out;
5376 
5377 	/* packets are considered pending if in the TX queue or the FW */
5378 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5379 out:
5380 	mutex_unlock(&wl->mutex);
5381 
5382 	return ret;
5383 }
5384 
5385 /* can't be const, mac80211 writes to this */
5386 static struct ieee80211_rate wl1271_rates[] = {
5387 	{ .bitrate = 10,
5388 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5389 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5390 	{ .bitrate = 20,
5391 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5392 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5393 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5394 	{ .bitrate = 55,
5395 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5396 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5397 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5398 	{ .bitrate = 110,
5399 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5400 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5401 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5402 	{ .bitrate = 60,
5403 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5404 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5405 	{ .bitrate = 90,
5406 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5407 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5408 	{ .bitrate = 120,
5409 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5410 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5411 	{ .bitrate = 180,
5412 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5413 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5414 	{ .bitrate = 240,
5415 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5416 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5417 	{ .bitrate = 360,
5418 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5419 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5420 	{ .bitrate = 480,
5421 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5422 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5423 	{ .bitrate = 540,
5424 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5425 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5426 };
5427 
5428 /* can't be const, mac80211 writes to this */
5429 static struct ieee80211_channel wl1271_channels[] = {
5430 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5431 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5432 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5433 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5434 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5435 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5436 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5437 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5438 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5439 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5440 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5441 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5442 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5443 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5444 };
5445 
5446 /* can't be const, mac80211 writes to this */
5447 static struct ieee80211_supported_band wl1271_band_2ghz = {
5448 	.channels = wl1271_channels,
5449 	.n_channels = ARRAY_SIZE(wl1271_channels),
5450 	.bitrates = wl1271_rates,
5451 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5452 };
5453 
5454 /* 5 GHz data rates for WL1273 */
5455 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5456 	{ .bitrate = 60,
5457 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5458 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5459 	{ .bitrate = 90,
5460 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5461 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5462 	{ .bitrate = 120,
5463 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5464 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5465 	{ .bitrate = 180,
5466 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5467 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5468 	{ .bitrate = 240,
5469 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5470 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5471 	{ .bitrate = 360,
5472 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5473 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5474 	{ .bitrate = 480,
5475 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5476 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5477 	{ .bitrate = 540,
5478 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5479 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5480 };
5481 
5482 /* 5 GHz band channels for WL1273 */
5483 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5484 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5485 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5486 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5487 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5488 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5489 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5490 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5491 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5492 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5493 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5494 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5495 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5496 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5497 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5498 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5499 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5500 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5501 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5502 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5503 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5504 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5505 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5506 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5507 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5508 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5509 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5510 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5511 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5512 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5513 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5514 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5515 };
5516 
5517 static struct ieee80211_supported_band wl1271_band_5ghz = {
5518 	.channels = wl1271_channels_5ghz,
5519 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5520 	.bitrates = wl1271_rates_5ghz,
5521 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5522 };
5523 
5524 static const struct ieee80211_ops wl1271_ops = {
5525 	.start = wl1271_op_start,
5526 	.stop = wlcore_op_stop,
5527 	.add_interface = wl1271_op_add_interface,
5528 	.remove_interface = wl1271_op_remove_interface,
5529 	.change_interface = wl12xx_op_change_interface,
5530 #ifdef CONFIG_PM
5531 	.suspend = wl1271_op_suspend,
5532 	.resume = wl1271_op_resume,
5533 #endif
5534 	.config = wl1271_op_config,
5535 	.prepare_multicast = wl1271_op_prepare_multicast,
5536 	.configure_filter = wl1271_op_configure_filter,
5537 	.tx = wl1271_op_tx,
5538 	.set_key = wlcore_op_set_key,
5539 	.hw_scan = wl1271_op_hw_scan,
5540 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5541 	.sched_scan_start = wl1271_op_sched_scan_start,
5542 	.sched_scan_stop = wl1271_op_sched_scan_stop,
5543 	.bss_info_changed = wl1271_op_bss_info_changed,
5544 	.set_frag_threshold = wl1271_op_set_frag_threshold,
5545 	.set_rts_threshold = wl1271_op_set_rts_threshold,
5546 	.conf_tx = wl1271_op_conf_tx,
5547 	.get_tsf = wl1271_op_get_tsf,
5548 	.get_survey = wl1271_op_get_survey,
5549 	.sta_state = wl12xx_op_sta_state,
5550 	.ampdu_action = wl1271_op_ampdu_action,
5551 	.tx_frames_pending = wl1271_tx_frames_pending,
5552 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5553 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
5554 	.channel_switch = wl12xx_op_channel_switch,
5555 	.flush = wlcore_op_flush,
5556 	.remain_on_channel = wlcore_op_remain_on_channel,
5557 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5558 	.add_chanctx = wlcore_op_add_chanctx,
5559 	.remove_chanctx = wlcore_op_remove_chanctx,
5560 	.change_chanctx = wlcore_op_change_chanctx,
5561 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5562 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5563 	.sta_rc_update = wlcore_op_sta_rc_update,
5564 	.get_rssi = wlcore_op_get_rssi,
5565 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5566 };
5567 
5568 
5569 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5570 {
5571 	u8 idx;
5572 
5573 	BUG_ON(band >= 2);
5574 
5575 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5576 		wl1271_error("Illegal RX rate from HW: %d", rate);
5577 		return 0;
5578 	}
5579 
5580 	idx = wl->band_rate_to_idx[band][rate];
5581 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5582 		wl1271_error("Unsupported RX rate from HW: %d", rate);
5583 		return 0;
5584 	}
5585 
5586 	return idx;
5587 }
5588 
5589 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5590 {
5591 	int i;
5592 
5593 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5594 		     oui, nic);
5595 
5596 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5597 		wl1271_warning("NIC part of the MAC address wraps around!");
5598 
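	/* fill each address from the 24-bit OUI and 24-bit NIC parts,
	 * incrementing the NIC part for every additional address */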
5599 	for (i = 0; i < wl->num_mac_addr; i++) {
5600 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
5601 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
5602 		wl->addresses[i].addr[2] = (u8) oui;
5603 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
5604 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
5605 		wl->addresses[i].addr[5] = (u8) nic;
5606 		nic++;
5607 	}
5608 
5609 	/* at most, we may be one address short */
5610 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5611 
5612 	/*
5613 	 * turn on the LAA bit in the first address and use it as
5614 	 * the last address.
5615 	 */
5616 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5617 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5618 		memcpy(&wl->addresses[idx], &wl->addresses[0],
5619 		       sizeof(wl->addresses[0]));
5620 		/* LAA bit */
5621 		wl->addresses[idx].addr[2] |= BIT(1);
5622 	}
5623 
5624 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5625 	wl->hw->wiphy->addresses = wl->addresses;
5626 }
5627 
5628 static int wl12xx_get_hw_info(struct wl1271 *wl)
5629 {
5630 	int ret;
5631 
5632 	ret = wl12xx_set_power_on(wl);
5633 	if (ret < 0)
5634 		return ret;
5635 
5636 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5637 	if (ret < 0)
5638 		goto out;
5639 
5640 	wl->fuse_oui_addr = 0;
5641 	wl->fuse_nic_addr = 0;
5642 
5643 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5644 	if (ret < 0)
5645 		goto out;
5646 
5647 	if (wl->ops->get_mac)
5648 		ret = wl->ops->get_mac(wl);
5649 
5650 out:
5651 	wl1271_power_off(wl);
5652 	return ret;
5653 }
5654 
5655 static int wl1271_register_hw(struct wl1271 *wl)
5656 {
5657 	int ret;
5658 	u32 oui_addr = 0, nic_addr = 0;
5659 
5660 	if (wl->mac80211_registered)
5661 		return 0;
5662 
5663 	if (wl->nvs_len >= 12) {
5664 		/* NOTE: The wl->nvs->nvs element must be first; to
5665 		 * simplify the casting, we assume it is at the
5666 		 * beginning of the wl->nvs structure.
5667 		 */
5668 		u8 *nvs_ptr = (u8 *)wl->nvs;
5669 
5670 		oui_addr =
5671 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5672 		nic_addr =
5673 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5674 	}
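	/*
	 * Hypothetical example: with nvs_ptr[11], nvs_ptr[10], nvs_ptr[6] =
	 * 0x00, 0x12, 0x34 and nvs_ptr[5], nvs_ptr[4], nvs_ptr[3] =
	 * 0x56, 0x78, 0x9a, the derived base MAC address would be
	 * 00:12:34:56:78:9a.
	 */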
5675 
5676 	/* if the MAC address is zeroed in the NVS derive from fuse */
5677 	if (oui_addr == 0 && nic_addr == 0) {
5678 		oui_addr = wl->fuse_oui_addr;
5679 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
5680 		nic_addr = wl->fuse_nic_addr + 1;
5681 	}
5682 
5683 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5684 
5685 	ret = ieee80211_register_hw(wl->hw);
5686 	if (ret < 0) {
5687 		wl1271_error("unable to register mac80211 hw: %d", ret);
5688 		goto out;
5689 	}
5690 
5691 	wl->mac80211_registered = true;
5692 
5693 	wl1271_debugfs_init(wl);
5694 
5695 	wl1271_notice("loaded");
5696 
5697 out:
5698 	return ret;
5699 }
5700 
5701 static void wl1271_unregister_hw(struct wl1271 *wl)
5702 {
5703 	if (wl->plt)
5704 		wl1271_plt_stop(wl);
5705 
5706 	ieee80211_unregister_hw(wl->hw);
5707 	wl->mac80211_registered = false;
5708 
5709 }
5710 
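/*
 * Fill in the ieee80211_hw and wiphy capabilities advertised to
 * mac80211: supported ciphers, interface modes, scan limits, per-band
 * channel and HT data, Tx queue layout and probe-response offload.
 */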
5711 static int wl1271_init_ieee80211(struct wl1271 *wl)
5712 {
5713 	int i;
5714 	static const u32 cipher_suites[] = {
5715 		WLAN_CIPHER_SUITE_WEP40,
5716 		WLAN_CIPHER_SUITE_WEP104,
5717 		WLAN_CIPHER_SUITE_TKIP,
5718 		WLAN_CIPHER_SUITE_CCMP,
5719 		WL1271_CIPHER_SUITE_GEM,
5720 	};
5721 
5722 	/* The tx descriptor buffer */
5723 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5724 
5725 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5726 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5727 
5728 	/* unit: us */
5729 	/* FIXME: find a proper value */
5730 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5731 
5732 	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5733 		IEEE80211_HW_SUPPORTS_PS |
5734 		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5735 		IEEE80211_HW_SUPPORTS_UAPSD |
5736 		IEEE80211_HW_HAS_RATE_CONTROL |
5737 		IEEE80211_HW_CONNECTION_MONITOR |
5738 		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5739 		IEEE80211_HW_SPECTRUM_MGMT |
5740 		IEEE80211_HW_AP_LINK_PS |
5741 		IEEE80211_HW_AMPDU_AGGREGATION |
5742 		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5743 		IEEE80211_HW_QUEUE_CONTROL |
5744 		IEEE80211_HW_CHANCTX_STA_CSA;
5745 
5746 	wl->hw->wiphy->cipher_suites = cipher_suites;
5747 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5748 
5749 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5750 		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5751 		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5752 	wl->hw->wiphy->max_scan_ssids = 1;
5753 	wl->hw->wiphy->max_sched_scan_ssids = 16;
5754 	wl->hw->wiphy->max_match_sets = 16;
5755 	/*
5756 	 * The maximum length of IEs in a scan probe-request template is
5757 	 * the maximum possible template size minus the size of the
5758 	 * template's IEEE 802.11 header
5759 	 */
5760 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5761 			sizeof(struct ieee80211_header);
5762 
5763 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5764 		sizeof(struct ieee80211_header);
5765 
5766 	wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5767 
5768 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5769 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5770 				WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
5771 
5772 	/* make sure all our channels fit in the scanned_ch bitmask */
5773 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5774 		     ARRAY_SIZE(wl1271_channels_5ghz) >
5775 		     WL1271_MAX_CHANNELS);
5776 	/*
5777 	 * clear channel flags from the previous usage
5778 	 * and restore max_power & max_antenna_gain values.
5779 	 */
5780 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5781 		wl1271_band_2ghz.channels[i].flags = 0;
5782 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5783 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5784 	}
5785 
5786 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5787 		wl1271_band_5ghz.channels[i].flags = 0;
5788 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5789 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5790 	}
5791 
5792 	/*
5793 	 * We keep local copies of the band structs because we need to
5794 	 * modify them on a per-device basis.
5795 	 */
5796 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5797 	       sizeof(wl1271_band_2ghz));
5798 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5799 	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
5800 	       sizeof(*wl->ht_cap));
5801 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5802 	       sizeof(wl1271_band_5ghz));
5803 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5804 	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
5805 	       sizeof(*wl->ht_cap));
5806 
5807 	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5808 		&wl->bands[IEEE80211_BAND_2GHZ];
5809 	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5810 		&wl->bands[IEEE80211_BAND_5GHZ];
5811 
5812 	/*
5813 	 * allow 4 Tx queues per supported mac address, plus 1 CAB queue
5814 	 * per mac and one global off-channel Tx queue
5815 	 */
5816 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
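	/*
	 * For example, assuming NUM_TX_QUEUES is 4 (matching the comment
	 * above) and WLCORE_NUM_MAC_ADDRESSES is 3, this yields
	 * (4 + 1) * 3 + 1 = 16 hw queues, the last of which is reserved
	 * for off-channel Tx just below.
	 */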
5817 
5818 	/* the last queue is the offchannel queue */
5819 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5820 	wl->hw->max_rates = 1;
5821 
5822 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5823 
5824 	/* the FW answers probe-requests in AP-mode */
5825 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5826 	wl->hw->wiphy->probe_resp_offload =
5827 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5828 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5829 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5830 
5831 	/* allowed interface combinations */
5832 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
5833 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
5834 
5835 	SET_IEEE80211_DEV(wl->hw, wl->dev);
5836 
5837 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
5838 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5839 
5840 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5841 
5842 	return 0;
5843 }
5844 
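/*
 * Allocate the ieee80211_hw structure together with the wlcore state,
 * the lower-driver private area, work items, the aggregation buffer,
 * the dummy packet, the FW log page and the event mailbox.  On failure
 * everything allocated so far is released and an ERR_PTR is returned.
 */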
5845 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5846 				     u32 mbox_size)
5847 {
5848 	struct ieee80211_hw *hw;
5849 	struct wl1271 *wl;
5850 	int i, j, ret;
5851 	unsigned int order;
5852 
5853 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5854 	if (!hw) {
5855 		wl1271_error("could not alloc ieee80211_hw");
5856 		ret = -ENOMEM;
5857 		goto err_hw_alloc;
5858 	}
5859 
5860 	wl = hw->priv;
5861 	memset(wl, 0, sizeof(*wl));
5862 
5863 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
5864 	if (!wl->priv) {
5865 		wl1271_error("could not alloc wl priv");
5866 		ret = -ENOMEM;
5867 		goto err_priv_alloc;
5868 	}
5869 
5870 	INIT_LIST_HEAD(&wl->wlvif_list);
5871 
5872 	wl->hw = hw;
5873 
5874 	/*
5875 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
5876 	 * We don't allocate any additional resources here, so that's fine.
5877 	 */
5878 	for (i = 0; i < NUM_TX_QUEUES; i++)
5879 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
5880 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
5881 
5882 	skb_queue_head_init(&wl->deferred_rx_queue);
5883 	skb_queue_head_init(&wl->deferred_tx_queue);
5884 
5885 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5886 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5887 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
5888 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5889 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5890 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5891 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5892 
5893 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5894 	if (!wl->freezable_wq) {
5895 		ret = -ENOMEM;
5896 		goto err_hw;
5897 	}
5898 
5899 	wl->channel = 0;
5900 	wl->rx_counter = 0;
5901 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5902 	wl->band = IEEE80211_BAND_2GHZ;
5903 	wl->channel_type = NL80211_CHAN_NO_HT;
5904 	wl->flags = 0;
5905 	wl->sg_enabled = true;
5906 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
5907 	wl->recovery_count = 0;
5908 	wl->hw_pg_ver = -1;
5909 	wl->ap_ps_map = 0;
5910 	wl->ap_fw_ps_map = 0;
5911 	wl->quirks = 0;
5912 	wl->platform_quirks = 0;
5913 	wl->system_hlid = WL12XX_SYSTEM_HLID;
5914 	wl->active_sta_count = 0;
5915 	wl->active_link_count = 0;
5916 	wl->fwlog_size = 0;
5917 	init_waitqueue_head(&wl->fwlog_waitq);
5918 
5919 	/* The system link is always allocated */
5920 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5921 
5922 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5923 	for (i = 0; i < wl->num_tx_desc; i++)
5924 		wl->tx_frames[i] = NULL;
5925 
5926 	spin_lock_init(&wl->wl_lock);
5927 
5928 	wl->state = WLCORE_STATE_OFF;
5929 	wl->fw_type = WL12XX_FW_TYPE_NONE;
5930 	mutex_init(&wl->mutex);
5931 	mutex_init(&wl->flush_mutex);
5932 	init_completion(&wl->nvs_loading_complete);
5933 
5934 	order = get_order(aggr_buf_size);
5935 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5936 	if (!wl->aggr_buf) {
5937 		ret = -ENOMEM;
5938 		goto err_wq;
5939 	}
5940 	wl->aggr_buf_size = aggr_buf_size;
5941 
5942 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5943 	if (!wl->dummy_packet) {
5944 		ret = -ENOMEM;
5945 		goto err_aggr;
5946 	}
5947 
5948 	/* Allocate one page for the FW log */
5949 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5950 	if (!wl->fwlog) {
5951 		ret = -ENOMEM;
5952 		goto err_dummy_packet;
5953 	}
5954 
5955 	wl->mbox_size = mbox_size;
5956 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5957 	if (!wl->mbox) {
5958 		ret = -ENOMEM;
5959 		goto err_fwlog;
5960 	}
5961 
5962 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5963 	if (!wl->buffer_32) {
5964 		ret = -ENOMEM;
5965 		goto err_mbox;
5966 	}
5967 
5968 	return hw;
5969 
5970 err_mbox:
5971 	kfree(wl->mbox);
5972 
5973 err_fwlog:
5974 	free_page((unsigned long)wl->fwlog);
5975 
5976 err_dummy_packet:
5977 	dev_kfree_skb(wl->dummy_packet);
5978 
5979 err_aggr:
5980 	free_pages((unsigned long)wl->aggr_buf, order);
5981 
5982 err_wq:
5983 	destroy_workqueue(wl->freezable_wq);
5984 
5985 err_hw:
5986 	wl1271_debugfs_exit(wl);
5987 	kfree(wl->priv);
5988 
5989 err_priv_alloc:
5990 	ieee80211_free_hw(hw);
5991 
5992 err_hw_alloc:
5993 
5994 	return ERR_PTR(ret);
5995 }
5996 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5997 
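/*
 * Release everything allocated by wlcore_alloc_hw() (and by later
 * initialization), waking up any blocked FW-log readers first.
 */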
5998 int wlcore_free_hw(struct wl1271 *wl)
5999 {
6000 	/* Unblock any fwlog readers */
6001 	mutex_lock(&wl->mutex);
6002 	wl->fwlog_size = -1;
6003 	wake_up_interruptible_all(&wl->fwlog_waitq);
6004 	mutex_unlock(&wl->mutex);
6005 
6006 	wlcore_sysfs_free(wl);
6007 
6008 	kfree(wl->buffer_32);
6009 	kfree(wl->mbox);
6010 	free_page((unsigned long)wl->fwlog);
6011 	dev_kfree_skb(wl->dummy_packet);
6012 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6013 
6014 	wl1271_debugfs_exit(wl);
6015 
6016 	vfree(wl->fw);
6017 	wl->fw = NULL;
6018 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6019 	kfree(wl->nvs);
6020 	wl->nvs = NULL;
6021 
6022 	kfree(wl->raw_fw_status);
6023 	kfree(wl->fw_status);
6024 	kfree(wl->tx_res_if);
6025 	destroy_workqueue(wl->freezable_wq);
6026 
6027 	kfree(wl->priv);
6028 	ieee80211_free_hw(wl->hw);
6029 
6030 	return 0;
6031 }
6032 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6033 
6034 #ifdef CONFIG_PM
6035 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6036 	.flags = WIPHY_WOWLAN_ANY,
6037 	.n_patterns = WL1271_MAX_RX_FILTERS,
6038 	.pattern_min_len = 1,
6039 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6040 };
6041 #endif
6042 
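/*
 * Trivial hard-IRQ handler used only for edge-triggered interrupts
 * (WL12XX_PLATFORM_QUIRK_EDGE_IRQ): it just kicks the threaded handler,
 * wlcore_irq, which does the real work.
 */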
6043 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6044 {
6045 	return IRQ_WAKE_THREAD;
6046 }
6047 
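/*
 * Completion callback for the asynchronous NVS request issued from
 * wlcore_probe(): copies the NVS image (if any), sets up the lower
 * driver, requests the IRQ, reads the HW info and finally registers
 * the device with mac80211 and sysfs.  nvs_loading_complete is
 * completed on both success and failure so that wlcore_remove() never
 * blocks forever.
 */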
6048 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6049 {
6050 	struct wl1271 *wl = context;
6051 	struct platform_device *pdev = wl->pdev;
6052 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6053 	struct wl12xx_platform_data *pdata = pdev_data->pdata;
6054 	unsigned long irqflags;
6055 	int ret;
6056 	irq_handler_t hardirq_fn = NULL;
6057 
6058 	if (fw) {
6059 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6060 		if (!wl->nvs) {
6061 			wl1271_error("Could not allocate nvs data");
6062 			goto out;
6063 		}
6064 		wl->nvs_len = fw->size;
6065 	} else {
6066 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6067 			     WL12XX_NVS_NAME);
6068 		wl->nvs = NULL;
6069 		wl->nvs_len = 0;
6070 	}
6071 
6072 	ret = wl->ops->setup(wl);
6073 	if (ret < 0)
6074 		goto out_free_nvs;
6075 
6076 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6077 
6078 	/* adjust some runtime configuration parameters */
6079 	wlcore_adjust_conf(wl);
6080 
6081 	wl->irq = platform_get_irq(pdev, 0);
6082 	wl->platform_quirks = pdata->platform_quirks;
6083 	wl->if_ops = pdev_data->if_ops;
6084 
6085 	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6086 		irqflags = IRQF_TRIGGER_RISING;
6087 		hardirq_fn = wlcore_hardirq;
6088 	} else {
6089 		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6090 	}
6091 
6092 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6093 				   irqflags, pdev->name, wl);
6094 	if (ret < 0) {
6095 		wl1271_error("request_irq() failed: %d", ret);
6096 		goto out_free_nvs;
6097 	}
6098 
6099 #ifdef CONFIG_PM
6100 	ret = enable_irq_wake(wl->irq);
6101 	if (!ret) {
6102 		wl->irq_wake_enabled = true;
6103 		device_init_wakeup(wl->dev, 1);
6104 		if (pdata->pwr_in_suspend)
6105 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6106 	}
6107 #endif
6108 	disable_irq(wl->irq);
6109 
6110 	ret = wl12xx_get_hw_info(wl);
6111 	if (ret < 0) {
6112 		wl1271_error("couldn't get hw info");
6113 		goto out_irq;
6114 	}
6115 
6116 	ret = wl->ops->identify_chip(wl);
6117 	if (ret < 0)
6118 		goto out_irq;
6119 
6120 	ret = wl1271_init_ieee80211(wl);
6121 	if (ret)
6122 		goto out_irq;
6123 
6124 	ret = wl1271_register_hw(wl);
6125 	if (ret)
6126 		goto out_irq;
6127 
6128 	ret = wlcore_sysfs_init(wl);
6129 	if (ret)
6130 		goto out_unreg;
6131 
6132 	wl->initialized = true;
6133 	goto out;
6134 
6135 out_unreg:
6136 	wl1271_unregister_hw(wl);
6137 
6138 out_irq:
6139 	free_irq(wl->irq, wl);
6140 
6141 out_free_nvs:
6142 	kfree(wl->nvs);
6143 
6144 out:
6145 	release_firmware(fw);
6146 	complete_all(&wl->nvs_loading_complete);
6147 }
6148 
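/*
 * Entry point called by the lower wl12xx/wl18xx drivers.  Probing is
 * asynchronous: the heavy lifting happens in wlcore_nvs_cb() once the
 * (optional) NVS file has been requested.
 */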
6149 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6150 {
6151 	int ret;
6152 
6153 	if (!wl->ops || !wl->ptable)
6154 		return -EINVAL;
6155 
6156 	wl->dev = &pdev->dev;
6157 	wl->pdev = pdev;
6158 	platform_set_drvdata(pdev, wl);
6159 
6160 	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6161 				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6162 				      wl, wlcore_nvs_cb);
6163 	if (ret < 0) {
6164 		wl1271_error("request_firmware_nowait failed: %d", ret);
6165 		complete_all(&wl->nvs_loading_complete);
6166 	}
6167 
6168 	return ret;
6169 }
6170 EXPORT_SYMBOL_GPL(wlcore_probe);
6171 
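/*
 * Platform-device removal: wait for the asynchronous probe (NVS
 * callback) to finish, then undo the registration, release the IRQ
 * and free the device.
 */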
6172 int wlcore_remove(struct platform_device *pdev)
6173 {
6174 	struct wl1271 *wl = platform_get_drvdata(pdev);
6175 
6176 	wait_for_completion(&wl->nvs_loading_complete);
6177 	if (!wl->initialized)
6178 		return 0;
6179 
6180 	if (wl->irq_wake_enabled) {
6181 		device_init_wakeup(wl->dev, 0);
6182 		disable_irq_wake(wl->irq);
6183 	}
6184 	wl1271_unregister_hw(wl);
6185 	free_irq(wl->irq, wl);
6186 	wlcore_free_hw(wl);
6187 
6188 	return 0;
6189 }
6190 EXPORT_SYMBOL_GPL(wlcore_remove);
6191 
6192 u32 wl12xx_debug_level = DEBUG_NONE;
6193 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6194 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6195 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6196 
6197 module_param_named(fwlog, fwlog_param, charp, 0);
6198 MODULE_PARM_DESC(fwlog,
6199 		 "FW logger options: continuous, ondemand, dbgpins or disable");
6200 
6201 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6202 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6203 
6204 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6205 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6206 
6207 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6208 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6209 
6210 MODULE_LICENSE("GPL");
6211 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6212 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6213 MODULE_FIRMWARE(WL12XX_NVS_NAME);
6214