xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/main.c (revision 089a49b6)
1 
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * version 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20  * 02110-1301 USA
21  *
22  */
23 
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
30 
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43 
44 #define WL1271_BOOT_RETRIES 3
45 
46 static char *fwlog_param;
47 static int bug_on_recovery = -1;
48 static int no_recovery     = -1;
49 
50 static void __wl1271_op_remove_interface(struct wl1271 *wl,
51 					 struct ieee80211_vif *vif,
52 					 bool reset_tx_queues);
53 static void wlcore_op_stop_locked(struct wl1271 *wl);
54 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
55 
56 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
57 {
58 	int ret;
59 
60 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
61 		return -EINVAL;
62 
63 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
64 		return 0;
65 
66 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
67 		return 0;
68 
69 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
70 	if (ret < 0)
71 		return ret;
72 
73 	wl1271_info("Association completed.");
74 	return 0;
75 }
76 
77 static void wl1271_reg_notify(struct wiphy *wiphy,
78 			      struct regulatory_request *request)
79 {
80 	struct ieee80211_supported_band *band;
81 	struct ieee80211_channel *ch;
82 	int i;
83 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
84 	struct wl1271 *wl = hw->priv;
85 
86 	band = wiphy->bands[IEEE80211_BAND_5GHZ];
87 	for (i = 0; i < band->n_channels; i++) {
88 		ch = &band->channels[i];
89 		if (ch->flags & IEEE80211_CHAN_DISABLED)
90 			continue;
91 
92 		if (ch->flags & IEEE80211_CHAN_RADAR)
93 			ch->flags |= IEEE80211_CHAN_NO_IBSS |
94 				     IEEE80211_CHAN_PASSIVE_SCAN;
95 
96 	}
97 
98 	wlcore_regdomain_config(wl);
99 }
100 
101 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
102 				   bool enable)
103 {
104 	int ret = 0;
105 
106 	/* we should hold wl->mutex */
107 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
108 	if (ret < 0)
109 		goto out;
110 
111 	if (enable)
112 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
113 	else
114 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
115 out:
116 	return ret;
117 }
118 
119 /*
120  * this function is being called when the rx_streaming interval
121  * has beed changed or rx_streaming should be disabled
122  */
123 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
124 {
125 	int ret = 0;
126 	int period = wl->conf.rx_streaming.interval;
127 
128 	/* don't reconfigure if rx_streaming is disabled */
129 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
130 		goto out;
131 
132 	/* reconfigure/disable according to new streaming_period */
133 	if (period &&
134 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
135 	    (wl->conf.rx_streaming.always ||
136 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
137 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
138 	else {
139 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
140 		/* don't cancel_work_sync since we might deadlock */
141 		del_timer_sync(&wlvif->rx_streaming_timer);
142 	}
143 out:
144 	return ret;
145 }
146 
147 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
148 {
149 	int ret;
150 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
151 						rx_streaming_enable_work);
152 	struct wl1271 *wl = wlvif->wl;
153 
154 	mutex_lock(&wl->mutex);
155 
156 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
157 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
158 	    (!wl->conf.rx_streaming.always &&
159 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
160 		goto out;
161 
162 	if (!wl->conf.rx_streaming.interval)
163 		goto out;
164 
165 	ret = wl1271_ps_elp_wakeup(wl);
166 	if (ret < 0)
167 		goto out;
168 
169 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
170 	if (ret < 0)
171 		goto out_sleep;
172 
173 	/* stop it after some time of inactivity */
174 	mod_timer(&wlvif->rx_streaming_timer,
175 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
176 
177 out_sleep:
178 	wl1271_ps_elp_sleep(wl);
179 out:
180 	mutex_unlock(&wl->mutex);
181 }
182 
183 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
184 {
185 	int ret;
186 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
187 						rx_streaming_disable_work);
188 	struct wl1271 *wl = wlvif->wl;
189 
190 	mutex_lock(&wl->mutex);
191 
192 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
193 		goto out;
194 
195 	ret = wl1271_ps_elp_wakeup(wl);
196 	if (ret < 0)
197 		goto out;
198 
199 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
200 	if (ret)
201 		goto out_sleep;
202 
203 out_sleep:
204 	wl1271_ps_elp_sleep(wl);
205 out:
206 	mutex_unlock(&wl->mutex);
207 }
208 
209 static void wl1271_rx_streaming_timer(unsigned long data)
210 {
211 	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
212 	struct wl1271 *wl = wlvif->wl;
213 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
214 }
215 
216 /* wl->mutex must be taken */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
218 {
219 	/* if the watchdog is not armed, don't do anything */
220 	if (wl->tx_allocated_blocks == 0)
221 		return;
222 
223 	cancel_delayed_work(&wl->tx_watchdog_work);
224 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
226 }
227 
228 static void wl12xx_tx_watchdog_work(struct work_struct *work)
229 {
230 	struct delayed_work *dwork;
231 	struct wl1271 *wl;
232 
233 	dwork = container_of(work, struct delayed_work, work);
234 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
235 
236 	mutex_lock(&wl->mutex);
237 
238 	if (unlikely(wl->state != WLCORE_STATE_ON))
239 		goto out;
240 
241 	/* Tx went out in the meantime - everything is ok */
242 	if (unlikely(wl->tx_allocated_blocks == 0))
243 		goto out;
244 
245 	/*
246 	 * if a ROC is in progress, we might not have any Tx for a long
247 	 * time (e.g. pending Tx on the non-ROC channels)
248 	 */
249 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
250 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
251 			     wl->conf.tx.tx_watchdog_timeout);
252 		wl12xx_rearm_tx_watchdog_locked(wl);
253 		goto out;
254 	}
255 
256 	/*
257 	 * if a scan is in progress, we might not have any Tx for a long
258 	 * time
259 	 */
260 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
261 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
262 			     wl->conf.tx.tx_watchdog_timeout);
263 		wl12xx_rearm_tx_watchdog_locked(wl);
264 		goto out;
265 	}
266 
267 	/*
268 	* AP might cache a frame for a long time for a sleeping station,
269 	* so rearm the timer if there's an AP interface with stations. If
270 	* Tx is genuinely stuck we will most hopefully discover it when all
271 	* stations are removed due to inactivity.
272 	*/
273 	if (wl->active_sta_count) {
274 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
275 			     " %d stations",
276 			      wl->conf.tx.tx_watchdog_timeout,
277 			      wl->active_sta_count);
278 		wl12xx_rearm_tx_watchdog_locked(wl);
279 		goto out;
280 	}
281 
282 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
283 		     wl->conf.tx.tx_watchdog_timeout);
284 	wl12xx_queue_recovery_work(wl);
285 
286 out:
287 	mutex_unlock(&wl->mutex);
288 }
289 
290 static void wlcore_adjust_conf(struct wl1271 *wl)
291 {
292 	/* Adjust settings according to optional module parameters */
293 
294 	if (fwlog_param) {
295 		if (!strcmp(fwlog_param, "continuous")) {
296 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
297 		} else if (!strcmp(fwlog_param, "ondemand")) {
298 			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
299 		} else if (!strcmp(fwlog_param, "dbgpins")) {
300 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
301 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
302 		} else if (!strcmp(fwlog_param, "disable")) {
303 			wl->conf.fwlog.mem_blocks = 0;
304 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
305 		} else {
306 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
307 		}
308 	}
309 
310 	if (bug_on_recovery != -1)
311 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
312 
313 	if (no_recovery != -1)
314 		wl->conf.recovery.no_recovery = (u8) no_recovery;
315 }
316 
317 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
318 					struct wl12xx_vif *wlvif,
319 					u8 hlid, u8 tx_pkts)
320 {
321 	bool fw_ps;
322 
323 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
324 
325 	/*
326 	 * Wake up from high level PS if the STA is asleep with too little
327 	 * packets in FW or if the STA is awake.
328 	 */
329 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
330 		wl12xx_ps_link_end(wl, wlvif, hlid);
331 
332 	/*
333 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
334 	 * Make an exception if this is the only connected link. In this
335 	 * case FW-memory congestion is less of a problem.
336 	 * Note that a single connected STA means 3 active links, since we must
337 	 * account for the global and broadcast AP links. The "fw_ps" check
338 	 * assures us the third link is a STA connected to the AP. Otherwise
339 	 * the FW would not set the PSM bit.
340 	 */
341 	else if (wl->active_link_count > 3 && fw_ps &&
342 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
343 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
344 }
345 
346 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
347 					   struct wl12xx_vif *wlvif,
348 					   struct wl_fw_status_2 *status)
349 {
350 	u32 cur_fw_ps_map;
351 	u8 hlid;
352 
353 	cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
354 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
355 		wl1271_debug(DEBUG_PSM,
356 			     "link ps prev 0x%x cur 0x%x changed 0x%x",
357 			     wl->ap_fw_ps_map, cur_fw_ps_map,
358 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
359 
360 		wl->ap_fw_ps_map = cur_fw_ps_map;
361 	}
362 
363 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
364 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
365 					    wl->links[hlid].allocated_pkts);
366 }
367 
368 static int wlcore_fw_status(struct wl1271 *wl,
369 			    struct wl_fw_status_1 *status_1,
370 			    struct wl_fw_status_2 *status_2)
371 {
372 	struct wl12xx_vif *wlvif;
373 	struct timespec ts;
374 	u32 old_tx_blk_count = wl->tx_blocks_available;
375 	int avail, freed_blocks;
376 	int i;
377 	size_t status_len;
378 	int ret;
379 	struct wl1271_link *lnk;
380 
381 	status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
382 		sizeof(*status_2) + wl->fw_status_priv_len;
383 
384 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
385 				   status_len, false);
386 	if (ret < 0)
387 		return ret;
388 
389 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
390 		     "drv_rx_counter = %d, tx_results_counter = %d)",
391 		     status_1->intr,
392 		     status_1->fw_rx_counter,
393 		     status_1->drv_rx_counter,
394 		     status_1->tx_results_counter);
395 
396 	for (i = 0; i < NUM_TX_QUEUES; i++) {
397 		/* prevent wrap-around in freed-packets counter */
398 		wl->tx_allocated_pkts[i] -=
399 				(status_2->counters.tx_released_pkts[i] -
400 				wl->tx_pkts_freed[i]) & 0xff;
401 
402 		wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
403 	}
404 
405 
406 	for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
407 		u8 diff;
408 		lnk = &wl->links[i];
409 
410 		/* prevent wrap-around in freed-packets counter */
411 		diff = (status_2->counters.tx_lnk_free_pkts[i] -
412 		       lnk->prev_freed_pkts) & 0xff;
413 
414 		if (diff == 0)
415 			continue;
416 
417 		lnk->allocated_pkts -= diff;
418 		lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
419 
420 		/* accumulate the prev_freed_pkts counter */
421 		lnk->total_freed_pkts += diff;
422 	}
423 
424 	/* prevent wrap-around in total blocks counter */
425 	if (likely(wl->tx_blocks_freed <=
426 		   le32_to_cpu(status_2->total_released_blks)))
427 		freed_blocks = le32_to_cpu(status_2->total_released_blks) -
428 			       wl->tx_blocks_freed;
429 	else
430 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
431 			       le32_to_cpu(status_2->total_released_blks);
432 
433 	wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
434 
435 	wl->tx_allocated_blocks -= freed_blocks;
436 
437 	/*
438 	 * If the FW freed some blocks:
439 	 * If we still have allocated blocks - re-arm the timer, Tx is
440 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
441 	 */
442 	if (freed_blocks) {
443 		if (wl->tx_allocated_blocks)
444 			wl12xx_rearm_tx_watchdog_locked(wl);
445 		else
446 			cancel_delayed_work(&wl->tx_watchdog_work);
447 	}
448 
449 	avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
450 
451 	/*
452 	 * The FW might change the total number of TX memblocks before
453 	 * we get a notification about blocks being released. Thus, the
454 	 * available blocks calculation might yield a temporary result
455 	 * which is lower than the actual available blocks. Keeping in
456 	 * mind that only blocks that were allocated can be moved from
457 	 * TX to RX, tx_blocks_available should never decrease here.
458 	 */
459 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
460 				      avail);
461 
462 	/* if more blocks are available now, tx work can be scheduled */
463 	if (wl->tx_blocks_available > old_tx_blk_count)
464 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
465 
466 	/* for AP update num of allocated TX blocks per link and ps status */
467 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
468 		wl12xx_irq_update_links_status(wl, wlvif, status_2);
469 	}
470 
471 	/* update the host-chipset time offset */
472 	getnstimeofday(&ts);
473 	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
474 		(s64)le32_to_cpu(status_2->fw_localtime);
475 
476 	wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
477 
478 	return 0;
479 }
480 
481 static void wl1271_flush_deferred_work(struct wl1271 *wl)
482 {
483 	struct sk_buff *skb;
484 
485 	/* Pass all received frames to the network stack */
486 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
487 		ieee80211_rx_ni(wl->hw, skb);
488 
489 	/* Return sent skbs to the network stack */
490 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
491 		ieee80211_tx_status_ni(wl->hw, skb);
492 }
493 
494 static void wl1271_netstack_work(struct work_struct *work)
495 {
496 	struct wl1271 *wl =
497 		container_of(work, struct wl1271, netstack_work);
498 
499 	do {
500 		wl1271_flush_deferred_work(wl);
501 	} while (skb_queue_len(&wl->deferred_rx_queue));
502 }
503 
504 #define WL1271_IRQ_MAX_LOOPS 256
505 
506 static int wlcore_irq_locked(struct wl1271 *wl)
507 {
508 	int ret = 0;
509 	u32 intr;
510 	int loopcount = WL1271_IRQ_MAX_LOOPS;
511 	bool done = false;
512 	unsigned int defer_count;
513 	unsigned long flags;
514 
515 	/*
516 	 * In case edge triggered interrupt must be used, we cannot iterate
517 	 * more than once without introducing race conditions with the hardirq.
518 	 */
519 	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
520 		loopcount = 1;
521 
522 	wl1271_debug(DEBUG_IRQ, "IRQ work");
523 
524 	if (unlikely(wl->state != WLCORE_STATE_ON))
525 		goto out;
526 
527 	ret = wl1271_ps_elp_wakeup(wl);
528 	if (ret < 0)
529 		goto out;
530 
531 	while (!done && loopcount--) {
532 		/*
533 		 * In order to avoid a race with the hardirq, clear the flag
534 		 * before acknowledging the chip. Since the mutex is held,
535 		 * wl1271_ps_elp_wakeup cannot be called concurrently.
536 		 */
537 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
538 		smp_mb__after_clear_bit();
539 
540 		ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
541 		if (ret < 0)
542 			goto out;
543 
544 		wlcore_hw_tx_immediate_compl(wl);
545 
546 		intr = le32_to_cpu(wl->fw_status_1->intr);
547 		intr &= WLCORE_ALL_INTR_MASK;
548 		if (!intr) {
549 			done = true;
550 			continue;
551 		}
552 
553 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
554 			wl1271_error("HW watchdog interrupt received! starting recovery.");
555 			wl->watchdog_recovery = true;
556 			ret = -EIO;
557 
558 			/* restarting the chip. ignore any other interrupt. */
559 			goto out;
560 		}
561 
562 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
563 			wl1271_error("SW watchdog interrupt received! "
564 				     "starting recovery.");
565 			wl->watchdog_recovery = true;
566 			ret = -EIO;
567 
568 			/* restarting the chip. ignore any other interrupt. */
569 			goto out;
570 		}
571 
572 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
573 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
574 
575 			ret = wlcore_rx(wl, wl->fw_status_1);
576 			if (ret < 0)
577 				goto out;
578 
579 			/* Check if any tx blocks were freed */
580 			spin_lock_irqsave(&wl->wl_lock, flags);
581 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
582 			    wl1271_tx_total_queue_count(wl) > 0) {
583 				spin_unlock_irqrestore(&wl->wl_lock, flags);
584 				/*
585 				 * In order to avoid starvation of the TX path,
586 				 * call the work function directly.
587 				 */
588 				ret = wlcore_tx_work_locked(wl);
589 				if (ret < 0)
590 					goto out;
591 			} else {
592 				spin_unlock_irqrestore(&wl->wl_lock, flags);
593 			}
594 
595 			/* check for tx results */
596 			ret = wlcore_hw_tx_delayed_compl(wl);
597 			if (ret < 0)
598 				goto out;
599 
600 			/* Make sure the deferred queues don't get too long */
601 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
602 				      skb_queue_len(&wl->deferred_rx_queue);
603 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
604 				wl1271_flush_deferred_work(wl);
605 		}
606 
607 		if (intr & WL1271_ACX_INTR_EVENT_A) {
608 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
609 			ret = wl1271_event_handle(wl, 0);
610 			if (ret < 0)
611 				goto out;
612 		}
613 
614 		if (intr & WL1271_ACX_INTR_EVENT_B) {
615 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
616 			ret = wl1271_event_handle(wl, 1);
617 			if (ret < 0)
618 				goto out;
619 		}
620 
621 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
622 			wl1271_debug(DEBUG_IRQ,
623 				     "WL1271_ACX_INTR_INIT_COMPLETE");
624 
625 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
626 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
627 	}
628 
629 	wl1271_ps_elp_sleep(wl);
630 
631 out:
632 	return ret;
633 }
634 
635 static irqreturn_t wlcore_irq(int irq, void *cookie)
636 {
637 	int ret;
638 	unsigned long flags;
639 	struct wl1271 *wl = cookie;
640 
641 	/* complete the ELP completion */
642 	spin_lock_irqsave(&wl->wl_lock, flags);
643 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
644 	if (wl->elp_compl) {
645 		complete(wl->elp_compl);
646 		wl->elp_compl = NULL;
647 	}
648 
649 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
650 		/* don't enqueue a work right now. mark it as pending */
651 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
652 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
653 		disable_irq_nosync(wl->irq);
654 		pm_wakeup_event(wl->dev, 0);
655 		spin_unlock_irqrestore(&wl->wl_lock, flags);
656 		return IRQ_HANDLED;
657 	}
658 	spin_unlock_irqrestore(&wl->wl_lock, flags);
659 
660 	/* TX might be handled here, avoid redundant work */
661 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
662 	cancel_work_sync(&wl->tx_work);
663 
664 	mutex_lock(&wl->mutex);
665 
666 	ret = wlcore_irq_locked(wl);
667 	if (ret)
668 		wl12xx_queue_recovery_work(wl);
669 
670 	spin_lock_irqsave(&wl->wl_lock, flags);
671 	/* In case TX was not handled here, queue TX work */
672 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
673 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
674 	    wl1271_tx_total_queue_count(wl) > 0)
675 		ieee80211_queue_work(wl->hw, &wl->tx_work);
676 	spin_unlock_irqrestore(&wl->wl_lock, flags);
677 
678 	mutex_unlock(&wl->mutex);
679 
680 	return IRQ_HANDLED;
681 }
682 
683 struct vif_counter_data {
684 	u8 counter;
685 
686 	struct ieee80211_vif *cur_vif;
687 	bool cur_vif_running;
688 };
689 
690 static void wl12xx_vif_count_iter(void *data, u8 *mac,
691 				  struct ieee80211_vif *vif)
692 {
693 	struct vif_counter_data *counter = data;
694 
695 	counter->counter++;
696 	if (counter->cur_vif == vif)
697 		counter->cur_vif_running = true;
698 }
699 
700 /* caller must not hold wl->mutex, as it might deadlock */
701 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
702 			       struct ieee80211_vif *cur_vif,
703 			       struct vif_counter_data *data)
704 {
705 	memset(data, 0, sizeof(*data));
706 	data->cur_vif = cur_vif;
707 
708 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
709 					    wl12xx_vif_count_iter, data);
710 }
711 
712 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
713 {
714 	const struct firmware *fw;
715 	const char *fw_name;
716 	enum wl12xx_fw_type fw_type;
717 	int ret;
718 
719 	if (plt) {
720 		fw_type = WL12XX_FW_TYPE_PLT;
721 		fw_name = wl->plt_fw_name;
722 	} else {
723 		/*
724 		 * we can't call wl12xx_get_vif_count() here because
725 		 * wl->mutex is taken, so use the cached last_vif_count value
726 		 */
727 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
728 			fw_type = WL12XX_FW_TYPE_MULTI;
729 			fw_name = wl->mr_fw_name;
730 		} else {
731 			fw_type = WL12XX_FW_TYPE_NORMAL;
732 			fw_name = wl->sr_fw_name;
733 		}
734 	}
735 
736 	if (wl->fw_type == fw_type)
737 		return 0;
738 
739 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
740 
741 	ret = request_firmware(&fw, fw_name, wl->dev);
742 
743 	if (ret < 0) {
744 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
745 		return ret;
746 	}
747 
748 	if (fw->size % 4) {
749 		wl1271_error("firmware size is not multiple of 32 bits: %zu",
750 			     fw->size);
751 		ret = -EILSEQ;
752 		goto out;
753 	}
754 
755 	vfree(wl->fw);
756 	wl->fw_type = WL12XX_FW_TYPE_NONE;
757 	wl->fw_len = fw->size;
758 	wl->fw = vmalloc(wl->fw_len);
759 
760 	if (!wl->fw) {
761 		wl1271_error("could not allocate memory for the firmware");
762 		ret = -ENOMEM;
763 		goto out;
764 	}
765 
766 	memcpy(wl->fw, fw->data, wl->fw_len);
767 	ret = 0;
768 	wl->fw_type = fw_type;
769 out:
770 	release_firmware(fw);
771 
772 	return ret;
773 }
774 
775 void wl12xx_queue_recovery_work(struct wl1271 *wl)
776 {
777 	WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
778 
779 	/* Avoid a recursive recovery */
780 	if (wl->state == WLCORE_STATE_ON) {
781 		wl->state = WLCORE_STATE_RESTARTING;
782 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
783 		wlcore_disable_interrupts_nosync(wl);
784 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
785 	}
786 }
787 
788 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
789 {
790 	size_t len = 0;
791 
792 	/* The FW log is a length-value list, find where the log end */
793 	while (len < maxlen) {
794 		if (memblock[len] == 0)
795 			break;
796 		if (len + memblock[len] + 1 > maxlen)
797 			break;
798 		len += memblock[len] + 1;
799 	}
800 
801 	/* Make sure we have enough room */
802 	len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
803 
804 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
805 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
806 	wl->fwlog_size += len;
807 
808 	return len;
809 }
810 
811 #define WLCORE_FW_LOG_END 0x2000000
812 
813 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
814 {
815 	u32 addr;
816 	u32 offset;
817 	u32 end_of_log;
818 	u8 *block;
819 	int ret;
820 
821 	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
822 	    (wl->conf.fwlog.mem_blocks == 0))
823 		return;
824 
825 	wl1271_info("Reading FW panic log");
826 
827 	block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
828 	if (!block)
829 		return;
830 
831 	/*
832 	 * Make sure the chip is awake and the logger isn't active.
833 	 * Do not send a stop fwlog command if the fw is hanged or if
834 	 * dbgpins are used (due to some fw bug).
835 	 */
836 	if (wl1271_ps_elp_wakeup(wl))
837 		goto out;
838 	if (!wl->watchdog_recovery &&
839 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
840 		wl12xx_cmd_stop_fwlog(wl);
841 
842 	/* Read the first memory block address */
843 	ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
844 	if (ret < 0)
845 		goto out;
846 
847 	addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
848 	if (!addr)
849 		goto out;
850 
851 	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
852 		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
853 		end_of_log = WLCORE_FW_LOG_END;
854 	} else {
855 		offset = sizeof(addr);
856 		end_of_log = addr;
857 	}
858 
859 	/* Traverse the memory blocks linked list */
860 	do {
861 		memset(block, 0, WL12XX_HW_BLOCK_SIZE);
862 		ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
863 					 false);
864 		if (ret < 0)
865 			goto out;
866 
867 		/*
868 		 * Memory blocks are linked to one another. The first 4 bytes
869 		 * of each memory block hold the hardware address of the next
870 		 * one. The last memory block points to the first one in
871 		 * on demand mode and is equal to 0x2000000 in continuous mode.
872 		 */
873 		addr = le32_to_cpup((__le32 *)block);
874 		if (!wl12xx_copy_fwlog(wl, block + offset,
875 				       WL12XX_HW_BLOCK_SIZE - offset))
876 			break;
877 	} while (addr && (addr != end_of_log));
878 
879 	wake_up_interruptible(&wl->fwlog_waitq);
880 
881 out:
882 	kfree(block);
883 }
884 
885 static void wlcore_print_recovery(struct wl1271 *wl)
886 {
887 	u32 pc = 0;
888 	u32 hint_sts = 0;
889 	int ret;
890 
891 	wl1271_info("Hardware recovery in progress. FW ver: %s",
892 		    wl->chip.fw_ver_str);
893 
894 	/* change partitions momentarily so we can read the FW pc */
895 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
896 	if (ret < 0)
897 		return;
898 
899 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
900 	if (ret < 0)
901 		return;
902 
903 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
904 	if (ret < 0)
905 		return;
906 
907 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
908 				pc, hint_sts, ++wl->recovery_count);
909 
910 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
911 }
912 
913 
914 static void wl1271_recovery_work(struct work_struct *work)
915 {
916 	struct wl1271 *wl =
917 		container_of(work, struct wl1271, recovery_work);
918 	struct wl12xx_vif *wlvif;
919 	struct ieee80211_vif *vif;
920 
921 	mutex_lock(&wl->mutex);
922 
923 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
924 		goto out_unlock;
925 
926 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
927 		wl12xx_read_fwlog_panic(wl);
928 		wlcore_print_recovery(wl);
929 	}
930 
931 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
932 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
933 
934 	if (wl->conf.recovery.no_recovery) {
935 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
936 		goto out_unlock;
937 	}
938 
939 	/* Prevent spurious TX during FW restart */
940 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
941 
942 	/* reboot the chipset */
943 	while (!list_empty(&wl->wlvif_list)) {
944 		wlvif = list_first_entry(&wl->wlvif_list,
945 				       struct wl12xx_vif, list);
946 		vif = wl12xx_wlvif_to_vif(wlvif);
947 		__wl1271_op_remove_interface(wl, vif, false);
948 	}
949 
950 	wlcore_op_stop_locked(wl);
951 
952 	ieee80211_restart_hw(wl->hw);
953 
954 	/*
955 	 * Its safe to enable TX now - the queues are stopped after a request
956 	 * to restart the HW.
957 	 */
958 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
959 
960 out_unlock:
961 	wl->watchdog_recovery = false;
962 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
963 	mutex_unlock(&wl->mutex);
964 }
965 
966 static int wlcore_fw_wakeup(struct wl1271 *wl)
967 {
968 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
969 }
970 
971 static int wl1271_setup(struct wl1271 *wl)
972 {
973 	wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
974 				  sizeof(*wl->fw_status_2) +
975 				  wl->fw_status_priv_len, GFP_KERNEL);
976 	if (!wl->fw_status_1)
977 		return -ENOMEM;
978 
979 	wl->fw_status_2 = (struct wl_fw_status_2 *)
980 				(((u8 *) wl->fw_status_1) +
981 				WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
982 
983 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
984 	if (!wl->tx_res_if) {
985 		kfree(wl->fw_status_1);
986 		return -ENOMEM;
987 	}
988 
989 	return 0;
990 }
991 
992 static int wl12xx_set_power_on(struct wl1271 *wl)
993 {
994 	int ret;
995 
996 	msleep(WL1271_PRE_POWER_ON_SLEEP);
997 	ret = wl1271_power_on(wl);
998 	if (ret < 0)
999 		goto out;
1000 	msleep(WL1271_POWER_ON_SLEEP);
1001 	wl1271_io_reset(wl);
1002 	wl1271_io_init(wl);
1003 
1004 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1005 	if (ret < 0)
1006 		goto fail;
1007 
1008 	/* ELP module wake up */
1009 	ret = wlcore_fw_wakeup(wl);
1010 	if (ret < 0)
1011 		goto fail;
1012 
1013 out:
1014 	return ret;
1015 
1016 fail:
1017 	wl1271_power_off(wl);
1018 	return ret;
1019 }
1020 
1021 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1022 {
1023 	int ret = 0;
1024 
1025 	ret = wl12xx_set_power_on(wl);
1026 	if (ret < 0)
1027 		goto out;
1028 
1029 	/*
1030 	 * For wl127x based devices we could use the default block
1031 	 * size (512 bytes), but due to a bug in the sdio driver, we
1032 	 * need to set it explicitly after the chip is powered on.  To
1033 	 * simplify the code and since the performance impact is
1034 	 * negligible, we use the same block size for all different
1035 	 * chip types.
1036 	 *
1037 	 * Check if the bus supports blocksize alignment and, if it
1038 	 * doesn't, make sure we don't have the quirk.
1039 	 */
1040 	if (!wl1271_set_block_size(wl))
1041 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1042 
1043 	/* TODO: make sure the lower driver has set things up correctly */
1044 
1045 	ret = wl1271_setup(wl);
1046 	if (ret < 0)
1047 		goto out;
1048 
1049 	ret = wl12xx_fetch_firmware(wl, plt);
1050 	if (ret < 0)
1051 		goto out;
1052 
1053 out:
1054 	return ret;
1055 }
1056 
1057 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1058 {
1059 	int retries = WL1271_BOOT_RETRIES;
1060 	struct wiphy *wiphy = wl->hw->wiphy;
1061 
1062 	static const char* const PLT_MODE[] = {
1063 		"PLT_OFF",
1064 		"PLT_ON",
1065 		"PLT_FEM_DETECT"
1066 	};
1067 
1068 	int ret;
1069 
1070 	mutex_lock(&wl->mutex);
1071 
1072 	wl1271_notice("power up");
1073 
1074 	if (wl->state != WLCORE_STATE_OFF) {
1075 		wl1271_error("cannot go into PLT state because not "
1076 			     "in off state: %d", wl->state);
1077 		ret = -EBUSY;
1078 		goto out;
1079 	}
1080 
1081 	/* Indicate to lower levels that we are now in PLT mode */
1082 	wl->plt = true;
1083 	wl->plt_mode = plt_mode;
1084 
1085 	while (retries) {
1086 		retries--;
1087 		ret = wl12xx_chip_wakeup(wl, true);
1088 		if (ret < 0)
1089 			goto power_off;
1090 
1091 		ret = wl->ops->plt_init(wl);
1092 		if (ret < 0)
1093 			goto power_off;
1094 
1095 		wl->state = WLCORE_STATE_ON;
1096 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1097 			      PLT_MODE[plt_mode],
1098 			      wl->chip.fw_ver_str);
1099 
1100 		/* update hw/fw version info in wiphy struct */
1101 		wiphy->hw_version = wl->chip.id;
1102 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1103 			sizeof(wiphy->fw_version));
1104 
1105 		goto out;
1106 
1107 power_off:
1108 		wl1271_power_off(wl);
1109 	}
1110 
1111 	wl->plt = false;
1112 	wl->plt_mode = PLT_OFF;
1113 
1114 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1115 		     WL1271_BOOT_RETRIES);
1116 out:
1117 	mutex_unlock(&wl->mutex);
1118 
1119 	return ret;
1120 }
1121 
1122 int wl1271_plt_stop(struct wl1271 *wl)
1123 {
1124 	int ret = 0;
1125 
1126 	wl1271_notice("power down");
1127 
1128 	/*
1129 	 * Interrupts must be disabled before setting the state to OFF.
1130 	 * Otherwise, the interrupt handler might be called and exit without
1131 	 * reading the interrupt status.
1132 	 */
1133 	wlcore_disable_interrupts(wl);
1134 	mutex_lock(&wl->mutex);
1135 	if (!wl->plt) {
1136 		mutex_unlock(&wl->mutex);
1137 
1138 		/*
1139 		 * This will not necessarily enable interrupts as interrupts
1140 		 * may have been disabled when op_stop was called. It will,
1141 		 * however, balance the above call to disable_interrupts().
1142 		 */
1143 		wlcore_enable_interrupts(wl);
1144 
1145 		wl1271_error("cannot power down because not in PLT "
1146 			     "state: %d", wl->state);
1147 		ret = -EBUSY;
1148 		goto out;
1149 	}
1150 
1151 	mutex_unlock(&wl->mutex);
1152 
1153 	wl1271_flush_deferred_work(wl);
1154 	cancel_work_sync(&wl->netstack_work);
1155 	cancel_work_sync(&wl->recovery_work);
1156 	cancel_delayed_work_sync(&wl->elp_work);
1157 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1158 
1159 	mutex_lock(&wl->mutex);
1160 	wl1271_power_off(wl);
1161 	wl->flags = 0;
1162 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1163 	wl->state = WLCORE_STATE_OFF;
1164 	wl->plt = false;
1165 	wl->plt_mode = PLT_OFF;
1166 	wl->rx_counter = 0;
1167 	mutex_unlock(&wl->mutex);
1168 
1169 out:
1170 	return ret;
1171 }
1172 
1173 static void wl1271_op_tx(struct ieee80211_hw *hw,
1174 			 struct ieee80211_tx_control *control,
1175 			 struct sk_buff *skb)
1176 {
1177 	struct wl1271 *wl = hw->priv;
1178 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1179 	struct ieee80211_vif *vif = info->control.vif;
1180 	struct wl12xx_vif *wlvif = NULL;
1181 	unsigned long flags;
1182 	int q, mapping;
1183 	u8 hlid;
1184 
1185 	if (!vif) {
1186 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1187 		ieee80211_free_txskb(hw, skb);
1188 		return;
1189 	}
1190 
1191 	wlvif = wl12xx_vif_to_data(vif);
1192 	mapping = skb_get_queue_mapping(skb);
1193 	q = wl1271_tx_get_queue(mapping);
1194 
1195 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1196 
1197 	spin_lock_irqsave(&wl->wl_lock, flags);
1198 
1199 	/*
1200 	 * drop the packet if the link is invalid or the queue is stopped
1201 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1202 	 * allow these packets through.
1203 	 */
1204 	if (hlid == WL12XX_INVALID_LINK_ID ||
1205 	    (!test_bit(hlid, wlvif->links_map)) ||
1206 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1207 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1208 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1209 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1210 		ieee80211_free_txskb(hw, skb);
1211 		goto out;
1212 	}
1213 
1214 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1215 		     hlid, q, skb->len);
1216 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1217 
1218 	wl->tx_queue_count[q]++;
1219 	wlvif->tx_queue_count[q]++;
1220 
1221 	/*
1222 	 * The workqueue is slow to process the tx_queue and we need stop
1223 	 * the queue here, otherwise the queue will get too long.
1224 	 */
1225 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1226 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1227 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1228 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1229 		wlcore_stop_queue_locked(wl, wlvif, q,
1230 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1231 	}
1232 
1233 	/*
1234 	 * The chip specific setup must run before the first TX packet -
1235 	 * before that, the tx_work will not be initialized!
1236 	 */
1237 
1238 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1239 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1240 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1241 
1242 out:
1243 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1244 }
1245 
1246 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1247 {
1248 	unsigned long flags;
1249 	int q;
1250 
1251 	/* no need to queue a new dummy packet if one is already pending */
1252 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1253 		return 0;
1254 
1255 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1256 
1257 	spin_lock_irqsave(&wl->wl_lock, flags);
1258 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1259 	wl->tx_queue_count[q]++;
1260 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1261 
1262 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1263 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1264 		return wlcore_tx_work_locked(wl);
1265 
1266 	/*
1267 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1268 	 * interrupt handler function
1269 	 */
1270 	return 0;
1271 }
1272 
1273 /*
1274  * The size of the dummy packet should be at least 1400 bytes. However, in
1275  * order to minimize the number of bus transactions, aligning it to 512 bytes
1276  * boundaries could be beneficial, performance wise
1277  */
1278 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1279 
1280 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1281 {
1282 	struct sk_buff *skb;
1283 	struct ieee80211_hdr_3addr *hdr;
1284 	unsigned int dummy_packet_size;
1285 
1286 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1287 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1288 
1289 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1290 	if (!skb) {
1291 		wl1271_warning("Failed to allocate a dummy packet skb");
1292 		return NULL;
1293 	}
1294 
1295 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1296 
1297 	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1298 	memset(hdr, 0, sizeof(*hdr));
1299 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1300 					 IEEE80211_STYPE_NULLFUNC |
1301 					 IEEE80211_FCTL_TODS);
1302 
1303 	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1304 
1305 	/* Dummy packets require the TID to be management */
1306 	skb->priority = WL1271_TID_MGMT;
1307 
1308 	/* Initialize all fields that might be used */
1309 	skb_set_queue_mapping(skb, 0);
1310 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1311 
1312 	return skb;
1313 }
1314 
1315 
1316 #ifdef CONFIG_PM
1317 static int
1318 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1319 {
1320 	int num_fields = 0, in_field = 0, fields_size = 0;
1321 	int i, pattern_len = 0;
1322 
1323 	if (!p->mask) {
1324 		wl1271_warning("No mask in WoWLAN pattern");
1325 		return -EINVAL;
1326 	}
1327 
1328 	/*
1329 	 * The pattern is broken up into segments of bytes at different offsets
1330 	 * that need to be checked by the FW filter. Each segment is called
1331 	 * a field in the FW API. We verify that the total number of fields
1332 	 * required for this pattern won't exceed FW limits (8)
1333 	 * as well as the total fields buffer won't exceed the FW limit.
1334 	 * Note that if there's a pattern which crosses Ethernet/IP header
1335 	 * boundary a new field is required.
1336 	 */
1337 	for (i = 0; i < p->pattern_len; i++) {
1338 		if (test_bit(i, (unsigned long *)p->mask)) {
1339 			if (!in_field) {
1340 				in_field = 1;
1341 				pattern_len = 1;
1342 			} else {
1343 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1344 					num_fields++;
1345 					fields_size += pattern_len +
1346 						RX_FILTER_FIELD_OVERHEAD;
1347 					pattern_len = 1;
1348 				} else
1349 					pattern_len++;
1350 			}
1351 		} else {
1352 			if (in_field) {
1353 				in_field = 0;
1354 				fields_size += pattern_len +
1355 					RX_FILTER_FIELD_OVERHEAD;
1356 				num_fields++;
1357 			}
1358 		}
1359 	}
1360 
1361 	if (in_field) {
1362 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1363 		num_fields++;
1364 	}
1365 
1366 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1367 		wl1271_warning("RX Filter too complex. Too many segments");
1368 		return -EINVAL;
1369 	}
1370 
1371 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1372 		wl1271_warning("RX filter pattern is too big");
1373 		return -E2BIG;
1374 	}
1375 
1376 	return 0;
1377 }
1378 
1379 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1380 {
1381 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1382 }
1383 
1384 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1385 {
1386 	int i;
1387 
1388 	if (filter == NULL)
1389 		return;
1390 
1391 	for (i = 0; i < filter->num_fields; i++)
1392 		kfree(filter->fields[i].pattern);
1393 
1394 	kfree(filter);
1395 }
1396 
1397 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1398 				 u16 offset, u8 flags,
1399 				 u8 *pattern, u8 len)
1400 {
1401 	struct wl12xx_rx_filter_field *field;
1402 
1403 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1404 		wl1271_warning("Max fields per RX filter. can't alloc another");
1405 		return -EINVAL;
1406 	}
1407 
1408 	field = &filter->fields[filter->num_fields];
1409 
1410 	field->pattern = kzalloc(len, GFP_KERNEL);
1411 	if (!field->pattern) {
1412 		wl1271_warning("Failed to allocate RX filter pattern");
1413 		return -ENOMEM;
1414 	}
1415 
1416 	filter->num_fields++;
1417 
1418 	field->offset = cpu_to_le16(offset);
1419 	field->flags = flags;
1420 	field->len = len;
1421 	memcpy(field->pattern, pattern, len);
1422 
1423 	return 0;
1424 }
1425 
1426 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1427 {
1428 	int i, fields_size = 0;
1429 
1430 	for (i = 0; i < filter->num_fields; i++)
1431 		fields_size += filter->fields[i].len +
1432 			sizeof(struct wl12xx_rx_filter_field) -
1433 			sizeof(u8 *);
1434 
1435 	return fields_size;
1436 }
1437 
1438 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1439 				    u8 *buf)
1440 {
1441 	int i;
1442 	struct wl12xx_rx_filter_field *field;
1443 
1444 	for (i = 0; i < filter->num_fields; i++) {
1445 		field = (struct wl12xx_rx_filter_field *)buf;
1446 
1447 		field->offset = filter->fields[i].offset;
1448 		field->flags = filter->fields[i].flags;
1449 		field->len = filter->fields[i].len;
1450 
1451 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1452 		buf += sizeof(struct wl12xx_rx_filter_field) -
1453 			sizeof(u8 *) + field->len;
1454 	}
1455 }
1456 
1457 /*
1458  * Allocates an RX filter returned through f
1459  * which needs to be freed using rx_filter_free()
1460  */
1461 static int
1462 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1463 					   struct wl12xx_rx_filter **f)
1464 {
1465 	int i, j, ret = 0;
1466 	struct wl12xx_rx_filter *filter;
1467 	u16 offset;
1468 	u8 flags, len;
1469 
1470 	filter = wl1271_rx_filter_alloc();
1471 	if (!filter) {
1472 		wl1271_warning("Failed to alloc rx filter");
1473 		ret = -ENOMEM;
1474 		goto err;
1475 	}
1476 
1477 	i = 0;
1478 	while (i < p->pattern_len) {
1479 		if (!test_bit(i, (unsigned long *)p->mask)) {
1480 			i++;
1481 			continue;
1482 		}
1483 
1484 		for (j = i; j < p->pattern_len; j++) {
1485 			if (!test_bit(j, (unsigned long *)p->mask))
1486 				break;
1487 
1488 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1489 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1490 				break;
1491 		}
1492 
1493 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1494 			offset = i;
1495 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1496 		} else {
1497 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1498 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1499 		}
1500 
1501 		len = j - i;
1502 
1503 		ret = wl1271_rx_filter_alloc_field(filter,
1504 						   offset,
1505 						   flags,
1506 						   &p->pattern[i], len);
1507 		if (ret)
1508 			goto err;
1509 
1510 		i = j;
1511 	}
1512 
1513 	filter->action = FILTER_SIGNAL;
1514 
1515 	*f = filter;
1516 	return 0;
1517 
1518 err:
1519 	wl1271_rx_filter_free(filter);
1520 	*f = NULL;
1521 
1522 	return ret;
1523 }
1524 
1525 static int wl1271_configure_wowlan(struct wl1271 *wl,
1526 				   struct cfg80211_wowlan *wow)
1527 {
1528 	int i, ret;
1529 
1530 	if (!wow || wow->any || !wow->n_patterns) {
1531 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1532 							  FILTER_SIGNAL);
1533 		if (ret)
1534 			goto out;
1535 
1536 		ret = wl1271_rx_filter_clear_all(wl);
1537 		if (ret)
1538 			goto out;
1539 
1540 		return 0;
1541 	}
1542 
1543 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1544 		return -EINVAL;
1545 
1546 	/* Validate all incoming patterns before clearing current FW state */
1547 	for (i = 0; i < wow->n_patterns; i++) {
1548 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1549 		if (ret) {
1550 			wl1271_warning("Bad wowlan pattern %d", i);
1551 			return ret;
1552 		}
1553 	}
1554 
1555 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1556 	if (ret)
1557 		goto out;
1558 
1559 	ret = wl1271_rx_filter_clear_all(wl);
1560 	if (ret)
1561 		goto out;
1562 
1563 	/* Translate WoWLAN patterns into filters */
1564 	for (i = 0; i < wow->n_patterns; i++) {
1565 		struct cfg80211_pkt_pattern *p;
1566 		struct wl12xx_rx_filter *filter = NULL;
1567 
1568 		p = &wow->patterns[i];
1569 
1570 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1571 		if (ret) {
1572 			wl1271_warning("Failed to create an RX filter from "
1573 				       "wowlan pattern %d", i);
1574 			goto out;
1575 		}
1576 
1577 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1578 
1579 		wl1271_rx_filter_free(filter);
1580 		if (ret)
1581 			goto out;
1582 	}
1583 
1584 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1585 
1586 out:
1587 	return ret;
1588 }
1589 
1590 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1591 					struct wl12xx_vif *wlvif,
1592 					struct cfg80211_wowlan *wow)
1593 {
1594 	int ret = 0;
1595 
1596 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1597 		goto out;
1598 
1599 	ret = wl1271_ps_elp_wakeup(wl);
1600 	if (ret < 0)
1601 		goto out;
1602 
1603 	ret = wl1271_configure_wowlan(wl, wow);
1604 	if (ret < 0)
1605 		goto out_sleep;
1606 
1607 	if ((wl->conf.conn.suspend_wake_up_event ==
1608 	     wl->conf.conn.wake_up_event) &&
1609 	    (wl->conf.conn.suspend_listen_interval ==
1610 	     wl->conf.conn.listen_interval))
1611 		goto out_sleep;
1612 
1613 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1614 				    wl->conf.conn.suspend_wake_up_event,
1615 				    wl->conf.conn.suspend_listen_interval);
1616 
1617 	if (ret < 0)
1618 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1619 
1620 out_sleep:
1621 	wl1271_ps_elp_sleep(wl);
1622 out:
1623 	return ret;
1624 
1625 }
1626 
1627 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1628 				       struct wl12xx_vif *wlvif)
1629 {
1630 	int ret = 0;
1631 
1632 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1633 		goto out;
1634 
1635 	ret = wl1271_ps_elp_wakeup(wl);
1636 	if (ret < 0)
1637 		goto out;
1638 
1639 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1640 
1641 	wl1271_ps_elp_sleep(wl);
1642 out:
1643 	return ret;
1644 
1645 }
1646 
1647 static int wl1271_configure_suspend(struct wl1271 *wl,
1648 				    struct wl12xx_vif *wlvif,
1649 				    struct cfg80211_wowlan *wow)
1650 {
1651 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1652 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1653 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1654 		return wl1271_configure_suspend_ap(wl, wlvif);
1655 	return 0;
1656 }
1657 
1658 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1659 {
1660 	int ret = 0;
1661 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1662 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1663 
1664 	if ((!is_ap) && (!is_sta))
1665 		return;
1666 
1667 	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1668 		return;
1669 
1670 	ret = wl1271_ps_elp_wakeup(wl);
1671 	if (ret < 0)
1672 		return;
1673 
1674 	if (is_sta) {
1675 		wl1271_configure_wowlan(wl, NULL);
1676 
1677 		if ((wl->conf.conn.suspend_wake_up_event ==
1678 		     wl->conf.conn.wake_up_event) &&
1679 		    (wl->conf.conn.suspend_listen_interval ==
1680 		     wl->conf.conn.listen_interval))
1681 			goto out_sleep;
1682 
1683 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1684 				    wl->conf.conn.wake_up_event,
1685 				    wl->conf.conn.listen_interval);
1686 
1687 		if (ret < 0)
1688 			wl1271_error("resume: wake up conditions failed: %d",
1689 				     ret);
1690 
1691 	} else if (is_ap) {
1692 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1693 	}
1694 
1695 out_sleep:
1696 	wl1271_ps_elp_sleep(wl);
1697 }
1698 
1699 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1700 			    struct cfg80211_wowlan *wow)
1701 {
1702 	struct wl1271 *wl = hw->priv;
1703 	struct wl12xx_vif *wlvif;
1704 	int ret;
1705 
1706 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1707 	WARN_ON(!wow);
1708 
1709 	/* we want to perform the recovery before suspending */
1710 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1711 		wl1271_warning("postponing suspend to perform recovery");
1712 		return -EBUSY;
1713 	}
1714 
1715 	wl1271_tx_flush(wl);
1716 
1717 	mutex_lock(&wl->mutex);
1718 	wl->wow_enabled = true;
1719 	wl12xx_for_each_wlvif(wl, wlvif) {
1720 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1721 		if (ret < 0) {
1722 			mutex_unlock(&wl->mutex);
1723 			wl1271_warning("couldn't prepare device to suspend");
1724 			return ret;
1725 		}
1726 	}
1727 	mutex_unlock(&wl->mutex);
1728 	/* flush any remaining work */
1729 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1730 
1731 	/*
1732 	 * disable and re-enable interrupts in order to flush
1733 	 * the threaded_irq
1734 	 */
1735 	wlcore_disable_interrupts(wl);
1736 
1737 	/*
1738 	 * set suspended flag to avoid triggering a new threaded_irq
1739 	 * work. no need for spinlock as interrupts are disabled.
1740 	 */
1741 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1742 
1743 	wlcore_enable_interrupts(wl);
1744 	flush_work(&wl->tx_work);
1745 	flush_delayed_work(&wl->elp_work);
1746 
1747 	return 0;
1748 }
1749 
1750 static int wl1271_op_resume(struct ieee80211_hw *hw)
1751 {
1752 	struct wl1271 *wl = hw->priv;
1753 	struct wl12xx_vif *wlvif;
1754 	unsigned long flags;
1755 	bool run_irq_work = false, pending_recovery;
1756 	int ret;
1757 
1758 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1759 		     wl->wow_enabled);
1760 	WARN_ON(!wl->wow_enabled);
1761 
1762 	/*
1763 	 * re-enable irq_work enqueuing, and call irq_work directly if
1764 	 * there is a pending work.
1765 	 */
1766 	spin_lock_irqsave(&wl->wl_lock, flags);
1767 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1768 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1769 		run_irq_work = true;
1770 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1771 
1772 	mutex_lock(&wl->mutex);
1773 
1774 	/* test the recovery flag before calling any SDIO functions */
1775 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1776 				    &wl->flags);
1777 
1778 	if (run_irq_work) {
1779 		wl1271_debug(DEBUG_MAC80211,
1780 			     "run postponed irq_work directly");
1781 
1782 		/* don't talk to the HW if recovery is pending */
1783 		if (!pending_recovery) {
1784 			ret = wlcore_irq_locked(wl);
1785 			if (ret)
1786 				wl12xx_queue_recovery_work(wl);
1787 		}
1788 
1789 		wlcore_enable_interrupts(wl);
1790 	}
1791 
1792 	if (pending_recovery) {
1793 		wl1271_warning("queuing forgotten recovery on resume");
1794 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1795 		goto out;
1796 	}
1797 
1798 	wl12xx_for_each_wlvif(wl, wlvif) {
1799 		wl1271_configure_resume(wl, wlvif);
1800 	}
1801 
1802 out:
1803 	wl->wow_enabled = false;
1804 	mutex_unlock(&wl->mutex);
1805 
1806 	return 0;
1807 }
1808 #endif
1809 
1810 static int wl1271_op_start(struct ieee80211_hw *hw)
1811 {
1812 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1813 
1814 	/*
1815 	 * We have to delay the booting of the hardware because
1816 	 * we need to know the local MAC address before downloading and
1817 	 * initializing the firmware. The MAC address cannot be changed
1818 	 * after boot, and without the proper MAC address, the firmware
1819 	 * will not function properly.
1820 	 *
1821 	 * The MAC address is first known when the corresponding interface
1822 	 * is added. That is where we will initialize the hardware.
1823 	 */
1824 
1825 	return 0;
1826 }
1827 
1828 static void wlcore_op_stop_locked(struct wl1271 *wl)
1829 {
1830 	int i;
1831 
1832 	if (wl->state == WLCORE_STATE_OFF) {
1833 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1834 					&wl->flags))
1835 			wlcore_enable_interrupts(wl);
1836 
1837 		return;
1838 	}
1839 
1840 	/*
1841 	 * this must be before the cancel_work calls below, so that the work
1842 	 * functions don't perform further work.
1843 	 */
1844 	wl->state = WLCORE_STATE_OFF;
1845 
1846 	/*
1847 	 * Use the nosync variant to disable interrupts, so the mutex could be
1848 	 * held while doing so without deadlocking.
1849 	 */
1850 	wlcore_disable_interrupts_nosync(wl);
1851 
1852 	mutex_unlock(&wl->mutex);
1853 
1854 	wlcore_synchronize_interrupts(wl);
1855 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1856 		cancel_work_sync(&wl->recovery_work);
1857 	wl1271_flush_deferred_work(wl);
1858 	cancel_delayed_work_sync(&wl->scan_complete_work);
1859 	cancel_work_sync(&wl->netstack_work);
1860 	cancel_work_sync(&wl->tx_work);
1861 	cancel_delayed_work_sync(&wl->elp_work);
1862 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1863 
1864 	/* let's notify MAC80211 about the remaining pending TX frames */
1865 	mutex_lock(&wl->mutex);
1866 	wl12xx_tx_reset(wl);
1867 
1868 	wl1271_power_off(wl);
1869 	/*
1870 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1871 	 * an interrupt storm. Now that the power is down, it is safe to
1872 	 * re-enable interrupts to balance the disable depth
1873 	 */
1874 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1875 		wlcore_enable_interrupts(wl);
1876 
1877 	wl->band = IEEE80211_BAND_2GHZ;
1878 
1879 	wl->rx_counter = 0;
1880 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1881 	wl->channel_type = NL80211_CHAN_NO_HT;
1882 	wl->tx_blocks_available = 0;
1883 	wl->tx_allocated_blocks = 0;
1884 	wl->tx_results_count = 0;
1885 	wl->tx_packets_count = 0;
1886 	wl->time_offset = 0;
1887 	wl->ap_fw_ps_map = 0;
1888 	wl->ap_ps_map = 0;
1889 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1890 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1891 	memset(wl->links_map, 0, sizeof(wl->links_map));
1892 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1893 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1894 	wl->active_sta_count = 0;
1895 	wl->active_link_count = 0;
1896 
1897 	/* The system link is always allocated */
1898 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1899 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1900 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1901 
1902 	/*
1903 	 * this is performed after the cancel_work calls and the associated
1904 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1905 	 * get executed before all these vars have been reset.
1906 	 */
1907 	wl->flags = 0;
1908 
1909 	wl->tx_blocks_freed = 0;
1910 
1911 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1912 		wl->tx_pkts_freed[i] = 0;
1913 		wl->tx_allocated_pkts[i] = 0;
1914 	}
1915 
1916 	wl1271_debugfs_reset(wl);
1917 
1918 	kfree(wl->fw_status_1);
1919 	wl->fw_status_1 = NULL;
1920 	wl->fw_status_2 = NULL;
1921 	kfree(wl->tx_res_if);
1922 	wl->tx_res_if = NULL;
1923 	kfree(wl->target_mem_map);
1924 	wl->target_mem_map = NULL;
1925 
1926 	/*
1927 	 * FW channels must be re-calibrated after recovery,
1928 	 * clear the last Reg-Domain channel configuration.
1929 	 */
1930 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1931 }
1932 
1933 static void wlcore_op_stop(struct ieee80211_hw *hw)
1934 {
1935 	struct wl1271 *wl = hw->priv;
1936 
1937 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1938 
1939 	mutex_lock(&wl->mutex);
1940 
1941 	wlcore_op_stop_locked(wl);
1942 
1943 	mutex_unlock(&wl->mutex);
1944 }
1945 
1946 static void wlcore_channel_switch_work(struct work_struct *work)
1947 {
1948 	struct delayed_work *dwork;
1949 	struct wl1271 *wl;
1950 	struct ieee80211_vif *vif;
1951 	struct wl12xx_vif *wlvif;
1952 	int ret;
1953 
1954 	dwork = container_of(work, struct delayed_work, work);
1955 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1956 	wl = wlvif->wl;
1957 
1958 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1959 
1960 	mutex_lock(&wl->mutex);
1961 
1962 	if (unlikely(wl->state != WLCORE_STATE_ON))
1963 		goto out;
1964 
1965 	/* check the channel switch is still ongoing */
1966 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1967 		goto out;
1968 
1969 	vif = wl12xx_wlvif_to_vif(wlvif);
1970 	ieee80211_chswitch_done(vif, false);
1971 
1972 	ret = wl1271_ps_elp_wakeup(wl);
1973 	if (ret < 0)
1974 		goto out;
1975 
1976 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
1977 
1978 	wl1271_ps_elp_sleep(wl);
1979 out:
1980 	mutex_unlock(&wl->mutex);
1981 }
1982 
1983 static void wlcore_connection_loss_work(struct work_struct *work)
1984 {
1985 	struct delayed_work *dwork;
1986 	struct wl1271 *wl;
1987 	struct ieee80211_vif *vif;
1988 	struct wl12xx_vif *wlvif;
1989 
1990 	dwork = container_of(work, struct delayed_work, work);
1991 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
1992 	wl = wlvif->wl;
1993 
1994 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
1995 
1996 	mutex_lock(&wl->mutex);
1997 
1998 	if (unlikely(wl->state != WLCORE_STATE_ON))
1999 		goto out;
2000 
2001 	/* Call mac80211 connection loss */
2002 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2003 		goto out;
2004 
2005 	vif = wl12xx_wlvif_to_vif(wlvif);
2006 	ieee80211_connection_loss(vif);
2007 out:
2008 	mutex_unlock(&wl->mutex);
2009 }
2010 
2011 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2012 {
2013 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2014 					WL12XX_MAX_RATE_POLICIES);
2015 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2016 		return -EBUSY;
2017 
2018 	__set_bit(policy, wl->rate_policies_map);
2019 	*idx = policy;
2020 	return 0;
2021 }
2022 
2023 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2024 {
2025 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2026 		return;
2027 
2028 	__clear_bit(*idx, wl->rate_policies_map);
2029 	*idx = WL12XX_MAX_RATE_POLICIES;
2030 }
2031 
2032 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2033 {
2034 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2035 					WLCORE_MAX_KLV_TEMPLATES);
2036 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2037 		return -EBUSY;
2038 
2039 	__set_bit(policy, wl->klv_templates_map);
2040 	*idx = policy;
2041 	return 0;
2042 }
2043 
2044 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2045 {
2046 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2047 		return;
2048 
2049 	__clear_bit(*idx, wl->klv_templates_map);
2050 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2051 }
2052 
2053 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2054 {
2055 	switch (wlvif->bss_type) {
2056 	case BSS_TYPE_AP_BSS:
2057 		if (wlvif->p2p)
2058 			return WL1271_ROLE_P2P_GO;
2059 		else
2060 			return WL1271_ROLE_AP;
2061 
2062 	case BSS_TYPE_STA_BSS:
2063 		if (wlvif->p2p)
2064 			return WL1271_ROLE_P2P_CL;
2065 		else
2066 			return WL1271_ROLE_STA;
2067 
2068 	case BSS_TYPE_IBSS:
2069 		return WL1271_ROLE_IBSS;
2070 
2071 	default:
2072 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2073 	}
2074 	return WL12XX_INVALID_ROLE_TYPE;
2075 }
2076 
2077 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2078 {
2079 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2080 	int i;
2081 
2082 	/* clear everything but the persistent data */
2083 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2084 
2085 	switch (ieee80211_vif_type_p2p(vif)) {
2086 	case NL80211_IFTYPE_P2P_CLIENT:
2087 		wlvif->p2p = 1;
2088 		/* fall-through */
2089 	case NL80211_IFTYPE_STATION:
2090 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2091 		break;
2092 	case NL80211_IFTYPE_ADHOC:
2093 		wlvif->bss_type = BSS_TYPE_IBSS;
2094 		break;
2095 	case NL80211_IFTYPE_P2P_GO:
2096 		wlvif->p2p = 1;
2097 		/* fall-through */
2098 	case NL80211_IFTYPE_AP:
2099 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2100 		break;
2101 	default:
2102 		wlvif->bss_type = MAX_BSS_TYPE;
2103 		return -EOPNOTSUPP;
2104 	}
2105 
2106 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2107 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2108 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2109 
2110 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2111 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2112 		/* init sta/ibss data */
2113 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2114 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2115 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2116 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2117 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2118 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2119 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2120 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2121 	} else {
2122 		/* init ap data */
2123 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2124 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2125 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2126 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2127 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2128 			wl12xx_allocate_rate_policy(wl,
2129 						&wlvif->ap.ucast_rate_idx[i]);
2130 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2131 		/*
2132 		 * TODO: check if basic_rate shouldn't be
2133 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2134 		 * instead (the same thing for STA above).
2135 		*/
2136 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2137 		/* TODO: this seems to be used only for STA, check it */
2138 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2139 	}
2140 
2141 	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2142 	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2143 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2144 
2145 	/*
2146 	 * mac80211 configures some values globally, while we treat them
2147 	 * per-interface. thus, on init, we have to copy them from wl
2148 	 */
2149 	wlvif->band = wl->band;
2150 	wlvif->channel = wl->channel;
2151 	wlvif->power_level = wl->power_level;
2152 	wlvif->channel_type = wl->channel_type;
2153 
2154 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2155 		  wl1271_rx_streaming_enable_work);
2156 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2157 		  wl1271_rx_streaming_disable_work);
2158 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2159 			  wlcore_channel_switch_work);
2160 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2161 			  wlcore_connection_loss_work);
2162 	INIT_LIST_HEAD(&wlvif->list);
2163 
2164 	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2165 		    (unsigned long) wlvif);
2166 	return 0;
2167 }
2168 
2169 static int wl12xx_init_fw(struct wl1271 *wl)
2170 {
2171 	int retries = WL1271_BOOT_RETRIES;
2172 	bool booted = false;
2173 	struct wiphy *wiphy = wl->hw->wiphy;
2174 	int ret;
2175 
2176 	while (retries) {
2177 		retries--;
2178 		ret = wl12xx_chip_wakeup(wl, false);
2179 		if (ret < 0)
2180 			goto power_off;
2181 
2182 		ret = wl->ops->boot(wl);
2183 		if (ret < 0)
2184 			goto power_off;
2185 
2186 		ret = wl1271_hw_init(wl);
2187 		if (ret < 0)
2188 			goto irq_disable;
2189 
2190 		booted = true;
2191 		break;
2192 
2193 irq_disable:
2194 		mutex_unlock(&wl->mutex);
2195 		/* Unlocking the mutex in the middle of handling is
2196 		   inherently unsafe. In this case we deem it safe to do,
2197 		   because we need to let any possibly pending IRQ out of
2198 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2199 		   work function will not do anything.) Also, any other
2200 		   possible concurrent operations will fail due to the
2201 		   current state, hence the wl1271 struct should be safe. */
2202 		wlcore_disable_interrupts(wl);
2203 		wl1271_flush_deferred_work(wl);
2204 		cancel_work_sync(&wl->netstack_work);
2205 		mutex_lock(&wl->mutex);
2206 power_off:
2207 		wl1271_power_off(wl);
2208 	}
2209 
2210 	if (!booted) {
2211 		wl1271_error("firmware boot failed despite %d retries",
2212 			     WL1271_BOOT_RETRIES);
2213 		goto out;
2214 	}
2215 
2216 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2217 
2218 	/* update hw/fw version info in wiphy struct */
2219 	wiphy->hw_version = wl->chip.id;
2220 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2221 		sizeof(wiphy->fw_version));
2222 
2223 	/*
2224 	 * Now we know if 11a is supported (info from the NVS), so disable
2225 	 * 11a channels if not supported
2226 	 */
2227 	if (!wl->enable_11a)
2228 		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2229 
2230 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2231 		     wl->enable_11a ? "" : "not ");
2232 
2233 	wl->state = WLCORE_STATE_ON;
2234 out:
2235 	return ret;
2236 }
2237 
2238 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2239 {
2240 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2241 }
2242 
2243 /*
2244  * Check whether a fw switch (i.e. moving from one loaded
2245  * fw to another) is needed. This function is also responsible
2246  * for updating wl->last_vif_count, so it must be called before
2247  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2248  * will be used).
2249  */
2250 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2251 				  struct vif_counter_data vif_counter_data,
2252 				  bool add)
2253 {
2254 	enum wl12xx_fw_type current_fw = wl->fw_type;
2255 	u8 vif_count = vif_counter_data.counter;
2256 
2257 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2258 		return false;
2259 
2260 	/* increase the vif count if this is a new vif */
2261 	if (add && !vif_counter_data.cur_vif_running)
2262 		vif_count++;
2263 
2264 	wl->last_vif_count = vif_count;
2265 
2266 	/* no need for fw change if the device is OFF */
2267 	if (wl->state == WLCORE_STATE_OFF)
2268 		return false;
2269 
2270 	/* no need for fw change if a single fw is used */
2271 	if (!wl->mr_fw_name)
2272 		return false;
2273 
2274 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2275 		return true;
2276 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2277 		return true;
2278 
2279 	return false;
2280 }
2281 
2282 /*
2283  * Enter "forced psm". Make sure the sta is in psm against the ap,
2284  * to make the fw switch a bit more disconnection-persistent.
2285  */
2286 static void wl12xx_force_active_psm(struct wl1271 *wl)
2287 {
2288 	struct wl12xx_vif *wlvif;
2289 
2290 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2291 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2292 	}
2293 }
2294 
2295 struct wlcore_hw_queue_iter_data {
2296 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2297 	/* current vif */
2298 	struct ieee80211_vif *vif;
2299 	/* is the current vif among those iterated */
2300 	bool cur_running;
2301 };
2302 
2303 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2304 				 struct ieee80211_vif *vif)
2305 {
2306 	struct wlcore_hw_queue_iter_data *iter_data = data;
2307 
2308 	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2309 		return;
2310 
2311 	if (iter_data->cur_running || vif == iter_data->vif) {
2312 		iter_data->cur_running = true;
2313 		return;
2314 	}
2315 
2316 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2317 }
2318 
2319 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2320 					 struct wl12xx_vif *wlvif)
2321 {
2322 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2323 	struct wlcore_hw_queue_iter_data iter_data = {};
2324 	int i, q_base;
2325 
2326 	iter_data.vif = vif;
2327 
2328 	/* mark all bits taken by active interfaces */
2329 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2330 					IEEE80211_IFACE_ITER_RESUME_ALL,
2331 					wlcore_hw_queue_iter, &iter_data);
2332 
2333 	/* the current vif is already running in mac80211 (resume/recovery) */
2334 	if (iter_data.cur_running) {
2335 		wlvif->hw_queue_base = vif->hw_queue[0];
2336 		wl1271_debug(DEBUG_MAC80211,
2337 			     "using pre-allocated hw queue base %d",
2338 			     wlvif->hw_queue_base);
2339 
2340 		/* interface type might have changed type */
2341 		goto adjust_cab_queue;
2342 	}
2343 
2344 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2345 				     WLCORE_NUM_MAC_ADDRESSES);
2346 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2347 		return -EBUSY;
2348 
2349 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2350 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2351 		     wlvif->hw_queue_base);
2352 
2353 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2354 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2355 		/* register hw queues in mac80211 */
2356 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2357 	}
2358 
2359 adjust_cab_queue:
2360 	/* the last places are reserved for cab queues per interface */
2361 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2362 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2363 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2364 	else
2365 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2366 
2367 	return 0;
2368 }
2369 
2370 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2371 				   struct ieee80211_vif *vif)
2372 {
2373 	struct wl1271 *wl = hw->priv;
2374 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2375 	struct vif_counter_data vif_count;
2376 	int ret = 0;
2377 	u8 role_type;
2378 
2379 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2380 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2381 
2382 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2383 		     ieee80211_vif_type_p2p(vif), vif->addr);
2384 
2385 	wl12xx_get_vif_count(hw, vif, &vif_count);
2386 
2387 	mutex_lock(&wl->mutex);
2388 	ret = wl1271_ps_elp_wakeup(wl);
2389 	if (ret < 0)
2390 		goto out_unlock;
2391 
2392 	/*
2393 	 * in some very corner case HW recovery scenarios its possible to
2394 	 * get here before __wl1271_op_remove_interface is complete, so
2395 	 * opt out if that is the case.
2396 	 */
2397 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2398 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2399 		ret = -EBUSY;
2400 		goto out;
2401 	}
2402 
2403 
2404 	ret = wl12xx_init_vif_data(wl, vif);
2405 	if (ret < 0)
2406 		goto out;
2407 
2408 	wlvif->wl = wl;
2409 	role_type = wl12xx_get_role_type(wl, wlvif);
2410 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2411 		ret = -EINVAL;
2412 		goto out;
2413 	}
2414 
2415 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2416 	if (ret < 0)
2417 		goto out;
2418 
2419 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2420 		wl12xx_force_active_psm(wl);
2421 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2422 		mutex_unlock(&wl->mutex);
2423 		wl1271_recovery_work(&wl->recovery_work);
2424 		return 0;
2425 	}
2426 
2427 	/*
2428 	 * TODO: after the nvs issue will be solved, move this block
2429 	 * to start(), and make sure here the driver is ON.
2430 	 */
2431 	if (wl->state == WLCORE_STATE_OFF) {
2432 		/*
2433 		 * we still need this in order to configure the fw
2434 		 * while uploading the nvs
2435 		 */
2436 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2437 
2438 		ret = wl12xx_init_fw(wl);
2439 		if (ret < 0)
2440 			goto out;
2441 	}
2442 
2443 	ret = wl12xx_cmd_role_enable(wl, vif->addr,
2444 				     role_type, &wlvif->role_id);
2445 	if (ret < 0)
2446 		goto out;
2447 
2448 	ret = wl1271_init_vif_specific(wl, vif);
2449 	if (ret < 0)
2450 		goto out;
2451 
2452 	list_add(&wlvif->list, &wl->wlvif_list);
2453 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2454 
2455 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2456 		wl->ap_count++;
2457 	else
2458 		wl->sta_count++;
2459 out:
2460 	wl1271_ps_elp_sleep(wl);
2461 out_unlock:
2462 	mutex_unlock(&wl->mutex);
2463 
2464 	return ret;
2465 }
2466 
2467 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2468 					 struct ieee80211_vif *vif,
2469 					 bool reset_tx_queues)
2470 {
2471 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2472 	int i, ret;
2473 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2474 
2475 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2476 
2477 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2478 		return;
2479 
2480 	/* because of hardware recovery, we may get here twice */
2481 	if (wl->state == WLCORE_STATE_OFF)
2482 		return;
2483 
2484 	wl1271_info("down");
2485 
2486 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2487 	    wl->scan_wlvif == wlvif) {
2488 		/*
2489 		 * Rearm the tx watchdog just before idling scan. This
2490 		 * prevents just-finished scans from triggering the watchdog
2491 		 */
2492 		wl12xx_rearm_tx_watchdog_locked(wl);
2493 
2494 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2495 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2496 		wl->scan_wlvif = NULL;
2497 		wl->scan.req = NULL;
2498 		ieee80211_scan_completed(wl->hw, true);
2499 	}
2500 
2501 	if (wl->sched_vif == wlvif) {
2502 		ieee80211_sched_scan_stopped(wl->hw);
2503 		wl->sched_vif = NULL;
2504 	}
2505 
2506 	if (wl->roc_vif == vif) {
2507 		wl->roc_vif = NULL;
2508 		ieee80211_remain_on_channel_expired(wl->hw);
2509 	}
2510 
2511 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2512 		/* disable active roles */
2513 		ret = wl1271_ps_elp_wakeup(wl);
2514 		if (ret < 0)
2515 			goto deinit;
2516 
2517 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2518 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2519 			if (wl12xx_dev_role_started(wlvif))
2520 				wl12xx_stop_dev(wl, wlvif);
2521 		}
2522 
2523 		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2524 		if (ret < 0)
2525 			goto deinit;
2526 
2527 		wl1271_ps_elp_sleep(wl);
2528 	}
2529 deinit:
2530 	wl12xx_tx_reset_wlvif(wl, wlvif);
2531 
2532 	/* clear all hlids (except system_hlid) */
2533 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2534 
2535 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2536 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2537 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2538 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2539 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2540 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2541 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2542 	} else {
2543 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2544 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2545 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2546 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2547 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2548 			wl12xx_free_rate_policy(wl,
2549 						&wlvif->ap.ucast_rate_idx[i]);
2550 		wl1271_free_ap_keys(wl, wlvif);
2551 	}
2552 
2553 	dev_kfree_skb(wlvif->probereq);
2554 	wlvif->probereq = NULL;
2555 	if (wl->last_wlvif == wlvif)
2556 		wl->last_wlvif = NULL;
2557 	list_del(&wlvif->list);
2558 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2559 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2560 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2561 
2562 	if (is_ap)
2563 		wl->ap_count--;
2564 	else
2565 		wl->sta_count--;
2566 
2567 	/*
2568 	 * Last AP, have more stations. Configure sleep auth according to STA.
2569 	 * Don't do thin on unintended recovery.
2570 	 */
2571 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2572 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2573 		goto unlock;
2574 
2575 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2576 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2577 		/* Configure for power according to debugfs */
2578 		if (sta_auth != WL1271_PSM_ILLEGAL)
2579 			wl1271_acx_sleep_auth(wl, sta_auth);
2580 		/* Configure for ELP power saving */
2581 		else
2582 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2583 	}
2584 
2585 unlock:
2586 	mutex_unlock(&wl->mutex);
2587 
2588 	del_timer_sync(&wlvif->rx_streaming_timer);
2589 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2590 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2591 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2592 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2593 
2594 	mutex_lock(&wl->mutex);
2595 }
2596 
2597 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2598 				       struct ieee80211_vif *vif)
2599 {
2600 	struct wl1271 *wl = hw->priv;
2601 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2602 	struct wl12xx_vif *iter;
2603 	struct vif_counter_data vif_count;
2604 
2605 	wl12xx_get_vif_count(hw, vif, &vif_count);
2606 	mutex_lock(&wl->mutex);
2607 
2608 	if (wl->state == WLCORE_STATE_OFF ||
2609 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2610 		goto out;
2611 
2612 	/*
2613 	 * wl->vif can be null here if someone shuts down the interface
2614 	 * just when hardware recovery has been started.
2615 	 */
2616 	wl12xx_for_each_wlvif(wl, iter) {
2617 		if (iter != wlvif)
2618 			continue;
2619 
2620 		__wl1271_op_remove_interface(wl, vif, true);
2621 		break;
2622 	}
2623 	WARN_ON(iter != wlvif);
2624 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2625 		wl12xx_force_active_psm(wl);
2626 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2627 		wl12xx_queue_recovery_work(wl);
2628 	}
2629 out:
2630 	mutex_unlock(&wl->mutex);
2631 }
2632 
2633 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2634 				      struct ieee80211_vif *vif,
2635 				      enum nl80211_iftype new_type, bool p2p)
2636 {
2637 	struct wl1271 *wl = hw->priv;
2638 	int ret;
2639 
2640 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2641 	wl1271_op_remove_interface(hw, vif);
2642 
2643 	vif->type = new_type;
2644 	vif->p2p = p2p;
2645 	ret = wl1271_op_add_interface(hw, vif);
2646 
2647 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2648 	return ret;
2649 }
2650 
2651 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2652 {
2653 	int ret;
2654 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2655 
2656 	/*
2657 	 * One of the side effects of the JOIN command is that is clears
2658 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2659 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2660 	 * Currently the only valid scenario for JOIN during association
2661 	 * is on roaming, in which case we will also be given new keys.
2662 	 * Keep the below message for now, unless it starts bothering
2663 	 * users who really like to roam a lot :)
2664 	 */
2665 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2666 		wl1271_info("JOIN while associated.");
2667 
2668 	/* clear encryption type */
2669 	wlvif->encryption_type = KEY_NONE;
2670 
2671 	if (is_ibss)
2672 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2673 	else {
2674 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2675 			/*
2676 			 * TODO: this is an ugly workaround for wl12xx fw
2677 			 * bug - we are not able to tx/rx after the first
2678 			 * start_sta, so make dummy start+stop calls,
2679 			 * and then call start_sta again.
2680 			 * this should be fixed in the fw.
2681 			 */
2682 			wl12xx_cmd_role_start_sta(wl, wlvif);
2683 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2684 		}
2685 
2686 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2687 	}
2688 
2689 	return ret;
2690 }
2691 
2692 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2693 			    int offset)
2694 {
2695 	u8 ssid_len;
2696 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2697 					 skb->len - offset);
2698 
2699 	if (!ptr) {
2700 		wl1271_error("No SSID in IEs!");
2701 		return -ENOENT;
2702 	}
2703 
2704 	ssid_len = ptr[1];
2705 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2706 		wl1271_error("SSID is too long!");
2707 		return -EINVAL;
2708 	}
2709 
2710 	wlvif->ssid_len = ssid_len;
2711 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2712 	return 0;
2713 }
2714 
2715 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2716 {
2717 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2718 	struct sk_buff *skb;
2719 	int ieoffset;
2720 
2721 	/* we currently only support setting the ssid from the ap probe req */
2722 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2723 		return -EINVAL;
2724 
2725 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2726 	if (!skb)
2727 		return -EINVAL;
2728 
2729 	ieoffset = offsetof(struct ieee80211_mgmt,
2730 			    u.probe_req.variable);
2731 	wl1271_ssid_set(wlvif, skb, ieoffset);
2732 	dev_kfree_skb(skb);
2733 
2734 	return 0;
2735 }
2736 
2737 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2738 			    struct ieee80211_bss_conf *bss_conf,
2739 			    u32 sta_rate_set)
2740 {
2741 	int ieoffset;
2742 	int ret;
2743 
2744 	wlvif->aid = bss_conf->aid;
2745 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2746 	wlvif->beacon_int = bss_conf->beacon_int;
2747 	wlvif->wmm_enabled = bss_conf->qos;
2748 
2749 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2750 
2751 	/*
2752 	 * with wl1271, we don't need to update the
2753 	 * beacon_int and dtim_period, because the firmware
2754 	 * updates it by itself when the first beacon is
2755 	 * received after a join.
2756 	 */
2757 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2758 	if (ret < 0)
2759 		return ret;
2760 
2761 	/*
2762 	 * Get a template for hardware connection maintenance
2763 	 */
2764 	dev_kfree_skb(wlvif->probereq);
2765 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2766 							wlvif,
2767 							NULL);
2768 	ieoffset = offsetof(struct ieee80211_mgmt,
2769 			    u.probe_req.variable);
2770 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2771 
2772 	/* enable the connection monitoring feature */
2773 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2774 	if (ret < 0)
2775 		return ret;
2776 
2777 	/*
2778 	 * The join command disable the keep-alive mode, shut down its process,
2779 	 * and also clear the template config, so we need to reset it all after
2780 	 * the join. The acx_aid starts the keep-alive process, and the order
2781 	 * of the commands below is relevant.
2782 	 */
2783 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2784 	if (ret < 0)
2785 		return ret;
2786 
2787 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2788 	if (ret < 0)
2789 		return ret;
2790 
2791 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2792 	if (ret < 0)
2793 		return ret;
2794 
2795 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2796 					   wlvif->sta.klv_template_id,
2797 					   ACX_KEEP_ALIVE_TPL_VALID);
2798 	if (ret < 0)
2799 		return ret;
2800 
2801 	/*
2802 	 * The default fw psm configuration is AUTO, while mac80211 default
2803 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2804 	 */
2805 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2806 	if (ret < 0)
2807 		return ret;
2808 
2809 	if (sta_rate_set) {
2810 		wlvif->rate_set =
2811 			wl1271_tx_enabled_rates_get(wl,
2812 						    sta_rate_set,
2813 						    wlvif->band);
2814 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2815 		if (ret < 0)
2816 			return ret;
2817 	}
2818 
2819 	return ret;
2820 }
2821 
2822 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2823 {
2824 	int ret;
2825 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2826 
2827 	/* make sure we are connected (sta) joined */
2828 	if (sta &&
2829 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2830 		return false;
2831 
2832 	/* make sure we are joined (ibss) */
2833 	if (!sta &&
2834 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2835 		return false;
2836 
2837 	if (sta) {
2838 		/* use defaults when not associated */
2839 		wlvif->aid = 0;
2840 
2841 		/* free probe-request template */
2842 		dev_kfree_skb(wlvif->probereq);
2843 		wlvif->probereq = NULL;
2844 
2845 		/* disable connection monitor features */
2846 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2847 		if (ret < 0)
2848 			return ret;
2849 
2850 		/* Disable the keep-alive feature */
2851 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2852 		if (ret < 0)
2853 			return ret;
2854 	}
2855 
2856 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2857 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2858 
2859 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
2860 		ieee80211_chswitch_done(vif, false);
2861 		cancel_delayed_work(&wlvif->channel_switch_work);
2862 	}
2863 
2864 	/* invalidate keep-alive template */
2865 	wl1271_acx_keep_alive_config(wl, wlvif,
2866 				     wlvif->sta.klv_template_id,
2867 				     ACX_KEEP_ALIVE_TPL_INVALID);
2868 
2869 	return 0;
2870 }
2871 
2872 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2873 {
2874 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2875 	wlvif->rate_set = wlvif->basic_rate_set;
2876 }
2877 
2878 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2879 			     struct ieee80211_conf *conf, u32 changed)
2880 {
2881 	int ret;
2882 
2883 	if (conf->power_level != wlvif->power_level) {
2884 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2885 		if (ret < 0)
2886 			return ret;
2887 
2888 		wlvif->power_level = conf->power_level;
2889 	}
2890 
2891 	return 0;
2892 }
2893 
2894 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2895 {
2896 	struct wl1271 *wl = hw->priv;
2897 	struct wl12xx_vif *wlvif;
2898 	struct ieee80211_conf *conf = &hw->conf;
2899 	int ret = 0;
2900 
2901 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2902 		     " changed 0x%x",
2903 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2904 		     conf->power_level,
2905 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2906 			 changed);
2907 
2908 	mutex_lock(&wl->mutex);
2909 
2910 	if (changed & IEEE80211_CONF_CHANGE_POWER)
2911 		wl->power_level = conf->power_level;
2912 
2913 	if (unlikely(wl->state != WLCORE_STATE_ON))
2914 		goto out;
2915 
2916 	ret = wl1271_ps_elp_wakeup(wl);
2917 	if (ret < 0)
2918 		goto out;
2919 
2920 	/* configure each interface */
2921 	wl12xx_for_each_wlvif(wl, wlvif) {
2922 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2923 		if (ret < 0)
2924 			goto out_sleep;
2925 	}
2926 
2927 out_sleep:
2928 	wl1271_ps_elp_sleep(wl);
2929 
2930 out:
2931 	mutex_unlock(&wl->mutex);
2932 
2933 	return ret;
2934 }
2935 
2936 struct wl1271_filter_params {
2937 	bool enabled;
2938 	int mc_list_length;
2939 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
2940 };
2941 
2942 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2943 				       struct netdev_hw_addr_list *mc_list)
2944 {
2945 	struct wl1271_filter_params *fp;
2946 	struct netdev_hw_addr *ha;
2947 
2948 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2949 	if (!fp) {
2950 		wl1271_error("Out of memory setting filters.");
2951 		return 0;
2952 	}
2953 
2954 	/* update multicast filtering parameters */
2955 	fp->mc_list_length = 0;
2956 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2957 		fp->enabled = false;
2958 	} else {
2959 		fp->enabled = true;
2960 		netdev_hw_addr_list_for_each(ha, mc_list) {
2961 			memcpy(fp->mc_list[fp->mc_list_length],
2962 					ha->addr, ETH_ALEN);
2963 			fp->mc_list_length++;
2964 		}
2965 	}
2966 
2967 	return (u64)(unsigned long)fp;
2968 }
2969 
2970 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2971 				  FIF_ALLMULTI | \
2972 				  FIF_FCSFAIL | \
2973 				  FIF_BCN_PRBRESP_PROMISC | \
2974 				  FIF_CONTROL | \
2975 				  FIF_OTHER_BSS)
2976 
2977 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2978 				       unsigned int changed,
2979 				       unsigned int *total, u64 multicast)
2980 {
2981 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2982 	struct wl1271 *wl = hw->priv;
2983 	struct wl12xx_vif *wlvif;
2984 
2985 	int ret;
2986 
2987 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2988 		     " total %x", changed, *total);
2989 
2990 	mutex_lock(&wl->mutex);
2991 
2992 	*total &= WL1271_SUPPORTED_FILTERS;
2993 	changed &= WL1271_SUPPORTED_FILTERS;
2994 
2995 	if (unlikely(wl->state != WLCORE_STATE_ON))
2996 		goto out;
2997 
2998 	ret = wl1271_ps_elp_wakeup(wl);
2999 	if (ret < 0)
3000 		goto out;
3001 
3002 	wl12xx_for_each_wlvif(wl, wlvif) {
3003 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3004 			if (*total & FIF_ALLMULTI)
3005 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3006 								   false,
3007 								   NULL, 0);
3008 			else if (fp)
3009 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3010 							fp->enabled,
3011 							fp->mc_list,
3012 							fp->mc_list_length);
3013 			if (ret < 0)
3014 				goto out_sleep;
3015 		}
3016 	}
3017 
3018 	/*
3019 	 * the fw doesn't provide an api to configure the filters. instead,
3020 	 * the filters configuration is based on the active roles / ROC
3021 	 * state.
3022 	 */
3023 
3024 out_sleep:
3025 	wl1271_ps_elp_sleep(wl);
3026 
3027 out:
3028 	mutex_unlock(&wl->mutex);
3029 	kfree(fp);
3030 }
3031 
3032 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3033 				u8 id, u8 key_type, u8 key_size,
3034 				const u8 *key, u8 hlid, u32 tx_seq_32,
3035 				u16 tx_seq_16)
3036 {
3037 	struct wl1271_ap_key *ap_key;
3038 	int i;
3039 
3040 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3041 
3042 	if (key_size > MAX_KEY_SIZE)
3043 		return -EINVAL;
3044 
3045 	/*
3046 	 * Find next free entry in ap_keys. Also check we are not replacing
3047 	 * an existing key.
3048 	 */
3049 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3050 		if (wlvif->ap.recorded_keys[i] == NULL)
3051 			break;
3052 
3053 		if (wlvif->ap.recorded_keys[i]->id == id) {
3054 			wl1271_warning("trying to record key replacement");
3055 			return -EINVAL;
3056 		}
3057 	}
3058 
3059 	if (i == MAX_NUM_KEYS)
3060 		return -EBUSY;
3061 
3062 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3063 	if (!ap_key)
3064 		return -ENOMEM;
3065 
3066 	ap_key->id = id;
3067 	ap_key->key_type = key_type;
3068 	ap_key->key_size = key_size;
3069 	memcpy(ap_key->key, key, key_size);
3070 	ap_key->hlid = hlid;
3071 	ap_key->tx_seq_32 = tx_seq_32;
3072 	ap_key->tx_seq_16 = tx_seq_16;
3073 
3074 	wlvif->ap.recorded_keys[i] = ap_key;
3075 	return 0;
3076 }
3077 
3078 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3079 {
3080 	int i;
3081 
3082 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3083 		kfree(wlvif->ap.recorded_keys[i]);
3084 		wlvif->ap.recorded_keys[i] = NULL;
3085 	}
3086 }
3087 
3088 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3089 {
3090 	int i, ret = 0;
3091 	struct wl1271_ap_key *key;
3092 	bool wep_key_added = false;
3093 
3094 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3095 		u8 hlid;
3096 		if (wlvif->ap.recorded_keys[i] == NULL)
3097 			break;
3098 
3099 		key = wlvif->ap.recorded_keys[i];
3100 		hlid = key->hlid;
3101 		if (hlid == WL12XX_INVALID_LINK_ID)
3102 			hlid = wlvif->ap.bcast_hlid;
3103 
3104 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3105 					    key->id, key->key_type,
3106 					    key->key_size, key->key,
3107 					    hlid, key->tx_seq_32,
3108 					    key->tx_seq_16);
3109 		if (ret < 0)
3110 			goto out;
3111 
3112 		if (key->key_type == KEY_WEP)
3113 			wep_key_added = true;
3114 	}
3115 
3116 	if (wep_key_added) {
3117 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3118 						     wlvif->ap.bcast_hlid);
3119 		if (ret < 0)
3120 			goto out;
3121 	}
3122 
3123 out:
3124 	wl1271_free_ap_keys(wl, wlvif);
3125 	return ret;
3126 }
3127 
3128 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3129 		       u16 action, u8 id, u8 key_type,
3130 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3131 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3132 {
3133 	int ret;
3134 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3135 
3136 	if (is_ap) {
3137 		struct wl1271_station *wl_sta;
3138 		u8 hlid;
3139 
3140 		if (sta) {
3141 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3142 			hlid = wl_sta->hlid;
3143 		} else {
3144 			hlid = wlvif->ap.bcast_hlid;
3145 		}
3146 
3147 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3148 			/*
3149 			 * We do not support removing keys after AP shutdown.
3150 			 * Pretend we do to make mac80211 happy.
3151 			 */
3152 			if (action != KEY_ADD_OR_REPLACE)
3153 				return 0;
3154 
3155 			ret = wl1271_record_ap_key(wl, wlvif, id,
3156 					     key_type, key_size,
3157 					     key, hlid, tx_seq_32,
3158 					     tx_seq_16);
3159 		} else {
3160 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3161 					     id, key_type, key_size,
3162 					     key, hlid, tx_seq_32,
3163 					     tx_seq_16);
3164 		}
3165 
3166 		if (ret < 0)
3167 			return ret;
3168 	} else {
3169 		const u8 *addr;
3170 		static const u8 bcast_addr[ETH_ALEN] = {
3171 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3172 		};
3173 
3174 		addr = sta ? sta->addr : bcast_addr;
3175 
3176 		if (is_zero_ether_addr(addr)) {
3177 			/* We dont support TX only encryption */
3178 			return -EOPNOTSUPP;
3179 		}
3180 
3181 		/* The wl1271 does not allow to remove unicast keys - they
3182 		   will be cleared automatically on next CMD_JOIN. Ignore the
3183 		   request silently, as we dont want the mac80211 to emit
3184 		   an error message. */
3185 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3186 			return 0;
3187 
3188 		/* don't remove key if hlid was already deleted */
3189 		if (action == KEY_REMOVE &&
3190 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3191 			return 0;
3192 
3193 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3194 					     id, key_type, key_size,
3195 					     key, addr, tx_seq_32,
3196 					     tx_seq_16);
3197 		if (ret < 0)
3198 			return ret;
3199 
3200 	}
3201 
3202 	return 0;
3203 }
3204 
3205 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3206 			     struct ieee80211_vif *vif,
3207 			     struct ieee80211_sta *sta,
3208 			     struct ieee80211_key_conf *key_conf)
3209 {
3210 	struct wl1271 *wl = hw->priv;
3211 	int ret;
3212 	bool might_change_spare =
3213 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3214 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3215 
3216 	if (might_change_spare) {
3217 		/*
3218 		 * stop the queues and flush to ensure the next packets are
3219 		 * in sync with FW spare block accounting
3220 		 */
3221 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3222 		wl1271_tx_flush(wl);
3223 	}
3224 
3225 	mutex_lock(&wl->mutex);
3226 
3227 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3228 		ret = -EAGAIN;
3229 		goto out_wake_queues;
3230 	}
3231 
3232 	ret = wl1271_ps_elp_wakeup(wl);
3233 	if (ret < 0)
3234 		goto out_wake_queues;
3235 
3236 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3237 
3238 	wl1271_ps_elp_sleep(wl);
3239 
3240 out_wake_queues:
3241 	if (might_change_spare)
3242 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3243 
3244 	mutex_unlock(&wl->mutex);
3245 
3246 	return ret;
3247 }
3248 
3249 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3250 		   struct ieee80211_vif *vif,
3251 		   struct ieee80211_sta *sta,
3252 		   struct ieee80211_key_conf *key_conf)
3253 {
3254 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3255 	int ret;
3256 	u32 tx_seq_32 = 0;
3257 	u16 tx_seq_16 = 0;
3258 	u8 key_type;
3259 	u8 hlid;
3260 
3261 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3262 
3263 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3264 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3265 		     key_conf->cipher, key_conf->keyidx,
3266 		     key_conf->keylen, key_conf->flags);
3267 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3268 
3269 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3270 		if (sta) {
3271 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3272 			hlid = wl_sta->hlid;
3273 		} else {
3274 			hlid = wlvif->ap.bcast_hlid;
3275 		}
3276 	else
3277 		hlid = wlvif->sta.hlid;
3278 
3279 	if (hlid != WL12XX_INVALID_LINK_ID) {
3280 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3281 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3282 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3283 	}
3284 
3285 	switch (key_conf->cipher) {
3286 	case WLAN_CIPHER_SUITE_WEP40:
3287 	case WLAN_CIPHER_SUITE_WEP104:
3288 		key_type = KEY_WEP;
3289 
3290 		key_conf->hw_key_idx = key_conf->keyidx;
3291 		break;
3292 	case WLAN_CIPHER_SUITE_TKIP:
3293 		key_type = KEY_TKIP;
3294 		key_conf->hw_key_idx = key_conf->keyidx;
3295 		break;
3296 	case WLAN_CIPHER_SUITE_CCMP:
3297 		key_type = KEY_AES;
3298 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3299 		break;
3300 	case WL1271_CIPHER_SUITE_GEM:
3301 		key_type = KEY_GEM;
3302 		break;
3303 	default:
3304 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3305 
3306 		return -EOPNOTSUPP;
3307 	}
3308 
3309 	switch (cmd) {
3310 	case SET_KEY:
3311 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3312 				 key_conf->keyidx, key_type,
3313 				 key_conf->keylen, key_conf->key,
3314 				 tx_seq_32, tx_seq_16, sta);
3315 		if (ret < 0) {
3316 			wl1271_error("Could not add or replace key");
3317 			return ret;
3318 		}
3319 
3320 		/*
3321 		 * reconfiguring arp response if the unicast (or common)
3322 		 * encryption key type was changed
3323 		 */
3324 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3325 		    (sta || key_type == KEY_WEP) &&
3326 		    wlvif->encryption_type != key_type) {
3327 			wlvif->encryption_type = key_type;
3328 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3329 			if (ret < 0) {
3330 				wl1271_warning("build arp rsp failed: %d", ret);
3331 				return ret;
3332 			}
3333 		}
3334 		break;
3335 
3336 	case DISABLE_KEY:
3337 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3338 				     key_conf->keyidx, key_type,
3339 				     key_conf->keylen, key_conf->key,
3340 				     0, 0, sta);
3341 		if (ret < 0) {
3342 			wl1271_error("Could not remove key");
3343 			return ret;
3344 		}
3345 		break;
3346 
3347 	default:
3348 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3349 		return -EOPNOTSUPP;
3350 	}
3351 
3352 	return ret;
3353 }
3354 EXPORT_SYMBOL_GPL(wlcore_set_key);
3355 
3356 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3357 					  struct ieee80211_vif *vif,
3358 					  int key_idx)
3359 {
3360 	struct wl1271 *wl = hw->priv;
3361 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3362 	int ret;
3363 
3364 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3365 		     key_idx);
3366 
3367 	mutex_lock(&wl->mutex);
3368 
3369 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3370 		ret = -EAGAIN;
3371 		goto out_unlock;
3372 	}
3373 
3374 	ret = wl1271_ps_elp_wakeup(wl);
3375 	if (ret < 0)
3376 		goto out_unlock;
3377 
3378 	wlvif->default_key = key_idx;
3379 
3380 	/* the default WEP key needs to be configured at least once */
3381 	if (wlvif->encryption_type == KEY_WEP) {
3382 		ret = wl12xx_cmd_set_default_wep_key(wl,
3383 				key_idx,
3384 				wlvif->sta.hlid);
3385 		if (ret < 0)
3386 			goto out_sleep;
3387 	}
3388 
3389 out_sleep:
3390 	wl1271_ps_elp_sleep(wl);
3391 
3392 out_unlock:
3393 	mutex_unlock(&wl->mutex);
3394 }
3395 
3396 void wlcore_regdomain_config(struct wl1271 *wl)
3397 {
3398 	int ret;
3399 
3400 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3401 		return;
3402 
3403 	mutex_lock(&wl->mutex);
3404 
3405 	if (unlikely(wl->state != WLCORE_STATE_ON))
3406 		goto out;
3407 
3408 	ret = wl1271_ps_elp_wakeup(wl);
3409 	if (ret < 0)
3410 		goto out;
3411 
3412 	ret = wlcore_cmd_regdomain_config_locked(wl);
3413 	if (ret < 0) {
3414 		wl12xx_queue_recovery_work(wl);
3415 		goto out;
3416 	}
3417 
3418 	wl1271_ps_elp_sleep(wl);
3419 out:
3420 	mutex_unlock(&wl->mutex);
3421 }
3422 
3423 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3424 			     struct ieee80211_vif *vif,
3425 			     struct cfg80211_scan_request *req)
3426 {
3427 	struct wl1271 *wl = hw->priv;
3428 	int ret;
3429 	u8 *ssid = NULL;
3430 	size_t len = 0;
3431 
3432 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3433 
3434 	if (req->n_ssids) {
3435 		ssid = req->ssids[0].ssid;
3436 		len = req->ssids[0].ssid_len;
3437 	}
3438 
3439 	mutex_lock(&wl->mutex);
3440 
3441 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3442 		/*
3443 		 * We cannot return -EBUSY here because cfg80211 will expect
3444 		 * a call to ieee80211_scan_completed if we do - in this case
3445 		 * there won't be any call.
3446 		 */
3447 		ret = -EAGAIN;
3448 		goto out;
3449 	}
3450 
3451 	ret = wl1271_ps_elp_wakeup(wl);
3452 	if (ret < 0)
3453 		goto out;
3454 
3455 	/* fail if there is any role in ROC */
3456 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3457 		/* don't allow scanning right now */
3458 		ret = -EBUSY;
3459 		goto out_sleep;
3460 	}
3461 
3462 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3463 out_sleep:
3464 	wl1271_ps_elp_sleep(wl);
3465 out:
3466 	mutex_unlock(&wl->mutex);
3467 
3468 	return ret;
3469 }
3470 
3471 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3472 				     struct ieee80211_vif *vif)
3473 {
3474 	struct wl1271 *wl = hw->priv;
3475 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3476 	int ret;
3477 
3478 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3479 
3480 	mutex_lock(&wl->mutex);
3481 
3482 	if (unlikely(wl->state != WLCORE_STATE_ON))
3483 		goto out;
3484 
3485 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3486 		goto out;
3487 
3488 	ret = wl1271_ps_elp_wakeup(wl);
3489 	if (ret < 0)
3490 		goto out;
3491 
3492 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3493 		ret = wl->ops->scan_stop(wl, wlvif);
3494 		if (ret < 0)
3495 			goto out_sleep;
3496 	}
3497 
3498 	/*
3499 	 * Rearm the tx watchdog just before idling scan. This
3500 	 * prevents just-finished scans from triggering the watchdog
3501 	 */
3502 	wl12xx_rearm_tx_watchdog_locked(wl);
3503 
3504 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3505 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3506 	wl->scan_wlvif = NULL;
3507 	wl->scan.req = NULL;
3508 	ieee80211_scan_completed(wl->hw, true);
3509 
3510 out_sleep:
3511 	wl1271_ps_elp_sleep(wl);
3512 out:
3513 	mutex_unlock(&wl->mutex);
3514 
3515 	cancel_delayed_work_sync(&wl->scan_complete_work);
3516 }
3517 
3518 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3519 				      struct ieee80211_vif *vif,
3520 				      struct cfg80211_sched_scan_request *req,
3521 				      struct ieee80211_sched_scan_ies *ies)
3522 {
3523 	struct wl1271 *wl = hw->priv;
3524 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3525 	int ret;
3526 
3527 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3528 
3529 	mutex_lock(&wl->mutex);
3530 
3531 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3532 		ret = -EAGAIN;
3533 		goto out;
3534 	}
3535 
3536 	ret = wl1271_ps_elp_wakeup(wl);
3537 	if (ret < 0)
3538 		goto out;
3539 
3540 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3541 	if (ret < 0)
3542 		goto out_sleep;
3543 
3544 	wl->sched_vif = wlvif;
3545 
3546 out_sleep:
3547 	wl1271_ps_elp_sleep(wl);
3548 out:
3549 	mutex_unlock(&wl->mutex);
3550 	return ret;
3551 }
3552 
3553 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3554 				      struct ieee80211_vif *vif)
3555 {
3556 	struct wl1271 *wl = hw->priv;
3557 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3558 	int ret;
3559 
3560 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3561 
3562 	mutex_lock(&wl->mutex);
3563 
3564 	if (unlikely(wl->state != WLCORE_STATE_ON))
3565 		goto out;
3566 
3567 	ret = wl1271_ps_elp_wakeup(wl);
3568 	if (ret < 0)
3569 		goto out;
3570 
3571 	wl->ops->sched_scan_stop(wl, wlvif);
3572 
3573 	wl1271_ps_elp_sleep(wl);
3574 out:
3575 	mutex_unlock(&wl->mutex);
3576 }
3577 
3578 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3579 {
3580 	struct wl1271 *wl = hw->priv;
3581 	int ret = 0;
3582 
3583 	mutex_lock(&wl->mutex);
3584 
3585 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3586 		ret = -EAGAIN;
3587 		goto out;
3588 	}
3589 
3590 	ret = wl1271_ps_elp_wakeup(wl);
3591 	if (ret < 0)
3592 		goto out;
3593 
3594 	ret = wl1271_acx_frag_threshold(wl, value);
3595 	if (ret < 0)
3596 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3597 
3598 	wl1271_ps_elp_sleep(wl);
3599 
3600 out:
3601 	mutex_unlock(&wl->mutex);
3602 
3603 	return ret;
3604 }
3605 
3606 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3607 {
3608 	struct wl1271 *wl = hw->priv;
3609 	struct wl12xx_vif *wlvif;
3610 	int ret = 0;
3611 
3612 	mutex_lock(&wl->mutex);
3613 
3614 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3615 		ret = -EAGAIN;
3616 		goto out;
3617 	}
3618 
3619 	ret = wl1271_ps_elp_wakeup(wl);
3620 	if (ret < 0)
3621 		goto out;
3622 
3623 	wl12xx_for_each_wlvif(wl, wlvif) {
3624 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3625 		if (ret < 0)
3626 			wl1271_warning("set rts threshold failed: %d", ret);
3627 	}
3628 	wl1271_ps_elp_sleep(wl);
3629 
3630 out:
3631 	mutex_unlock(&wl->mutex);
3632 
3633 	return ret;
3634 }
3635 
3636 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3637 {
3638 	int len;
3639 	const u8 *next, *end = skb->data + skb->len;
3640 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3641 					skb->len - ieoffset);
3642 	if (!ie)
3643 		return;
3644 	len = ie[1] + 2;
3645 	next = ie + len;
3646 	memmove(ie, next, end - next);
3647 	skb_trim(skb, skb->len - len);
3648 }
3649 
3650 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3651 					    unsigned int oui, u8 oui_type,
3652 					    int ieoffset)
3653 {
3654 	int len;
3655 	const u8 *next, *end = skb->data + skb->len;
3656 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3657 					       skb->data + ieoffset,
3658 					       skb->len - ieoffset);
3659 	if (!ie)
3660 		return;
3661 	len = ie[1] + 2;
3662 	next = ie + len;
3663 	memmove(ie, next, end - next);
3664 	skb_trim(skb, skb->len - len);
3665 }
3666 
3667 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3668 					 struct ieee80211_vif *vif)
3669 {
3670 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3671 	struct sk_buff *skb;
3672 	int ret;
3673 
3674 	skb = ieee80211_proberesp_get(wl->hw, vif);
3675 	if (!skb)
3676 		return -EOPNOTSUPP;
3677 
3678 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3679 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3680 				      skb->data,
3681 				      skb->len, 0,
3682 				      rates);
3683 	dev_kfree_skb(skb);
3684 
3685 	if (ret < 0)
3686 		goto out;
3687 
3688 	wl1271_debug(DEBUG_AP, "probe response updated");
3689 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3690 
3691 out:
3692 	return ret;
3693 }
3694 
3695 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3696 					     struct ieee80211_vif *vif,
3697 					     u8 *probe_rsp_data,
3698 					     size_t probe_rsp_len,
3699 					     u32 rates)
3700 {
3701 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3702 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3703 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3704 	int ssid_ie_offset, ie_offset, templ_len;
3705 	const u8 *ptr;
3706 
3707 	/* no need to change probe response if the SSID is set correctly */
3708 	if (wlvif->ssid_len > 0)
3709 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3710 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3711 					       probe_rsp_data,
3712 					       probe_rsp_len, 0,
3713 					       rates);
3714 
3715 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3716 		wl1271_error("probe_rsp template too big");
3717 		return -EINVAL;
3718 	}
3719 
3720 	/* start searching from IE offset */
3721 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3722 
3723 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3724 			       probe_rsp_len - ie_offset);
3725 	if (!ptr) {
3726 		wl1271_error("No SSID in beacon!");
3727 		return -EINVAL;
3728 	}
3729 
3730 	ssid_ie_offset = ptr - probe_rsp_data;
3731 	ptr += (ptr[1] + 2);
3732 
3733 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3734 
3735 	/* insert SSID from bss_conf */
3736 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3737 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3738 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3739 	       bss_conf->ssid, bss_conf->ssid_len);
3740 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3741 
3742 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3743 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3744 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3745 
3746 	return wl1271_cmd_template_set(wl, wlvif->role_id,
3747 				       CMD_TEMPL_AP_PROBE_RESPONSE,
3748 				       probe_rsp_templ,
3749 				       templ_len, 0,
3750 				       rates);
3751 }
3752 
3753 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3754 				       struct ieee80211_vif *vif,
3755 				       struct ieee80211_bss_conf *bss_conf,
3756 				       u32 changed)
3757 {
3758 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3759 	int ret = 0;
3760 
3761 	if (changed & BSS_CHANGED_ERP_SLOT) {
3762 		if (bss_conf->use_short_slot)
3763 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3764 		else
3765 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3766 		if (ret < 0) {
3767 			wl1271_warning("Set slot time failed %d", ret);
3768 			goto out;
3769 		}
3770 	}
3771 
3772 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3773 		if (bss_conf->use_short_preamble)
3774 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3775 		else
3776 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3777 	}
3778 
3779 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3780 		if (bss_conf->use_cts_prot)
3781 			ret = wl1271_acx_cts_protect(wl, wlvif,
3782 						     CTSPROTECT_ENABLE);
3783 		else
3784 			ret = wl1271_acx_cts_protect(wl, wlvif,
3785 						     CTSPROTECT_DISABLE);
3786 		if (ret < 0) {
3787 			wl1271_warning("Set ctsprotect failed %d", ret);
3788 			goto out;
3789 		}
3790 	}
3791 
3792 out:
3793 	return ret;
3794 }
3795 
3796 static int wlcore_set_beacon_template(struct wl1271 *wl,
3797 				      struct ieee80211_vif *vif,
3798 				      bool is_ap)
3799 {
3800 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3801 	struct ieee80211_hdr *hdr;
3802 	u32 min_rate;
3803 	int ret;
3804 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3805 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3806 	u16 tmpl_id;
3807 
3808 	if (!beacon) {
3809 		ret = -EINVAL;
3810 		goto out;
3811 	}
3812 
3813 	wl1271_debug(DEBUG_MASTER, "beacon updated");
3814 
3815 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3816 	if (ret < 0) {
3817 		dev_kfree_skb(beacon);
3818 		goto out;
3819 	}
3820 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3821 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3822 		CMD_TEMPL_BEACON;
3823 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3824 				      beacon->data,
3825 				      beacon->len, 0,
3826 				      min_rate);
3827 	if (ret < 0) {
3828 		dev_kfree_skb(beacon);
3829 		goto out;
3830 	}
3831 
3832 	wlvif->wmm_enabled =
3833 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3834 					WLAN_OUI_TYPE_MICROSOFT_WMM,
3835 					beacon->data + ieoffset,
3836 					beacon->len - ieoffset);
3837 
3838 	/*
3839 	 * In case we already have a probe-resp beacon set explicitly
3840 	 * by usermode, don't use the beacon data.
3841 	 */
3842 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3843 		goto end_bcn;
3844 
3845 	/* remove TIM ie from probe response */
3846 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3847 
3848 	/*
3849 	 * remove p2p ie from probe response.
3850 	 * the fw reponds to probe requests that don't include
3851 	 * the p2p ie. probe requests with p2p ie will be passed,
3852 	 * and will be responded by the supplicant (the spec
3853 	 * forbids including the p2p ie when responding to probe
3854 	 * requests that didn't include it).
3855 	 */
3856 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3857 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3858 
3859 	hdr = (struct ieee80211_hdr *) beacon->data;
3860 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3861 					 IEEE80211_STYPE_PROBE_RESP);
3862 	if (is_ap)
3863 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3864 							   beacon->data,
3865 							   beacon->len,
3866 							   min_rate);
3867 	else
3868 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3869 					      CMD_TEMPL_PROBE_RESPONSE,
3870 					      beacon->data,
3871 					      beacon->len, 0,
3872 					      min_rate);
3873 end_bcn:
3874 	dev_kfree_skb(beacon);
3875 	if (ret < 0)
3876 		goto out;
3877 
3878 out:
3879 	return ret;
3880 }
3881 
3882 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3883 					  struct ieee80211_vif *vif,
3884 					  struct ieee80211_bss_conf *bss_conf,
3885 					  u32 changed)
3886 {
3887 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3888 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3889 	int ret = 0;
3890 
3891 	if (changed & BSS_CHANGED_BEACON_INT) {
3892 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3893 			bss_conf->beacon_int);
3894 
3895 		wlvif->beacon_int = bss_conf->beacon_int;
3896 	}
3897 
3898 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3899 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3900 
3901 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3902 	}
3903 
3904 	if (changed & BSS_CHANGED_BEACON) {
3905 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
3906 		if (ret < 0)
3907 			goto out;
3908 	}
3909 
3910 out:
3911 	if (ret != 0)
3912 		wl1271_error("beacon info change failed: %d", ret);
3913 	return ret;
3914 }
3915 
3916 /* AP mode changes */
3917 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3918 				       struct ieee80211_vif *vif,
3919 				       struct ieee80211_bss_conf *bss_conf,
3920 				       u32 changed)
3921 {
3922 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3923 	int ret = 0;
3924 
3925 	if (changed & BSS_CHANGED_BASIC_RATES) {
3926 		u32 rates = bss_conf->basic_rates;
3927 
3928 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3929 								 wlvif->band);
3930 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3931 							wlvif->basic_rate_set);
3932 
3933 		ret = wl1271_init_ap_rates(wl, wlvif);
3934 		if (ret < 0) {
3935 			wl1271_error("AP rate policy change failed %d", ret);
3936 			goto out;
3937 		}
3938 
3939 		ret = wl1271_ap_init_templates(wl, vif);
3940 		if (ret < 0)
3941 			goto out;
3942 
3943 		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3944 		if (ret < 0)
3945 			goto out;
3946 
3947 		ret = wlcore_set_beacon_template(wl, vif, true);
3948 		if (ret < 0)
3949 			goto out;
3950 	}
3951 
3952 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3953 	if (ret < 0)
3954 		goto out;
3955 
3956 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
3957 		if (bss_conf->enable_beacon) {
3958 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3959 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3960 				if (ret < 0)
3961 					goto out;
3962 
3963 				ret = wl1271_ap_init_hwenc(wl, wlvif);
3964 				if (ret < 0)
3965 					goto out;
3966 
3967 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3968 				wl1271_debug(DEBUG_AP, "started AP");
3969 			}
3970 		} else {
3971 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3972 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3973 				if (ret < 0)
3974 					goto out;
3975 
3976 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3977 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3978 					  &wlvif->flags);
3979 				wl1271_debug(DEBUG_AP, "stopped AP");
3980 			}
3981 		}
3982 	}
3983 
3984 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3985 	if (ret < 0)
3986 		goto out;
3987 
3988 	/* Handle HT information change */
3989 	if ((changed & BSS_CHANGED_HT) &&
3990 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
3991 		ret = wl1271_acx_set_ht_information(wl, wlvif,
3992 					bss_conf->ht_operation_mode);
3993 		if (ret < 0) {
3994 			wl1271_warning("Set ht information failed %d", ret);
3995 			goto out;
3996 		}
3997 	}
3998 
3999 out:
4000 	return;
4001 }
4002 
4003 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4004 			    struct ieee80211_bss_conf *bss_conf,
4005 			    u32 sta_rate_set)
4006 {
4007 	u32 rates;
4008 	int ret;
4009 
4010 	wl1271_debug(DEBUG_MAC80211,
4011 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4012 	     bss_conf->bssid, bss_conf->aid,
4013 	     bss_conf->beacon_int,
4014 	     bss_conf->basic_rates, sta_rate_set);
4015 
4016 	wlvif->beacon_int = bss_conf->beacon_int;
4017 	rates = bss_conf->basic_rates;
4018 	wlvif->basic_rate_set =
4019 		wl1271_tx_enabled_rates_get(wl, rates,
4020 					    wlvif->band);
4021 	wlvif->basic_rate =
4022 		wl1271_tx_min_rate_get(wl,
4023 				       wlvif->basic_rate_set);
4024 
4025 	if (sta_rate_set)
4026 		wlvif->rate_set =
4027 			wl1271_tx_enabled_rates_get(wl,
4028 						sta_rate_set,
4029 						wlvif->band);
4030 
4031 	/* we only support sched_scan while not connected */
4032 	if (wl->sched_vif == wlvif)
4033 		wl->ops->sched_scan_stop(wl, wlvif);
4034 
4035 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4036 	if (ret < 0)
4037 		return ret;
4038 
4039 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4040 	if (ret < 0)
4041 		return ret;
4042 
4043 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4044 	if (ret < 0)
4045 		return ret;
4046 
4047 	wlcore_set_ssid(wl, wlvif);
4048 
4049 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4050 
4051 	return 0;
4052 }
4053 
4054 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4055 {
4056 	int ret;
4057 
4058 	/* revert back to minimum rates for the current band */
4059 	wl1271_set_band_rate(wl, wlvif);
4060 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4061 
4062 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4063 	if (ret < 0)
4064 		return ret;
4065 
4066 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4067 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4068 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4069 		if (ret < 0)
4070 			return ret;
4071 	}
4072 
4073 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4074 	return 0;
4075 }
4076 /* STA/IBSS mode changes */
4077 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4078 					struct ieee80211_vif *vif,
4079 					struct ieee80211_bss_conf *bss_conf,
4080 					u32 changed)
4081 {
4082 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4083 	bool do_join = false;
4084 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4085 	bool ibss_joined = false;
4086 	u32 sta_rate_set = 0;
4087 	int ret;
4088 	struct ieee80211_sta *sta;
4089 	bool sta_exists = false;
4090 	struct ieee80211_sta_ht_cap sta_ht_cap;
4091 
4092 	if (is_ibss) {
4093 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4094 						     changed);
4095 		if (ret < 0)
4096 			goto out;
4097 	}
4098 
4099 	if (changed & BSS_CHANGED_IBSS) {
4100 		if (bss_conf->ibss_joined) {
4101 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4102 			ibss_joined = true;
4103 		} else {
4104 			wlcore_unset_assoc(wl, wlvif);
4105 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4106 		}
4107 	}
4108 
4109 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4110 		do_join = true;
4111 
4112 	/* Need to update the SSID (for filtering etc) */
4113 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4114 		do_join = true;
4115 
4116 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4117 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4118 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4119 
4120 		do_join = true;
4121 	}
4122 
4123 	if (changed & BSS_CHANGED_CQM) {
4124 		bool enable = false;
4125 		if (bss_conf->cqm_rssi_thold)
4126 			enable = true;
4127 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4128 						  bss_conf->cqm_rssi_thold,
4129 						  bss_conf->cqm_rssi_hyst);
4130 		if (ret < 0)
4131 			goto out;
4132 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4133 	}
4134 
4135 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4136 		       BSS_CHANGED_ASSOC)) {
4137 		rcu_read_lock();
4138 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4139 		if (sta) {
4140 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4141 
4142 			/* save the supp_rates of the ap */
4143 			sta_rate_set = sta->supp_rates[wlvif->band];
4144 			if (sta->ht_cap.ht_supported)
4145 				sta_rate_set |=
4146 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4147 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4148 			sta_ht_cap = sta->ht_cap;
4149 			sta_exists = true;
4150 		}
4151 
4152 		rcu_read_unlock();
4153 	}
4154 
4155 	if (changed & BSS_CHANGED_BSSID) {
4156 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4157 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4158 					       sta_rate_set);
4159 			if (ret < 0)
4160 				goto out;
4161 
4162 			/* Need to update the BSSID (for filtering etc) */
4163 			do_join = true;
4164 		} else {
4165 			ret = wlcore_clear_bssid(wl, wlvif);
4166 			if (ret < 0)
4167 				goto out;
4168 		}
4169 	}
4170 
4171 	if (changed & BSS_CHANGED_IBSS) {
4172 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4173 			     bss_conf->ibss_joined);
4174 
4175 		if (bss_conf->ibss_joined) {
4176 			u32 rates = bss_conf->basic_rates;
4177 			wlvif->basic_rate_set =
4178 				wl1271_tx_enabled_rates_get(wl, rates,
4179 							    wlvif->band);
4180 			wlvif->basic_rate =
4181 				wl1271_tx_min_rate_get(wl,
4182 						       wlvif->basic_rate_set);
4183 
4184 			/* by default, use 11b + OFDM rates */
4185 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4186 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4187 			if (ret < 0)
4188 				goto out;
4189 		}
4190 	}
4191 
4192 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4193 	if (ret < 0)
4194 		goto out;
4195 
4196 	if (do_join) {
4197 		ret = wlcore_join(wl, wlvif);
4198 		if (ret < 0) {
4199 			wl1271_warning("cmd join failed %d", ret);
4200 			goto out;
4201 		}
4202 	}
4203 
4204 	if (changed & BSS_CHANGED_ASSOC) {
4205 		if (bss_conf->assoc) {
4206 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4207 					       sta_rate_set);
4208 			if (ret < 0)
4209 				goto out;
4210 
4211 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4212 				wl12xx_set_authorized(wl, wlvif);
4213 		} else {
4214 			wlcore_unset_assoc(wl, wlvif);
4215 		}
4216 	}
4217 
4218 	if (changed & BSS_CHANGED_PS) {
4219 		if ((bss_conf->ps) &&
4220 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4221 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4222 			int ps_mode;
4223 			char *ps_mode_str;
4224 
4225 			if (wl->conf.conn.forced_ps) {
4226 				ps_mode = STATION_POWER_SAVE_MODE;
4227 				ps_mode_str = "forced";
4228 			} else {
4229 				ps_mode = STATION_AUTO_PS_MODE;
4230 				ps_mode_str = "auto";
4231 			}
4232 
4233 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4234 
4235 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4236 			if (ret < 0)
4237 				wl1271_warning("enter %s ps failed %d",
4238 					       ps_mode_str, ret);
4239 		} else if (!bss_conf->ps &&
4240 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4241 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4242 
4243 			ret = wl1271_ps_set_mode(wl, wlvif,
4244 						 STATION_ACTIVE_MODE);
4245 			if (ret < 0)
4246 				wl1271_warning("exit auto ps failed %d", ret);
4247 		}
4248 	}
4249 
4250 	/* Handle new association with HT. Do this after join. */
4251 	if (sta_exists) {
4252 		bool enabled =
4253 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4254 
4255 		ret = wlcore_hw_set_peer_cap(wl,
4256 					     &sta_ht_cap,
4257 					     enabled,
4258 					     wlvif->rate_set,
4259 					     wlvif->sta.hlid);
4260 		if (ret < 0) {
4261 			wl1271_warning("Set ht cap failed %d", ret);
4262 			goto out;
4263 
4264 		}
4265 
4266 		if (enabled) {
4267 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4268 						bss_conf->ht_operation_mode);
4269 			if (ret < 0) {
4270 				wl1271_warning("Set ht information failed %d",
4271 					       ret);
4272 				goto out;
4273 			}
4274 		}
4275 	}
4276 
4277 	/* Handle arp filtering. Done after join. */
4278 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4279 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4280 		__be32 addr = bss_conf->arp_addr_list[0];
4281 		wlvif->sta.qos = bss_conf->qos;
4282 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4283 
4284 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4285 			wlvif->ip_addr = addr;
4286 			/*
4287 			 * The template should have been configured only upon
4288 			 * association. however, it seems that the correct ip
4289 			 * isn't being set (when sending), so we have to
4290 			 * reconfigure the template upon every ip change.
4291 			 */
4292 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4293 			if (ret < 0) {
4294 				wl1271_warning("build arp rsp failed: %d", ret);
4295 				goto out;
4296 			}
4297 
4298 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4299 				(ACX_ARP_FILTER_ARP_FILTERING |
4300 				 ACX_ARP_FILTER_AUTO_ARP),
4301 				addr);
4302 		} else {
4303 			wlvif->ip_addr = 0;
4304 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4305 		}
4306 
4307 		if (ret < 0)
4308 			goto out;
4309 	}
4310 
4311 out:
4312 	return;
4313 }
4314 
4315 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4316 				       struct ieee80211_vif *vif,
4317 				       struct ieee80211_bss_conf *bss_conf,
4318 				       u32 changed)
4319 {
4320 	struct wl1271 *wl = hw->priv;
4321 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4322 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4323 	int ret;
4324 
4325 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4326 		     wlvif->role_id, (int)changed);
4327 
4328 	/*
4329 	 * make sure to cancel pending disconnections if our association
4330 	 * state changed
4331 	 */
4332 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4333 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4334 
4335 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4336 	    !bss_conf->enable_beacon)
4337 		wl1271_tx_flush(wl);
4338 
4339 	mutex_lock(&wl->mutex);
4340 
4341 	if (unlikely(wl->state != WLCORE_STATE_ON))
4342 		goto out;
4343 
4344 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4345 		goto out;
4346 
4347 	ret = wl1271_ps_elp_wakeup(wl);
4348 	if (ret < 0)
4349 		goto out;
4350 
4351 	if (is_ap)
4352 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4353 	else
4354 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4355 
4356 	wl1271_ps_elp_sleep(wl);
4357 
4358 out:
4359 	mutex_unlock(&wl->mutex);
4360 }
4361 
4362 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4363 				 struct ieee80211_chanctx_conf *ctx)
4364 {
4365 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4366 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4367 		     cfg80211_get_chandef_type(&ctx->def));
4368 	return 0;
4369 }
4370 
4371 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4372 				     struct ieee80211_chanctx_conf *ctx)
4373 {
4374 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4375 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4376 		     cfg80211_get_chandef_type(&ctx->def));
4377 }
4378 
4379 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4380 				     struct ieee80211_chanctx_conf *ctx,
4381 				     u32 changed)
4382 {
4383 	wl1271_debug(DEBUG_MAC80211,
4384 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4385 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4386 		     cfg80211_get_chandef_type(&ctx->def), changed);
4387 }
4388 
4389 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4390 					struct ieee80211_vif *vif,
4391 					struct ieee80211_chanctx_conf *ctx)
4392 {
4393 	struct wl1271 *wl = hw->priv;
4394 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4395 	int channel = ieee80211_frequency_to_channel(
4396 		ctx->def.chan->center_freq);
4397 
4398 	wl1271_debug(DEBUG_MAC80211,
4399 		     "mac80211 assign chanctx (role %d) %d (type %d)",
4400 		     wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4401 
4402 	mutex_lock(&wl->mutex);
4403 
4404 	wlvif->band = ctx->def.chan->band;
4405 	wlvif->channel = channel;
4406 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4407 
4408 	/* update default rates according to the band */
4409 	wl1271_set_band_rate(wl, wlvif);
4410 
4411 	mutex_unlock(&wl->mutex);
4412 
4413 	return 0;
4414 }
4415 
4416 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4417 					   struct ieee80211_vif *vif,
4418 					   struct ieee80211_chanctx_conf *ctx)
4419 {
4420 	struct wl1271 *wl = hw->priv;
4421 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4422 
4423 	wl1271_debug(DEBUG_MAC80211,
4424 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4425 		     wlvif->role_id,
4426 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4427 		     cfg80211_get_chandef_type(&ctx->def));
4428 
4429 	wl1271_tx_flush(wl);
4430 }
4431 
4432 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4433 			     struct ieee80211_vif *vif, u16 queue,
4434 			     const struct ieee80211_tx_queue_params *params)
4435 {
4436 	struct wl1271 *wl = hw->priv;
4437 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4438 	u8 ps_scheme;
4439 	int ret = 0;
4440 
4441 	mutex_lock(&wl->mutex);
4442 
4443 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4444 
4445 	if (params->uapsd)
4446 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4447 	else
4448 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4449 
4450 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4451 		goto out;
4452 
4453 	ret = wl1271_ps_elp_wakeup(wl);
4454 	if (ret < 0)
4455 		goto out;
4456 
4457 	/*
4458 	 * the txop is confed in units of 32us by the mac80211,
4459 	 * we need us
4460 	 */
4461 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4462 				params->cw_min, params->cw_max,
4463 				params->aifs, params->txop << 5);
4464 	if (ret < 0)
4465 		goto out_sleep;
4466 
4467 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4468 				 CONF_CHANNEL_TYPE_EDCF,
4469 				 wl1271_tx_get_queue(queue),
4470 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4471 				 0, 0);
4472 
4473 out_sleep:
4474 	wl1271_ps_elp_sleep(wl);
4475 
4476 out:
4477 	mutex_unlock(&wl->mutex);
4478 
4479 	return ret;
4480 }
4481 
4482 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4483 			     struct ieee80211_vif *vif)
4484 {
4485 
4486 	struct wl1271 *wl = hw->priv;
4487 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4488 	u64 mactime = ULLONG_MAX;
4489 	int ret;
4490 
4491 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4492 
4493 	mutex_lock(&wl->mutex);
4494 
4495 	if (unlikely(wl->state != WLCORE_STATE_ON))
4496 		goto out;
4497 
4498 	ret = wl1271_ps_elp_wakeup(wl);
4499 	if (ret < 0)
4500 		goto out;
4501 
4502 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4503 	if (ret < 0)
4504 		goto out_sleep;
4505 
4506 out_sleep:
4507 	wl1271_ps_elp_sleep(wl);
4508 
4509 out:
4510 	mutex_unlock(&wl->mutex);
4511 	return mactime;
4512 }
4513 
4514 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4515 				struct survey_info *survey)
4516 {
4517 	struct ieee80211_conf *conf = &hw->conf;
4518 
4519 	if (idx != 0)
4520 		return -ENOENT;
4521 
4522 	survey->channel = conf->chandef.chan;
4523 	survey->filled = 0;
4524 	return 0;
4525 }
4526 
4527 static int wl1271_allocate_sta(struct wl1271 *wl,
4528 			     struct wl12xx_vif *wlvif,
4529 			     struct ieee80211_sta *sta)
4530 {
4531 	struct wl1271_station *wl_sta;
4532 	int ret;
4533 
4534 
4535 	if (wl->active_sta_count >= AP_MAX_STATIONS) {
4536 		wl1271_warning("could not allocate HLID - too much stations");
4537 		return -EBUSY;
4538 	}
4539 
4540 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4541 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4542 	if (ret < 0) {
4543 		wl1271_warning("could not allocate HLID - too many links");
4544 		return -EBUSY;
4545 	}
4546 
4547 	/* use the previous security seq, if this is a recovery/resume */
4548 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4549 
4550 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4551 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4552 	wl->active_sta_count++;
4553 	return 0;
4554 }
4555 
4556 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4557 {
4558 	struct wl1271_station *wl_sta;
4559 	struct ieee80211_sta *sta;
4560 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4561 
4562 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4563 		return;
4564 
4565 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4566 	__clear_bit(hlid, &wl->ap_ps_map);
4567 	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4568 
4569 	/*
4570 	 * save the last used PN in the private part of iee80211_sta,
4571 	 * in case of recovery/suspend
4572 	 */
4573 	rcu_read_lock();
4574 	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
4575 	if (sta) {
4576 		wl_sta = (void *)sta->drv_priv;
4577 		wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
4578 
4579 		/*
4580 		 * increment the initial seq number on recovery to account for
4581 		 * transmitted packets that we haven't yet got in the FW status
4582 		 */
4583 		if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
4584 			wl_sta->total_freed_pkts +=
4585 					WL1271_TX_SQN_POST_RECOVERY_PADDING;
4586 	}
4587 	rcu_read_unlock();
4588 
4589 	wl12xx_free_link(wl, wlvif, &hlid);
4590 	wl->active_sta_count--;
4591 
4592 	/*
4593 	 * rearm the tx watchdog when the last STA is freed - give the FW a
4594 	 * chance to return STA-buffered packets before complaining.
4595 	 */
4596 	if (wl->active_sta_count == 0)
4597 		wl12xx_rearm_tx_watchdog_locked(wl);
4598 }
4599 
4600 static int wl12xx_sta_add(struct wl1271 *wl,
4601 			  struct wl12xx_vif *wlvif,
4602 			  struct ieee80211_sta *sta)
4603 {
4604 	struct wl1271_station *wl_sta;
4605 	int ret = 0;
4606 	u8 hlid;
4607 
4608 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4609 
4610 	ret = wl1271_allocate_sta(wl, wlvif, sta);
4611 	if (ret < 0)
4612 		return ret;
4613 
4614 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4615 	hlid = wl_sta->hlid;
4616 
4617 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4618 	if (ret < 0)
4619 		wl1271_free_sta(wl, wlvif, hlid);
4620 
4621 	return ret;
4622 }
4623 
4624 static int wl12xx_sta_remove(struct wl1271 *wl,
4625 			     struct wl12xx_vif *wlvif,
4626 			     struct ieee80211_sta *sta)
4627 {
4628 	struct wl1271_station *wl_sta;
4629 	int ret = 0, id;
4630 
4631 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4632 
4633 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4634 	id = wl_sta->hlid;
4635 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4636 		return -EINVAL;
4637 
4638 	ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4639 	if (ret < 0)
4640 		return ret;
4641 
4642 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4643 	return ret;
4644 }
4645 
4646 static void wlcore_roc_if_possible(struct wl1271 *wl,
4647 				   struct wl12xx_vif *wlvif)
4648 {
4649 	if (find_first_bit(wl->roc_map,
4650 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4651 		return;
4652 
4653 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4654 		return;
4655 
4656 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4657 }
4658 
4659 static void wlcore_update_inconn_sta(struct wl1271 *wl,
4660 				     struct wl12xx_vif *wlvif,
4661 				     struct wl1271_station *wl_sta,
4662 				     bool in_connection)
4663 {
4664 	if (in_connection) {
4665 		if (WARN_ON(wl_sta->in_connection))
4666 			return;
4667 		wl_sta->in_connection = true;
4668 		if (!wlvif->inconn_count++)
4669 			wlcore_roc_if_possible(wl, wlvif);
4670 	} else {
4671 		if (!wl_sta->in_connection)
4672 			return;
4673 
4674 		wl_sta->in_connection = false;
4675 		wlvif->inconn_count--;
4676 		if (WARN_ON(wlvif->inconn_count < 0))
4677 			return;
4678 
4679 		if (!wlvif->inconn_count)
4680 			if (test_bit(wlvif->role_id, wl->roc_map))
4681 				wl12xx_croc(wl, wlvif->role_id);
4682 	}
4683 }
4684 
4685 static int wl12xx_update_sta_state(struct wl1271 *wl,
4686 				   struct wl12xx_vif *wlvif,
4687 				   struct ieee80211_sta *sta,
4688 				   enum ieee80211_sta_state old_state,
4689 				   enum ieee80211_sta_state new_state)
4690 {
4691 	struct wl1271_station *wl_sta;
4692 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4693 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4694 	int ret;
4695 
4696 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4697 
4698 	/* Add station (AP mode) */
4699 	if (is_ap &&
4700 	    old_state == IEEE80211_STA_NOTEXIST &&
4701 	    new_state == IEEE80211_STA_NONE) {
4702 		ret = wl12xx_sta_add(wl, wlvif, sta);
4703 		if (ret)
4704 			return ret;
4705 
4706 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4707 	}
4708 
4709 	/* Remove station (AP mode) */
4710 	if (is_ap &&
4711 	    old_state == IEEE80211_STA_NONE &&
4712 	    new_state == IEEE80211_STA_NOTEXIST) {
4713 		/* must not fail */
4714 		wl12xx_sta_remove(wl, wlvif, sta);
4715 
4716 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4717 	}
4718 
4719 	/* Authorize station (AP mode) */
4720 	if (is_ap &&
4721 	    new_state == IEEE80211_STA_AUTHORIZED) {
4722 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4723 		if (ret < 0)
4724 			return ret;
4725 
4726 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4727 						     wl_sta->hlid);
4728 		if (ret)
4729 			return ret;
4730 
4731 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4732 	}
4733 
4734 	/* Authorize station */
4735 	if (is_sta &&
4736 	    new_state == IEEE80211_STA_AUTHORIZED) {
4737 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4738 		ret = wl12xx_set_authorized(wl, wlvif);
4739 		if (ret)
4740 			return ret;
4741 	}
4742 
4743 	if (is_sta &&
4744 	    old_state == IEEE80211_STA_AUTHORIZED &&
4745 	    new_state == IEEE80211_STA_ASSOC) {
4746 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4747 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4748 	}
4749 
4750 	/* clear ROCs on failure or authorization */
4751 	if (is_sta &&
4752 	    (new_state == IEEE80211_STA_AUTHORIZED ||
4753 	     new_state == IEEE80211_STA_NOTEXIST)) {
4754 		if (test_bit(wlvif->role_id, wl->roc_map))
4755 			wl12xx_croc(wl, wlvif->role_id);
4756 	}
4757 
4758 	if (is_sta &&
4759 	    old_state == IEEE80211_STA_NOTEXIST &&
4760 	    new_state == IEEE80211_STA_NONE) {
4761 		if (find_first_bit(wl->roc_map,
4762 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4763 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4764 			wl12xx_roc(wl, wlvif, wlvif->role_id,
4765 				   wlvif->band, wlvif->channel);
4766 		}
4767 	}
4768 	return 0;
4769 }
4770 
4771 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4772 			       struct ieee80211_vif *vif,
4773 			       struct ieee80211_sta *sta,
4774 			       enum ieee80211_sta_state old_state,
4775 			       enum ieee80211_sta_state new_state)
4776 {
4777 	struct wl1271 *wl = hw->priv;
4778 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4779 	int ret;
4780 
4781 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4782 		     sta->aid, old_state, new_state);
4783 
4784 	mutex_lock(&wl->mutex);
4785 
4786 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
4787 		ret = -EBUSY;
4788 		goto out;
4789 	}
4790 
4791 	ret = wl1271_ps_elp_wakeup(wl);
4792 	if (ret < 0)
4793 		goto out;
4794 
4795 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4796 
4797 	wl1271_ps_elp_sleep(wl);
4798 out:
4799 	mutex_unlock(&wl->mutex);
4800 	if (new_state < old_state)
4801 		return 0;
4802 	return ret;
4803 }
4804 
4805 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4806 				  struct ieee80211_vif *vif,
4807 				  enum ieee80211_ampdu_mlme_action action,
4808 				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4809 				  u8 buf_size)
4810 {
4811 	struct wl1271 *wl = hw->priv;
4812 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4813 	int ret;
4814 	u8 hlid, *ba_bitmap;
4815 
4816 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4817 		     tid);
4818 
4819 	/* sanity check - the fields in FW are only 8bits wide */
4820 	if (WARN_ON(tid > 0xFF))
4821 		return -ENOTSUPP;
4822 
4823 	mutex_lock(&wl->mutex);
4824 
4825 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
4826 		ret = -EAGAIN;
4827 		goto out;
4828 	}
4829 
4830 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4831 		hlid = wlvif->sta.hlid;
4832 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4833 		struct wl1271_station *wl_sta;
4834 
4835 		wl_sta = (struct wl1271_station *)sta->drv_priv;
4836 		hlid = wl_sta->hlid;
4837 	} else {
4838 		ret = -EINVAL;
4839 		goto out;
4840 	}
4841 
4842 	ba_bitmap = &wl->links[hlid].ba_bitmap;
4843 
4844 	ret = wl1271_ps_elp_wakeup(wl);
4845 	if (ret < 0)
4846 		goto out;
4847 
4848 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4849 		     tid, action);
4850 
4851 	switch (action) {
4852 	case IEEE80211_AMPDU_RX_START:
4853 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
4854 			ret = -ENOTSUPP;
4855 			break;
4856 		}
4857 
4858 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
4859 			ret = -EBUSY;
4860 			wl1271_error("exceeded max RX BA sessions");
4861 			break;
4862 		}
4863 
4864 		if (*ba_bitmap & BIT(tid)) {
4865 			ret = -EINVAL;
4866 			wl1271_error("cannot enable RX BA session on active "
4867 				     "tid: %d", tid);
4868 			break;
4869 		}
4870 
4871 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4872 							 hlid);
4873 		if (!ret) {
4874 			*ba_bitmap |= BIT(tid);
4875 			wl->ba_rx_session_count++;
4876 		}
4877 		break;
4878 
4879 	case IEEE80211_AMPDU_RX_STOP:
4880 		if (!(*ba_bitmap & BIT(tid))) {
4881 			/*
4882 			 * this happens on reconfig - so only output a debug
4883 			 * message for now, and don't fail the function.
4884 			 */
4885 			wl1271_debug(DEBUG_MAC80211,
4886 				     "no active RX BA session on tid: %d",
4887 				     tid);
4888 			ret = 0;
4889 			break;
4890 		}
4891 
4892 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4893 							 hlid);
4894 		if (!ret) {
4895 			*ba_bitmap &= ~BIT(tid);
4896 			wl->ba_rx_session_count--;
4897 		}
4898 		break;
4899 
4900 	/*
4901 	 * The BA initiator session management in FW independently.
4902 	 * Falling break here on purpose for all TX APDU commands.
4903 	 */
4904 	case IEEE80211_AMPDU_TX_START:
4905 	case IEEE80211_AMPDU_TX_STOP_CONT:
4906 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
4907 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
4908 	case IEEE80211_AMPDU_TX_OPERATIONAL:
4909 		ret = -EINVAL;
4910 		break;
4911 
4912 	default:
4913 		wl1271_error("Incorrect ampdu action id=%x\n", action);
4914 		ret = -EINVAL;
4915 	}
4916 
4917 	wl1271_ps_elp_sleep(wl);
4918 
4919 out:
4920 	mutex_unlock(&wl->mutex);
4921 
4922 	return ret;
4923 }
4924 
4925 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4926 				   struct ieee80211_vif *vif,
4927 				   const struct cfg80211_bitrate_mask *mask)
4928 {
4929 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4930 	struct wl1271 *wl = hw->priv;
4931 	int i, ret = 0;
4932 
4933 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4934 		mask->control[NL80211_BAND_2GHZ].legacy,
4935 		mask->control[NL80211_BAND_5GHZ].legacy);
4936 
4937 	mutex_lock(&wl->mutex);
4938 
4939 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
4940 		wlvif->bitrate_masks[i] =
4941 			wl1271_tx_enabled_rates_get(wl,
4942 						    mask->control[i].legacy,
4943 						    i);
4944 
4945 	if (unlikely(wl->state != WLCORE_STATE_ON))
4946 		goto out;
4947 
4948 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4949 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4950 
4951 		ret = wl1271_ps_elp_wakeup(wl);
4952 		if (ret < 0)
4953 			goto out;
4954 
4955 		wl1271_set_band_rate(wl, wlvif);
4956 		wlvif->basic_rate =
4957 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4958 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4959 
4960 		wl1271_ps_elp_sleep(wl);
4961 	}
4962 out:
4963 	mutex_unlock(&wl->mutex);
4964 
4965 	return ret;
4966 }
4967 
4968 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4969 				     struct ieee80211_channel_switch *ch_switch)
4970 {
4971 	struct wl1271 *wl = hw->priv;
4972 	struct wl12xx_vif *wlvif;
4973 	int ret;
4974 
4975 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4976 
4977 	wl1271_tx_flush(wl);
4978 
4979 	mutex_lock(&wl->mutex);
4980 
4981 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4982 		wl12xx_for_each_wlvif_sta(wl, wlvif) {
4983 			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4984 			ieee80211_chswitch_done(vif, false);
4985 		}
4986 		goto out;
4987 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4988 		goto out;
4989 	}
4990 
4991 	ret = wl1271_ps_elp_wakeup(wl);
4992 	if (ret < 0)
4993 		goto out;
4994 
4995 	/* TODO: change mac80211 to pass vif as param */
4996 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
4997 		unsigned long delay_usec;
4998 
4999 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5000 		if (ret)
5001 			goto out_sleep;
5002 
5003 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5004 
5005 		/* indicate failure 5 seconds after channel switch time */
5006 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5007 			     ch_switch->count;
5008 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5009 				usecs_to_jiffies(delay_usec) +
5010 				msecs_to_jiffies(5000));
5011 	}
5012 
5013 out_sleep:
5014 	wl1271_ps_elp_sleep(wl);
5015 
5016 out:
5017 	mutex_unlock(&wl->mutex);
5018 }
5019 
5020 static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
5021 {
5022 	struct wl1271 *wl = hw->priv;
5023 
5024 	wl1271_tx_flush(wl);
5025 }
5026 
5027 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5028 				       struct ieee80211_vif *vif,
5029 				       struct ieee80211_channel *chan,
5030 				       int duration,
5031 				       enum ieee80211_roc_type type)
5032 {
5033 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5034 	struct wl1271 *wl = hw->priv;
5035 	int channel, ret = 0;
5036 
5037 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5038 
5039 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5040 		     channel, wlvif->role_id);
5041 
5042 	mutex_lock(&wl->mutex);
5043 
5044 	if (unlikely(wl->state != WLCORE_STATE_ON))
5045 		goto out;
5046 
5047 	/* return EBUSY if we can't ROC right now */
5048 	if (WARN_ON(wl->roc_vif ||
5049 		    find_first_bit(wl->roc_map,
5050 				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5051 		ret = -EBUSY;
5052 		goto out;
5053 	}
5054 
5055 	ret = wl1271_ps_elp_wakeup(wl);
5056 	if (ret < 0)
5057 		goto out;
5058 
5059 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5060 	if (ret < 0)
5061 		goto out_sleep;
5062 
5063 	wl->roc_vif = vif;
5064 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5065 				     msecs_to_jiffies(duration));
5066 out_sleep:
5067 	wl1271_ps_elp_sleep(wl);
5068 out:
5069 	mutex_unlock(&wl->mutex);
5070 	return ret;
5071 }
5072 
5073 static int __wlcore_roc_completed(struct wl1271 *wl)
5074 {
5075 	struct wl12xx_vif *wlvif;
5076 	int ret;
5077 
5078 	/* already completed */
5079 	if (unlikely(!wl->roc_vif))
5080 		return 0;
5081 
5082 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5083 
5084 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5085 		return -EBUSY;
5086 
5087 	ret = wl12xx_stop_dev(wl, wlvif);
5088 	if (ret < 0)
5089 		return ret;
5090 
5091 	wl->roc_vif = NULL;
5092 
5093 	return 0;
5094 }
5095 
5096 static int wlcore_roc_completed(struct wl1271 *wl)
5097 {
5098 	int ret;
5099 
5100 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5101 
5102 	mutex_lock(&wl->mutex);
5103 
5104 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5105 		ret = -EBUSY;
5106 		goto out;
5107 	}
5108 
5109 	ret = wl1271_ps_elp_wakeup(wl);
5110 	if (ret < 0)
5111 		goto out;
5112 
5113 	ret = __wlcore_roc_completed(wl);
5114 
5115 	wl1271_ps_elp_sleep(wl);
5116 out:
5117 	mutex_unlock(&wl->mutex);
5118 
5119 	return ret;
5120 }
5121 
5122 static void wlcore_roc_complete_work(struct work_struct *work)
5123 {
5124 	struct delayed_work *dwork;
5125 	struct wl1271 *wl;
5126 	int ret;
5127 
5128 	dwork = container_of(work, struct delayed_work, work);
5129 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5130 
5131 	ret = wlcore_roc_completed(wl);
5132 	if (!ret)
5133 		ieee80211_remain_on_channel_expired(wl->hw);
5134 }
5135 
5136 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5137 {
5138 	struct wl1271 *wl = hw->priv;
5139 
5140 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5141 
5142 	/* TODO: per-vif */
5143 	wl1271_tx_flush(wl);
5144 
5145 	/*
5146 	 * we can't just flush_work here, because it might deadlock
5147 	 * (as we might get called from the same workqueue)
5148 	 */
5149 	cancel_delayed_work_sync(&wl->roc_complete_work);
5150 	wlcore_roc_completed(wl);
5151 
5152 	return 0;
5153 }
5154 
5155 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5156 				    struct ieee80211_vif *vif,
5157 				    struct ieee80211_sta *sta,
5158 				    u32 changed)
5159 {
5160 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5161 	struct wl1271 *wl = hw->priv;
5162 
5163 	wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5164 }
5165 
5166 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5167 			       struct ieee80211_vif *vif,
5168 			       struct ieee80211_sta *sta,
5169 			       s8 *rssi_dbm)
5170 {
5171 	struct wl1271 *wl = hw->priv;
5172 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5173 	int ret = 0;
5174 
5175 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5176 
5177 	mutex_lock(&wl->mutex);
5178 
5179 	if (unlikely(wl->state != WLCORE_STATE_ON))
5180 		goto out;
5181 
5182 	ret = wl1271_ps_elp_wakeup(wl);
5183 	if (ret < 0)
5184 		goto out_sleep;
5185 
5186 	ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5187 	if (ret < 0)
5188 		goto out_sleep;
5189 
5190 out_sleep:
5191 	wl1271_ps_elp_sleep(wl);
5192 
5193 out:
5194 	mutex_unlock(&wl->mutex);
5195 
5196 	return ret;
5197 }
5198 
5199 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5200 {
5201 	struct wl1271 *wl = hw->priv;
5202 	bool ret = false;
5203 
5204 	mutex_lock(&wl->mutex);
5205 
5206 	if (unlikely(wl->state != WLCORE_STATE_ON))
5207 		goto out;
5208 
5209 	/* packets are considered pending if in the TX queue or the FW */
5210 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5211 out:
5212 	mutex_unlock(&wl->mutex);
5213 
5214 	return ret;
5215 }
5216 
5217 /* can't be const, mac80211 writes to this */
5218 static struct ieee80211_rate wl1271_rates[] = {
5219 	{ .bitrate = 10,
5220 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5221 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5222 	{ .bitrate = 20,
5223 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5224 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5225 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5226 	{ .bitrate = 55,
5227 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5228 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5229 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5230 	{ .bitrate = 110,
5231 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5232 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5233 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5234 	{ .bitrate = 60,
5235 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5236 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5237 	{ .bitrate = 90,
5238 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5239 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5240 	{ .bitrate = 120,
5241 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5242 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5243 	{ .bitrate = 180,
5244 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5245 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5246 	{ .bitrate = 240,
5247 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5248 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5249 	{ .bitrate = 360,
5250 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5251 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5252 	{ .bitrate = 480,
5253 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5254 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5255 	{ .bitrate = 540,
5256 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5257 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5258 };
5259 
5260 /* can't be const, mac80211 writes to this */
5261 static struct ieee80211_channel wl1271_channels[] = {
5262 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5263 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5264 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5265 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5266 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5267 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5268 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5269 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5270 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5271 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5272 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5273 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5274 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5275 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5276 };
5277 
5278 /* can't be const, mac80211 writes to this */
5279 static struct ieee80211_supported_band wl1271_band_2ghz = {
5280 	.channels = wl1271_channels,
5281 	.n_channels = ARRAY_SIZE(wl1271_channels),
5282 	.bitrates = wl1271_rates,
5283 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5284 };
5285 
5286 /* 5 GHz data rates for WL1273 */
5287 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5288 	{ .bitrate = 60,
5289 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5290 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5291 	{ .bitrate = 90,
5292 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5293 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5294 	{ .bitrate = 120,
5295 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5296 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5297 	{ .bitrate = 180,
5298 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5299 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5300 	{ .bitrate = 240,
5301 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5302 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5303 	{ .bitrate = 360,
5304 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5305 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5306 	{ .bitrate = 480,
5307 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5308 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5309 	{ .bitrate = 540,
5310 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5311 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5312 };
5313 
5314 /* 5 GHz band channels for WL1273 */
5315 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5316 	{ .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
5317 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5318 	{ .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
5319 	{ .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
5320 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5321 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5322 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5323 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5324 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5325 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5326 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5327 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5328 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5329 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5330 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5331 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5332 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5333 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5334 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5335 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5336 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5337 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5338 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5339 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5340 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5341 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5342 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5343 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5344 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5345 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5346 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5347 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5348 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5349 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5350 };
5351 
5352 static struct ieee80211_supported_band wl1271_band_5ghz = {
5353 	.channels = wl1271_channels_5ghz,
5354 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5355 	.bitrates = wl1271_rates_5ghz,
5356 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5357 };
5358 
5359 static const struct ieee80211_ops wl1271_ops = {
5360 	.start = wl1271_op_start,
5361 	.stop = wlcore_op_stop,
5362 	.add_interface = wl1271_op_add_interface,
5363 	.remove_interface = wl1271_op_remove_interface,
5364 	.change_interface = wl12xx_op_change_interface,
5365 #ifdef CONFIG_PM
5366 	.suspend = wl1271_op_suspend,
5367 	.resume = wl1271_op_resume,
5368 #endif
5369 	.config = wl1271_op_config,
5370 	.prepare_multicast = wl1271_op_prepare_multicast,
5371 	.configure_filter = wl1271_op_configure_filter,
5372 	.tx = wl1271_op_tx,
5373 	.set_key = wlcore_op_set_key,
5374 	.hw_scan = wl1271_op_hw_scan,
5375 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5376 	.sched_scan_start = wl1271_op_sched_scan_start,
5377 	.sched_scan_stop = wl1271_op_sched_scan_stop,
5378 	.bss_info_changed = wl1271_op_bss_info_changed,
5379 	.set_frag_threshold = wl1271_op_set_frag_threshold,
5380 	.set_rts_threshold = wl1271_op_set_rts_threshold,
5381 	.conf_tx = wl1271_op_conf_tx,
5382 	.get_tsf = wl1271_op_get_tsf,
5383 	.get_survey = wl1271_op_get_survey,
5384 	.sta_state = wl12xx_op_sta_state,
5385 	.ampdu_action = wl1271_op_ampdu_action,
5386 	.tx_frames_pending = wl1271_tx_frames_pending,
5387 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5388 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
5389 	.channel_switch = wl12xx_op_channel_switch,
5390 	.flush = wlcore_op_flush,
5391 	.remain_on_channel = wlcore_op_remain_on_channel,
5392 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5393 	.add_chanctx = wlcore_op_add_chanctx,
5394 	.remove_chanctx = wlcore_op_remove_chanctx,
5395 	.change_chanctx = wlcore_op_change_chanctx,
5396 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5397 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5398 	.sta_rc_update = wlcore_op_sta_rc_update,
5399 	.get_rssi = wlcore_op_get_rssi,
5400 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5401 };
5402 
5403 
5404 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5405 {
5406 	u8 idx;
5407 
5408 	BUG_ON(band >= 2);
5409 
5410 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5411 		wl1271_error("Illegal RX rate from HW: %d", rate);
5412 		return 0;
5413 	}
5414 
5415 	idx = wl->band_rate_to_idx[band][rate];
5416 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5417 		wl1271_error("Unsupported RX rate from HW: %d", rate);
5418 		return 0;
5419 	}
5420 
5421 	return idx;
5422 }
5423 
5424 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5425 {
5426 	int i;
5427 
5428 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5429 		     oui, nic);
5430 
5431 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5432 		wl1271_warning("NIC part of the MAC address wraps around!");
5433 
5434 	for (i = 0; i < wl->num_mac_addr; i++) {
5435 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
5436 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
5437 		wl->addresses[i].addr[2] = (u8) oui;
5438 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
5439 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
5440 		wl->addresses[i].addr[5] = (u8) nic;
5441 		nic++;
5442 	}
5443 
5444 	/* we may be one address short at the most */
5445 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5446 
5447 	/*
5448 	 * turn on the LAA bit in the first address and use it as
5449 	 * the last address.
5450 	 */
5451 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5452 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5453 		memcpy(&wl->addresses[idx], &wl->addresses[0],
5454 		       sizeof(wl->addresses[0]));
5455 		/* LAA bit */
5456 		wl->addresses[idx].addr[2] |= BIT(1);
5457 	}
5458 
5459 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5460 	wl->hw->wiphy->addresses = wl->addresses;
5461 }
5462 
5463 static int wl12xx_get_hw_info(struct wl1271 *wl)
5464 {
5465 	int ret;
5466 
5467 	ret = wl12xx_set_power_on(wl);
5468 	if (ret < 0)
5469 		return ret;
5470 
5471 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5472 	if (ret < 0)
5473 		goto out;
5474 
5475 	wl->fuse_oui_addr = 0;
5476 	wl->fuse_nic_addr = 0;
5477 
5478 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5479 	if (ret < 0)
5480 		goto out;
5481 
5482 	if (wl->ops->get_mac)
5483 		ret = wl->ops->get_mac(wl);
5484 
5485 out:
5486 	wl1271_power_off(wl);
5487 	return ret;
5488 }
5489 
5490 static int wl1271_register_hw(struct wl1271 *wl)
5491 {
5492 	int ret;
5493 	u32 oui_addr = 0, nic_addr = 0;
5494 
5495 	if (wl->mac80211_registered)
5496 		return 0;
5497 
5498 	if (wl->nvs_len >= 12) {
5499 		/* NOTE: The wl->nvs->nvs element must be first, in
5500 		 * order to simplify the casting, we assume it is at
5501 		 * the beginning of the wl->nvs structure.
5502 		 */
5503 		u8 *nvs_ptr = (u8 *)wl->nvs;
5504 
5505 		oui_addr =
5506 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5507 		nic_addr =
5508 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5509 	}
5510 
5511 	/* if the MAC address is zeroed in the NVS derive from fuse */
5512 	if (oui_addr == 0 && nic_addr == 0) {
5513 		oui_addr = wl->fuse_oui_addr;
5514 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
5515 		nic_addr = wl->fuse_nic_addr + 1;
5516 	}
5517 
5518 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5519 
5520 	ret = ieee80211_register_hw(wl->hw);
5521 	if (ret < 0) {
5522 		wl1271_error("unable to register mac80211 hw: %d", ret);
5523 		goto out;
5524 	}
5525 
5526 	wl->mac80211_registered = true;
5527 
5528 	wl1271_debugfs_init(wl);
5529 
5530 	wl1271_notice("loaded");
5531 
5532 out:
5533 	return ret;
5534 }
5535 
5536 static void wl1271_unregister_hw(struct wl1271 *wl)
5537 {
5538 	if (wl->plt)
5539 		wl1271_plt_stop(wl);
5540 
5541 	ieee80211_unregister_hw(wl->hw);
5542 	wl->mac80211_registered = false;
5543 
5544 }
5545 
5546 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5547 	{
5548 		.max = 3,
5549 		.types = BIT(NL80211_IFTYPE_STATION),
5550 	},
5551 	{
5552 		.max = 1,
5553 		.types = BIT(NL80211_IFTYPE_AP) |
5554 			 BIT(NL80211_IFTYPE_P2P_GO) |
5555 			 BIT(NL80211_IFTYPE_P2P_CLIENT),
5556 	},
5557 };
5558 
5559 static struct ieee80211_iface_combination
5560 wlcore_iface_combinations[] = {
5561 	{
5562 	  .max_interfaces = 3,
5563 	  .limits = wlcore_iface_limits,
5564 	  .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5565 	},
5566 };
5567 
5568 static int wl1271_init_ieee80211(struct wl1271 *wl)
5569 {
5570 	int i;
5571 	static const u32 cipher_suites[] = {
5572 		WLAN_CIPHER_SUITE_WEP40,
5573 		WLAN_CIPHER_SUITE_WEP104,
5574 		WLAN_CIPHER_SUITE_TKIP,
5575 		WLAN_CIPHER_SUITE_CCMP,
5576 		WL1271_CIPHER_SUITE_GEM,
5577 	};
5578 
5579 	/* The tx descriptor buffer */
5580 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5581 
5582 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5583 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5584 
5585 	/* unit us */
5586 	/* FIXME: find a proper value */
5587 	wl->hw->channel_change_time = 10000;
5588 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5589 
5590 	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5591 		IEEE80211_HW_SUPPORTS_PS |
5592 		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5593 		IEEE80211_HW_SUPPORTS_UAPSD |
5594 		IEEE80211_HW_HAS_RATE_CONTROL |
5595 		IEEE80211_HW_CONNECTION_MONITOR |
5596 		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5597 		IEEE80211_HW_SPECTRUM_MGMT |
5598 		IEEE80211_HW_AP_LINK_PS |
5599 		IEEE80211_HW_AMPDU_AGGREGATION |
5600 		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5601 		IEEE80211_HW_QUEUE_CONTROL;
5602 
5603 	wl->hw->wiphy->cipher_suites = cipher_suites;
5604 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5605 
5606 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5607 		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5608 		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5609 	wl->hw->wiphy->max_scan_ssids = 1;
5610 	wl->hw->wiphy->max_sched_scan_ssids = 16;
5611 	wl->hw->wiphy->max_match_sets = 16;
5612 	/*
5613 	 * Maximum length of elements in scanning probe request templates
5614 	 * should be the maximum length possible for a template, without
5615 	 * the IEEE80211 header of the template
5616 	 */
5617 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5618 			sizeof(struct ieee80211_header);
5619 
5620 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5621 		sizeof(struct ieee80211_header);
5622 
5623 	wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5624 
5625 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5626 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5627 				WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
5628 
5629 	/* make sure all our channels fit in the scanned_ch bitmask */
5630 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5631 		     ARRAY_SIZE(wl1271_channels_5ghz) >
5632 		     WL1271_MAX_CHANNELS);
5633 	/*
5634 	* clear channel flags from the previous usage
5635 	* and restore max_power & max_antenna_gain values.
5636 	*/
5637 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5638 		wl1271_band_2ghz.channels[i].flags = 0;
5639 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5640 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5641 	}
5642 
5643 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5644 		wl1271_band_5ghz.channels[i].flags = 0;
5645 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5646 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5647 	}
5648 
5649 	/*
5650 	 * We keep local copies of the band structs because we need to
5651 	 * modify them on a per-device basis.
5652 	 */
5653 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5654 	       sizeof(wl1271_band_2ghz));
5655 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5656 	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
5657 	       sizeof(*wl->ht_cap));
5658 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5659 	       sizeof(wl1271_band_5ghz));
5660 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5661 	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
5662 	       sizeof(*wl->ht_cap));
5663 
5664 	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5665 		&wl->bands[IEEE80211_BAND_2GHZ];
5666 	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5667 		&wl->bands[IEEE80211_BAND_5GHZ];
5668 
5669 	/*
5670 	 * allow 4 queues per mac address we support +
5671 	 * 1 cab queue per mac + one global offchannel Tx queue
5672 	 */
5673 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5674 
5675 	/* the last queue is the offchannel queue */
5676 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5677 	wl->hw->max_rates = 1;
5678 
5679 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5680 
5681 	/* the FW answers probe-requests in AP-mode */
5682 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5683 	wl->hw->wiphy->probe_resp_offload =
5684 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5685 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5686 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5687 
5688 	/* allowed interface combinations */
5689 	wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5690 	wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5691 	wl->hw->wiphy->n_iface_combinations =
5692 		ARRAY_SIZE(wlcore_iface_combinations);
5693 
5694 	SET_IEEE80211_DEV(wl->hw, wl->dev);
5695 
5696 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
5697 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5698 
5699 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5700 
5701 	return 0;
5702 }
5703 
5704 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5705 				     u32 mbox_size)
5706 {
5707 	struct ieee80211_hw *hw;
5708 	struct wl1271 *wl;
5709 	int i, j, ret;
5710 	unsigned int order;
5711 
5712 	BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5713 
5714 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5715 	if (!hw) {
5716 		wl1271_error("could not alloc ieee80211_hw");
5717 		ret = -ENOMEM;
5718 		goto err_hw_alloc;
5719 	}
5720 
5721 	wl = hw->priv;
5722 	memset(wl, 0, sizeof(*wl));
5723 
5724 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
5725 	if (!wl->priv) {
5726 		wl1271_error("could not alloc wl priv");
5727 		ret = -ENOMEM;
5728 		goto err_priv_alloc;
5729 	}
5730 
5731 	INIT_LIST_HEAD(&wl->wlvif_list);
5732 
5733 	wl->hw = hw;
5734 
5735 	for (i = 0; i < NUM_TX_QUEUES; i++)
5736 		for (j = 0; j < WL12XX_MAX_LINKS; j++)
5737 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
5738 
5739 	skb_queue_head_init(&wl->deferred_rx_queue);
5740 	skb_queue_head_init(&wl->deferred_tx_queue);
5741 
5742 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5743 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5744 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
5745 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5746 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5747 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5748 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5749 
5750 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5751 	if (!wl->freezable_wq) {
5752 		ret = -ENOMEM;
5753 		goto err_hw;
5754 	}
5755 
5756 	wl->channel = 0;
5757 	wl->rx_counter = 0;
5758 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5759 	wl->band = IEEE80211_BAND_2GHZ;
5760 	wl->channel_type = NL80211_CHAN_NO_HT;
5761 	wl->flags = 0;
5762 	wl->sg_enabled = true;
5763 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
5764 	wl->recovery_count = 0;
5765 	wl->hw_pg_ver = -1;
5766 	wl->ap_ps_map = 0;
5767 	wl->ap_fw_ps_map = 0;
5768 	wl->quirks = 0;
5769 	wl->platform_quirks = 0;
5770 	wl->system_hlid = WL12XX_SYSTEM_HLID;
5771 	wl->active_sta_count = 0;
5772 	wl->active_link_count = 0;
5773 	wl->fwlog_size = 0;
5774 	init_waitqueue_head(&wl->fwlog_waitq);
5775 
5776 	/* The system link is always allocated */
5777 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5778 
5779 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5780 	for (i = 0; i < wl->num_tx_desc; i++)
5781 		wl->tx_frames[i] = NULL;
5782 
5783 	spin_lock_init(&wl->wl_lock);
5784 
5785 	wl->state = WLCORE_STATE_OFF;
5786 	wl->fw_type = WL12XX_FW_TYPE_NONE;
5787 	mutex_init(&wl->mutex);
5788 	mutex_init(&wl->flush_mutex);
5789 	init_completion(&wl->nvs_loading_complete);
5790 
5791 	order = get_order(aggr_buf_size);
5792 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5793 	if (!wl->aggr_buf) {
5794 		ret = -ENOMEM;
5795 		goto err_wq;
5796 	}
5797 	wl->aggr_buf_size = aggr_buf_size;
5798 
5799 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5800 	if (!wl->dummy_packet) {
5801 		ret = -ENOMEM;
5802 		goto err_aggr;
5803 	}
5804 
5805 	/* Allocate one page for the FW log */
5806 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5807 	if (!wl->fwlog) {
5808 		ret = -ENOMEM;
5809 		goto err_dummy_packet;
5810 	}
5811 
5812 	wl->mbox_size = mbox_size;
5813 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5814 	if (!wl->mbox) {
5815 		ret = -ENOMEM;
5816 		goto err_fwlog;
5817 	}
5818 
5819 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5820 	if (!wl->buffer_32) {
5821 		ret = -ENOMEM;
5822 		goto err_mbox;
5823 	}
5824 
5825 	return hw;
5826 
5827 err_mbox:
5828 	kfree(wl->mbox);
5829 
5830 err_fwlog:
5831 	free_page((unsigned long)wl->fwlog);
5832 
5833 err_dummy_packet:
5834 	dev_kfree_skb(wl->dummy_packet);
5835 
5836 err_aggr:
5837 	free_pages((unsigned long)wl->aggr_buf, order);
5838 
5839 err_wq:
5840 	destroy_workqueue(wl->freezable_wq);
5841 
5842 err_hw:
5843 	wl1271_debugfs_exit(wl);
5844 	kfree(wl->priv);
5845 
5846 err_priv_alloc:
5847 	ieee80211_free_hw(hw);
5848 
5849 err_hw_alloc:
5850 
5851 	return ERR_PTR(ret);
5852 }
5853 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5854 
5855 int wlcore_free_hw(struct wl1271 *wl)
5856 {
5857 	/* Unblock any fwlog readers */
5858 	mutex_lock(&wl->mutex);
5859 	wl->fwlog_size = -1;
5860 	wake_up_interruptible_all(&wl->fwlog_waitq);
5861 	mutex_unlock(&wl->mutex);
5862 
5863 	wlcore_sysfs_free(wl);
5864 
5865 	kfree(wl->buffer_32);
5866 	kfree(wl->mbox);
5867 	free_page((unsigned long)wl->fwlog);
5868 	dev_kfree_skb(wl->dummy_packet);
5869 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5870 
5871 	wl1271_debugfs_exit(wl);
5872 
5873 	vfree(wl->fw);
5874 	wl->fw = NULL;
5875 	wl->fw_type = WL12XX_FW_TYPE_NONE;
5876 	kfree(wl->nvs);
5877 	wl->nvs = NULL;
5878 
5879 	kfree(wl->fw_status_1);
5880 	kfree(wl->tx_res_if);
5881 	destroy_workqueue(wl->freezable_wq);
5882 
5883 	kfree(wl->priv);
5884 	ieee80211_free_hw(wl->hw);
5885 
5886 	return 0;
5887 }
5888 EXPORT_SYMBOL_GPL(wlcore_free_hw);
5889 
5890 #ifdef CONFIG_PM
5891 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
5892 	.flags = WIPHY_WOWLAN_ANY,
5893 	.n_patterns = WL1271_MAX_RX_FILTERS,
5894 	.pattern_min_len = 1,
5895 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
5896 };
5897 #endif
5898 
5899 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5900 {
5901 	struct wl1271 *wl = context;
5902 	struct platform_device *pdev = wl->pdev;
5903 	struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
5904 	struct wl12xx_platform_data *pdata = pdev_data->pdata;
5905 	unsigned long irqflags;
5906 	int ret;
5907 
5908 	if (fw) {
5909 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5910 		if (!wl->nvs) {
5911 			wl1271_error("Could not allocate nvs data");
5912 			goto out;
5913 		}
5914 		wl->nvs_len = fw->size;
5915 	} else {
5916 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
5917 			     WL12XX_NVS_NAME);
5918 		wl->nvs = NULL;
5919 		wl->nvs_len = 0;
5920 	}
5921 
5922 	ret = wl->ops->setup(wl);
5923 	if (ret < 0)
5924 		goto out_free_nvs;
5925 
5926 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5927 
5928 	/* adjust some runtime configuration parameters */
5929 	wlcore_adjust_conf(wl);
5930 
5931 	wl->irq = platform_get_irq(pdev, 0);
5932 	wl->platform_quirks = pdata->platform_quirks;
5933 	wl->if_ops = pdev_data->if_ops;
5934 
5935 	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5936 		irqflags = IRQF_TRIGGER_RISING;
5937 	else
5938 		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
5939 
5940 	ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
5941 				   irqflags, pdev->name, wl);
5942 	if (ret < 0) {
5943 		wl1271_error("request_irq() failed: %d", ret);
5944 		goto out_free_nvs;
5945 	}
5946 
5947 #ifdef CONFIG_PM
5948 	ret = enable_irq_wake(wl->irq);
5949 	if (!ret) {
5950 		wl->irq_wake_enabled = true;
5951 		device_init_wakeup(wl->dev, 1);
5952 		if (pdata->pwr_in_suspend)
5953 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
5954 	}
5955 #endif
5956 	disable_irq(wl->irq);
5957 
5958 	ret = wl12xx_get_hw_info(wl);
5959 	if (ret < 0) {
5960 		wl1271_error("couldn't get hw info");
5961 		goto out_irq;
5962 	}
5963 
5964 	ret = wl->ops->identify_chip(wl);
5965 	if (ret < 0)
5966 		goto out_irq;
5967 
5968 	ret = wl1271_init_ieee80211(wl);
5969 	if (ret)
5970 		goto out_irq;
5971 
5972 	ret = wl1271_register_hw(wl);
5973 	if (ret)
5974 		goto out_irq;
5975 
5976 	ret = wlcore_sysfs_init(wl);
5977 	if (ret)
5978 		goto out_unreg;
5979 
5980 	wl->initialized = true;
5981 	goto out;
5982 
5983 out_unreg:
5984 	wl1271_unregister_hw(wl);
5985 
5986 out_irq:
5987 	free_irq(wl->irq, wl);
5988 
5989 out_free_nvs:
5990 	kfree(wl->nvs);
5991 
5992 out:
5993 	release_firmware(fw);
5994 	complete_all(&wl->nvs_loading_complete);
5995 }
5996 
5997 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5998 {
5999 	int ret;
6000 
6001 	if (!wl->ops || !wl->ptable)
6002 		return -EINVAL;
6003 
6004 	wl->dev = &pdev->dev;
6005 	wl->pdev = pdev;
6006 	platform_set_drvdata(pdev, wl);
6007 
6008 	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6009 				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6010 				      wl, wlcore_nvs_cb);
6011 	if (ret < 0) {
6012 		wl1271_error("request_firmware_nowait failed: %d", ret);
6013 		complete_all(&wl->nvs_loading_complete);
6014 	}
6015 
6016 	return ret;
6017 }
6018 EXPORT_SYMBOL_GPL(wlcore_probe);
6019 
6020 int wlcore_remove(struct platform_device *pdev)
6021 {
6022 	struct wl1271 *wl = platform_get_drvdata(pdev);
6023 
6024 	wait_for_completion(&wl->nvs_loading_complete);
6025 	if (!wl->initialized)
6026 		return 0;
6027 
6028 	if (wl->irq_wake_enabled) {
6029 		device_init_wakeup(wl->dev, 0);
6030 		disable_irq_wake(wl->irq);
6031 	}
6032 	wl1271_unregister_hw(wl);
6033 	free_irq(wl->irq, wl);
6034 	wlcore_free_hw(wl);
6035 
6036 	return 0;
6037 }
6038 EXPORT_SYMBOL_GPL(wlcore_remove);
6039 
6040 u32 wl12xx_debug_level = DEBUG_NONE;
6041 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6042 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6043 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6044 
6045 module_param_named(fwlog, fwlog_param, charp, 0);
6046 MODULE_PARM_DESC(fwlog,
6047 		 "FW logger options: continuous, ondemand, dbgpins or disable");
6048 
6049 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6050 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6051 
6052 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6053 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6054 
6055 MODULE_LICENSE("GPL");
6056 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6057 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6058 MODULE_FIRMWARE(WL12XX_NVS_NAME);
6059