xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/main.c (revision 05bcf503)
1 
2 /*
3  * This file is part of wl1271
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  *
7  * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * version 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21  * 02110-1301 USA
22  *
23  */
24 
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37 
38 #include "wlcore.h"
39 #include "debug.h"
40 #include "wl12xx_80211.h"
41 #include "io.h"
42 #include "event.h"
43 #include "tx.h"
44 #include "rx.h"
45 #include "ps.h"
46 #include "init.h"
47 #include "debugfs.h"
48 #include "cmd.h"
49 #include "boot.h"
50 #include "testmode.h"
51 #include "scan.h"
52 #include "hw_ops.h"
53 
54 #define WL1271_BOOT_RETRIES 3
55 
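/*
 * Module-load-time knobs (presumably registered via module_param()
 * further down in this file): fwlog_param selects the firmware-log
 * mode applied in wlcore_adjust_conf(), while bug_on_recovery and
 * no_recovery change how wl1271_recovery_work() reacts to a firmware
 * hang.
 */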
58 static char *fwlog_param;
59 static bool bug_on_recovery;
60 static bool no_recovery;
61 
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 					 struct ieee80211_vif *vif,
64 					 bool reset_tx_queues);
65 static void wlcore_op_stop_locked(struct wl1271 *wl);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
67 
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 				 struct wl12xx_vif *wlvif)
70 {
71 	int ret;
72 
73 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
74 		return -EINVAL;
75 
76 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
77 		return 0;
78 
79 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
80 		return 0;
81 
82 	ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
83 	if (ret < 0)
84 		return ret;
85 
86 	wl12xx_croc(wl, wlvif->role_id);
87 
88 	wl1271_info("Association completed.");
89 	return 0;
90 }
91 
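/*
 * Regulatory notifier: whenever the regulatory domain changes, every
 * enabled 5 GHz channel flagged as radar is additionally marked
 * no-IBSS/passive-scan, so no active transmission is started on it.
 */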
92 static int wl1271_reg_notify(struct wiphy *wiphy,
93 			     struct regulatory_request *request)
94 {
95 	struct ieee80211_supported_band *band;
96 	struct ieee80211_channel *ch;
97 	int i;
98 
99 	band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 	for (i = 0; i < band->n_channels; i++) {
101 		ch = &band->channels[i];
102 		if (ch->flags & IEEE80211_CHAN_DISABLED)
103 			continue;
104 
105 		if (ch->flags & IEEE80211_CHAN_RADAR)
106 			ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 				     IEEE80211_CHAN_PASSIVE_SCAN;
108 
109 	}
110 
111 	return 0;
112 }
113 
114 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
115 				   bool enable)
116 {
117 	int ret = 0;
118 
119 	/* must be called with wl->mutex held */
120 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
121 	if (ret < 0)
122 		goto out;
123 
124 	if (enable)
125 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
126 	else
127 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
128 out:
129 	return ret;
130 }
131 
132 /*
133  * this function is called when the rx_streaming interval
134  * has been changed or rx_streaming should be disabled
135  */
136 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
137 {
138 	int ret = 0;
139 	int period = wl->conf.rx_streaming.interval;
140 
141 	/* don't reconfigure if rx_streaming is disabled */
142 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
143 		goto out;
144 
145 	/* reconfigure/disable according to new streaming_period */
146 	if (period &&
147 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
148 	    (wl->conf.rx_streaming.always ||
149 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
151 	else {
152 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
153 		/* don't cancel_work_sync since we might deadlock */
154 		del_timer_sync(&wlvif->rx_streaming_timer);
155 	}
156 out:
157 	return ret;
158 }
159 
160 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
161 {
162 	int ret;
163 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
164 						rx_streaming_enable_work);
165 	struct wl1271 *wl = wlvif->wl;
166 
167 	mutex_lock(&wl->mutex);
168 
169 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
170 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
171 	    (!wl->conf.rx_streaming.always &&
172 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
173 		goto out;
174 
175 	if (!wl->conf.rx_streaming.interval)
176 		goto out;
177 
178 	ret = wl1271_ps_elp_wakeup(wl);
179 	if (ret < 0)
180 		goto out;
181 
182 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
183 	if (ret < 0)
184 		goto out_sleep;
185 
186 	/* stop it after some time of inactivity */
187 	mod_timer(&wlvif->rx_streaming_timer,
188 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
189 
190 out_sleep:
191 	wl1271_ps_elp_sleep(wl);
192 out:
193 	mutex_unlock(&wl->mutex);
194 }
195 
196 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
197 {
198 	int ret;
199 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
200 						rx_streaming_disable_work);
201 	struct wl1271 *wl = wlvif->wl;
202 
203 	mutex_lock(&wl->mutex);
204 
205 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
206 		goto out;
207 
208 	ret = wl1271_ps_elp_wakeup(wl);
209 	if (ret < 0)
210 		goto out;
211 
212 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
213 	if (ret)
214 		goto out_sleep;
215 
216 out_sleep:
217 	wl1271_ps_elp_sleep(wl);
218 out:
219 	mutex_unlock(&wl->mutex);
220 }
221 
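/*
 * Timer callback - runs in atomic context, so it only queues
 * rx_streaming_disable_work instead of touching the chip directly.
 */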
222 static void wl1271_rx_streaming_timer(unsigned long data)
223 {
224 	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
225 	struct wl1271 *wl = wlvif->wl;
226 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
227 }
228 
229 /* wl->mutex must be taken */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
231 {
232 	/* if the watchdog is not armed, don't do anything */
233 	if (wl->tx_allocated_blocks == 0)
234 		return;
235 
236 	cancel_delayed_work(&wl->tx_watchdog_work);
237 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
238 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
239 }
240 
241 static void wl12xx_tx_watchdog_work(struct work_struct *work)
242 {
243 	struct delayed_work *dwork;
244 	struct wl1271 *wl;
245 
246 	dwork = container_of(work, struct delayed_work, work);
247 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
248 
249 	mutex_lock(&wl->mutex);
250 
251 	if (unlikely(wl->state != WLCORE_STATE_ON))
252 		goto out;
253 
254 	/* Tx went out in the meantime - everything is ok */
255 	if (unlikely(wl->tx_allocated_blocks == 0))
256 		goto out;
257 
258 	/*
259 	 * if a ROC is in progress, we might not have any Tx for a long
260 	 * time (e.g. pending Tx on the non-ROC channels)
261 	 */
262 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
263 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
264 			     wl->conf.tx.tx_watchdog_timeout);
265 		wl12xx_rearm_tx_watchdog_locked(wl);
266 		goto out;
267 	}
268 
269 	/*
270 	 * if a scan is in progress, we might not have any Tx for a long
271 	 * time
272 	 */
273 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
274 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
275 			     wl->conf.tx.tx_watchdog_timeout);
276 		wl12xx_rearm_tx_watchdog_locked(wl);
277 		goto out;
278 	}
279 
280 	/*
281 	 * The AP might cache a frame for a long time for a sleeping station,
282 	 * so rearm the timer if there's an AP interface with stations. If
283 	 * Tx is genuinely stuck, we will hopefully discover it when all
284 	 * stations are removed due to inactivity.
285 	 */
286 	if (wl->active_sta_count) {
287 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
288 			     "%d stations",
289 			      wl->conf.tx.tx_watchdog_timeout,
290 			      wl->active_sta_count);
291 		wl12xx_rearm_tx_watchdog_locked(wl);
292 		goto out;
293 	}
294 
295 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 		     wl->conf.tx.tx_watchdog_timeout);
297 	wl12xx_queue_recovery_work(wl);
298 
299 out:
300 	mutex_unlock(&wl->mutex);
301 }
302 
303 static void wlcore_adjust_conf(struct wl1271 *wl)
304 {
305 	/* Adjust settings according to optional module parameters */
306 	if (fwlog_param) {
307 		if (!strcmp(fwlog_param, "continuous")) {
308 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 		} else if (!strcmp(fwlog_param, "ondemand")) {
310 			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 		} else if (!strcmp(fwlog_param, "dbgpins")) {
312 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 		} else if (!strcmp(fwlog_param, "disable")) {
315 			wl->conf.fwlog.mem_blocks = 0;
316 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
317 		} else {
318 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
319 		}
320 	}
321 }
322 
323 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
324 					struct wl12xx_vif *wlvif,
325 					u8 hlid, u8 tx_pkts)
326 {
327 	bool fw_ps, single_sta;
328 
329 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
330 	single_sta = (wl->active_sta_count == 1);
331 
332 	/*
333 	 * Wake up from high-level PS if the STA is asleep with too few
334 	 * packets in FW or if the STA is awake.
335 	 */
336 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
337 		wl12xx_ps_link_end(wl, wlvif, hlid);
338 
339 	/*
340 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
341 	 * Make an exception if this is the only connected station. In this
342 	 * case FW-memory congestion is not a problem.
343 	 */
344 	else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
345 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
346 }
347 
348 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
349 					   struct wl12xx_vif *wlvif,
350 					   struct wl_fw_status_2 *status)
351 {
352 	struct wl1271_link *lnk;
353 	u32 cur_fw_ps_map;
354 	u8 hlid, cnt;
355 
356 	/* TODO: also use link_fast_bitmap here */
357 
358 	cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
359 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
360 		wl1271_debug(DEBUG_PSM,
361 			     "link ps prev 0x%x cur 0x%x changed 0x%x",
362 			     wl->ap_fw_ps_map, cur_fw_ps_map,
363 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
364 
365 		wl->ap_fw_ps_map = cur_fw_ps_map;
366 	}
367 
368 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
369 		lnk = &wl->links[hlid];
370 		cnt = status->counters.tx_lnk_free_pkts[hlid] -
371 			lnk->prev_freed_pkts;
372 
373 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
374 		lnk->allocated_pkts -= cnt;
375 
376 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 					    lnk->allocated_pkts);
378 	}
379 }
380 
381 static int wlcore_fw_status(struct wl1271 *wl,
382 			    struct wl_fw_status_1 *status_1,
383 			    struct wl_fw_status_2 *status_2)
384 {
385 	struct wl12xx_vif *wlvif;
386 	struct timespec ts;
387 	u32 old_tx_blk_count = wl->tx_blocks_available;
388 	int avail, freed_blocks;
389 	int i;
390 	size_t status_len;
391 	int ret;
392 
393 	status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
394 		sizeof(*status_2) + wl->fw_status_priv_len;
395 
396 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
397 				   status_len, false);
398 	if (ret < 0)
399 		return ret;
400 
401 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
402 		     "drv_rx_counter = %d, tx_results_counter = %d)",
403 		     status_1->intr,
404 		     status_1->fw_rx_counter,
405 		     status_1->drv_rx_counter,
406 		     status_1->tx_results_counter);
407 
408 	for (i = 0; i < NUM_TX_QUEUES; i++) {
409 		/* prevent wrap-around in freed-packets counter */
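		/*
		 * The per-queue released-packets counters are effectively
		 * 8 bits wide (hence the & 0xff): e.g. prev = 250, cur = 4
		 * yields (4 - 250) & 0xff = 10 newly freed packets.
		 */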
410 		wl->tx_allocated_pkts[i] -=
411 				(status_2->counters.tx_released_pkts[i] -
412 				wl->tx_pkts_freed[i]) & 0xff;
413 
414 		wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
415 	}
416 
417 	/* prevent wrap-around in total blocks counter */
418 	if (likely(wl->tx_blocks_freed <=
419 		   le32_to_cpu(status_2->total_released_blks)))
420 		freed_blocks = le32_to_cpu(status_2->total_released_blks) -
421 			       wl->tx_blocks_freed;
422 	else
423 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
424 			       le32_to_cpu(status_2->total_released_blks);
425 
426 	wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
427 
428 	wl->tx_allocated_blocks -= freed_blocks;
429 
430 	/*
431 	 * If the FW freed some blocks:
432 	 * If we still have allocated blocks - re-arm the timer, Tx is
433 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
434 	 */
435 	if (freed_blocks) {
436 		if (wl->tx_allocated_blocks)
437 			wl12xx_rearm_tx_watchdog_locked(wl);
438 		else
439 			cancel_delayed_work(&wl->tx_watchdog_work);
440 	}
441 
442 	avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
443 
444 	/*
445 	 * The FW might change the total number of TX memblocks before
446 	 * we get a notification about blocks being released. Thus, the
447 	 * available blocks calculation might yield a temporary result
448 	 * which is lower than the actual available blocks. Keeping in
449 	 * mind that only blocks that were allocated can be moved from
450 	 * TX to RX, tx_blocks_available should never decrease here.
451 	 */
452 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
453 				      avail);
454 
455 	/* if more blocks are available now, tx work can be scheduled */
456 	if (wl->tx_blocks_available > old_tx_blk_count)
457 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
458 
459 	/* for AP update num of allocated TX blocks per link and ps status */
460 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
461 		wl12xx_irq_update_links_status(wl, wlvif, status_2);
462 	}
463 
464 	/* update the host-chipset time offset */
465 	getnstimeofday(&ts);
466 	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
467 		(s64)le32_to_cpu(status_2->fw_localtime);
468 
469 	return 0;
470 }
471 
472 static void wl1271_flush_deferred_work(struct wl1271 *wl)
473 {
474 	struct sk_buff *skb;
475 
476 	/* Pass all received frames to the network stack */
477 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
478 		ieee80211_rx_ni(wl->hw, skb);
479 
480 	/* Return sent skbs to the network stack */
481 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
482 		ieee80211_tx_status_ni(wl->hw, skb);
483 }
484 
485 static void wl1271_netstack_work(struct work_struct *work)
486 {
487 	struct wl1271 *wl =
488 		container_of(work, struct wl1271, netstack_work);
489 
490 	do {
491 		wl1271_flush_deferred_work(wl);
492 	} while (skb_queue_len(&wl->deferred_rx_queue));
493 }
494 
495 #define WL1271_IRQ_MAX_LOOPS 256
496 
497 static int wlcore_irq_locked(struct wl1271 *wl)
498 {
499 	int ret = 0;
500 	u32 intr;
501 	int loopcount = WL1271_IRQ_MAX_LOOPS;
502 	bool done = false;
503 	unsigned int defer_count;
504 	unsigned long flags;
505 
506 	/*
507 	 * In case an edge-triggered interrupt must be used, we cannot iterate
508 	 * more than once without introducing race conditions with the hardirq.
509 	 */
510 	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
511 		loopcount = 1;
512 
513 	wl1271_debug(DEBUG_IRQ, "IRQ work");
514 
515 	if (unlikely(wl->state != WLCORE_STATE_ON))
516 		goto out;
517 
518 	ret = wl1271_ps_elp_wakeup(wl);
519 	if (ret < 0)
520 		goto out;
521 
522 	while (!done && loopcount--) {
523 		/*
524 		 * In order to avoid a race with the hardirq, clear the flag
525 		 * before acknowledging the chip. Since the mutex is held,
526 		 * wl1271_ps_elp_wakeup cannot be called concurrently.
527 		 */
528 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
529 		smp_mb__after_clear_bit();
530 
531 		ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
532 		if (ret < 0)
533 			goto out;
534 
535 		wlcore_hw_tx_immediate_compl(wl);
536 
537 		intr = le32_to_cpu(wl->fw_status_1->intr);
538 		intr &= WLCORE_ALL_INTR_MASK;
539 		if (!intr) {
540 			done = true;
541 			continue;
542 		}
543 
544 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
545 			wl1271_error("HW watchdog interrupt received! starting recovery.");
546 			wl->watchdog_recovery = true;
547 			ret = -EIO;
548 
549 			/* restarting the chip. ignore any other interrupt. */
550 			goto out;
551 		}
552 
553 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
554 			wl1271_error("SW watchdog interrupt received! "
555 				     "starting recovery.");
556 			wl->watchdog_recovery = true;
557 			ret = -EIO;
558 
559 			/* restarting the chip. ignore any other interrupt. */
560 			goto out;
561 		}
562 
563 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
564 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
565 
566 			ret = wlcore_rx(wl, wl->fw_status_1);
567 			if (ret < 0)
568 				goto out;
569 
570 			/* Check if any tx blocks were freed */
571 			spin_lock_irqsave(&wl->wl_lock, flags);
572 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
573 			    wl1271_tx_total_queue_count(wl) > 0) {
574 				spin_unlock_irqrestore(&wl->wl_lock, flags);
575 				/*
576 				 * In order to avoid starvation of the TX path,
577 				 * call the work function directly.
578 				 */
579 				ret = wlcore_tx_work_locked(wl);
580 				if (ret < 0)
581 					goto out;
582 			} else {
583 				spin_unlock_irqrestore(&wl->wl_lock, flags);
584 			}
585 
586 			/* check for tx results */
587 			ret = wlcore_hw_tx_delayed_compl(wl);
588 			if (ret < 0)
589 				goto out;
590 
591 			/* Make sure the deferred queues don't get too long */
592 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
593 				      skb_queue_len(&wl->deferred_rx_queue);
594 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
595 				wl1271_flush_deferred_work(wl);
596 		}
597 
598 		if (intr & WL1271_ACX_INTR_EVENT_A) {
599 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
600 			ret = wl1271_event_handle(wl, 0);
601 			if (ret < 0)
602 				goto out;
603 		}
604 
605 		if (intr & WL1271_ACX_INTR_EVENT_B) {
606 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
607 			ret = wl1271_event_handle(wl, 1);
608 			if (ret < 0)
609 				goto out;
610 		}
611 
612 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
613 			wl1271_debug(DEBUG_IRQ,
614 				     "WL1271_ACX_INTR_INIT_COMPLETE");
615 
616 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
617 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
618 	}
619 
620 	wl1271_ps_elp_sleep(wl);
621 
622 out:
623 	return ret;
624 }
625 
626 static irqreturn_t wlcore_irq(int irq, void *cookie)
627 {
628 	int ret;
629 	unsigned long flags;
630 	struct wl1271 *wl = cookie;
631 
632 	/* TX might be handled here, avoid redundant work */
633 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
634 	cancel_work_sync(&wl->tx_work);
635 
636 	mutex_lock(&wl->mutex);
637 
638 	ret = wlcore_irq_locked(wl);
639 	if (ret)
640 		wl12xx_queue_recovery_work(wl);
641 
642 	spin_lock_irqsave(&wl->wl_lock, flags);
643 	/* In case TX was not handled here, queue TX work */
644 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
645 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
646 	    wl1271_tx_total_queue_count(wl) > 0)
647 		ieee80211_queue_work(wl->hw, &wl->tx_work);
648 	spin_unlock_irqrestore(&wl->wl_lock, flags);
649 
650 	mutex_unlock(&wl->mutex);
651 
652 	return IRQ_HANDLED;
653 }
654 
655 struct vif_counter_data {
656 	u8 counter;
657 
658 	struct ieee80211_vif *cur_vif;
659 	bool cur_vif_running;
660 };
661 
662 static void wl12xx_vif_count_iter(void *data, u8 *mac,
663 				  struct ieee80211_vif *vif)
664 {
665 	struct vif_counter_data *counter = data;
666 
667 	counter->counter++;
668 	if (counter->cur_vif == vif)
669 		counter->cur_vif_running = true;
670 }
671 
672 /* caller must not hold wl->mutex, as it might deadlock */
673 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
674 			       struct ieee80211_vif *cur_vif,
675 			       struct vif_counter_data *data)
676 {
677 	memset(data, 0, sizeof(*data));
678 	data->cur_vif = cur_vif;
679 
680 	ieee80211_iterate_active_interfaces(hw,
681 					    wl12xx_vif_count_iter, data);
682 }
683 
684 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
685 {
686 	const struct firmware *fw;
687 	const char *fw_name;
688 	enum wl12xx_fw_type fw_type;
689 	int ret;
690 
691 	if (plt) {
692 		fw_type = WL12XX_FW_TYPE_PLT;
693 		fw_name = wl->plt_fw_name;
694 	} else {
695 		/*
696 		 * we can't call wl12xx_get_vif_count() here because
697 		 * wl->mutex is taken, so use the cached last_vif_count value
698 		 */
699 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
700 			fw_type = WL12XX_FW_TYPE_MULTI;
701 			fw_name = wl->mr_fw_name;
702 		} else {
703 			fw_type = WL12XX_FW_TYPE_NORMAL;
704 			fw_name = wl->sr_fw_name;
705 		}
706 	}
707 
708 	if (wl->fw_type == fw_type)
709 		return 0;
710 
711 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
712 
713 	ret = request_firmware(&fw, fw_name, wl->dev);
714 
715 	if (ret < 0) {
716 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
717 		return ret;
718 	}
719 
720 	if (fw->size % 4) {
721 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
722 			     fw->size);
723 		ret = -EILSEQ;
724 		goto out;
725 	}
726 
727 	vfree(wl->fw);
728 	wl->fw_type = WL12XX_FW_TYPE_NONE;
729 	wl->fw_len = fw->size;
730 	wl->fw = vmalloc(wl->fw_len);
731 
732 	if (!wl->fw) {
733 		wl1271_error("could not allocate memory for the firmware");
734 		ret = -ENOMEM;
735 		goto out;
736 	}
737 
738 	memcpy(wl->fw, fw->data, wl->fw_len);
739 	ret = 0;
740 	wl->fw_type = fw_type;
741 out:
742 	release_firmware(fw);
743 
744 	return ret;
745 }
746 
747 void wl12xx_queue_recovery_work(struct wl1271 *wl)
748 {
749 	WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
750 
751 	/* Avoid a recursive recovery */
752 	if (wl->state == WLCORE_STATE_ON) {
753 		wl->state = WLCORE_STATE_RESTARTING;
754 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
755 		wlcore_disable_interrupts_nosync(wl);
756 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
757 	}
758 }
759 
760 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
761 {
762 	size_t len = 0;
763 
764 	/* The FW log is a length-value list, find where the log ends */
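	/*
	 * Each entry starts with a one-byte length followed by that many
	 * payload bytes; a zero length byte, or an entry that would run
	 * past maxlen, marks the end of the log.
	 */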
765 	while (len < maxlen) {
766 		if (memblock[len] == 0)
767 			break;
768 		if (len + memblock[len] + 1 > maxlen)
769 			break;
770 		len += memblock[len] + 1;
771 	}
772 
773 	/* Make sure we have enough room */
774 	len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
775 
776 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
777 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
778 	wl->fwlog_size += len;
779 
780 	return len;
781 }
782 
783 #define WLCORE_FW_LOG_END 0x2000000
784 
785 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
786 {
787 	u32 addr;
788 	u32 offset;
789 	u32 end_of_log;
790 	u8 *block;
791 	int ret;
792 
793 	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
794 	    (wl->conf.fwlog.mem_blocks == 0))
795 		return;
796 
797 	wl1271_info("Reading FW panic log");
798 
799 	block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
800 	if (!block)
801 		return;
802 
803 	/*
804 	 * Make sure the chip is awake and the logger isn't active.
805 	 * Do not send a stop fwlog command if the fw is hung.
806 	 */
807 	if (wl1271_ps_elp_wakeup(wl))
808 		goto out;
809 	if (!wl->watchdog_recovery)
810 		wl12xx_cmd_stop_fwlog(wl);
811 
812 	/* Read the first memory block address */
813 	ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
814 	if (ret < 0)
815 		goto out;
816 
817 	addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
818 	if (!addr)
819 		goto out;
820 
821 	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
822 		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
823 		end_of_log = WLCORE_FW_LOG_END;
824 	} else {
825 		offset = sizeof(addr);
826 		end_of_log = addr;
827 	}
828 
829 	/* Traverse the memory blocks linked list */
830 	do {
831 		memset(block, 0, WL12XX_HW_BLOCK_SIZE);
832 		ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
833 					 false);
834 		if (ret < 0)
835 			goto out;
836 
837 		/*
838 		 * Memory blocks are linked to one another. The first 4 bytes
839 		 * of each memory block hold the hardware address of the next
840 		 * one. In on-demand mode the last memory block points back to
841 		 * the first one; in continuous mode its link equals 0x2000000.
842 		 */
843 		addr = le32_to_cpup((__le32 *)block);
844 		if (!wl12xx_copy_fwlog(wl, block + offset,
845 				       WL12XX_HW_BLOCK_SIZE - offset))
846 			break;
847 	} while (addr && (addr != end_of_log));
848 
849 	wake_up_interruptible(&wl->fwlog_waitq);
850 
851 out:
852 	kfree(block);
853 }
854 
855 static void wlcore_print_recovery(struct wl1271 *wl)
856 {
857 	u32 pc = 0;
858 	u32 hint_sts = 0;
859 	int ret;
860 
861 	wl1271_info("Hardware recovery in progress. FW ver: %s",
862 		    wl->chip.fw_ver_str);
863 
864 	/* change partitions momentarily so we can read the FW pc */
865 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
866 	if (ret < 0)
867 		return;
868 
869 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
870 	if (ret < 0)
871 		return;
872 
873 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
874 	if (ret < 0)
875 		return;
876 
877 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);
878 
879 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
880 }
881 
882 
883 static void wl1271_recovery_work(struct work_struct *work)
884 {
885 	struct wl1271 *wl =
886 		container_of(work, struct wl1271, recovery_work);
887 	struct wl12xx_vif *wlvif;
888 	struct ieee80211_vif *vif;
889 
890 	mutex_lock(&wl->mutex);
891 
892 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
893 		goto out_unlock;
894 
895 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
896 		wl12xx_read_fwlog_panic(wl);
897 		wlcore_print_recovery(wl);
898 	}
899 
900 	BUG_ON(bug_on_recovery &&
901 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
902 
903 	if (no_recovery) {
904 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
905 		goto out_unlock;
906 	}
907 
908 	/*
909 	 * Advance security sequence number to overcome potential progress
910 	 * in the firmware during recovery. This doesn't hurt if the network is
911 	 * not encrypted.
912 	 */
913 	wl12xx_for_each_wlvif(wl, wlvif) {
914 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
915 		    test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
916 			wlvif->tx_security_seq +=
917 				WL1271_TX_SQN_POST_RECOVERY_PADDING;
918 	}
919 
920 	/* Prevent spurious TX during FW restart */
921 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
922 
923 	if (wl->sched_scanning) {
924 		ieee80211_sched_scan_stopped(wl->hw);
925 		wl->sched_scanning = false;
926 	}
927 
928 	/* reboot the chipset */
929 	while (!list_empty(&wl->wlvif_list)) {
930 		wlvif = list_first_entry(&wl->wlvif_list,
931 				       struct wl12xx_vif, list);
932 		vif = wl12xx_wlvif_to_vif(wlvif);
933 		__wl1271_op_remove_interface(wl, vif, false);
934 	}
935 
936 	wlcore_op_stop_locked(wl);
937 
938 	ieee80211_restart_hw(wl->hw);
939 
940 	/*
941 	 * It's safe to enable TX now - the queues are stopped after a request
942 	 * to restart the HW.
943 	 */
944 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
945 
946 out_unlock:
947 	wl->watchdog_recovery = false;
948 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
949 	mutex_unlock(&wl->mutex);
950 }
951 
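/*
 * Ask the chip to leave ELP (its low-power state) by writing
 * ELPCTRL_WAKE_UP to the ELP control register.
 */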
952 static int wlcore_fw_wakeup(struct wl1271 *wl)
953 {
954 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
955 }
956 
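/*
 * One buffer holds the whole FW status: fw_status_1 (whose length depends
 * on num_rx_desc) comes first, immediately followed by fw_status_2 and any
 * chip-specific private area, so wlcore_fw_status() can read everything in
 * a single transaction.
 */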
957 static int wl1271_setup(struct wl1271 *wl)
958 {
959 	wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
960 				  sizeof(*wl->fw_status_2) +
961 				  wl->fw_status_priv_len, GFP_KERNEL);
962 	if (!wl->fw_status_1)
963 		return -ENOMEM;
964 
965 	wl->fw_status_2 = (struct wl_fw_status_2 *)
966 				(((u8 *) wl->fw_status_1) +
967 				WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
968 
969 	wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
970 	if (!wl->tx_res_if) {
971 		kfree(wl->fw_status_1);
972 		return -ENOMEM;
973 	}
974 
975 	return 0;
976 }
977 
978 static int wl12xx_set_power_on(struct wl1271 *wl)
979 {
980 	int ret;
981 
982 	msleep(WL1271_PRE_POWER_ON_SLEEP);
983 	ret = wl1271_power_on(wl);
984 	if (ret < 0)
985 		goto out;
986 	msleep(WL1271_POWER_ON_SLEEP);
987 	wl1271_io_reset(wl);
988 	wl1271_io_init(wl);
989 
990 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
991 	if (ret < 0)
992 		goto fail;
993 
994 	/* ELP module wake up */
995 	ret = wlcore_fw_wakeup(wl);
996 	if (ret < 0)
997 		goto fail;
998 
999 out:
1000 	return ret;
1001 
1002 fail:
1003 	wl1271_power_off(wl);
1004 	return ret;
1005 }
1006 
1007 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1008 {
1009 	int ret = 0;
1010 
1011 	ret = wl12xx_set_power_on(wl);
1012 	if (ret < 0)
1013 		goto out;
1014 
1015 	/*
1016 	 * For wl127x based devices we could use the default block
1017 	 * size (512 bytes), but due to a bug in the sdio driver, we
1018 	 * need to set it explicitly after the chip is powered on.  To
1019 	 * simplify the code and since the performance impact is
1020 	 * negligible, we use the same block size for all different
1021 	 * chip types.
1022 	 *
1023 	 * Check if the bus supports blocksize alignment and, if it
1024 	 * doesn't, make sure we don't have the quirk.
1025 	 */
1026 	if (!wl1271_set_block_size(wl))
1027 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1028 
1029 	/* TODO: make sure the lower driver has set things up correctly */
1030 
1031 	ret = wl1271_setup(wl);
1032 	if (ret < 0)
1033 		goto out;
1034 
1035 	ret = wl12xx_fetch_firmware(wl, plt);
1036 	if (ret < 0)
1037 		goto out;
1038 
1039 out:
1040 	return ret;
1041 }
1042 
1043 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1044 {
1045 	int retries = WL1271_BOOT_RETRIES;
1046 	struct wiphy *wiphy = wl->hw->wiphy;
1047 
1048 	static const char* const PLT_MODE[] = {
1049 		"PLT_OFF",
1050 		"PLT_ON",
1051 		"PLT_FEM_DETECT"
1052 	};
1053 
1054 	int ret;
1055 
1056 	mutex_lock(&wl->mutex);
1057 
1058 	wl1271_notice("power up");
1059 
1060 	if (wl->state != WLCORE_STATE_OFF) {
1061 		wl1271_error("cannot go into PLT state because not "
1062 			     "in off state: %d", wl->state);
1063 		ret = -EBUSY;
1064 		goto out;
1065 	}
1066 
1067 	/* Indicate to lower levels that we are now in PLT mode */
1068 	wl->plt = true;
1069 	wl->plt_mode = plt_mode;
1070 
1071 	while (retries) {
1072 		retries--;
1073 		ret = wl12xx_chip_wakeup(wl, true);
1074 		if (ret < 0)
1075 			goto power_off;
1076 
1077 		ret = wl->ops->plt_init(wl);
1078 		if (ret < 0)
1079 			goto power_off;
1080 
1081 		wl->state = WLCORE_STATE_ON;
1082 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1083 			      PLT_MODE[plt_mode],
1084 			      wl->chip.fw_ver_str);
1085 
1086 		/* update hw/fw version info in wiphy struct */
1087 		wiphy->hw_version = wl->chip.id;
1088 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1089 			sizeof(wiphy->fw_version));
1090 
1091 		goto out;
1092 
1093 power_off:
1094 		wl1271_power_off(wl);
1095 	}
1096 
1097 	wl->plt = false;
1098 	wl->plt_mode = PLT_OFF;
1099 
1100 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1101 		     WL1271_BOOT_RETRIES);
1102 out:
1103 	mutex_unlock(&wl->mutex);
1104 
1105 	return ret;
1106 }
1107 
1108 int wl1271_plt_stop(struct wl1271 *wl)
1109 {
1110 	int ret = 0;
1111 
1112 	wl1271_notice("power down");
1113 
1114 	/*
1115 	 * Interrupts must be disabled before setting the state to OFF.
1116 	 * Otherwise, the interrupt handler might be called and exit without
1117 	 * reading the interrupt status.
1118 	 */
1119 	wlcore_disable_interrupts(wl);
1120 	mutex_lock(&wl->mutex);
1121 	if (!wl->plt) {
1122 		mutex_unlock(&wl->mutex);
1123 
1124 		/*
1125 		 * This will not necessarily enable interrupts as interrupts
1126 		 * may have been disabled when op_stop was called. It will,
1127 		 * however, balance the above call to disable_interrupts().
1128 		 */
1129 		wlcore_enable_interrupts(wl);
1130 
1131 		wl1271_error("cannot power down because not in PLT "
1132 			     "state: %d", wl->state);
1133 		ret = -EBUSY;
1134 		goto out;
1135 	}
1136 
1137 	mutex_unlock(&wl->mutex);
1138 
1139 	wl1271_flush_deferred_work(wl);
1140 	cancel_work_sync(&wl->netstack_work);
1141 	cancel_work_sync(&wl->recovery_work);
1142 	cancel_delayed_work_sync(&wl->elp_work);
1143 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1144 	cancel_delayed_work_sync(&wl->connection_loss_work);
1145 
1146 	mutex_lock(&wl->mutex);
1147 	wl1271_power_off(wl);
1148 	wl->flags = 0;
1149 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1150 	wl->state = WLCORE_STATE_OFF;
1151 	wl->plt = false;
1152 	wl->plt_mode = PLT_OFF;
1153 	wl->rx_counter = 0;
1154 	mutex_unlock(&wl->mutex);
1155 
1156 out:
1157 	return ret;
1158 }
1159 
1160 static void wl1271_op_tx(struct ieee80211_hw *hw,
1161 			 struct ieee80211_tx_control *control,
1162 			 struct sk_buff *skb)
1163 {
1164 	struct wl1271 *wl = hw->priv;
1165 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1166 	struct ieee80211_vif *vif = info->control.vif;
1167 	struct wl12xx_vif *wlvif = NULL;
1168 	unsigned long flags;
1169 	int q, mapping;
1170 	u8 hlid;
1171 
1172 	if (vif)
1173 		wlvif = wl12xx_vif_to_data(vif);
1174 
1175 	mapping = skb_get_queue_mapping(skb);
1176 	q = wl1271_tx_get_queue(mapping);
1177 
1178 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1179 
1180 	spin_lock_irqsave(&wl->wl_lock, flags);
1181 
1182 	/*
1183 	 * drop the packet if the link is invalid or the queue is stopped
1184 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1185 	 * allow these packets through.
1186 	 */
1187 	if (hlid == WL12XX_INVALID_LINK_ID ||
1188 	    (wlvif && !test_bit(hlid, wlvif->links_map)) ||
1189 	     (wlcore_is_queue_stopped(wl, q) &&
1190 	      !wlcore_is_queue_stopped_by_reason(wl, q,
1191 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1192 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1193 		ieee80211_free_txskb(hw, skb);
1194 		goto out;
1195 	}
1196 
1197 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1198 		     hlid, q, skb->len);
1199 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1200 
1201 	wl->tx_queue_count[q]++;
1202 
1203 	/*
1204 	 * The workqueue is slow to process the tx_queue, so we need to stop
1205 	 * the queue here; otherwise it will get too long.
1206 	 */
1207 	if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1208 	    !wlcore_is_queue_stopped_by_reason(wl, q,
1209 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1210 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1211 		wlcore_stop_queue_locked(wl, q,
1212 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1213 	}
1214 
1215 	/*
1216 	 * The chip specific setup must run before the first TX packet -
1217 	 * before that, the tx_work will not be initialized!
1218 	 */
1219 
1220 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1221 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1222 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1223 
1224 out:
1225 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1226 }
1227 
1228 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1229 {
1230 	unsigned long flags;
1231 	int q;
1232 
1233 	/* no need to queue a new dummy packet if one is already pending */
1234 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1235 		return 0;
1236 
1237 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1238 
1239 	spin_lock_irqsave(&wl->wl_lock, flags);
1240 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1241 	wl->tx_queue_count[q]++;
1242 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1243 
1244 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1245 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1246 		return wlcore_tx_work_locked(wl);
1247 
1248 	/*
1249 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1250 	 * interrupt handler function
1251 	 */
1252 	return 0;
1253 }
1254 
1255 /*
1256  * The size of the dummy packet should be at least 1400 bytes. However, in
1257  * order to minimize the number of bus transactions, aligning it to 512-byte
1258  * boundaries could be beneficial, performance-wise.
1259  */
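/* With the numbers above, ALIGN(1400, 512) evaluates to 1536 bytes. */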
1260 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1261 
1262 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1263 {
1264 	struct sk_buff *skb;
1265 	struct ieee80211_hdr_3addr *hdr;
1266 	unsigned int dummy_packet_size;
1267 
1268 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1269 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1270 
1271 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1272 	if (!skb) {
1273 		wl1271_warning("Failed to allocate a dummy packet skb");
1274 		return NULL;
1275 	}
1276 
1277 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1278 
1279 	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1280 	memset(hdr, 0, sizeof(*hdr));
1281 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1282 					 IEEE80211_STYPE_NULLFUNC |
1283 					 IEEE80211_FCTL_TODS);
1284 
1285 	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1286 
1287 	/* Dummy packets require the TID to be management */
1288 	skb->priority = WL1271_TID_MGMT;
1289 
1290 	/* Initialize all fields that might be used */
1291 	skb_set_queue_mapping(skb, 0);
1292 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1293 
1294 	return skb;
1295 }
1296 
1297 
1298 #ifdef CONFIG_PM
1299 static int
1300 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1301 {
1302 	int num_fields = 0, in_field = 0, fields_size = 0;
1303 	int i, pattern_len = 0;
1304 
1305 	if (!p->mask) {
1306 		wl1271_warning("No mask in WoWLAN pattern");
1307 		return -EINVAL;
1308 	}
1309 
1310 	/*
1311 	 * The pattern is broken up into segments of bytes at different offsets
1312 	 * that need to be checked by the FW filter. Each segment is called
1313 	 * a field in the FW API. We verify that the total number of fields
1314 	 * required for this pattern won't exceed the FW limit (8), and that
1315 	 * the total fields buffer won't exceed the FW limit either.
1316 	 * Note that a pattern which crosses the Ethernet/IP header
1317 	 * boundary requires an additional field.
1318 	 */
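	/*
	 * For example, a masked run that crosses the Ethernet/IP boundary
	 * (e.g. offsets 12..17, assuming a 14-byte Ethernet header) is
	 * split into two fields, each adding RX_FILTER_FIELD_OVERHEAD
	 * bytes on top of its pattern bytes.
	 */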
1319 	for (i = 0; i < p->pattern_len; i++) {
1320 		if (test_bit(i, (unsigned long *)p->mask)) {
1321 			if (!in_field) {
1322 				in_field = 1;
1323 				pattern_len = 1;
1324 			} else {
1325 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1326 					num_fields++;
1327 					fields_size += pattern_len +
1328 						RX_FILTER_FIELD_OVERHEAD;
1329 					pattern_len = 1;
1330 				} else
1331 					pattern_len++;
1332 			}
1333 		} else {
1334 			if (in_field) {
1335 				in_field = 0;
1336 				fields_size += pattern_len +
1337 					RX_FILTER_FIELD_OVERHEAD;
1338 				num_fields++;
1339 			}
1340 		}
1341 	}
1342 
1343 	if (in_field) {
1344 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1345 		num_fields++;
1346 	}
1347 
1348 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1349 		wl1271_warning("RX Filter too complex. Too many segments");
1350 		return -EINVAL;
1351 	}
1352 
1353 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1354 		wl1271_warning("RX filter pattern is too big");
1355 		return -E2BIG;
1356 	}
1357 
1358 	return 0;
1359 }
1360 
1361 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1362 {
1363 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1364 }
1365 
1366 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1367 {
1368 	int i;
1369 
1370 	if (filter == NULL)
1371 		return;
1372 
1373 	for (i = 0; i < filter->num_fields; i++)
1374 		kfree(filter->fields[i].pattern);
1375 
1376 	kfree(filter);
1377 }
1378 
1379 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1380 				 u16 offset, u8 flags,
1381 				 u8 *pattern, u8 len)
1382 {
1383 	struct wl12xx_rx_filter_field *field;
1384 
1385 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1386 		wl1271_warning("Max fields per RX filter. can't alloc another");
1387 		return -EINVAL;
1388 	}
1389 
1390 	field = &filter->fields[filter->num_fields];
1391 
1392 	field->pattern = kzalloc(len, GFP_KERNEL);
1393 	if (!field->pattern) {
1394 		wl1271_warning("Failed to allocate RX filter pattern");
1395 		return -ENOMEM;
1396 	}
1397 
1398 	filter->num_fields++;
1399 
1400 	field->offset = cpu_to_le16(offset);
1401 	field->flags = flags;
1402 	field->len = len;
1403 	memcpy(field->pattern, pattern, len);
1404 
1405 	return 0;
1406 }
1407 
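/*
 * Size of the flattened representation: in the flattened form each field
 * carries its pattern bytes inline in place of the pattern pointer, hence
 * the sizeof(u8 *) subtraction (see wl1271_rx_filter_flatten_fields()).
 */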
1408 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1409 {
1410 	int i, fields_size = 0;
1411 
1412 	for (i = 0; i < filter->num_fields; i++)
1413 		fields_size += filter->fields[i].len +
1414 			sizeof(struct wl12xx_rx_filter_field) -
1415 			sizeof(u8 *);
1416 
1417 	return fields_size;
1418 }
1419 
1420 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1421 				    u8 *buf)
1422 {
1423 	int i;
1424 	struct wl12xx_rx_filter_field *field;
1425 
1426 	for (i = 0; i < filter->num_fields; i++) {
1427 		field = (struct wl12xx_rx_filter_field *)buf;
1428 
1429 		field->offset = filter->fields[i].offset;
1430 		field->flags = filter->fields[i].flags;
1431 		field->len = filter->fields[i].len;
1432 
1433 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1434 		buf += sizeof(struct wl12xx_rx_filter_field) -
1435 			sizeof(u8 *) + field->len;
1436 	}
1437 }
1438 
1439 /*
1440  * Allocates an RX filter, returned through f,
1441  * which needs to be freed using wl1271_rx_filter_free()
1442  */
1443 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1444 	struct cfg80211_wowlan_trig_pkt_pattern *p,
1445 	struct wl12xx_rx_filter **f)
1446 {
1447 	int i, j, ret = 0;
1448 	struct wl12xx_rx_filter *filter;
1449 	u16 offset;
1450 	u8 flags, len;
1451 
1452 	filter = wl1271_rx_filter_alloc();
1453 	if (!filter) {
1454 		wl1271_warning("Failed to alloc rx filter");
1455 		ret = -ENOMEM;
1456 		goto err;
1457 	}
1458 
1459 	i = 0;
1460 	while (i < p->pattern_len) {
1461 		if (!test_bit(i, (unsigned long *)p->mask)) {
1462 			i++;
1463 			continue;
1464 		}
1465 
1466 		for (j = i; j < p->pattern_len; j++) {
1467 			if (!test_bit(j, (unsigned long *)p->mask))
1468 				break;
1469 
1470 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1471 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1472 				break;
1473 		}
1474 
1475 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1476 			offset = i;
1477 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1478 		} else {
1479 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1480 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1481 		}
1482 
1483 		len = j - i;
1484 
1485 		ret = wl1271_rx_filter_alloc_field(filter,
1486 						   offset,
1487 						   flags,
1488 						   &p->pattern[i], len);
1489 		if (ret)
1490 			goto err;
1491 
1492 		i = j;
1493 	}
1494 
1495 	filter->action = FILTER_SIGNAL;
1496 
1497 	*f = filter;
1498 	return 0;
1499 
1500 err:
1501 	wl1271_rx_filter_free(filter);
1502 	*f = NULL;
1503 
1504 	return ret;
1505 }
1506 
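/*
 * Program the FW RX filters for WoWLAN. With no usable patterns, fall back
 * to the default "signal everything" behaviour and clear any installed
 * filters; otherwise validate every pattern, translate it into an RX
 * filter, enable it, and switch the default action to FILTER_DROP,
 * presumably so that only matching frames wake the host.
 */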
1507 static int wl1271_configure_wowlan(struct wl1271 *wl,
1508 				   struct cfg80211_wowlan *wow)
1509 {
1510 	int i, ret;
1511 
1512 	if (!wow || wow->any || !wow->n_patterns) {
1513 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1514 							  FILTER_SIGNAL);
1515 		if (ret)
1516 			goto out;
1517 
1518 		ret = wl1271_rx_filter_clear_all(wl);
1519 		if (ret)
1520 			goto out;
1521 
1522 		return 0;
1523 	}
1524 
1525 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1526 		return -EINVAL;
1527 
1528 	/* Validate all incoming patterns before clearing current FW state */
1529 	for (i = 0; i < wow->n_patterns; i++) {
1530 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1531 		if (ret) {
1532 			wl1271_warning("Bad wowlan pattern %d", i);
1533 			return ret;
1534 		}
1535 	}
1536 
1537 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1538 	if (ret)
1539 		goto out;
1540 
1541 	ret = wl1271_rx_filter_clear_all(wl);
1542 	if (ret)
1543 		goto out;
1544 
1545 	/* Translate WoWLAN patterns into filters */
1546 	for (i = 0; i < wow->n_patterns; i++) {
1547 		struct cfg80211_wowlan_trig_pkt_pattern *p;
1548 		struct wl12xx_rx_filter *filter = NULL;
1549 
1550 		p = &wow->patterns[i];
1551 
1552 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1553 		if (ret) {
1554 			wl1271_warning("Failed to create an RX filter from "
1555 				       "wowlan pattern %d", i);
1556 			goto out;
1557 		}
1558 
1559 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1560 
1561 		wl1271_rx_filter_free(filter);
1562 		if (ret)
1563 			goto out;
1564 	}
1565 
1566 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1567 
1568 out:
1569 	return ret;
1570 }
1571 
1572 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1573 					struct wl12xx_vif *wlvif,
1574 					struct cfg80211_wowlan *wow)
1575 {
1576 	int ret = 0;
1577 
1578 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1579 		goto out;
1580 
1581 	ret = wl1271_ps_elp_wakeup(wl);
1582 	if (ret < 0)
1583 		goto out;
1584 
1585 	ret = wl1271_configure_wowlan(wl, wow);
1586 	if (ret < 0)
1587 		goto out_sleep;
1588 
1589 	if ((wl->conf.conn.suspend_wake_up_event ==
1590 	     wl->conf.conn.wake_up_event) &&
1591 	    (wl->conf.conn.suspend_listen_interval ==
1592 	     wl->conf.conn.listen_interval))
1593 		goto out_sleep;
1594 
1595 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1596 				    wl->conf.conn.suspend_wake_up_event,
1597 				    wl->conf.conn.suspend_listen_interval);
1598 
1599 	if (ret < 0)
1600 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1601 
1602 out_sleep:
1603 	wl1271_ps_elp_sleep(wl);
1604 out:
1605 	return ret;
1606 
1607 }
1608 
1609 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1610 				       struct wl12xx_vif *wlvif)
1611 {
1612 	int ret = 0;
1613 
1614 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1615 		goto out;
1616 
1617 	ret = wl1271_ps_elp_wakeup(wl);
1618 	if (ret < 0)
1619 		goto out;
1620 
1621 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1622 
1623 	wl1271_ps_elp_sleep(wl);
1624 out:
1625 	return ret;
1626 
1627 }
1628 
1629 static int wl1271_configure_suspend(struct wl1271 *wl,
1630 				    struct wl12xx_vif *wlvif,
1631 				    struct cfg80211_wowlan *wow)
1632 {
1633 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1634 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1635 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1636 		return wl1271_configure_suspend_ap(wl, wlvif);
1637 	return 0;
1638 }
1639 
1640 static void wl1271_configure_resume(struct wl1271 *wl,
1641 				    struct wl12xx_vif *wlvif)
1642 {
1643 	int ret = 0;
1644 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1645 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1646 
1647 	if ((!is_ap) && (!is_sta))
1648 		return;
1649 
1650 	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1651 		return;
1652 
1653 	ret = wl1271_ps_elp_wakeup(wl);
1654 	if (ret < 0)
1655 		return;
1656 
1657 	if (is_sta) {
1658 		wl1271_configure_wowlan(wl, NULL);
1659 
1660 		if ((wl->conf.conn.suspend_wake_up_event ==
1661 		     wl->conf.conn.wake_up_event) &&
1662 		    (wl->conf.conn.suspend_listen_interval ==
1663 		     wl->conf.conn.listen_interval))
1664 			goto out_sleep;
1665 
1666 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1667 				    wl->conf.conn.wake_up_event,
1668 				    wl->conf.conn.listen_interval);
1669 
1670 		if (ret < 0)
1671 			wl1271_error("resume: wake up conditions failed: %d",
1672 				     ret);
1673 
1674 	} else if (is_ap) {
1675 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1676 	}
1677 
1678 out_sleep:
1679 	wl1271_ps_elp_sleep(wl);
1680 }
1681 
1682 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1683 			    struct cfg80211_wowlan *wow)
1684 {
1685 	struct wl1271 *wl = hw->priv;
1686 	struct wl12xx_vif *wlvif;
1687 	int ret;
1688 
1689 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1690 	WARN_ON(!wow);
1691 
1692 	/* we want to perform the recovery before suspending */
1693 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1694 		wl1271_warning("postponing suspend to perform recovery");
1695 		return -EBUSY;
1696 	}
1697 
1698 	wl1271_tx_flush(wl);
1699 
1700 	mutex_lock(&wl->mutex);
1701 	wl->wow_enabled = true;
1702 	wl12xx_for_each_wlvif(wl, wlvif) {
1703 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1704 		if (ret < 0) {
1705 			mutex_unlock(&wl->mutex);
1706 			wl1271_warning("couldn't prepare device to suspend");
1707 			return ret;
1708 		}
1709 	}
1710 	mutex_unlock(&wl->mutex);
1711 	/* flush any remaining work */
1712 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1713 
1714 	/*
1715 	 * disable and re-enable interrupts in order to flush
1716 	 * the threaded_irq
1717 	 */
1718 	wlcore_disable_interrupts(wl);
1719 
1720 	/*
1721 	 * set suspended flag to avoid triggering a new threaded_irq
1722 	 * work. no need for spinlock as interrupts are disabled.
1723 	 */
1724 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1725 
1726 	wlcore_enable_interrupts(wl);
1727 	flush_work(&wl->tx_work);
1728 	flush_delayed_work(&wl->elp_work);
1729 
1730 	return 0;
1731 }
1732 
1733 static int wl1271_op_resume(struct ieee80211_hw *hw)
1734 {
1735 	struct wl1271 *wl = hw->priv;
1736 	struct wl12xx_vif *wlvif;
1737 	unsigned long flags;
1738 	bool run_irq_work = false, pending_recovery;
1739 	int ret;
1740 
1741 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1742 		     wl->wow_enabled);
1743 	WARN_ON(!wl->wow_enabled);
1744 
1745 	/*
1746 	 * re-enable irq_work enqueuing, and call irq_work directly if
1747 	 * there is a pending work.
1748 	 * there is pending work.
1749 	spin_lock_irqsave(&wl->wl_lock, flags);
1750 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1751 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1752 		run_irq_work = true;
1753 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1754 
1755 	mutex_lock(&wl->mutex);
1756 
1757 	/* test the recovery flag before calling any SDIO functions */
1758 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1759 				    &wl->flags);
1760 
1761 	if (run_irq_work) {
1762 		wl1271_debug(DEBUG_MAC80211,
1763 			     "run postponed irq_work directly");
1764 
1765 		/* don't talk to the HW if recovery is pending */
1766 		if (!pending_recovery) {
1767 			ret = wlcore_irq_locked(wl);
1768 			if (ret)
1769 				wl12xx_queue_recovery_work(wl);
1770 		}
1771 
1772 		wlcore_enable_interrupts(wl);
1773 	}
1774 
1775 	if (pending_recovery) {
1776 		wl1271_warning("queuing forgotten recovery on resume");
1777 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1778 		goto out;
1779 	}
1780 
1781 	wl12xx_for_each_wlvif(wl, wlvif) {
1782 		wl1271_configure_resume(wl, wlvif);
1783 	}
1784 
1785 out:
1786 	wl->wow_enabled = false;
1787 	mutex_unlock(&wl->mutex);
1788 
1789 	return 0;
1790 }
1791 #endif
1792 
1793 static int wl1271_op_start(struct ieee80211_hw *hw)
1794 {
1795 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1796 
1797 	/*
1798 	 * We have to delay the booting of the hardware because
1799 	 * we need to know the local MAC address before downloading and
1800 	 * initializing the firmware. The MAC address cannot be changed
1801 	 * after boot, and without the proper MAC address, the firmware
1802 	 * will not function properly.
1803 	 *
1804 	 * The MAC address is first known when the corresponding interface
1805 	 * is added. That is where we will initialize the hardware.
1806 	 */
1807 
1808 	return 0;
1809 }
1810 
1811 static void wlcore_op_stop_locked(struct wl1271 *wl)
1812 {
1813 	int i;
1814 
1815 	if (wl->state == WLCORE_STATE_OFF) {
1816 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1817 					&wl->flags))
1818 			wlcore_enable_interrupts(wl);
1819 
1820 		return;
1821 	}
1822 
1823 	/*
1824 	 * this must be before the cancel_work calls below, so that the work
1825 	 * functions don't perform further work.
1826 	 */
1827 	wl->state = WLCORE_STATE_OFF;
1828 
1829 	/*
1830 	 * Use the nosync variant to disable interrupts, so the mutex could be
1831 	 * held while doing so without deadlocking.
1832 	 */
1833 	wlcore_disable_interrupts_nosync(wl);
1834 
1835 	mutex_unlock(&wl->mutex);
1836 
1837 	wlcore_synchronize_interrupts(wl);
1838 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1839 		cancel_work_sync(&wl->recovery_work);
1840 	wl1271_flush_deferred_work(wl);
1841 	cancel_delayed_work_sync(&wl->scan_complete_work);
1842 	cancel_work_sync(&wl->netstack_work);
1843 	cancel_work_sync(&wl->tx_work);
1844 	cancel_delayed_work_sync(&wl->elp_work);
1845 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1846 	cancel_delayed_work_sync(&wl->connection_loss_work);
1847 
1848 	/* let's notify MAC80211 about the remaining pending TX frames */
1849 	wl12xx_tx_reset(wl);
1850 	mutex_lock(&wl->mutex);
1851 
1852 	wl1271_power_off(wl);
1853 	/*
1854 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1855 	 * an interrupt storm. Now that the power is down, it is safe to
1856 	 * re-enable interrupts to balance the disable depth
1857 	 */
1858 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1859 		wlcore_enable_interrupts(wl);
1860 
1861 	wl->band = IEEE80211_BAND_2GHZ;
1862 
1863 	wl->rx_counter = 0;
1864 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1865 	wl->channel_type = NL80211_CHAN_NO_HT;
1866 	wl->tx_blocks_available = 0;
1867 	wl->tx_allocated_blocks = 0;
1868 	wl->tx_results_count = 0;
1869 	wl->tx_packets_count = 0;
1870 	wl->time_offset = 0;
1871 	wl->ap_fw_ps_map = 0;
1872 	wl->ap_ps_map = 0;
1873 	wl->sched_scanning = false;
1874 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1875 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1876 	memset(wl->links_map, 0, sizeof(wl->links_map));
1877 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1878 	wl->active_sta_count = 0;
1879 
1880 	/* The system link is always allocated */
1881 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1882 
1883 	/*
1884 	 * this is performed after the cancel_work calls and the associated
1885 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1886 	 * get executed before all these vars have been reset.
1887 	 */
1888 	wl->flags = 0;
1889 
1890 	wl->tx_blocks_freed = 0;
1891 
1892 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1893 		wl->tx_pkts_freed[i] = 0;
1894 		wl->tx_allocated_pkts[i] = 0;
1895 	}
1896 
1897 	wl1271_debugfs_reset(wl);
1898 
1899 	kfree(wl->fw_status_1);
1900 	wl->fw_status_1 = NULL;
1901 	wl->fw_status_2 = NULL;
1902 	kfree(wl->tx_res_if);
1903 	wl->tx_res_if = NULL;
1904 	kfree(wl->target_mem_map);
1905 	wl->target_mem_map = NULL;
1906 }
1907 
1908 static void wlcore_op_stop(struct ieee80211_hw *hw)
1909 {
1910 	struct wl1271 *wl = hw->priv;
1911 
1912 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1913 
1914 	mutex_lock(&wl->mutex);
1915 
1916 	wlcore_op_stop_locked(wl);
1917 
1918 	mutex_unlock(&wl->mutex);
1919 }
1920 
1921 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
1922 {
1923 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
1924 					WL12XX_MAX_RATE_POLICIES);
1925 	if (policy >= WL12XX_MAX_RATE_POLICIES)
1926 		return -EBUSY;
1927 
1928 	__set_bit(policy, wl->rate_policies_map);
1929 	*idx = policy;
1930 	return 0;
1931 }
1932 
1933 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1934 {
1935 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
1936 		return;
1937 
1938 	__clear_bit(*idx, wl->rate_policies_map);
1939 	*idx = WL12XX_MAX_RATE_POLICIES;
1940 }
1941 
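/*
 * Keep-alive (KLV) template slots are managed the same way, via
 * wl->klv_templates_map and WLCORE_MAX_KLV_TEMPLATES.
 */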
1942 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
1943 {
1944 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
1945 					WLCORE_MAX_KLV_TEMPLATES);
1946 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
1947 		return -EBUSY;
1948 
1949 	__set_bit(policy, wl->klv_templates_map);
1950 	*idx = policy;
1951 	return 0;
1952 }
1953 
1954 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
1955 {
1956 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
1957 		return;
1958 
1959 	__clear_bit(*idx, wl->klv_templates_map);
1960 	*idx = WLCORE_MAX_KLV_TEMPLATES;
1961 }
1962 
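/*
 * Map the vif's BSS type (and its p2p flag) to the corresponding
 * firmware role type, or WL12XX_INVALID_ROLE_TYPE for an unknown type.
 */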
1963 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1964 {
1965 	switch (wlvif->bss_type) {
1966 	case BSS_TYPE_AP_BSS:
1967 		if (wlvif->p2p)
1968 			return WL1271_ROLE_P2P_GO;
1969 		else
1970 			return WL1271_ROLE_AP;
1971 
1972 	case BSS_TYPE_STA_BSS:
1973 		if (wlvif->p2p)
1974 			return WL1271_ROLE_P2P_CL;
1975 		else
1976 			return WL1271_ROLE_STA;
1977 
1978 	case BSS_TYPE_IBSS:
1979 		return WL1271_ROLE_IBSS;
1980 
1981 	default:
1982 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
1983 	}
1984 	return WL12XX_INVALID_ROLE_TYPE;
1985 }
1986 
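/*
 * Initialize the per-vif driver data: derive the BSS type from the
 * mac80211 interface type, allocate rate policies (and, for STA/IBSS,
 * a keep-alive template), and copy the global channel/band/power
 * settings from wl into the vif.
 */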
1987 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1988 {
1989 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
1990 	int i;
1991 
1992 	/* clear everything but the persistent data */
1993 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
1994 
1995 	switch (ieee80211_vif_type_p2p(vif)) {
1996 	case NL80211_IFTYPE_P2P_CLIENT:
1997 		wlvif->p2p = 1;
1998 		/* fall-through */
1999 	case NL80211_IFTYPE_STATION:
2000 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2001 		break;
2002 	case NL80211_IFTYPE_ADHOC:
2003 		wlvif->bss_type = BSS_TYPE_IBSS;
2004 		break;
2005 	case NL80211_IFTYPE_P2P_GO:
2006 		wlvif->p2p = 1;
2007 		/* fall-through */
2008 	case NL80211_IFTYPE_AP:
2009 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2010 		break;
2011 	default:
2012 		wlvif->bss_type = MAX_BSS_TYPE;
2013 		return -EOPNOTSUPP;
2014 	}
2015 
2016 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2017 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2018 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2019 
2020 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2021 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2022 		/* init sta/ibss data */
2023 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2024 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2025 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2026 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2027 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2028 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2029 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2030 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2031 	} else {
2032 		/* init ap data */
2033 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2034 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2035 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2036 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2037 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2038 			wl12xx_allocate_rate_policy(wl,
2039 						&wlvif->ap.ucast_rate_idx[i]);
2040 		wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
2041 		/*
2042 		 * TODO: check if basic_rate shouldn't be
2043 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2044 		 * instead (the same thing for STA above).
2045 		 */
2046 		wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
2047 		/* TODO: this seems to be used only for STA, check it */
2048 		wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
2049 	}
2050 
2051 	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2052 	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2053 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2054 
2055 	/*
2056 	 * mac80211 configures some values globally, while we treat them
2057 	 * per-interface. Thus, on init, we have to copy them from wl.
2058 	 */
2059 	wlvif->band = wl->band;
2060 	wlvif->channel = wl->channel;
2061 	wlvif->power_level = wl->power_level;
2062 	wlvif->channel_type = wl->channel_type;
2063 
2064 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2065 		  wl1271_rx_streaming_enable_work);
2066 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2067 		  wl1271_rx_streaming_disable_work);
2068 	INIT_LIST_HEAD(&wlvif->list);
2069 
2070 	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2071 		    (unsigned long) wlvif);
2072 	return 0;
2073 }
2074 
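/*
 * Power the chip up, boot the firmware and run the HW init sequence,
 * retrying up to WL1271_BOOT_RETRIES times. On success the wiphy
 * hw/fw version info is updated and wl->state moves to WLCORE_STATE_ON.
 */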
2075 static bool wl12xx_init_fw(struct wl1271 *wl)
2076 {
2077 	int retries = WL1271_BOOT_RETRIES;
2078 	bool booted = false;
2079 	struct wiphy *wiphy = wl->hw->wiphy;
2080 	int ret;
2081 
2082 	while (retries) {
2083 		retries--;
2084 		ret = wl12xx_chip_wakeup(wl, false);
2085 		if (ret < 0)
2086 			goto power_off;
2087 
2088 		ret = wl->ops->boot(wl);
2089 		if (ret < 0)
2090 			goto power_off;
2091 
2092 		ret = wl1271_hw_init(wl);
2093 		if (ret < 0)
2094 			goto irq_disable;
2095 
2096 		booted = true;
2097 		break;
2098 
2099 irq_disable:
2100 		mutex_unlock(&wl->mutex);
2101 		/* Unlocking the mutex in the middle of handling is
2102 		   inherently unsafe. In this case we deem it safe to do,
2103 		   because we need to let any possibly pending IRQ out of
2104 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2105 		   work function will not do anything.) Also, any other
2106 		   possible concurrent operations will fail due to the
2107 		   current state, hence the wl1271 struct should be safe. */
2108 		wlcore_disable_interrupts(wl);
2109 		wl1271_flush_deferred_work(wl);
2110 		cancel_work_sync(&wl->netstack_work);
2111 		mutex_lock(&wl->mutex);
2112 power_off:
2113 		wl1271_power_off(wl);
2114 	}
2115 
2116 	if (!booted) {
2117 		wl1271_error("firmware boot failed despite %d retries",
2118 			     WL1271_BOOT_RETRIES);
2119 		goto out;
2120 	}
2121 
2122 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2123 
2124 	/* update hw/fw version info in wiphy struct */
2125 	wiphy->hw_version = wl->chip.id;
2126 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2127 		sizeof(wiphy->fw_version));
2128 
2129 	/*
2130 	 * Now we know if 11a is supported (info from the NVS), so disable
2131 	 * 11a channels if not supported
2132 	 */
2133 	if (!wl->enable_11a)
2134 		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2135 
2136 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2137 		     wl->enable_11a ? "" : "not ");
2138 
2139 	wl->state = WLCORE_STATE_ON;
2140 out:
2141 	return booted;
2142 }
2143 
2144 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2145 {
2146 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2147 }
2148 
2149 /*
2150  * Check whether a fw switch (i.e. moving from one loaded
2151  * fw to another) is needed. This function is also responsible
2152  * for updating wl->last_vif_count, so it must be called before
2153  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2154  * will be used).
2155  */
2156 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2157 				  struct vif_counter_data vif_counter_data,
2158 				  bool add)
2159 {
2160 	enum wl12xx_fw_type current_fw = wl->fw_type;
2161 	u8 vif_count = vif_counter_data.counter;
2162 
2163 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2164 		return false;
2165 
2166 	/* increase the vif count if this is a new vif */
2167 	if (add && !vif_counter_data.cur_vif_running)
2168 		vif_count++;
2169 
2170 	wl->last_vif_count = vif_count;
2171 
2172 	/* no need for fw change if the device is OFF */
2173 	if (wl->state == WLCORE_STATE_OFF)
2174 		return false;
2175 
2176 	/* no need for fw change if a single fw is used */
2177 	if (!wl->mr_fw_name)
2178 		return false;
2179 
2180 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2181 		return true;
2182 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2183 		return true;
2184 
2185 	return false;
2186 }
2187 
2188 /*
2189  * Enter "forced psm". Make sure the sta is in psm against the ap,
2190  * to make the fw switch a bit more disconnection-persistent.
2191  */
2192 static void wl12xx_force_active_psm(struct wl1271 *wl)
2193 {
2194 	struct wl12xx_vif *wlvif;
2195 
2196 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2197 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2198 	}
2199 }
2200 
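/*
 * mac80211 add_interface callback: initialize the vif data, boot the
 * firmware if the device is still off, enable a firmware role for the
 * vif and run its per-vif init. May instead trigger a recovery-based
 * firmware switch when the single-role/multi-role fw needs to change.
 */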
2201 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2202 				   struct ieee80211_vif *vif)
2203 {
2204 	struct wl1271 *wl = hw->priv;
2205 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2206 	struct vif_counter_data vif_count;
2207 	int ret = 0;
2208 	u8 role_type;
2209 	bool booted = false;
2210 
2211 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2212 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2213 
2214 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2215 		     ieee80211_vif_type_p2p(vif), vif->addr);
2216 
2217 	wl12xx_get_vif_count(hw, vif, &vif_count);
2218 
2219 	mutex_lock(&wl->mutex);
2220 	ret = wl1271_ps_elp_wakeup(wl);
2221 	if (ret < 0)
2222 		goto out_unlock;
2223 
2224 	/*
2225 	 * In some rare corner-case HW recovery scenarios it's possible to
2226 	 * get here before __wl1271_op_remove_interface is complete, so
2227 	 * opt out if that is the case.
2228 	 */
2229 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2230 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2231 		ret = -EBUSY;
2232 		goto out;
2233 	}
2234 
2235 
2236 	ret = wl12xx_init_vif_data(wl, vif);
2237 	if (ret < 0)
2238 		goto out;
2239 
2240 	wlvif->wl = wl;
2241 	role_type = wl12xx_get_role_type(wl, wlvif);
2242 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2243 		ret = -EINVAL;
2244 		goto out;
2245 	}
2246 
2247 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2248 		wl12xx_force_active_psm(wl);
2249 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2250 		mutex_unlock(&wl->mutex);
2251 		wl1271_recovery_work(&wl->recovery_work);
2252 		return 0;
2253 	}
2254 
2255 	/*
2256 	 * TODO: once the nvs issue is solved, move this block
2257 	 * to start(), and make sure here the driver is ON.
2258 	 */
2259 	if (wl->state == WLCORE_STATE_OFF) {
2260 		/*
2261 		 * we still need this in order to configure the fw
2262 		 * while uploading the nvs
2263 		 */
2264 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2265 
2266 		booted = wl12xx_init_fw(wl);
2267 		if (!booted) {
2268 			ret = -EINVAL;
2269 			goto out;
2270 		}
2271 	}
2272 
2273 	ret = wl12xx_cmd_role_enable(wl, vif->addr,
2274 				     role_type, &wlvif->role_id);
2275 	if (ret < 0)
2276 		goto out;
2277 
2278 	ret = wl1271_init_vif_specific(wl, vif);
2279 	if (ret < 0)
2280 		goto out;
2281 
2282 	list_add(&wlvif->list, &wl->wlvif_list);
2283 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2284 
2285 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2286 		wl->ap_count++;
2287 	else
2288 		wl->sta_count++;
2289 out:
2290 	wl1271_ps_elp_sleep(wl);
2291 out_unlock:
2292 	mutex_unlock(&wl->mutex);
2293 
2294 	return ret;
2295 }
2296 
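/*
 * Tear down a vif: stop any scan it owns, disable its firmware role,
 * free its rate policies, templates and keys, and drop it from the
 * vif list. Called with wl->mutex held; the mutex is temporarily
 * released while the rx-streaming works are cancelled.
 */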
2297 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2298 					 struct ieee80211_vif *vif,
2299 					 bool reset_tx_queues)
2300 {
2301 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2302 	int i, ret;
2303 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2304 
2305 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2306 
2307 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2308 		return;
2309 
2310 	/* because of hardware recovery, we may get here twice */
2311 	if (wl->state == WLCORE_STATE_OFF)
2312 		return;
2313 
2314 	wl1271_info("down");
2315 
2316 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2317 	    wl->scan_vif == vif) {
2318 		/*
2319 		 * Rearm the tx watchdog just before idling scan. This
2320 		 * prevents just-finished scans from triggering the watchdog
2321 		 */
2322 		wl12xx_rearm_tx_watchdog_locked(wl);
2323 
2324 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2325 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2326 		wl->scan_vif = NULL;
2327 		wl->scan.req = NULL;
2328 		ieee80211_scan_completed(wl->hw, true);
2329 	}
2330 
2331 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2332 		/* disable active roles */
2333 		ret = wl1271_ps_elp_wakeup(wl);
2334 		if (ret < 0)
2335 			goto deinit;
2336 
2337 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2338 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2339 			if (wl12xx_dev_role_started(wlvif))
2340 				wl12xx_stop_dev(wl, wlvif);
2341 		}
2342 
2343 		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2344 		if (ret < 0)
2345 			goto deinit;
2346 
2347 		wl1271_ps_elp_sleep(wl);
2348 	}
2349 deinit:
2350 	/* clear all hlids (except system_hlid) */
2351 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2352 
2353 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2354 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2355 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2356 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2357 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2358 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2359 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2360 	} else {
2361 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2362 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2363 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2364 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2365 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2366 			wl12xx_free_rate_policy(wl,
2367 						&wlvif->ap.ucast_rate_idx[i]);
2368 		wl1271_free_ap_keys(wl, wlvif);
2369 	}
2370 
2371 	dev_kfree_skb(wlvif->probereq);
2372 	wlvif->probereq = NULL;
2373 	wl12xx_tx_reset_wlvif(wl, wlvif);
2374 	if (wl->last_wlvif == wlvif)
2375 		wl->last_wlvif = NULL;
2376 	list_del(&wlvif->list);
2377 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2378 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2379 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2380 
2381 	if (is_ap)
2382 		wl->ap_count--;
2383 	else
2384 		wl->sta_count--;
2385 
2386 	/*
2387 	 * Last AP is down but stations remain: configure sleep auth
2388 	 * according to STA. Don't do this on unintended recovery.
2389 	 */
2390 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2391 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2392 		goto unlock;
2393 
2394 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2395 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2396 		/* Configure for power according to debugfs */
2397 		if (sta_auth != WL1271_PSM_ILLEGAL)
2398 			wl1271_acx_sleep_auth(wl, sta_auth);
2399 		/* Configure for power always on */
2400 		else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
2401 			wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
2402 		/* Configure for ELP power saving */
2403 		else
2404 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2405 	}
2406 
2407 unlock:
2408 	mutex_unlock(&wl->mutex);
2409 
2410 	del_timer_sync(&wlvif->rx_streaming_timer);
2411 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2412 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2413 
2414 	mutex_lock(&wl->mutex);
2415 }
2416 
2417 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2418 				       struct ieee80211_vif *vif)
2419 {
2420 	struct wl1271 *wl = hw->priv;
2421 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2422 	struct wl12xx_vif *iter;
2423 	struct vif_counter_data vif_count;
2424 
2425 	wl12xx_get_vif_count(hw, vif, &vif_count);
2426 	mutex_lock(&wl->mutex);
2427 
2428 	if (wl->state == WLCORE_STATE_OFF ||
2429 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2430 		goto out;
2431 
2432 	/*
2433 	 * wl->vif can be null here if someone shuts down the interface
2434 	 * just when hardware recovery has been started.
2435 	 */
2436 	wl12xx_for_each_wlvif(wl, iter) {
2437 		if (iter != wlvif)
2438 			continue;
2439 
2440 		__wl1271_op_remove_interface(wl, vif, true);
2441 		break;
2442 	}
2443 	WARN_ON(iter != wlvif);
2444 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2445 		wl12xx_force_active_psm(wl);
2446 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2447 		wl12xx_queue_recovery_work(wl);
2448 	}
2449 out:
2450 	mutex_unlock(&wl->mutex);
2451 }
2452 
2453 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2454 				      struct ieee80211_vif *vif,
2455 				      enum nl80211_iftype new_type, bool p2p)
2456 {
2457 	struct wl1271 *wl = hw->priv;
2458 	int ret;
2459 
2460 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2461 	wl1271_op_remove_interface(hw, vif);
2462 
2463 	vif->type = new_type;
2464 	vif->p2p = p2p;
2465 	ret = wl1271_op_add_interface(hw, vif);
2466 
2467 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2468 	return ret;
2469 }
2470 
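/*
 * Start the STA/IBSS role in the firmware and restore the keep-alive
 * configuration that the JOIN clears (see the comment below on its
 * side effects).
 */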
2471 static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2472 			  bool set_assoc)
2473 {
2474 	int ret;
2475 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2476 
2477 	/*
2478 	 * One of the side effects of the JOIN command is that it clears
2479 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2480 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2481 	 * Currently the only valid scenario for JOIN during association
2482 	 * is on roaming, in which case we will also be given new keys.
2483 	 * Keep the below message for now, unless it starts bothering
2484 	 * users who really like to roam a lot :)
2485 	 */
2486 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2487 		wl1271_info("JOIN while associated.");
2488 
2489 	/* clear encryption type */
2490 	wlvif->encryption_type = KEY_NONE;
2491 
2492 	if (set_assoc)
2493 		set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2494 
2495 	if (is_ibss)
2496 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2497 	else
2498 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2499 	if (ret < 0)
2500 		goto out;
2501 
2502 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2503 		goto out;
2504 
2505 	/*
2506 	 * The join command disables the keep-alive mode, shuts down its process,
2507 	 * and also clears the template config, so we need to reset it all after
2508 	 * the join. The acx_aid starts the keep-alive process, and the order
2509 	 * of the commands below is relevant.
2510 	 */
2511 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2512 	if (ret < 0)
2513 		goto out;
2514 
2515 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2516 	if (ret < 0)
2517 		goto out;
2518 
2519 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2520 	if (ret < 0)
2521 		goto out;
2522 
2523 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2524 					   wlvif->sta.klv_template_id,
2525 					   ACX_KEEP_ALIVE_TPL_VALID);
2526 	if (ret < 0)
2527 		goto out;
2528 
2529 out:
2530 	return ret;
2531 }
2532 
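/*
 * Stop the STA role: abort any pending channel switch, invalidate the
 * keep-alive template and reset the TX security counters.
 */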
2533 static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2534 {
2535 	int ret;
2536 
2537 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2538 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2539 
2540 		wl12xx_cmd_stop_channel_switch(wl);
2541 		ieee80211_chswitch_done(vif, false);
2542 	}
2543 
2544 	/* invalidate keep-alive template */
2545 	wl1271_acx_keep_alive_config(wl, wlvif,
2546 				     wlvif->sta.klv_template_id,
2547 				     ACX_KEEP_ALIVE_TPL_INVALID);
2548 
2549 	/* to stop listening to a channel, we disconnect */
2550 	ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
2551 	if (ret < 0)
2552 		goto out;
2553 
2554 	/* reset TX security counters on a clean disconnect */
2555 	wlvif->tx_security_last_seq_lsb = 0;
2556 	wlvif->tx_security_seq = 0;
2557 
2558 out:
2559 	return ret;
2560 }
2561 
2562 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2563 {
2564 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2565 	wlvif->rate_set = wlvif->basic_rate_set;
2566 }
2567 
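/*
 * Handle mac80211 idle transitions for a STA vif: on idle, stop the
 * device role and fall back to the minimal rate policy; when leaving
 * idle, stop any sched scan and start the device role again.
 */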
2568 static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2569 				  bool idle)
2570 {
2571 	int ret;
2572 	bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2573 
2574 	if (idle == cur_idle)
2575 		return 0;
2576 
2577 	if (idle) {
2578 		/* no need to croc if we weren't busy (e.g. during boot) */
2579 		if (wl12xx_dev_role_started(wlvif)) {
2580 			ret = wl12xx_stop_dev(wl, wlvif);
2581 			if (ret < 0)
2582 				goto out;
2583 		}
2584 		wlvif->rate_set =
2585 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2586 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2587 		if (ret < 0)
2588 			goto out;
2589 		clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2590 	} else {
2591 		/* The current firmware only supports sched_scan in idle */
2592 		if (wl->sched_scanning) {
2593 			wl1271_scan_sched_scan_stop(wl, wlvif);
2594 			ieee80211_sched_scan_stopped(wl->hw);
2595 		}
2596 
2597 		ret = wl12xx_start_dev(wl, wlvif);
2598 		if (ret < 0)
2599 			goto out;
2600 		set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2601 	}
2602 
2603 out:
2604 	return ret;
2605 }
2606 
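/*
 * Apply a mac80211 config change (channel/band, PS mode, TX power) to
 * a single vif.
 */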
2607 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2608 			     struct ieee80211_conf *conf, u32 changed)
2609 {
2610 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2611 	int channel, ret;
2612 
2613 	channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2614 
2615 	/* if the channel changes while joined, join again */
2616 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
2617 	    ((wlvif->band != conf->channel->band) ||
2618 	     (wlvif->channel != channel) ||
2619 	     (wlvif->channel_type != conf->channel_type))) {
2620 		/* send all pending packets */
2621 		ret = wlcore_tx_work_locked(wl);
2622 		if (ret < 0)
2623 			return ret;
2624 
2625 		wlvif->band = conf->channel->band;
2626 		wlvif->channel = channel;
2627 		wlvif->channel_type = conf->channel_type;
2628 
2629 		if (is_ap) {
2630 			wl1271_set_band_rate(wl, wlvif);
2631 			ret = wl1271_init_ap_rates(wl, wlvif);
2632 			if (ret < 0)
2633 				wl1271_error("AP rate policy change failed %d",
2634 					     ret);
2635 		} else {
2636 			/*
2637 			 * FIXME: mac80211 should really provide a fixed
2638 			 * rate to use here. For now, just use the smallest
2639 			 * possible rate for the band as a fixed rate for
2640 			 * association frames and other control messages.
2641 			 */
2642 			if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2643 				wl1271_set_band_rate(wl, wlvif);
2644 
2645 			wlvif->basic_rate =
2646 				wl1271_tx_min_rate_get(wl,
2647 						       wlvif->basic_rate_set);
2648 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2649 			if (ret < 0)
2650 				wl1271_warning("rate policy for channel "
2651 					       "failed %d", ret);
2652 
2653 			/*
2654 			 * change the ROC channel. do it only if we are
2655 			 * not idle. otherwise, CROC will be called
2656 			 * anyway.
2657 			 */
2658 			if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
2659 				      &wlvif->flags) &&
2660 			    wl12xx_dev_role_started(wlvif) &&
2661 			    !(conf->flags & IEEE80211_CONF_IDLE)) {
2662 				ret = wl12xx_stop_dev(wl, wlvif);
2663 				if (ret < 0)
2664 					return ret;
2665 
2666 				ret = wl12xx_start_dev(wl, wlvif);
2667 				if (ret < 0)
2668 					return ret;
2669 			}
2670 		}
2671 	}
2672 
2673 	if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
2674 
2675 		if ((conf->flags & IEEE80211_CONF_PS) &&
2676 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
2677 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2678 
2679 			int ps_mode;
2680 			char *ps_mode_str;
2681 
2682 			if (wl->conf.conn.forced_ps) {
2683 				ps_mode = STATION_POWER_SAVE_MODE;
2684 				ps_mode_str = "forced";
2685 			} else {
2686 				ps_mode = STATION_AUTO_PS_MODE;
2687 				ps_mode_str = "auto";
2688 			}
2689 
2690 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
2691 
2692 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
2693 
2694 			if (ret < 0)
2695 				wl1271_warning("enter %s ps failed %d",
2696 					       ps_mode_str, ret);
2697 
2698 		} else if (!(conf->flags & IEEE80211_CONF_PS) &&
2699 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2700 
2701 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
2702 
2703 			ret = wl1271_ps_set_mode(wl, wlvif,
2704 						 STATION_ACTIVE_MODE);
2705 			if (ret < 0)
2706 				wl1271_warning("exit auto ps failed %d", ret);
2707 		}
2708 	}
2709 
2710 	if (conf->power_level != wlvif->power_level) {
2711 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2712 		if (ret < 0)
2713 			return ret;
2714 
2715 		wlvif->power_level = conf->power_level;
2716 	}
2717 
2718 	return 0;
2719 }
2720 
2721 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2722 {
2723 	struct wl1271 *wl = hw->priv;
2724 	struct wl12xx_vif *wlvif;
2725 	struct ieee80211_conf *conf = &hw->conf;
2726 	int channel, ret = 0;
2727 
2728 	channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2729 
2730 	wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
2731 		     " changed 0x%x",
2732 		     channel,
2733 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2734 		     conf->power_level,
2735 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2736 			 changed);
2737 
2738 	/*
2739 	 * mac80211 will go to idle nearly immediately after transmitting some
2740 	 * frames, such as the deauth. To make sure those frames reach the air,
2741 	 * wait here until the TX queue is fully flushed.
2742 	 */
2743 	if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
2744 	    ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2745 	     (conf->flags & IEEE80211_CONF_IDLE)))
2746 		wl1271_tx_flush(wl);
2747 
2748 	mutex_lock(&wl->mutex);
2749 
2750 	/* we support configuring the channel and band even while off */
2751 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2752 		wl->band = conf->channel->band;
2753 		wl->channel = channel;
2754 		wl->channel_type = conf->channel_type;
2755 	}
2756 
2757 	if (changed & IEEE80211_CONF_CHANGE_POWER)
2758 		wl->power_level = conf->power_level;
2759 
2760 	if (unlikely(wl->state != WLCORE_STATE_ON))
2761 		goto out;
2762 
2763 	ret = wl1271_ps_elp_wakeup(wl);
2764 	if (ret < 0)
2765 		goto out;
2766 
2767 	/* configure each interface */
2768 	wl12xx_for_each_wlvif(wl, wlvif) {
2769 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2770 		if (ret < 0)
2771 			goto out_sleep;
2772 	}
2773 
2774 out_sleep:
2775 	wl1271_ps_elp_sleep(wl);
2776 
2777 out:
2778 	mutex_unlock(&wl->mutex);
2779 
2780 	return ret;
2781 }
2782 
2783 struct wl1271_filter_params {
2784 	bool enabled;
2785 	int mc_list_length;
2786 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
2787 };
2788 
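/*
 * mac80211 prepare_multicast callback: snapshot the multicast address
 * list into a wl1271_filter_params struct for configure_filter() to
 * consume. Filtering is disabled when the list exceeds
 * ACX_MC_ADDRESS_GROUP_MAX entries.
 */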
2789 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2790 				       struct netdev_hw_addr_list *mc_list)
2791 {
2792 	struct wl1271_filter_params *fp;
2793 	struct netdev_hw_addr *ha;
2794 
2795 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2796 	if (!fp) {
2797 		wl1271_error("Out of memory setting filters.");
2798 		return 0;
2799 	}
2800 
2801 	/* update multicast filtering parameters */
2802 	fp->mc_list_length = 0;
2803 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2804 		fp->enabled = false;
2805 	} else {
2806 		fp->enabled = true;
2807 		netdev_hw_addr_list_for_each(ha, mc_list) {
2808 			memcpy(fp->mc_list[fp->mc_list_length],
2809 					ha->addr, ETH_ALEN);
2810 			fp->mc_list_length++;
2811 		}
2812 	}
2813 
2814 	return (u64)(unsigned long)fp;
2815 }
2816 
2817 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2818 				  FIF_ALLMULTI | \
2819 				  FIF_FCSFAIL | \
2820 				  FIF_BCN_PRBRESP_PROMISC | \
2821 				  FIF_CONTROL | \
2822 				  FIF_OTHER_BSS)
2823 
2824 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2825 				       unsigned int changed,
2826 				       unsigned int *total, u64 multicast)
2827 {
2828 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2829 	struct wl1271 *wl = hw->priv;
2830 	struct wl12xx_vif *wlvif;
2831 
2832 	int ret;
2833 
2834 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2835 		     " total %x", changed, *total);
2836 
2837 	mutex_lock(&wl->mutex);
2838 
2839 	*total &= WL1271_SUPPORTED_FILTERS;
2840 	changed &= WL1271_SUPPORTED_FILTERS;
2841 
2842 	if (unlikely(wl->state != WLCORE_STATE_ON))
2843 		goto out;
2844 
2845 	ret = wl1271_ps_elp_wakeup(wl);
2846 	if (ret < 0)
2847 		goto out;
2848 
2849 	wl12xx_for_each_wlvif(wl, wlvif) {
2850 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2851 			if (*total & FIF_ALLMULTI)
2852 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
2853 								   false,
2854 								   NULL, 0);
2855 			else if (fp)
2856 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
2857 							fp->enabled,
2858 							fp->mc_list,
2859 							fp->mc_list_length);
2860 			if (ret < 0)
2861 				goto out_sleep;
2862 		}
2863 	}
2864 
2865 	/*
2866 	 * The fw doesn't provide an API to configure the filters. Instead,
2867 	 * the filter configuration is based on the active roles / ROC
2868 	 * state.
2869 	 */
2870 
2871 out_sleep:
2872 	wl1271_ps_elp_sleep(wl);
2873 
2874 out:
2875 	mutex_unlock(&wl->mutex);
2876 	kfree(fp);
2877 }
2878 
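/*
 * AP keys configured before the AP role has started cannot be pushed
 * to the firmware yet; record them here so wl1271_ap_init_hwenc() can
 * program them once the AP is up.
 */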
2879 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2880 				u8 id, u8 key_type, u8 key_size,
2881 				const u8 *key, u8 hlid, u32 tx_seq_32,
2882 				u16 tx_seq_16)
2883 {
2884 	struct wl1271_ap_key *ap_key;
2885 	int i;
2886 
2887 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
2888 
2889 	if (key_size > MAX_KEY_SIZE)
2890 		return -EINVAL;
2891 
2892 	/*
2893 	 * Find next free entry in ap_keys. Also check we are not replacing
2894 	 * an existing key.
2895 	 */
2896 	for (i = 0; i < MAX_NUM_KEYS; i++) {
2897 		if (wlvif->ap.recorded_keys[i] == NULL)
2898 			break;
2899 
2900 		if (wlvif->ap.recorded_keys[i]->id == id) {
2901 			wl1271_warning("trying to record key replacement");
2902 			return -EINVAL;
2903 		}
2904 	}
2905 
2906 	if (i == MAX_NUM_KEYS)
2907 		return -EBUSY;
2908 
2909 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
2910 	if (!ap_key)
2911 		return -ENOMEM;
2912 
2913 	ap_key->id = id;
2914 	ap_key->key_type = key_type;
2915 	ap_key->key_size = key_size;
2916 	memcpy(ap_key->key, key, key_size);
2917 	ap_key->hlid = hlid;
2918 	ap_key->tx_seq_32 = tx_seq_32;
2919 	ap_key->tx_seq_16 = tx_seq_16;
2920 
2921 	wlvif->ap.recorded_keys[i] = ap_key;
2922 	return 0;
2923 }
2924 
2925 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2926 {
2927 	int i;
2928 
2929 	for (i = 0; i < MAX_NUM_KEYS; i++) {
2930 		kfree(wlvif->ap.recorded_keys[i]);
2931 		wlvif->ap.recorded_keys[i] = NULL;
2932 	}
2933 }
2934 
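/*
 * Push all recorded AP keys to the firmware and, if a WEP key was
 * among them, set the default WEP key for the broadcast link.
 */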
2935 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2936 {
2937 	int i, ret = 0;
2938 	struct wl1271_ap_key *key;
2939 	bool wep_key_added = false;
2940 
2941 	for (i = 0; i < MAX_NUM_KEYS; i++) {
2942 		u8 hlid;
2943 		if (wlvif->ap.recorded_keys[i] == NULL)
2944 			break;
2945 
2946 		key = wlvif->ap.recorded_keys[i];
2947 		hlid = key->hlid;
2948 		if (hlid == WL12XX_INVALID_LINK_ID)
2949 			hlid = wlvif->ap.bcast_hlid;
2950 
2951 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2952 					    key->id, key->key_type,
2953 					    key->key_size, key->key,
2954 					    hlid, key->tx_seq_32,
2955 					    key->tx_seq_16);
2956 		if (ret < 0)
2957 			goto out;
2958 
2959 		if (key->key_type == KEY_WEP)
2960 			wep_key_added = true;
2961 	}
2962 
2963 	if (wep_key_added) {
2964 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
2965 						     wlvif->ap.bcast_hlid);
2966 		if (ret < 0)
2967 			goto out;
2968 	}
2969 
2970 out:
2971 	wl1271_free_ap_keys(wl, wlvif);
2972 	return ret;
2973 }
2974 
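/*
 * Common key handling for AP and STA roles: resolve the target
 * hlid/address and issue the matching set-key command, recording AP
 * keys that arrive before the AP has started and handling the WEP
 * default key.
 */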
2975 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2976 		       u16 action, u8 id, u8 key_type,
2977 		       u8 key_size, const u8 *key, u32 tx_seq_32,
2978 		       u16 tx_seq_16, struct ieee80211_sta *sta)
2979 {
2980 	int ret;
2981 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2982 
2983 	if (is_ap) {
2984 		struct wl1271_station *wl_sta;
2985 		u8 hlid;
2986 
2987 		if (sta) {
2988 			wl_sta = (struct wl1271_station *)sta->drv_priv;
2989 			hlid = wl_sta->hlid;
2990 		} else {
2991 			hlid = wlvif->ap.bcast_hlid;
2992 		}
2993 
2994 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
2995 			/*
2996 			 * We do not support removing keys after AP shutdown.
2997 			 * Pretend we do to make mac80211 happy.
2998 			 */
2999 			if (action != KEY_ADD_OR_REPLACE)
3000 				return 0;
3001 
3002 			ret = wl1271_record_ap_key(wl, wlvif, id,
3003 					     key_type, key_size,
3004 					     key, hlid, tx_seq_32,
3005 					     tx_seq_16);
3006 		} else {
3007 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3008 					     id, key_type, key_size,
3009 					     key, hlid, tx_seq_32,
3010 					     tx_seq_16);
3011 		}
3012 
3013 		if (ret < 0)
3014 			return ret;
3015 	} else {
3016 		const u8 *addr;
3017 		static const u8 bcast_addr[ETH_ALEN] = {
3018 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3019 		};
3020 
3021 		addr = sta ? sta->addr : bcast_addr;
3022 
3023 		if (is_zero_ether_addr(addr)) {
3024 			/* We don't support TX-only encryption */
3025 			return -EOPNOTSUPP;
3026 		}
3027 
3028 		/* The wl1271 does not allow removing unicast keys - they
3029 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3030 		   request silently, as we don't want mac80211 to emit
3031 		   an error message. */
3032 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3033 			return 0;
3034 
3035 		/* don't remove key if hlid was already deleted */
3036 		if (action == KEY_REMOVE &&
3037 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3038 			return 0;
3039 
3040 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3041 					     id, key_type, key_size,
3042 					     key, addr, tx_seq_32,
3043 					     tx_seq_16);
3044 		if (ret < 0)
3045 			return ret;
3046 
3047 		/* the default WEP key needs to be configured at least once */
3048 		if (key_type == KEY_WEP) {
3049 			ret = wl12xx_cmd_set_default_wep_key(wl,
3050 							wlvif->default_key,
3051 							wlvif->sta.hlid);
3052 			if (ret < 0)
3053 				return ret;
3054 		}
3055 	}
3056 
3057 	return 0;
3058 }
3059 
3060 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3061 			     struct ieee80211_vif *vif,
3062 			     struct ieee80211_sta *sta,
3063 			     struct ieee80211_key_conf *key_conf)
3064 {
3065 	struct wl1271 *wl = hw->priv;
3066 	int ret;
3067 	bool might_change_spare =
3068 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3069 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3070 
3071 	if (might_change_spare) {
3072 		/*
3073 		 * stop the queues and flush to ensure the next packets are
3074 		 * in sync with FW spare block accounting
3075 		 */
3076 		mutex_lock(&wl->mutex);
3077 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3078 		mutex_unlock(&wl->mutex);
3079 
3080 		wl1271_tx_flush(wl);
3081 	}
3082 
3083 	mutex_lock(&wl->mutex);
3084 
3085 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3086 		ret = -EAGAIN;
3087 		goto out_wake_queues;
3088 	}
3089 
3090 	ret = wl1271_ps_elp_wakeup(wl);
3091 	if (ret < 0)
3092 		goto out_wake_queues;
3093 
3094 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3095 
3096 	wl1271_ps_elp_sleep(wl);
3097 
3098 out_wake_queues:
3099 	if (might_change_spare)
3100 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3101 
3102 	mutex_unlock(&wl->mutex);
3103 
3104 	return ret;
3105 }
3106 
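/*
 * Translate the mac80211 cipher suite into a firmware key type and TX
 * sequence counters, then add or remove the key via wl1271_set_key().
 */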
3107 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3108 		   struct ieee80211_vif *vif,
3109 		   struct ieee80211_sta *sta,
3110 		   struct ieee80211_key_conf *key_conf)
3111 {
3112 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3113 	int ret;
3114 	u32 tx_seq_32 = 0;
3115 	u16 tx_seq_16 = 0;
3116 	u8 key_type;
3117 
3118 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3119 
3120 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3121 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3122 		     key_conf->cipher, key_conf->keyidx,
3123 		     key_conf->keylen, key_conf->flags);
3124 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3125 
3126 	switch (key_conf->cipher) {
3127 	case WLAN_CIPHER_SUITE_WEP40:
3128 	case WLAN_CIPHER_SUITE_WEP104:
3129 		key_type = KEY_WEP;
3130 
3131 		key_conf->hw_key_idx = key_conf->keyidx;
3132 		break;
3133 	case WLAN_CIPHER_SUITE_TKIP:
3134 		key_type = KEY_TKIP;
3135 
3136 		key_conf->hw_key_idx = key_conf->keyidx;
3137 		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3138 		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3139 		break;
3140 	case WLAN_CIPHER_SUITE_CCMP:
3141 		key_type = KEY_AES;
3142 
3143 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3144 		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3145 		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3146 		break;
3147 	case WL1271_CIPHER_SUITE_GEM:
3148 		key_type = KEY_GEM;
3149 		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3150 		tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3151 		break;
3152 	default:
3153 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3154 
3155 		return -EOPNOTSUPP;
3156 	}
3157 
3158 	switch (cmd) {
3159 	case SET_KEY:
3160 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3161 				 key_conf->keyidx, key_type,
3162 				 key_conf->keylen, key_conf->key,
3163 				 tx_seq_32, tx_seq_16, sta);
3164 		if (ret < 0) {
3165 			wl1271_error("Could not add or replace key");
3166 			return ret;
3167 		}
3168 
3169 		/*
3170 		 * reconfigure the arp response if the unicast (or common)
3171 		 * encryption key type was changed
3172 		 */
3173 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3174 		    (sta || key_type == KEY_WEP) &&
3175 		    wlvif->encryption_type != key_type) {
3176 			wlvif->encryption_type = key_type;
3177 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3178 			if (ret < 0) {
3179 				wl1271_warning("build arp rsp failed: %d", ret);
3180 				return ret;
3181 			}
3182 		}
3183 		break;
3184 
3185 	case DISABLE_KEY:
3186 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3187 				     key_conf->keyidx, key_type,
3188 				     key_conf->keylen, key_conf->key,
3189 				     0, 0, sta);
3190 		if (ret < 0) {
3191 			wl1271_error("Could not remove key");
3192 			return ret;
3193 		}
3194 		break;
3195 
3196 	default:
3197 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3198 		return -EOPNOTSUPP;
3199 	}
3200 
3201 	return ret;
3202 }
3203 EXPORT_SYMBOL_GPL(wlcore_set_key);
3204 
3205 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3206 			     struct ieee80211_vif *vif,
3207 			     struct cfg80211_scan_request *req)
3208 {
3209 	struct wl1271 *wl = hw->priv;
3210 	int ret;
3211 	u8 *ssid = NULL;
3212 	size_t len = 0;
3213 
3214 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3215 
3216 	if (req->n_ssids) {
3217 		ssid = req->ssids[0].ssid;
3218 		len = req->ssids[0].ssid_len;
3219 	}
3220 
3221 	mutex_lock(&wl->mutex);
3222 
3223 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3224 		/*
3225 		 * We cannot return -EBUSY here because cfg80211 will expect
3226 		 * a call to ieee80211_scan_completed if we do; in that case
3227 		 * there won't be any such call.
3228 		 */
3229 		ret = -EAGAIN;
3230 		goto out;
3231 	}
3232 
3233 	ret = wl1271_ps_elp_wakeup(wl);
3234 	if (ret < 0)
3235 		goto out;
3236 
3237 	/* fail if there is any role in ROC */
3238 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3239 		/* don't allow scanning right now */
3240 		ret = -EBUSY;
3241 		goto out_sleep;
3242 	}
3243 
3244 	ret = wl1271_scan(hw->priv, vif, ssid, len, req);
3245 out_sleep:
3246 	wl1271_ps_elp_sleep(wl);
3247 out:
3248 	mutex_unlock(&wl->mutex);
3249 
3250 	return ret;
3251 }
3252 
3253 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3254 				     struct ieee80211_vif *vif)
3255 {
3256 	struct wl1271 *wl = hw->priv;
3257 	int ret;
3258 
3259 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3260 
3261 	mutex_lock(&wl->mutex);
3262 
3263 	if (unlikely(wl->state != WLCORE_STATE_ON))
3264 		goto out;
3265 
3266 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3267 		goto out;
3268 
3269 	ret = wl1271_ps_elp_wakeup(wl);
3270 	if (ret < 0)
3271 		goto out;
3272 
3273 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3274 		ret = wl1271_scan_stop(wl);
3275 		if (ret < 0)
3276 			goto out_sleep;
3277 	}
3278 
3279 	/*
3280 	 * Rearm the tx watchdog just before idling scan. This
3281 	 * prevents just-finished scans from triggering the watchdog
3282 	 */
3283 	wl12xx_rearm_tx_watchdog_locked(wl);
3284 
3285 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3286 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3287 	wl->scan_vif = NULL;
3288 	wl->scan.req = NULL;
3289 	ieee80211_scan_completed(wl->hw, true);
3290 
3291 out_sleep:
3292 	wl1271_ps_elp_sleep(wl);
3293 out:
3294 	mutex_unlock(&wl->mutex);
3295 
3296 	cancel_delayed_work_sync(&wl->scan_complete_work);
3297 }
3298 
3299 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3300 				      struct ieee80211_vif *vif,
3301 				      struct cfg80211_sched_scan_request *req,
3302 				      struct ieee80211_sched_scan_ies *ies)
3303 {
3304 	struct wl1271 *wl = hw->priv;
3305 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3306 	int ret;
3307 
3308 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3309 
3310 	mutex_lock(&wl->mutex);
3311 
3312 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3313 		ret = -EAGAIN;
3314 		goto out;
3315 	}
3316 
3317 	ret = wl1271_ps_elp_wakeup(wl);
3318 	if (ret < 0)
3319 		goto out;
3320 
3321 	ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
3322 	if (ret < 0)
3323 		goto out_sleep;
3324 
3325 	ret = wl1271_scan_sched_scan_start(wl, wlvif);
3326 	if (ret < 0)
3327 		goto out_sleep;
3328 
3329 	wl->sched_scanning = true;
3330 
3331 out_sleep:
3332 	wl1271_ps_elp_sleep(wl);
3333 out:
3334 	mutex_unlock(&wl->mutex);
3335 	return ret;
3336 }
3337 
3338 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3339 				      struct ieee80211_vif *vif)
3340 {
3341 	struct wl1271 *wl = hw->priv;
3342 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3343 	int ret;
3344 
3345 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3346 
3347 	mutex_lock(&wl->mutex);
3348 
3349 	if (unlikely(wl->state != WLCORE_STATE_ON))
3350 		goto out;
3351 
3352 	ret = wl1271_ps_elp_wakeup(wl);
3353 	if (ret < 0)
3354 		goto out;
3355 
3356 	wl1271_scan_sched_scan_stop(wl, wlvif);
3357 
3358 	wl1271_ps_elp_sleep(wl);
3359 out:
3360 	mutex_unlock(&wl->mutex);
3361 }
3362 
3363 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3364 {
3365 	struct wl1271 *wl = hw->priv;
3366 	int ret = 0;
3367 
3368 	mutex_lock(&wl->mutex);
3369 
3370 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3371 		ret = -EAGAIN;
3372 		goto out;
3373 	}
3374 
3375 	ret = wl1271_ps_elp_wakeup(wl);
3376 	if (ret < 0)
3377 		goto out;
3378 
3379 	ret = wl1271_acx_frag_threshold(wl, value);
3380 	if (ret < 0)
3381 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3382 
3383 	wl1271_ps_elp_sleep(wl);
3384 
3385 out:
3386 	mutex_unlock(&wl->mutex);
3387 
3388 	return ret;
3389 }
3390 
3391 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3392 {
3393 	struct wl1271 *wl = hw->priv;
3394 	struct wl12xx_vif *wlvif;
3395 	int ret = 0;
3396 
3397 	mutex_lock(&wl->mutex);
3398 
3399 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3400 		ret = -EAGAIN;
3401 		goto out;
3402 	}
3403 
3404 	ret = wl1271_ps_elp_wakeup(wl);
3405 	if (ret < 0)
3406 		goto out;
3407 
3408 	wl12xx_for_each_wlvif(wl, wlvif) {
3409 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3410 		if (ret < 0)
3411 			wl1271_warning("set rts threshold failed: %d", ret);
3412 	}
3413 	wl1271_ps_elp_sleep(wl);
3414 
3415 out:
3416 	mutex_unlock(&wl->mutex);
3417 
3418 	return ret;
3419 }
3420 
3421 static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
3422 			    int offset)
3423 {
3424 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3425 	u8 ssid_len;
3426 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
3427 					 skb->len - offset);
3428 
3429 	if (!ptr) {
3430 		wl1271_error("No SSID in IEs!");
3431 		return -ENOENT;
3432 	}
3433 
3434 	ssid_len = ptr[1];
3435 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
3436 		wl1271_error("SSID is too long!");
3437 		return -EINVAL;
3438 	}
3439 
3440 	wlvif->ssid_len = ssid_len;
3441 	memcpy(wlvif->ssid, ptr+2, ssid_len);
3442 	return 0;
3443 }
3444 
3445 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3446 {
3447 	int len;
3448 	const u8 *next, *end = skb->data + skb->len;
3449 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3450 					skb->len - ieoffset);
3451 	if (!ie)
3452 		return;
3453 	len = ie[1] + 2;
3454 	next = ie + len;
3455 	memmove(ie, next, end - next);
3456 	skb_trim(skb, skb->len - len);
3457 }
3458 
3459 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3460 					    unsigned int oui, u8 oui_type,
3461 					    int ieoffset)
3462 {
3463 	int len;
3464 	const u8 *next, *end = skb->data + skb->len;
3465 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3466 					       skb->data + ieoffset,
3467 					       skb->len - ieoffset);
3468 	if (!ie)
3469 		return;
3470 	len = ie[1] + 2;
3471 	next = ie + len;
3472 	memmove(ie, next, end - next);
3473 	skb_trim(skb, skb->len - len);
3474 }
3475 
3476 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3477 					 struct ieee80211_vif *vif)
3478 {
3479 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3480 	struct sk_buff *skb;
3481 	int ret;
3482 
3483 	skb = ieee80211_proberesp_get(wl->hw, vif);
3484 	if (!skb)
3485 		return -EOPNOTSUPP;
3486 
3487 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3488 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3489 				      skb->data,
3490 				      skb->len, 0,
3491 				      rates);
3492 	dev_kfree_skb(skb);
3493 
3494 	if (ret < 0)
3495 		goto out;
3496 
3497 	wl1271_debug(DEBUG_AP, "probe response updated");
3498 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3499 
3500 out:
3501 	return ret;
3502 }
3503 
3504 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3505 					     struct ieee80211_vif *vif,
3506 					     u8 *probe_rsp_data,
3507 					     size_t probe_rsp_len,
3508 					     u32 rates)
3509 {
3510 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3511 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3512 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3513 	int ssid_ie_offset, ie_offset, templ_len;
3514 	const u8 *ptr;
3515 
3516 	/* no need to change probe response if the SSID is set correctly */
3517 	if (wlvif->ssid_len > 0)
3518 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3519 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3520 					       probe_rsp_data,
3521 					       probe_rsp_len, 0,
3522 					       rates);
3523 
3524 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3525 		wl1271_error("probe_rsp template too big");
3526 		return -EINVAL;
3527 	}
3528 
3529 	/* start searching from IE offset */
3530 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3531 
3532 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3533 			       probe_rsp_len - ie_offset);
3534 	if (!ptr) {
3535 		wl1271_error("No SSID in beacon!");
3536 		return -EINVAL;
3537 	}
3538 
3539 	ssid_ie_offset = ptr - probe_rsp_data;
3540 	ptr += (ptr[1] + 2);
3541 
3542 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3543 
3544 	/* insert SSID from bss_conf */
3545 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3546 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3547 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3548 	       bss_conf->ssid, bss_conf->ssid_len);
3549 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3550 
3551 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3552 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3553 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3554 
3555 	return wl1271_cmd_template_set(wl, wlvif->role_id,
3556 				       CMD_TEMPL_AP_PROBE_RESPONSE,
3557 				       probe_rsp_templ,
3558 				       templ_len, 0,
3559 				       rates);
3560 }
3561 
3562 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3563 				       struct ieee80211_vif *vif,
3564 				       struct ieee80211_bss_conf *bss_conf,
3565 				       u32 changed)
3566 {
3567 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3568 	int ret = 0;
3569 
3570 	if (changed & BSS_CHANGED_ERP_SLOT) {
3571 		if (bss_conf->use_short_slot)
3572 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3573 		else
3574 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3575 		if (ret < 0) {
3576 			wl1271_warning("Set slot time failed %d", ret);
3577 			goto out;
3578 		}
3579 	}
3580 
3581 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3582 		if (bss_conf->use_short_preamble)
3583 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3584 		else
3585 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3586 	}
3587 
3588 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3589 		if (bss_conf->use_cts_prot)
3590 			ret = wl1271_acx_cts_protect(wl, wlvif,
3591 						     CTSPROTECT_ENABLE);
3592 		else
3593 			ret = wl1271_acx_cts_protect(wl, wlvif,
3594 						     CTSPROTECT_DISABLE);
3595 		if (ret < 0) {
3596 			wl1271_warning("Set ctsprotect failed %d", ret);
3597 			goto out;
3598 		}
3599 	}
3600 
3601 out:
3602 	return ret;
3603 }
3604 
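/*
 * Upload the beacon template and, unless usermode already provided a
 * probe response explicitly, derive the probe-response template from
 * the beacon (with the TIM and P2P IEs stripped).
 */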
3605 static int wlcore_set_beacon_template(struct wl1271 *wl,
3606 				      struct ieee80211_vif *vif,
3607 				      bool is_ap)
3608 {
3609 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3610 	struct ieee80211_hdr *hdr;
3611 	u32 min_rate;
3612 	int ret;
3613 	int ieoffset = offsetof(struct ieee80211_mgmt,
3614 				u.beacon.variable);
3615 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3616 	u16 tmpl_id;
3617 
3618 	if (!beacon) {
3619 		ret = -EINVAL;
3620 		goto out;
3621 	}
3622 
3623 	wl1271_debug(DEBUG_MASTER, "beacon updated");
3624 
3625 	ret = wl1271_ssid_set(vif, beacon, ieoffset);
3626 	if (ret < 0) {
3627 		dev_kfree_skb(beacon);
3628 		goto out;
3629 	}
3630 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3631 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3632 		CMD_TEMPL_BEACON;
3633 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3634 				      beacon->data,
3635 				      beacon->len, 0,
3636 				      min_rate);
3637 	if (ret < 0) {
3638 		dev_kfree_skb(beacon);
3639 		goto out;
3640 	}
3641 
3642 	/*
3643 	 * In case a probe-resp template was already set explicitly
3644 	 * by usermode, don't derive one from the beacon data.
3645 	 */
3646 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3647 		goto end_bcn;
3648 
3649 	/* remove TIM ie from probe response */
3650 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3651 
3652 	/*
3653 	 * Remove the p2p ie from the probe response.
3654 	 * The fw responds to probe requests that don't include
3655 	 * the p2p ie. Probe requests with the p2p ie will be passed up,
3656 	 * and will be answered by the supplicant (the spec
3657 	 * forbids including the p2p ie when responding to probe
3658 	 * requests that didn't include it).
3659 	 */
3660 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3661 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3662 
3663 	hdr = (struct ieee80211_hdr *) beacon->data;
3664 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3665 					 IEEE80211_STYPE_PROBE_RESP);
3666 	if (is_ap)
3667 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3668 							   beacon->data,
3669 							   beacon->len,
3670 							   min_rate);
3671 	else
3672 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3673 					      CMD_TEMPL_PROBE_RESPONSE,
3674 					      beacon->data,
3675 					      beacon->len, 0,
3676 					      min_rate);
3677 end_bcn:
3678 	dev_kfree_skb(beacon);
3679 	if (ret < 0)
3680 		goto out;
3681 
3682 out:
3683 	return ret;
3684 }
3685 
3686 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3687 					  struct ieee80211_vif *vif,
3688 					  struct ieee80211_bss_conf *bss_conf,
3689 					  u32 changed)
3690 {
3691 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3692 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3693 	int ret = 0;
3694 
3695 	if ((changed & BSS_CHANGED_BEACON_INT)) {
3696 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3697 			bss_conf->beacon_int);
3698 
3699 		wlvif->beacon_int = bss_conf->beacon_int;
3700 	}
3701 
3702 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3703 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3704 
3705 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3706 	}
3707 
3708 	if ((changed & BSS_CHANGED_BEACON)) {
3709 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
3710 		if (ret < 0)
3711 			goto out;
3712 	}
3713 
3714 out:
3715 	if (ret != 0)
3716 		wl1271_error("beacon info change failed: %d", ret);
3717 	return ret;
3718 }
3719 
3720 /* AP mode changes */
3721 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3722 				       struct ieee80211_vif *vif,
3723 				       struct ieee80211_bss_conf *bss_conf,
3724 				       u32 changed)
3725 {
3726 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3727 	int ret = 0;
3728 
3729 	if ((changed & BSS_CHANGED_BASIC_RATES)) {
3730 		u32 rates = bss_conf->basic_rates;
3731 
3732 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3733 								 wlvif->band);
3734 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3735 							wlvif->basic_rate_set);
3736 
3737 		ret = wl1271_init_ap_rates(wl, wlvif);
3738 		if (ret < 0) {
3739 			wl1271_error("AP rate policy change failed %d", ret);
3740 			goto out;
3741 		}
3742 
3743 		ret = wl1271_ap_init_templates(wl, vif);
3744 		if (ret < 0)
3745 			goto out;
3746 
3747 		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3748 		if (ret < 0)
3749 			goto out;
3750 
3751 		ret = wlcore_set_beacon_template(wl, vif, true);
3752 		if (ret < 0)
3753 			goto out;
3754 	}
3755 
3756 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3757 	if (ret < 0)
3758 		goto out;
3759 
3760 	if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
3761 		if (bss_conf->enable_beacon) {
3762 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3763 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3764 				if (ret < 0)
3765 					goto out;
3766 
3767 				ret = wl1271_ap_init_hwenc(wl, wlvif);
3768 				if (ret < 0)
3769 					goto out;
3770 
3771 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3772 				wl1271_debug(DEBUG_AP, "started AP");
3773 			}
3774 		} else {
3775 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3776 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3777 				if (ret < 0)
3778 					goto out;
3779 
3780 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3781 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3782 					  &wlvif->flags);
3783 				wl1271_debug(DEBUG_AP, "stopped AP");
3784 			}
3785 		}
3786 	}
3787 
3788 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3789 	if (ret < 0)
3790 		goto out;
3791 
3792 	/* Handle HT information change */
3793 	if ((changed & BSS_CHANGED_HT) &&
3794 	    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3795 		ret = wl1271_acx_set_ht_information(wl, wlvif,
3796 					bss_conf->ht_operation_mode);
3797 		if (ret < 0) {
3798 			wl1271_warning("Set ht information failed %d", ret);
3799 			goto out;
3800 		}
3801 	}
3802 
3803 out:
3804 	return;
3805 }
3806 
3807 /* STA/IBSS mode changes */
3808 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3809 					struct ieee80211_vif *vif,
3810 					struct ieee80211_bss_conf *bss_conf,
3811 					u32 changed)
3812 {
3813 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3814 	bool do_join = false, set_assoc = false;
3815 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3816 	bool ibss_joined = false;
3817 	u32 sta_rate_set = 0;
3818 	int ret;
3819 	struct ieee80211_sta *sta;
3820 	bool sta_exists = false;
3821 	struct ieee80211_sta_ht_cap sta_ht_cap;
3822 
3823 	if (is_ibss) {
3824 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
3825 						     changed);
3826 		if (ret < 0)
3827 			goto out;
3828 	}
3829 
3830 	if (changed & BSS_CHANGED_IBSS) {
3831 		if (bss_conf->ibss_joined) {
3832 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
3833 			ibss_joined = true;
3834 		} else {
3835 			if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
3836 					       &wlvif->flags))
3837 				wl1271_unjoin(wl, wlvif);
3838 		}
3839 	}
3840 
3841 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3842 		do_join = true;
3843 
3844 	/* Need to update the SSID (for filtering etc) */
3845 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3846 		do_join = true;
3847 
3848 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3849 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3850 			     bss_conf->enable_beacon ? "enabled" : "disabled");
3851 
3852 		do_join = true;
3853 	}
3854 
3855 	if (changed & BSS_CHANGED_IDLE && !is_ibss) {
3856 		ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
3857 		if (ret < 0)
3858 			wl1271_warning("idle mode change failed %d", ret);
3859 	}
3860 
3861 	if ((changed & BSS_CHANGED_CQM)) {
3862 		bool enable = false;
3863 		if (bss_conf->cqm_rssi_thold)
3864 			enable = true;
3865 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
3866 						  bss_conf->cqm_rssi_thold,
3867 						  bss_conf->cqm_rssi_hyst);
3868 		if (ret < 0)
3869 			goto out;
3870 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
3871 	}
3872 
3873 	if (changed & BSS_CHANGED_BSSID)
3874 		if (!is_zero_ether_addr(bss_conf->bssid)) {
3875 			ret = wl12xx_cmd_build_null_data(wl, wlvif);
3876 			if (ret < 0)
3877 				goto out;
3878 
3879 			ret = wl1271_build_qos_null_data(wl, vif);
3880 			if (ret < 0)
3881 				goto out;
3882 		}
3883 
3884 	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
3885 		rcu_read_lock();
3886 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
3887 		if (!sta)
3888 			goto sta_not_found;
3889 
3890 		/* save the supp_rates of the ap */
3891 		sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
3892 		if (sta->ht_cap.ht_supported)
3893 			sta_rate_set |=
3894 			  (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
3895 			  (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
3896 		sta_ht_cap = sta->ht_cap;
3897 		sta_exists = true;
3898 
3899 sta_not_found:
3900 		rcu_read_unlock();
3901 	}
3902 
3903 	if ((changed & BSS_CHANGED_ASSOC)) {
3904 		if (bss_conf->assoc) {
3905 			u32 rates;
3906 			int ieoffset;
3907 			wlvif->aid = bss_conf->aid;
3908 			wlvif->channel_type = bss_conf->channel_type;
3909 			wlvif->beacon_int = bss_conf->beacon_int;
3910 			do_join = true;
3911 			set_assoc = true;
3912 
3913 			/*
3914 			 * use basic rates from AP, and determine lowest rate
3915 			 * to use with control frames.
3916 			 */
3917 			rates = bss_conf->basic_rates;
3918 			wlvif->basic_rate_set =
3919 				wl1271_tx_enabled_rates_get(wl, rates,
3920 							    wlvif->band);
3921 			wlvif->basic_rate =
3922 				wl1271_tx_min_rate_get(wl,
3923 						       wlvif->basic_rate_set);
3924 			if (sta_rate_set)
3925 				wlvif->rate_set =
3926 					wl1271_tx_enabled_rates_get(wl,
3927 								sta_rate_set,
3928 								wlvif->band);
3929 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3930 			if (ret < 0)
3931 				goto out;
3932 
3933 			/*
3934 			 * with wl1271, we don't need to update the
3935 			 * beacon_int and dtim_period, because the firmware
3936 			 * updates them by itself when the first beacon is
3937 			 * received after a join.
3938 			 */
3939 			ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
3940 			if (ret < 0)
3941 				goto out;
3942 
3943 			/*
3944 			 * Get a template for hardware connection maintenance
3945 			 */
3946 			dev_kfree_skb(wlvif->probereq);
3947 			wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
3948 									wlvif,
3949 									NULL);
3950 			ieoffset = offsetof(struct ieee80211_mgmt,
3951 					    u.probe_req.variable);
3952 			wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
3953 
3954 			/* enable the connection monitoring feature */
3955 			ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
3956 			if (ret < 0)
3957 				goto out;
3958 		} else {
3959 			/* use defaults when not associated */
3960 			bool was_assoc =
3961 			    !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
3962 						 &wlvif->flags);
3963 			bool was_ifup =
3964 			    !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
3965 						 &wlvif->flags);
3966 			wlvif->aid = 0;
3967 
3968 			/* free probe-request template */
3969 			dev_kfree_skb(wlvif->probereq);
3970 			wlvif->probereq = NULL;
3971 
3972 			/* revert back to minimum rates for the current band */
3973 			wl1271_set_band_rate(wl, wlvif);
3974 			wlvif->basic_rate =
3975 				wl1271_tx_min_rate_get(wl,
3976 						       wlvif->basic_rate_set);
3977 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3978 			if (ret < 0)
3979 				goto out;
3980 
3981 			/* disable connection monitor features */
3982 			ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
			if (ret < 0)
				goto out;
3983 
3984 			/* Disable the keep-alive feature */
3985 			ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3986 			if (ret < 0)
3987 				goto out;
3988 
3989 			/* restore the bssid filter and go to dummy bssid */
3990 			if (was_assoc) {
3991 				/*
3992 				 * we might have to disable roc, if there was
3993 				 * no IF_OPER_UP notification.
3994 				 */
3995 				if (!was_ifup) {
3996 					ret = wl12xx_croc(wl, wlvif->role_id);
3997 					if (ret < 0)
3998 						goto out;
3999 				}
4000 				/*
4001 				 * (we also need to disable ROC in case of
4002 				 * roaming on the same channel, until we
4003 				 * have a better flow...)
4004 				 */
4005 				if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
4006 					ret = wl12xx_croc(wl,
4007 							  wlvif->dev_role_id);
4008 					if (ret < 0)
4009 						goto out;
4010 				}
4011 
4012 				wl1271_unjoin(wl, wlvif);
4013 				if (!bss_conf->idle)
4014 					wl12xx_start_dev(wl, wlvif);
4015 			}
4016 		}
4017 	}
4018 
4019 	if (changed & BSS_CHANGED_IBSS) {
4020 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4021 			     bss_conf->ibss_joined);
4022 
4023 		if (bss_conf->ibss_joined) {
4024 			u32 rates = bss_conf->basic_rates;
4025 			wlvif->basic_rate_set =
4026 				wl1271_tx_enabled_rates_get(wl, rates,
4027 							    wlvif->band);
4028 			wlvif->basic_rate =
4029 				wl1271_tx_min_rate_get(wl,
4030 						       wlvif->basic_rate_set);
4031 
4032 			/* by default, use 11b + OFDM rates */
4033 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4034 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4035 			if (ret < 0)
4036 				goto out;
4037 		}
4038 	}
4039 
4040 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4041 	if (ret < 0)
4042 		goto out;
4043 
4044 	if (do_join) {
4045 		ret = wl1271_join(wl, wlvif, set_assoc);
4046 		if (ret < 0) {
4047 			wl1271_warning("cmd join failed %d", ret);
4048 			goto out;
4049 		}
4050 
4051 		/* ROC until connected (after EAPOL exchange) */
4052 		if (!is_ibss) {
4053 			ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
4054 			if (ret < 0)
4055 				goto out;
4056 
4057 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4058 				wl12xx_set_authorized(wl, wlvif);
4059 		}
4060 		/*
4061 		 * stop device role if started (we might already be in
4062 		 * STA/IBSS role).
4063 		 */
4064 		if (wl12xx_dev_role_started(wlvif)) {
4065 			ret = wl12xx_stop_dev(wl, wlvif);
4066 			if (ret < 0)
4067 				goto out;
4068 		}
4069 	}
4070 
4071 	/* Handle new association with HT. Do this after join. */
4072 	if (sta_exists) {
4073 		if ((changed & BSS_CHANGED_HT) &&
4074 		    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
4075 			ret = wl1271_acx_set_ht_capabilities(wl,
4076 							     &sta_ht_cap,
4077 							     true,
4078 							     wlvif->sta.hlid);
4079 			if (ret < 0) {
4080 				wl1271_warning("Set ht cap true failed %d",
4081 					       ret);
4082 				goto out;
4083 			}
4084 		}
4085 		/* handle new association without HT and disassociation */
4086 		else if (changed & BSS_CHANGED_ASSOC) {
4087 			ret = wl1271_acx_set_ht_capabilities(wl,
4088 							     &sta_ht_cap,
4089 							     false,
4090 							     wlvif->sta.hlid);
4091 			if (ret < 0) {
4092 				wl1271_warning("Set ht cap false failed %d",
4093 					       ret);
4094 				goto out;
4095 			}
4096 		}
4097 	}
4098 
4099 	/* Handle HT information change. Done after join. */
4100 	if ((changed & BSS_CHANGED_HT) &&
4101 	    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
4102 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4103 					bss_conf->ht_operation_mode);
4104 		if (ret < 0) {
4105 			wl1271_warning("Set ht information failed %d", ret);
4106 			goto out;
4107 		}
4108 	}
4109 
4110 	/* Handle arp filtering. Done after join. */
4111 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4112 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4113 		__be32 addr = bss_conf->arp_addr_list[0];
4114 		wlvif->sta.qos = bss_conf->qos;
4115 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4116 
4117 		if (bss_conf->arp_addr_cnt == 1 &&
4118 		    bss_conf->arp_filter_enabled) {
4119 			wlvif->ip_addr = addr;
4120 			/*
4121 			 * The template should have been configured only upon
4122 			 * association. However, it seems that the correct IP
4123 			 * isn't being set (when sending), so we have to
4124 			 * reconfigure the template upon every IP change.
4125 			 */
4126 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4127 			if (ret < 0) {
4128 				wl1271_warning("build arp rsp failed: %d", ret);
4129 				goto out;
4130 			}
4131 
4132 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4133 				(ACX_ARP_FILTER_ARP_FILTERING |
4134 				 ACX_ARP_FILTER_AUTO_ARP),
4135 				addr);
4136 		} else {
4137 			wlvif->ip_addr = 0;
4138 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4139 		}
4140 
4141 		if (ret < 0)
4142 			goto out;
4143 	}
4144 
4145 out:
4146 	return;
4147 }
4148 
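/*
 * mac80211 bss_info_changed handler: takes wl->mutex, wakes the chip from
 * ELP and dispatches to the AP or STA/IBSS handler above. TX is flushed
 * before beacons are disabled, and pending connection-loss work is
 * cancelled when the association state changes.
 */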
4149 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4150 				       struct ieee80211_vif *vif,
4151 				       struct ieee80211_bss_conf *bss_conf,
4152 				       u32 changed)
4153 {
4154 	struct wl1271 *wl = hw->priv;
4155 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4156 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4157 	int ret;
4158 
4159 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
4160 		     (int)changed);
4161 
4162 	/*
4163 	 * make sure to cancel pending disconnections if our association
4164 	 * state changed
4165 	 */
4166 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4167 		cancel_delayed_work_sync(&wl->connection_loss_work);
4168 
4169 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4170 	    !bss_conf->enable_beacon)
4171 		wl1271_tx_flush(wl);
4172 
4173 	mutex_lock(&wl->mutex);
4174 
4175 	if (unlikely(wl->state != WLCORE_STATE_ON))
4176 		goto out;
4177 
4178 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4179 		goto out;
4180 
4181 	ret = wl1271_ps_elp_wakeup(wl);
4182 	if (ret < 0)
4183 		goto out;
4184 
4185 	if (is_ap)
4186 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4187 	else
4188 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4189 
4190 	wl1271_ps_elp_sleep(wl);
4191 
4192 out:
4193 	mutex_unlock(&wl->mutex);
4194 }
4195 
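/*
 * Configure the EDCA (AC) and TID parameters for one mac80211 queue.
 * U-APSD queues use the UPSD trigger PS scheme, the rest use legacy PS.
 */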
4196 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4197 			     struct ieee80211_vif *vif, u16 queue,
4198 			     const struct ieee80211_tx_queue_params *params)
4199 {
4200 	struct wl1271 *wl = hw->priv;
4201 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4202 	u8 ps_scheme;
4203 	int ret = 0;
4204 
4205 	mutex_lock(&wl->mutex);
4206 
4207 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4208 
4209 	if (params->uapsd)
4210 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4211 	else
4212 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4213 
4214 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4215 		goto out;
4216 
4217 	ret = wl1271_ps_elp_wakeup(wl);
4218 	if (ret < 0)
4219 		goto out;
4220 
4221 	/*
4222 	 * the txop is configured by mac80211 in units of 32us,
4223 	 * but the firmware expects it in us, hence the << 5 below.
4224 	 */
4225 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4226 				params->cw_min, params->cw_max,
4227 				params->aifs, params->txop << 5);
4228 	if (ret < 0)
4229 		goto out_sleep;
4230 
4231 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4232 				 CONF_CHANNEL_TYPE_EDCF,
4233 				 wl1271_tx_get_queue(queue),
4234 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4235 				 0, 0);
4236 
4237 out_sleep:
4238 	wl1271_ps_elp_sleep(wl);
4239 
4240 out:
4241 	mutex_unlock(&wl->mutex);
4242 
4243 	return ret;
4244 }
4245 
4246 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4247 			     struct ieee80211_vif *vif)
4248 {
4249 
4250 	struct wl1271 *wl = hw->priv;
4251 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4252 	u64 mactime = ULLONG_MAX;
4253 	int ret;
4254 
4255 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4256 
4257 	mutex_lock(&wl->mutex);
4258 
4259 	if (unlikely(wl->state != WLCORE_STATE_ON))
4260 		goto out;
4261 
4262 	ret = wl1271_ps_elp_wakeup(wl);
4263 	if (ret < 0)
4264 		goto out;
4265 
4266 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4267 	if (ret < 0)
4268 		goto out_sleep;
4269 
4270 out_sleep:
4271 	wl1271_ps_elp_sleep(wl);
4272 
4273 out:
4274 	mutex_unlock(&wl->mutex);
4275 	return mactime;
4276 }
4277 
4278 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4279 				struct survey_info *survey)
4280 {
4281 	struct ieee80211_conf *conf = &hw->conf;
4282 
4283 	if (idx != 0)
4284 		return -ENOENT;
4285 
4286 	survey->channel = conf->channel;
4287 	survey->filled = 0;
4288 	return 0;
4289 }
4290 
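/*
 * Reserve a HLID for a new AP-mode station: allocate a link, record the
 * station's MAC address in wl->links and bump the active station count.
 */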
4291 static int wl1271_allocate_sta(struct wl1271 *wl,
4292 			     struct wl12xx_vif *wlvif,
4293 			     struct ieee80211_sta *sta)
4294 {
4295 	struct wl1271_station *wl_sta;
4296 	int ret;
4297 
4298 
4299 	if (wl->active_sta_count >= AP_MAX_STATIONS) {
4300 		wl1271_warning("could not allocate HLID - too many stations");
4301 		return -EBUSY;
4302 	}
4303 
4304 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4305 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4306 	if (ret < 0) {
4307 		wl1271_warning("could not allocate HLID - too many links");
4308 		return -EBUSY;
4309 	}
4310 
4311 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4312 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4313 	wl->active_sta_count++;
4314 	return 0;
4315 }
4316 
4317 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4318 {
4319 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4320 		return;
4321 
4322 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4323 	memset(wl->links[hlid].addr, 0, ETH_ALEN);
4324 	wl->links[hlid].ba_bitmap = 0;
4325 	__clear_bit(hlid, &wl->ap_ps_map);
4326 	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4327 	wl12xx_free_link(wl, wlvif, &hlid);
4328 	wl->active_sta_count--;
4329 
4330 	/*
4331 	 * rearm the tx watchdog when the last STA is freed - give the FW a
4332 	 * chance to return STA-buffered packets before complaining.
4333 	 */
4334 	if (wl->active_sta_count == 0)
4335 		wl12xx_rearm_tx_watchdog_locked(wl);
4336 }
4337 
4338 static int wl12xx_sta_add(struct wl1271 *wl,
4339 			  struct wl12xx_vif *wlvif,
4340 			  struct ieee80211_sta *sta)
4341 {
4342 	struct wl1271_station *wl_sta;
4343 	int ret = 0;
4344 	u8 hlid;
4345 
4346 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4347 
4348 	ret = wl1271_allocate_sta(wl, wlvif, sta);
4349 	if (ret < 0)
4350 		return ret;
4351 
4352 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4353 	hlid = wl_sta->hlid;
4354 
4355 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4356 	if (ret < 0)
4357 		wl1271_free_sta(wl, wlvif, hlid);
4358 
4359 	return ret;
4360 }
4361 
4362 static int wl12xx_sta_remove(struct wl1271 *wl,
4363 			     struct wl12xx_vif *wlvif,
4364 			     struct ieee80211_sta *sta)
4365 {
4366 	struct wl1271_station *wl_sta;
4367 	int ret = 0, id;
4368 
4369 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4370 
4371 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4372 	id = wl_sta->hlid;
4373 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4374 		return -EINVAL;
4375 
4376 	ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4377 	if (ret < 0)
4378 		return ret;
4379 
4380 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4381 	return ret;
4382 }
4383 
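/*
 * Map mac80211 station state transitions to firmware commands:
 * - AP mode: NOTEXIST->NONE adds the peer, NONE->NOTEXIST removes it, and
 *   reaching AUTHORIZED sets the peer state and HT capabilities in the FW.
 * - STA mode: reaching AUTHORIZED marks the vif authorized (ending ROC),
 *   dropping back to ASSOC clears the authorized flag.
 */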
4384 static int wl12xx_update_sta_state(struct wl1271 *wl,
4385 				   struct wl12xx_vif *wlvif,
4386 				   struct ieee80211_sta *sta,
4387 				   enum ieee80211_sta_state old_state,
4388 				   enum ieee80211_sta_state new_state)
4389 {
4390 	struct wl1271_station *wl_sta;
4391 	u8 hlid;
4392 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4393 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4394 	int ret;
4395 
4396 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4397 	hlid = wl_sta->hlid;
4398 
4399 	/* Add station (AP mode) */
4400 	if (is_ap &&
4401 	    old_state == IEEE80211_STA_NOTEXIST &&
4402 	    new_state == IEEE80211_STA_NONE)
4403 		return wl12xx_sta_add(wl, wlvif, sta);
4404 
4405 	/* Remove station (AP mode) */
4406 	if (is_ap &&
4407 	    old_state == IEEE80211_STA_NONE &&
4408 	    new_state == IEEE80211_STA_NOTEXIST) {
4409 		/* must not fail */
4410 		wl12xx_sta_remove(wl, wlvif, sta);
4411 		return 0;
4412 	}
4413 
4414 	/* Authorize station (AP mode) */
4415 	if (is_ap &&
4416 	    new_state == IEEE80211_STA_AUTHORIZED) {
4417 		ret = wl12xx_cmd_set_peer_state(wl, hlid);
4418 		if (ret < 0)
4419 			return ret;
4420 
4421 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4422 						     hlid);
4423 		return ret;
4424 	}
4425 
4426 	/* Authorize station */
4427 	if (is_sta &&
4428 	    new_state == IEEE80211_STA_AUTHORIZED) {
4429 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4430 		return wl12xx_set_authorized(wl, wlvif);
4431 	}
4432 
4433 	if (is_sta &&
4434 	    old_state == IEEE80211_STA_AUTHORIZED &&
4435 	    new_state == IEEE80211_STA_ASSOC) {
4436 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4437 		return 0;
4438 	}
4439 
4440 	return 0;
4441 }
4442 
4443 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4444 			       struct ieee80211_vif *vif,
4445 			       struct ieee80211_sta *sta,
4446 			       enum ieee80211_sta_state old_state,
4447 			       enum ieee80211_sta_state new_state)
4448 {
4449 	struct wl1271 *wl = hw->priv;
4450 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4451 	int ret;
4452 
4453 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4454 		     sta->aid, old_state, new_state);
4455 
4456 	mutex_lock(&wl->mutex);
4457 
4458 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
4459 		ret = -EBUSY;
4460 		goto out;
4461 	}
4462 
4463 	ret = wl1271_ps_elp_wakeup(wl);
4464 	if (ret < 0)
4465 		goto out;
4466 
4467 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4468 
4469 	wl1271_ps_elp_sleep(wl);
4470 out:
4471 	mutex_unlock(&wl->mutex);
4472 	if (new_state < old_state)
4473 		return 0;
4474 	return ret;
4475 }
4476 
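/*
 * AMPDU handling: only RX BA sessions are managed here (bounded by
 * RX_BA_MAX_SESSIONS); TX aggregation is set up by the firmware, so the
 * TX actions are rejected.
 */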
4477 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4478 				  struct ieee80211_vif *vif,
4479 				  enum ieee80211_ampdu_mlme_action action,
4480 				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4481 				  u8 buf_size)
4482 {
4483 	struct wl1271 *wl = hw->priv;
4484 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4485 	int ret;
4486 	u8 hlid, *ba_bitmap;
4487 
4488 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4489 		     tid);
4490 
4491 	/* sanity check - the fields in FW are only 8 bits wide */
4492 	if (WARN_ON(tid > 0xFF))
4493 		return -ENOTSUPP;
4494 
4495 	mutex_lock(&wl->mutex);
4496 
4497 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
4498 		ret = -EAGAIN;
4499 		goto out;
4500 	}
4501 
4502 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4503 		hlid = wlvif->sta.hlid;
4504 		ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4505 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4506 		struct wl1271_station *wl_sta;
4507 
4508 		wl_sta = (struct wl1271_station *)sta->drv_priv;
4509 		hlid = wl_sta->hlid;
4510 		ba_bitmap = &wl->links[hlid].ba_bitmap;
4511 	} else {
4512 		ret = -EINVAL;
4513 		goto out;
4514 	}
4515 
4516 	ret = wl1271_ps_elp_wakeup(wl);
4517 	if (ret < 0)
4518 		goto out;
4519 
4520 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4521 		     tid, action);
4522 
4523 	switch (action) {
4524 	case IEEE80211_AMPDU_RX_START:
4525 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
4526 			ret = -ENOTSUPP;
4527 			break;
4528 		}
4529 
4530 		if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4531 			ret = -EBUSY;
4532 			wl1271_error("exceeded max RX BA sessions");
4533 			break;
4534 		}
4535 
4536 		if (*ba_bitmap & BIT(tid)) {
4537 			ret = -EINVAL;
4538 			wl1271_error("cannot enable RX BA session on active "
4539 				     "tid: %d", tid);
4540 			break;
4541 		}
4542 
4543 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4544 							 hlid);
4545 		if (!ret) {
4546 			*ba_bitmap |= BIT(tid);
4547 			wl->ba_rx_session_count++;
4548 		}
4549 		break;
4550 
4551 	case IEEE80211_AMPDU_RX_STOP:
4552 		if (!(*ba_bitmap & BIT(tid))) {
4553 			/*
4554 			 * this happens on reconfig - so only output a debug
4555 			 * message for now, and don't fail the function.
4556 			 */
4557 			wl1271_debug(DEBUG_MAC80211,
4558 				     "no active RX BA session on tid: %d",
4559 				     tid);
4560 			ret = 0;
4561 			break;
4562 		}
4563 
4564 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4565 							 hlid);
4566 		if (!ret) {
4567 			*ba_bitmap &= ~BIT(tid);
4568 			wl->ba_rx_session_count--;
4569 		}
4570 		break;
4571 
4572 	/*
4573 	 * The BA initiator (TX) sessions are managed by the FW independently.
4574 	 * Falling through here on purpose for all TX AMPDU actions.
4575 	 */
4576 	case IEEE80211_AMPDU_TX_START:
4577 	case IEEE80211_AMPDU_TX_STOP:
4578 	case IEEE80211_AMPDU_TX_OPERATIONAL:
4579 		ret = -EINVAL;
4580 		break;
4581 
4582 	default:
4583 		wl1271_error("Incorrect ampdu action id=%x\n", action);
4584 		ret = -EINVAL;
4585 	}
4586 
4587 	wl1271_ps_elp_sleep(wl);
4588 
4589 out:
4590 	mutex_unlock(&wl->mutex);
4591 
4592 	return ret;
4593 }
4594 
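/*
 * Store the per-band legacy bitrate masks and, for a STA that is not yet
 * associated, apply the new rate policies to the firmware immediately.
 */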
4595 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4596 				   struct ieee80211_vif *vif,
4597 				   const struct cfg80211_bitrate_mask *mask)
4598 {
4599 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4600 	struct wl1271 *wl = hw->priv;
4601 	int i, ret = 0;
4602 
4603 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4604 		mask->control[NL80211_BAND_2GHZ].legacy,
4605 		mask->control[NL80211_BAND_5GHZ].legacy);
4606 
4607 	mutex_lock(&wl->mutex);
4608 
4609 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
4610 		wlvif->bitrate_masks[i] =
4611 			wl1271_tx_enabled_rates_get(wl,
4612 						    mask->control[i].legacy,
4613 						    i);
4614 
4615 	if (unlikely(wl->state != WLCORE_STATE_ON))
4616 		goto out;
4617 
4618 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4619 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4620 
4621 		ret = wl1271_ps_elp_wakeup(wl);
4622 		if (ret < 0)
4623 			goto out;
4624 
4625 		wl1271_set_band_rate(wl, wlvif);
4626 		wlvif->basic_rate =
4627 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4628 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4629 
4630 		wl1271_ps_elp_sleep(wl);
4631 	}
4632 out:
4633 	mutex_unlock(&wl->mutex);
4634 
4635 	return ret;
4636 }
4637 
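/*
 * Channel switch: flush TX, then issue a channel switch command on every
 * STA vif. If the chip is off, report the switch as failed to mac80211.
 */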
4638 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4639 				     struct ieee80211_channel_switch *ch_switch)
4640 {
4641 	struct wl1271 *wl = hw->priv;
4642 	struct wl12xx_vif *wlvif;
4643 	int ret;
4644 
4645 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4646 
4647 	wl1271_tx_flush(wl);
4648 
4649 	mutex_lock(&wl->mutex);
4650 
4651 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4652 		wl12xx_for_each_wlvif_sta(wl, wlvif) {
4653 			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4654 			ieee80211_chswitch_done(vif, false);
4655 		}
4656 		goto out;
4657 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4658 		goto out;
4659 	}
4660 
4661 	ret = wl1271_ps_elp_wakeup(wl);
4662 	if (ret < 0)
4663 		goto out;
4664 
4665 	/* TODO: change mac80211 to pass vif as param */
4666 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
4667 		ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
4668 
4669 		if (!ret)
4670 			set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4671 	}
4672 
4673 	wl1271_ps_elp_sleep(wl);
4674 
4675 out:
4676 	mutex_unlock(&wl->mutex);
4677 }
4678 
4679 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4680 {
4681 	struct wl1271 *wl = hw->priv;
4682 
4683 	wl1271_tx_flush(wl);
4684 }
4685 
4686 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4687 {
4688 	struct wl1271 *wl = hw->priv;
4689 	bool ret = false;
4690 
4691 	mutex_lock(&wl->mutex);
4692 
4693 	if (unlikely(wl->state != WLCORE_STATE_ON))
4694 		goto out;
4695 
4696 	/* packets are considered pending if in the TX queue or the FW */
4697 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
4698 out:
4699 	mutex_unlock(&wl->mutex);
4700 
4701 	return ret;
4702 }
4703 
4704 /* can't be const, mac80211 writes to this */
4705 static struct ieee80211_rate wl1271_rates[] = {
4706 	{ .bitrate = 10,
4707 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
4708 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
4709 	{ .bitrate = 20,
4710 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
4711 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
4712 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4713 	{ .bitrate = 55,
4714 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
4715 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
4716 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4717 	{ .bitrate = 110,
4718 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
4719 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
4720 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4721 	{ .bitrate = 60,
4722 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
4723 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4724 	{ .bitrate = 90,
4725 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
4726 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4727 	{ .bitrate = 120,
4728 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
4729 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4730 	{ .bitrate = 180,
4731 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
4732 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4733 	{ .bitrate = 240,
4734 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
4735 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4736 	{ .bitrate = 360,
4737 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4738 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4739 	{ .bitrate = 480,
4740 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
4741 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4742 	{ .bitrate = 540,
4743 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
4744 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4745 };
4746 
4747 /* can't be const, mac80211 writes to this */
4748 static struct ieee80211_channel wl1271_channels[] = {
4749 	{ .hw_value = 1, .center_freq = 2412, .max_power = 25 },
4750 	{ .hw_value = 2, .center_freq = 2417, .max_power = 25 },
4751 	{ .hw_value = 3, .center_freq = 2422, .max_power = 25 },
4752 	{ .hw_value = 4, .center_freq = 2427, .max_power = 25 },
4753 	{ .hw_value = 5, .center_freq = 2432, .max_power = 25 },
4754 	{ .hw_value = 6, .center_freq = 2437, .max_power = 25 },
4755 	{ .hw_value = 7, .center_freq = 2442, .max_power = 25 },
4756 	{ .hw_value = 8, .center_freq = 2447, .max_power = 25 },
4757 	{ .hw_value = 9, .center_freq = 2452, .max_power = 25 },
4758 	{ .hw_value = 10, .center_freq = 2457, .max_power = 25 },
4759 	{ .hw_value = 11, .center_freq = 2462, .max_power = 25 },
4760 	{ .hw_value = 12, .center_freq = 2467, .max_power = 25 },
4761 	{ .hw_value = 13, .center_freq = 2472, .max_power = 25 },
4762 	{ .hw_value = 14, .center_freq = 2484, .max_power = 25 },
4763 };
4764 
4765 /* can't be const, mac80211 writes to this */
4766 static struct ieee80211_supported_band wl1271_band_2ghz = {
4767 	.channels = wl1271_channels,
4768 	.n_channels = ARRAY_SIZE(wl1271_channels),
4769 	.bitrates = wl1271_rates,
4770 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
4771 };
4772 
4773 /* 5 GHz data rates for WL1273 */
4774 static struct ieee80211_rate wl1271_rates_5ghz[] = {
4775 	{ .bitrate = 60,
4776 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
4777 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4778 	{ .bitrate = 90,
4779 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
4780 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4781 	{ .bitrate = 120,
4782 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
4783 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4784 	{ .bitrate = 180,
4785 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
4786 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4787 	{ .bitrate = 240,
4788 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
4789 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4790 	{ .bitrate = 360,
4791 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4792 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4793 	{ .bitrate = 480,
4794 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
4795 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4796 	{ .bitrate = 540,
4797 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
4798 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4799 };
4800 
4801 /* 5 GHz band channels for WL1273 */
4802 static struct ieee80211_channel wl1271_channels_5ghz[] = {
4803 	{ .hw_value = 7, .center_freq = 5035, .max_power = 25 },
4804 	{ .hw_value = 8, .center_freq = 5040, .max_power = 25 },
4805 	{ .hw_value = 9, .center_freq = 5045, .max_power = 25 },
4806 	{ .hw_value = 11, .center_freq = 5055, .max_power = 25 },
4807 	{ .hw_value = 12, .center_freq = 5060, .max_power = 25 },
4808 	{ .hw_value = 16, .center_freq = 5080, .max_power = 25 },
4809 	{ .hw_value = 34, .center_freq = 5170, .max_power = 25 },
4810 	{ .hw_value = 36, .center_freq = 5180, .max_power = 25 },
4811 	{ .hw_value = 38, .center_freq = 5190, .max_power = 25 },
4812 	{ .hw_value = 40, .center_freq = 5200, .max_power = 25 },
4813 	{ .hw_value = 42, .center_freq = 5210, .max_power = 25 },
4814 	{ .hw_value = 44, .center_freq = 5220, .max_power = 25 },
4815 	{ .hw_value = 46, .center_freq = 5230, .max_power = 25 },
4816 	{ .hw_value = 48, .center_freq = 5240, .max_power = 25 },
4817 	{ .hw_value = 52, .center_freq = 5260, .max_power = 25 },
4818 	{ .hw_value = 56, .center_freq = 5280, .max_power = 25 },
4819 	{ .hw_value = 60, .center_freq = 5300, .max_power = 25 },
4820 	{ .hw_value = 64, .center_freq = 5320, .max_power = 25 },
4821 	{ .hw_value = 100, .center_freq = 5500, .max_power = 25 },
4822 	{ .hw_value = 104, .center_freq = 5520, .max_power = 25 },
4823 	{ .hw_value = 108, .center_freq = 5540, .max_power = 25 },
4824 	{ .hw_value = 112, .center_freq = 5560, .max_power = 25 },
4825 	{ .hw_value = 116, .center_freq = 5580, .max_power = 25 },
4826 	{ .hw_value = 120, .center_freq = 5600, .max_power = 25 },
4827 	{ .hw_value = 124, .center_freq = 5620, .max_power = 25 },
4828 	{ .hw_value = 128, .center_freq = 5640, .max_power = 25 },
4829 	{ .hw_value = 132, .center_freq = 5660, .max_power = 25 },
4830 	{ .hw_value = 136, .center_freq = 5680, .max_power = 25 },
4831 	{ .hw_value = 140, .center_freq = 5700, .max_power = 25 },
4832 	{ .hw_value = 149, .center_freq = 5745, .max_power = 25 },
4833 	{ .hw_value = 153, .center_freq = 5765, .max_power = 25 },
4834 	{ .hw_value = 157, .center_freq = 5785, .max_power = 25 },
4835 	{ .hw_value = 161, .center_freq = 5805, .max_power = 25 },
4836 	{ .hw_value = 165, .center_freq = 5825, .max_power = 25 },
4837 };
4838 
4839 static struct ieee80211_supported_band wl1271_band_5ghz = {
4840 	.channels = wl1271_channels_5ghz,
4841 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
4842 	.bitrates = wl1271_rates_5ghz,
4843 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
4844 };
4845 
4846 static const struct ieee80211_ops wl1271_ops = {
4847 	.start = wl1271_op_start,
4848 	.stop = wlcore_op_stop,
4849 	.add_interface = wl1271_op_add_interface,
4850 	.remove_interface = wl1271_op_remove_interface,
4851 	.change_interface = wl12xx_op_change_interface,
4852 #ifdef CONFIG_PM
4853 	.suspend = wl1271_op_suspend,
4854 	.resume = wl1271_op_resume,
4855 #endif
4856 	.config = wl1271_op_config,
4857 	.prepare_multicast = wl1271_op_prepare_multicast,
4858 	.configure_filter = wl1271_op_configure_filter,
4859 	.tx = wl1271_op_tx,
4860 	.set_key = wlcore_op_set_key,
4861 	.hw_scan = wl1271_op_hw_scan,
4862 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
4863 	.sched_scan_start = wl1271_op_sched_scan_start,
4864 	.sched_scan_stop = wl1271_op_sched_scan_stop,
4865 	.bss_info_changed = wl1271_op_bss_info_changed,
4866 	.set_frag_threshold = wl1271_op_set_frag_threshold,
4867 	.set_rts_threshold = wl1271_op_set_rts_threshold,
4868 	.conf_tx = wl1271_op_conf_tx,
4869 	.get_tsf = wl1271_op_get_tsf,
4870 	.get_survey = wl1271_op_get_survey,
4871 	.sta_state = wl12xx_op_sta_state,
4872 	.ampdu_action = wl1271_op_ampdu_action,
4873 	.tx_frames_pending = wl1271_tx_frames_pending,
4874 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
4875 	.channel_switch = wl12xx_op_channel_switch,
4876 	.flush = wlcore_op_flush,
4877 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
4878 };
4879 
4880 
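/*
 * Translate a HW rate index reported by the firmware into the mac80211
 * rate table index for the given band. Out-of-range or unsupported rates
 * are logged and mapped to index 0.
 */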
4881 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
4882 {
4883 	u8 idx;
4884 
4885 	BUG_ON(band >= 2);
4886 
4887 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
4888 		wl1271_error("Illegal RX rate from HW: %d", rate);
4889 		return 0;
4890 	}
4891 
4892 	idx = wl->band_rate_to_idx[band][rate];
4893 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
4894 		wl1271_error("Unsupported RX rate from HW: %d", rate);
4895 		return 0;
4896 	}
4897 
4898 	return idx;
4899 }
4900 
4901 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
4902 					       struct device_attribute *attr,
4903 					       char *buf)
4904 {
4905 	struct wl1271 *wl = dev_get_drvdata(dev);
4906 	ssize_t len;
4907 
4908 	len = PAGE_SIZE;
4909 
4910 	mutex_lock(&wl->mutex);
4911 	len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
4912 		       wl->sg_enabled);
4913 	mutex_unlock(&wl->mutex);
4914 
4915 	return len;
4916 
4917 }
4918 
4919 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
4920 						struct device_attribute *attr,
4921 						const char *buf, size_t count)
4922 {
4923 	struct wl1271 *wl = dev_get_drvdata(dev);
4924 	unsigned long res;
4925 	int ret;
4926 
4927 	ret = kstrtoul(buf, 10, &res);
4928 	if (ret < 0) {
4929 		wl1271_warning("incorrect value written to bt_coex_mode");
4930 		return count;
4931 	}
4932 
4933 	mutex_lock(&wl->mutex);
4934 
4935 	res = !!res;
4936 
4937 	if (res == wl->sg_enabled)
4938 		goto out;
4939 
4940 	wl->sg_enabled = res;
4941 
4942 	if (unlikely(wl->state != WLCORE_STATE_ON))
4943 		goto out;
4944 
4945 	ret = wl1271_ps_elp_wakeup(wl);
4946 	if (ret < 0)
4947 		goto out;
4948 
4949 	wl1271_acx_sg_enable(wl, wl->sg_enabled);
4950 	wl1271_ps_elp_sleep(wl);
4951 
4952  out:
4953 	mutex_unlock(&wl->mutex);
4954 	return count;
4955 }
4956 
4957 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
4958 		   wl1271_sysfs_show_bt_coex_state,
4959 		   wl1271_sysfs_store_bt_coex_state);
4960 
4961 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
4962 					   struct device_attribute *attr,
4963 					   char *buf)
4964 {
4965 	struct wl1271 *wl = dev_get_drvdata(dev);
4966 	ssize_t len;
4967 
4968 	len = PAGE_SIZE;
4969 
4970 	mutex_lock(&wl->mutex);
4971 	if (wl->hw_pg_ver >= 0)
4972 		len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
4973 	else
4974 		len = snprintf(buf, len, "n/a\n");
4975 	mutex_unlock(&wl->mutex);
4976 
4977 	return len;
4978 }
4979 
4980 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
4981 		   wl1271_sysfs_show_hw_pg_ver, NULL);
4982 
4983 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
4984 				       struct bin_attribute *bin_attr,
4985 				       char *buffer, loff_t pos, size_t count)
4986 {
4987 	struct device *dev = container_of(kobj, struct device, kobj);
4988 	struct wl1271 *wl = dev_get_drvdata(dev);
4989 	ssize_t len;
4990 	int ret;
4991 
4992 	ret = mutex_lock_interruptible(&wl->mutex);
4993 	if (ret < 0)
4994 		return -ERESTARTSYS;
4995 
4996 	/* Let only one thread read the log at a time, blocking others */
4997 	while (wl->fwlog_size == 0) {
4998 		DEFINE_WAIT(wait);
4999 
5000 		prepare_to_wait_exclusive(&wl->fwlog_waitq,
5001 					  &wait,
5002 					  TASK_INTERRUPTIBLE);
5003 
5004 		if (wl->fwlog_size != 0) {
5005 			finish_wait(&wl->fwlog_waitq, &wait);
5006 			break;
5007 		}
5008 
5009 		mutex_unlock(&wl->mutex);
5010 
5011 		schedule();
5012 		finish_wait(&wl->fwlog_waitq, &wait);
5013 
5014 		if (signal_pending(current))
5015 			return -ERESTARTSYS;
5016 
5017 		ret = mutex_lock_interruptible(&wl->mutex);
5018 		if (ret < 0)
5019 			return -ERESTARTSYS;
5020 	}
5021 
5022 	/* Check if the fwlog is still valid */
5023 	if (wl->fwlog_size < 0) {
5024 		mutex_unlock(&wl->mutex);
5025 		return 0;
5026 	}
5027 
5028 	/* Seeking is not supported - old logs are not kept. Disregard pos. */
5029 	len = min(count, (size_t)wl->fwlog_size);
5030 	wl->fwlog_size -= len;
5031 	memcpy(buffer, wl->fwlog, len);
5032 
5033 	/* Make room for new messages */
5034 	memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5035 
5036 	mutex_unlock(&wl->mutex);
5037 
5038 	return len;
5039 }
5040 
5041 static struct bin_attribute fwlog_attr = {
5042 	.attr = {.name = "fwlog", .mode = S_IRUSR},
5043 	.read = wl1271_sysfs_read_fwlog,
5044 };
5045 
5046 static void wl1271_connection_loss_work(struct work_struct *work)
5047 {
5048 	struct delayed_work *dwork;
5049 	struct wl1271 *wl;
5050 	struct ieee80211_vif *vif;
5051 	struct wl12xx_vif *wlvif;
5052 
5053 	dwork = container_of(work, struct delayed_work, work);
5054 	wl = container_of(dwork, struct wl1271, connection_loss_work);
5055 
5056 	wl1271_info("Connection loss work.");
5057 
5058 	mutex_lock(&wl->mutex);
5059 
5060 	if (unlikely(wl->state != WLCORE_STATE_ON))
5061 		goto out;
5062 
5063 	/* Call mac80211 connection loss */
5064 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
5065 		if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5066 			goto out;
5067 		vif = wl12xx_wlvif_to_vif(wlvif);
5068 		ieee80211_connection_loss(vif);
5069 	}
5070 out:
5071 	mutex_unlock(&wl->mutex);
5072 }
5073 
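/*
 * Fill wl->addresses with consecutive MAC addresses derived from the
 * given OUI/NIC base. If the chip provides fewer addresses than
 * WLCORE_NUM_MAC_ADDRESSES, the last slot reuses the first address with
 * the locally-administered bit set.
 */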
5074 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5075 {
5076 	int i;
5077 
5078 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5079 		     oui, nic);
5080 
5081 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5082 		wl1271_warning("NIC part of the MAC address wraps around!");
5083 
5084 	for (i = 0; i < wl->num_mac_addr; i++) {
5085 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
5086 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
5087 		wl->addresses[i].addr[2] = (u8) oui;
5088 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
5089 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
5090 		wl->addresses[i].addr[5] = (u8) nic;
5091 		nic++;
5092 	}
5093 
5094 	/* we may be at most one address short */
5095 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5096 
5097 	/*
5098 	 * turn on the LAA bit in the first address and use it as
5099 	 * the last address.
5100 	 */
5101 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5102 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5103 		memcpy(&wl->addresses[idx], &wl->addresses[0],
5104 		       sizeof(wl->addresses[0]));
5105 		/* LAA bit */
5106 		wl->addresses[idx].addr[2] |= BIT(1);
5107 	}
5108 
5109 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5110 	wl->hw->wiphy->addresses = wl->addresses;
5111 }
5112 
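/*
 * Power the chip on briefly to read the chip ID and PG version (and the
 * fused MAC address, when the chip-specific ops provide get_mac), then
 * power it back off.
 */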
5113 static int wl12xx_get_hw_info(struct wl1271 *wl)
5114 {
5115 	int ret;
5116 
5117 	ret = wl12xx_set_power_on(wl);
5118 	if (ret < 0)
5119 		goto out;
5120 
5121 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5122 	if (ret < 0)
5123 		goto out;
5124 
5125 	wl->fuse_oui_addr = 0;
5126 	wl->fuse_nic_addr = 0;
5127 
5128 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5129 	if (ret < 0)
5130 		goto out;
5131 
5132 	if (wl->ops->get_mac)
5133 		ret = wl->ops->get_mac(wl);
5134 
5135 out:
5136 	wl1271_power_off(wl);
5137 	return ret;
5138 }
5139 
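/*
 * Derive the MAC addresses from the NVS (or from fuse when the NVS field
 * is zeroed) and register the mac80211 hw. Safe to call more than once;
 * subsequent calls are no-ops.
 */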
5140 static int wl1271_register_hw(struct wl1271 *wl)
5141 {
5142 	int ret;
5143 	u32 oui_addr = 0, nic_addr = 0;
5144 
5145 	if (wl->mac80211_registered)
5146 		return 0;
5147 
5148 	if (wl->nvs_len >= 12) {
5149 		/* NOTE: The wl->nvs->nvs element must be first; to
5150 		 * simplify the casting we assume it is at the
5151 		 * beginning of the wl->nvs structure.
5152 		 */
5153 		u8 *nvs_ptr = (u8 *)wl->nvs;
5154 
5155 		oui_addr =
5156 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5157 		nic_addr =
5158 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5159 	}
5160 
5161 	/* if the MAC address is zeroed in the NVS, derive it from fuse */
5162 	if (oui_addr == 0 && nic_addr == 0) {
5163 		oui_addr = wl->fuse_oui_addr;
5164 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
5165 		nic_addr = wl->fuse_nic_addr + 1;
5166 	}
5167 
5168 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5169 
5170 	ret = ieee80211_register_hw(wl->hw);
5171 	if (ret < 0) {
5172 		wl1271_error("unable to register mac80211 hw: %d", ret);
5173 		goto out;
5174 	}
5175 
5176 	wl->mac80211_registered = true;
5177 
5178 	wl1271_debugfs_init(wl);
5179 
5180 	wl1271_notice("loaded");
5181 
5182 out:
5183 	return ret;
5184 }
5185 
5186 static void wl1271_unregister_hw(struct wl1271 *wl)
5187 {
5188 	if (wl->plt)
5189 		wl1271_plt_stop(wl);
5190 
5191 	ieee80211_unregister_hw(wl->hw);
5192 	wl->mac80211_registered = false;
5193 
5194 }
5195 
5196 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5197 	{
5198 		.max = 3,
5199 		.types = BIT(NL80211_IFTYPE_STATION),
5200 	},
5201 	{
5202 		.max = 1,
5203 		.types = BIT(NL80211_IFTYPE_AP) |
5204 			 BIT(NL80211_IFTYPE_P2P_GO) |
5205 			 BIT(NL80211_IFTYPE_P2P_CLIENT),
5206 	},
5207 };
5208 
5209 static const struct ieee80211_iface_combination
5210 wlcore_iface_combinations[] = {
5211 	{
5212 	  .num_different_channels = 1,
5213 	  .max_interfaces = 3,
5214 	  .limits = wlcore_iface_limits,
5215 	  .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5216 	},
5217 };
5218 
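/*
 * Set up the mac80211/cfg80211 capabilities: hw flags, cipher suites,
 * supported interface types and combinations, scan limits and the band
 * data (local copies, since the HT capabilities differ per device).
 */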
5219 static int wl1271_init_ieee80211(struct wl1271 *wl)
5220 {
5221 	static const u32 cipher_suites[] = {
5222 		WLAN_CIPHER_SUITE_WEP40,
5223 		WLAN_CIPHER_SUITE_WEP104,
5224 		WLAN_CIPHER_SUITE_TKIP,
5225 		WLAN_CIPHER_SUITE_CCMP,
5226 		WL1271_CIPHER_SUITE_GEM,
5227 	};
5228 
5229 	/* The tx descriptor buffer */
5230 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5231 
5232 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5233 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5234 
5235 	/* channel_change_time is in us */
5236 	/* FIXME: find a proper value */
5237 	wl->hw->channel_change_time = 10000;
5238 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5239 
5240 	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5241 		IEEE80211_HW_SUPPORTS_PS |
5242 		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5243 		IEEE80211_HW_SUPPORTS_UAPSD |
5244 		IEEE80211_HW_HAS_RATE_CONTROL |
5245 		IEEE80211_HW_CONNECTION_MONITOR |
5246 		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5247 		IEEE80211_HW_SPECTRUM_MGMT |
5248 		IEEE80211_HW_AP_LINK_PS |
5249 		IEEE80211_HW_AMPDU_AGGREGATION |
5250 		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5251 		IEEE80211_HW_SCAN_WHILE_IDLE;
5252 
5253 	wl->hw->wiphy->cipher_suites = cipher_suites;
5254 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5255 
5256 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5257 		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5258 		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5259 	wl->hw->wiphy->max_scan_ssids = 1;
5260 	wl->hw->wiphy->max_sched_scan_ssids = 16;
5261 	wl->hw->wiphy->max_match_sets = 16;
5262 	/*
5263 	 * The maximum length of the probe-request IEs is the maximum
5264 	 * possible template size, minus the IEEE80211 header of the
5265 	 * template.
5266 	 */
5267 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5268 			sizeof(struct ieee80211_header);
5269 
5270 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5271 		sizeof(struct ieee80211_header);
5272 
5273 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5274 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5275 
5276 	/* make sure all our channels fit in the scanned_ch bitmask */
5277 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5278 		     ARRAY_SIZE(wl1271_channels_5ghz) >
5279 		     WL1271_MAX_CHANNELS);
5280 	/*
5281 	 * We keep local copies of the band structs because we need to
5282 	 * modify them on a per-device basis.
5283 	 */
5284 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5285 	       sizeof(wl1271_band_2ghz));
5286 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5287 	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
5288 	       sizeof(*wl->ht_cap));
5289 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5290 	       sizeof(wl1271_band_5ghz));
5291 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5292 	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
5293 	       sizeof(*wl->ht_cap));
5294 
5295 	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5296 		&wl->bands[IEEE80211_BAND_2GHZ];
5297 	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5298 		&wl->bands[IEEE80211_BAND_5GHZ];
5299 
5300 	wl->hw->queues = 4;
5301 	wl->hw->max_rates = 1;
5302 
5303 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5304 
5305 	/* the FW answers probe-requests in AP-mode */
5306 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5307 	wl->hw->wiphy->probe_resp_offload =
5308 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5309 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5310 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5311 
5312 	/* allowed interface combinations */
5313 	wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5314 	wl->hw->wiphy->n_iface_combinations =
5315 		ARRAY_SIZE(wlcore_iface_combinations);
5316 
5317 	SET_IEEE80211_DEV(wl->hw, wl->dev);
5318 
5319 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
5320 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5321 
5322 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5323 
5324 	return 0;
5325 }
5326 
5327 #define WL1271_DEFAULT_CHANNEL 0
5328 
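/*
 * Allocate the ieee80211_hw together with the wl1271 structure and the
 * lower driver's private area, initialize the work items, queues and
 * default state, and allocate the aggregation buffer, dummy packet,
 * FW log page and event mailbox.
 */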
5329 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
5330 {
5331 	struct ieee80211_hw *hw;
5332 	struct wl1271 *wl;
5333 	int i, j, ret;
5334 	unsigned int order;
5335 
5336 	BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5337 
5338 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5339 	if (!hw) {
5340 		wl1271_error("could not alloc ieee80211_hw");
5341 		ret = -ENOMEM;
5342 		goto err_hw_alloc;
5343 	}
5344 
5345 	wl = hw->priv;
5346 	memset(wl, 0, sizeof(*wl));
5347 
5348 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
5349 	if (!wl->priv) {
5350 		wl1271_error("could not alloc wl priv");
5351 		ret = -ENOMEM;
5352 		goto err_priv_alloc;
5353 	}
5354 
5355 	INIT_LIST_HEAD(&wl->wlvif_list);
5356 
5357 	wl->hw = hw;
5358 
5359 	for (i = 0; i < NUM_TX_QUEUES; i++)
5360 		for (j = 0; j < WL12XX_MAX_LINKS; j++)
5361 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
5362 
5363 	skb_queue_head_init(&wl->deferred_rx_queue);
5364 	skb_queue_head_init(&wl->deferred_tx_queue);
5365 
5366 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5367 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5368 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
5369 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5370 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5371 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5372 	INIT_DELAYED_WORK(&wl->connection_loss_work,
5373 			  wl1271_connection_loss_work);
5374 
5375 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5376 	if (!wl->freezable_wq) {
5377 		ret = -ENOMEM;
5378 		goto err_hw;
5379 	}
5380 
5381 	wl->channel = WL1271_DEFAULT_CHANNEL;
5382 	wl->rx_counter = 0;
5383 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5384 	wl->band = IEEE80211_BAND_2GHZ;
5385 	wl->channel_type = NL80211_CHAN_NO_HT;
5386 	wl->flags = 0;
5387 	wl->sg_enabled = true;
5388 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
5389 	wl->hw_pg_ver = -1;
5390 	wl->ap_ps_map = 0;
5391 	wl->ap_fw_ps_map = 0;
5392 	wl->quirks = 0;
5393 	wl->platform_quirks = 0;
5394 	wl->sched_scanning = false;
5395 	wl->system_hlid = WL12XX_SYSTEM_HLID;
5396 	wl->active_sta_count = 0;
5397 	wl->fwlog_size = 0;
5398 	init_waitqueue_head(&wl->fwlog_waitq);
5399 
5400 	/* The system link is always allocated */
5401 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5402 
5403 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5404 	for (i = 0; i < wl->num_tx_desc; i++)
5405 		wl->tx_frames[i] = NULL;
5406 
5407 	spin_lock_init(&wl->wl_lock);
5408 
5409 	wl->state = WLCORE_STATE_OFF;
5410 	wl->fw_type = WL12XX_FW_TYPE_NONE;
5411 	mutex_init(&wl->mutex);
5412 	mutex_init(&wl->flush_mutex);
5413 	init_completion(&wl->nvs_loading_complete);
5414 
5415 	order = get_order(aggr_buf_size);
5416 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5417 	if (!wl->aggr_buf) {
5418 		ret = -ENOMEM;
5419 		goto err_wq;
5420 	}
5421 	wl->aggr_buf_size = aggr_buf_size;
5422 
5423 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5424 	if (!wl->dummy_packet) {
5425 		ret = -ENOMEM;
5426 		goto err_aggr;
5427 	}
5428 
5429 	/* Allocate one page for the FW log */
5430 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5431 	if (!wl->fwlog) {
5432 		ret = -ENOMEM;
5433 		goto err_dummy_packet;
5434 	}
5435 
5436 	wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
5437 	if (!wl->mbox) {
5438 		ret = -ENOMEM;
5439 		goto err_fwlog;
5440 	}
5441 
5442 	return hw;
5443 
5444 err_fwlog:
5445 	free_page((unsigned long)wl->fwlog);
5446 
5447 err_dummy_packet:
5448 	dev_kfree_skb(wl->dummy_packet);
5449 
5450 err_aggr:
5451 	free_pages((unsigned long)wl->aggr_buf, order);
5452 
5453 err_wq:
5454 	destroy_workqueue(wl->freezable_wq);
5455 
5456 err_hw:
5457 	wl1271_debugfs_exit(wl);
5458 	kfree(wl->priv);
5459 
5460 err_priv_alloc:
5461 	ieee80211_free_hw(hw);
5462 
5463 err_hw_alloc:
5464 
5465 	return ERR_PTR(ret);
5466 }
5467 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5468 
5469 int wlcore_free_hw(struct wl1271 *wl)
5470 {
5471 	/* Unblock any fwlog readers */
5472 	mutex_lock(&wl->mutex);
5473 	wl->fwlog_size = -1;
5474 	wake_up_interruptible_all(&wl->fwlog_waitq);
5475 	mutex_unlock(&wl->mutex);
5476 
5477 	device_remove_bin_file(wl->dev, &fwlog_attr);
5478 
5479 	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5480 
5481 	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5482 	free_page((unsigned long)wl->fwlog);
5483 	dev_kfree_skb(wl->dummy_packet);
5484 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5485 
5486 	wl1271_debugfs_exit(wl);
5487 
5488 	vfree(wl->fw);
5489 	wl->fw = NULL;
5490 	wl->fw_type = WL12XX_FW_TYPE_NONE;
5491 	kfree(wl->nvs);
5492 	wl->nvs = NULL;
5493 
5494 	kfree(wl->fw_status_1);
5495 	kfree(wl->tx_res_if);
5496 	destroy_workqueue(wl->freezable_wq);
5497 
5498 	kfree(wl->priv);
5499 	ieee80211_free_hw(wl->hw);
5500 
5501 	return 0;
5502 }
5503 EXPORT_SYMBOL_GPL(wlcore_free_hw);
5504 
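/*
 * Hard IRQ handler: complete a pending ELP wakeup if there is one. While
 * suspended, the work is only marked as pending and the IRQ is disabled
 * instead of waking the threaded handler.
 */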
5505 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5506 {
5507 	struct wl1271 *wl = cookie;
5508 	unsigned long flags;
5509 
5510 	wl1271_debug(DEBUG_IRQ, "IRQ");
5511 
5512 	/* complete the ELP completion */
5513 	spin_lock_irqsave(&wl->wl_lock, flags);
5514 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5515 	if (wl->elp_compl) {
5516 		complete(wl->elp_compl);
5517 		wl->elp_compl = NULL;
5518 	}
5519 
5520 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5521 		/* don't enqueue work right now; mark it as pending */
5522 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5523 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5524 		disable_irq_nosync(wl->irq);
5525 		pm_wakeup_event(wl->dev, 0);
5526 		spin_unlock_irqrestore(&wl->wl_lock, flags);
5527 		return IRQ_HANDLED;
5528 	}
5529 	spin_unlock_irqrestore(&wl->wl_lock, flags);
5530 
5531 	return IRQ_WAKE_THREAD;
5532 }
5533 
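/*
 * Completion callback of the asynchronous NVS request started in
 * wlcore_probe(): performs the chip-specific setup, requests the IRQ,
 * reads the HW info, registers mac80211 and creates the sysfs files.
 * Errors are unwound in reverse order through the out_* labels.
 */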
5534 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5535 {
5536 	struct wl1271 *wl = context;
5537 	struct platform_device *pdev = wl->pdev;
5538 	struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5539 	unsigned long irqflags;
5540 	int ret;
5541 
5542 	if (fw) {
5543 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5544 		if (!wl->nvs) {
5545 			wl1271_error("Could not allocate nvs data");
5546 			goto out;
5547 		}
5548 		wl->nvs_len = fw->size;
5549 	} else {
5550 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
5551 			     WL12XX_NVS_NAME);
5552 		wl->nvs = NULL;
5553 		wl->nvs_len = 0;
5554 	}
5555 
5556 	ret = wl->ops->setup(wl);
5557 	if (ret < 0)
5558 		goto out_free_nvs;
5559 
5560 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5561 
5562 	/* adjust some runtime configuration parameters */
5563 	wlcore_adjust_conf(wl);
5564 
5565 	wl->irq = platform_get_irq(pdev, 0);
5566 	wl->platform_quirks = pdata->platform_quirks;
5567 	wl->set_power = pdata->set_power;
5568 	wl->if_ops = pdata->ops;
5569 
5570 	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5571 		irqflags = IRQF_TRIGGER_RISING;
5572 	else
5573 		irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
5574 
5575 	ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
5576 				   irqflags,
5577 				   pdev->name, wl);
5578 	if (ret < 0) {
5579 		wl1271_error("request_irq() failed: %d", ret);
5580 		goto out_free_nvs;
5581 	}
5582 
5583 #ifdef CONFIG_PM
5584 	ret = enable_irq_wake(wl->irq);
5585 	if (!ret) {
5586 		wl->irq_wake_enabled = true;
5587 		device_init_wakeup(wl->dev, 1);
5588 		if (pdata->pwr_in_suspend) {
5589 			wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5590 			wl->hw->wiphy->wowlan.n_patterns =
5591 				WL1271_MAX_RX_FILTERS;
5592 			wl->hw->wiphy->wowlan.pattern_min_len = 1;
5593 			wl->hw->wiphy->wowlan.pattern_max_len =
5594 				WL1271_RX_FILTER_MAX_PATTERN_SIZE;
5595 		}
5596 	}
5597 #endif
5598 	disable_irq(wl->irq);
5599 
5600 	ret = wl12xx_get_hw_info(wl);
5601 	if (ret < 0) {
5602 		wl1271_error("couldn't get hw info");
5603 		goto out_irq;
5604 	}
5605 
5606 	ret = wl->ops->identify_chip(wl);
5607 	if (ret < 0)
5608 		goto out_irq;
5609 
5610 	ret = wl1271_init_ieee80211(wl);
5611 	if (ret)
5612 		goto out_irq;
5613 
5614 	ret = wl1271_register_hw(wl);
5615 	if (ret)
5616 		goto out_irq;
5617 
5618 	/* Create sysfs file to control bt coex state */
5619 	ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5620 	if (ret < 0) {
5621 		wl1271_error("failed to create sysfs file bt_coex_state");
5622 		goto out_unreg;
5623 	}
5624 
5625 	/* Create sysfs file to get HW PG version */
5626 	ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
5627 	if (ret < 0) {
5628 		wl1271_error("failed to create sysfs file hw_pg_ver");
5629 		goto out_bt_coex_state;
5630 	}
5631 
5632 	/* Create sysfs file for the FW log */
5633 	ret = device_create_bin_file(wl->dev, &fwlog_attr);
5634 	if (ret < 0) {
5635 		wl1271_error("failed to create sysfs file fwlog");
5636 		goto out_hw_pg_ver;
5637 	}
5638 
5639 	wl->initialized = true;
5640 	goto out;
5641 
5642 out_hw_pg_ver:
5643 	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5644 
5645 out_bt_coex_state:
5646 	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5647 
5648 out_unreg:
5649 	wl1271_unregister_hw(wl);
5650 
5651 out_irq:
5652 	free_irq(wl->irq, wl);
5653 
5654 out_free_nvs:
5655 	kfree(wl->nvs);
5656 
5657 out:
5658 	release_firmware(fw);
5659 	complete_all(&wl->nvs_loading_complete);
5660 }
5661 
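/*
 * Probe entry point called by the lower-level chip driver. The bulk of
 * the initialization is deferred to wlcore_nvs_cb(), which runs once the
 * NVS file request completes.
 */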
5662 int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5663 {
5664 	int ret;
5665 
5666 	if (!wl->ops || !wl->ptable)
5667 		return -EINVAL;
5668 
5669 	wl->dev = &pdev->dev;
5670 	wl->pdev = pdev;
5671 	platform_set_drvdata(pdev, wl);
5672 
5673 	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
5674 				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
5675 				      wl, wlcore_nvs_cb);
5676 	if (ret < 0) {
5677 		wl1271_error("request_firmware_nowait failed: %d", ret);
5678 		complete_all(&wl->nvs_loading_complete);
5679 	}
5680 
5681 	return ret;
5682 }
5683 EXPORT_SYMBOL_GPL(wlcore_probe);
5684 
5685 int __devexit wlcore_remove(struct platform_device *pdev)
5686 {
5687 	struct wl1271 *wl = platform_get_drvdata(pdev);
5688 
5689 	wait_for_completion(&wl->nvs_loading_complete);
5690 	if (!wl->initialized)
5691 		return 0;
5692 
5693 	if (wl->irq_wake_enabled) {
5694 		device_init_wakeup(wl->dev, 0);
5695 		disable_irq_wake(wl->irq);
5696 	}
5697 	wl1271_unregister_hw(wl);
5698 	free_irq(wl->irq, wl);
5699 	wlcore_free_hw(wl);
5700 
5701 	return 0;
5702 }
5703 EXPORT_SYMBOL_GPL(wlcore_remove);
5704 
5705 u32 wl12xx_debug_level = DEBUG_NONE;
5706 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
5707 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
5708 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
5709 
5710 module_param_named(fwlog, fwlog_param, charp, 0);
5711 MODULE_PARM_DESC(fwlog,
5712 		 "FW logger options: continuous, ondemand, dbgpins or disable");
5713 
5714 module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
5715 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
5716 
5717 module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
5718 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
5719 
5720 MODULE_LICENSE("GPL");
5721 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5722 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
5723 MODULE_FIRMWARE(WL12XX_NVS_NAME);
5724