xref: /openbmc/linux/drivers/net/wireless/ti/wlcore/main.c (revision 55fd7e02)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
17 
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
31 
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_SUSPEND_SLEEP 100
34 #define WL1271_WAKEUP_TIMEOUT 500
35 
36 static char *fwlog_param;
37 static int fwlog_mem_blocks = -1;
38 static int bug_on_recovery = -1;
39 static int no_recovery     = -1;
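
/*
 * A default of -1 above marks a parameter that was not set on the module
 * command line, so the platform/configuration defaults are only overridden
 * when a value was explicitly provided (see wlcore_adjust_conf() below).
 */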
40 
41 static void __wl1271_op_remove_interface(struct wl1271 *wl,
42 					 struct ieee80211_vif *vif,
43 					 bool reset_tx_queues);
44 static void wlcore_op_stop_locked(struct wl1271 *wl);
45 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
46 
47 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
48 {
49 	int ret;
50 
51 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
52 		return -EINVAL;
53 
54 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
55 		return 0;
56 
57 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
58 		return 0;
59 
60 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
61 	if (ret < 0)
62 		return ret;
63 
64 	wl1271_info("Association completed.");
65 	return 0;
66 }
67 
68 static void wl1271_reg_notify(struct wiphy *wiphy,
69 			      struct regulatory_request *request)
70 {
71 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
72 	struct wl1271 *wl = hw->priv;
73 
74 	/* copy the current dfs region */
75 	if (request)
76 		wl->dfs_region = request->dfs_region;
77 
78 	wlcore_regdomain_config(wl);
79 }
80 
81 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
82 				   bool enable)
83 {
84 	int ret = 0;
85 
86 	/* the caller must hold wl->mutex */
87 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
88 	if (ret < 0)
89 		goto out;
90 
91 	if (enable)
92 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
93 	else
94 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
95 out:
96 	return ret;
97 }
98 
99 /*
100  * This function is called when the rx_streaming interval
101  * has been changed or rx_streaming should be disabled.
102  */
103 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
104 {
105 	int ret = 0;
106 	int period = wl->conf.rx_streaming.interval;
107 
108 	/* don't reconfigure if rx_streaming is disabled */
109 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
110 		goto out;
111 
112 	/* reconfigure/disable according to new streaming_period */
113 	if (period &&
114 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
115 	    (wl->conf.rx_streaming.always ||
116 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
117 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
118 	else {
119 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
120 		/* don't cancel_work_sync since we might deadlock */
121 		del_timer_sync(&wlvif->rx_streaming_timer);
122 	}
123 out:
124 	return ret;
125 }
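
/*
 * Illustrative caller sketch (not part of the driver): users of
 * wl1271_recalc_rx_streaming() are expected to hold wl->mutex and walk the
 * station vifs after the rx_streaming interval has been changed, roughly:
 *
 *	mutex_lock(&wl->mutex);
 *	wl12xx_for_each_wlvif_sta(wl, wlvif)
 *		wl1271_recalc_rx_streaming(wl, wlvif);
 *	mutex_unlock(&wl->mutex);
 *
 * (wl12xx_for_each_wlvif_sta is the per-STA vif iterator used elsewhere in
 * wlcore; treat the snippet as a sketch rather than a verbatim call site.)
 */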
126 
127 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
128 {
129 	int ret;
130 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
131 						rx_streaming_enable_work);
132 	struct wl1271 *wl = wlvif->wl;
133 
134 	mutex_lock(&wl->mutex);
135 
136 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
137 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
138 	    (!wl->conf.rx_streaming.always &&
139 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
140 		goto out;
141 
142 	if (!wl->conf.rx_streaming.interval)
143 		goto out;
144 
145 	ret = pm_runtime_get_sync(wl->dev);
146 	if (ret < 0) {
147 		pm_runtime_put_noidle(wl->dev);
148 		goto out;
149 	}
150 
151 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
152 	if (ret < 0)
153 		goto out_sleep;
154 
155 	/* stop it after some time of inactivity */
156 	mod_timer(&wlvif->rx_streaming_timer,
157 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
158 
159 out_sleep:
160 	pm_runtime_mark_last_busy(wl->dev);
161 	pm_runtime_put_autosuspend(wl->dev);
162 out:
163 	mutex_unlock(&wl->mutex);
164 }
165 
166 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
167 {
168 	int ret;
169 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
170 						rx_streaming_disable_work);
171 	struct wl1271 *wl = wlvif->wl;
172 
173 	mutex_lock(&wl->mutex);
174 
175 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
176 		goto out;
177 
178 	ret = pm_runtime_get_sync(wl->dev);
179 	if (ret < 0) {
180 		pm_runtime_put_noidle(wl->dev);
181 		goto out;
182 	}
183 
184 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
185 	if (ret)
186 		goto out_sleep;
187 
188 out_sleep:
189 	pm_runtime_mark_last_busy(wl->dev);
190 	pm_runtime_put_autosuspend(wl->dev);
191 out:
192 	mutex_unlock(&wl->mutex);
193 }
194 
195 static void wl1271_rx_streaming_timer(struct timer_list *t)
196 {
197 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
198 	struct wl1271 *wl = wlvif->wl;
199 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
200 }
201 
202 /* wl->mutex must be taken */
203 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
204 {
205 	/* if the watchdog is not armed, don't do anything */
206 	if (wl->tx_allocated_blocks == 0)
207 		return;
208 
209 	cancel_delayed_work(&wl->tx_watchdog_work);
210 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
211 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
212 }
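
/*
 * Note: the watchdog only runs while the FW holds allocated Tx blocks.
 * wlcore_fw_status() re-arms it as long as blocks remain allocated and
 * cancels it once the count drops to zero, so an idle interface does not
 * trigger a spurious Tx-stuck recovery.
 */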
213 
214 static void wlcore_rc_update_work(struct work_struct *work)
215 {
216 	int ret;
217 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
218 						rc_update_work);
219 	struct wl1271 *wl = wlvif->wl;
220 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
221 
222 	mutex_lock(&wl->mutex);
223 
224 	if (unlikely(wl->state != WLCORE_STATE_ON))
225 		goto out;
226 
227 	ret = pm_runtime_get_sync(wl->dev);
228 	if (ret < 0) {
229 		pm_runtime_put_noidle(wl->dev);
230 		goto out;
231 	}
232 
233 	if (ieee80211_vif_is_mesh(vif)) {
234 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
235 						     true, wlvif->sta.hlid);
236 		if (ret < 0)
237 			goto out_sleep;
238 	} else {
239 		wlcore_hw_sta_rc_update(wl, wlvif);
240 	}
241 
242 out_sleep:
243 	pm_runtime_mark_last_busy(wl->dev);
244 	pm_runtime_put_autosuspend(wl->dev);
245 out:
246 	mutex_unlock(&wl->mutex);
247 }
248 
249 static void wl12xx_tx_watchdog_work(struct work_struct *work)
250 {
251 	struct delayed_work *dwork;
252 	struct wl1271 *wl;
253 
254 	dwork = to_delayed_work(work);
255 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
256 
257 	mutex_lock(&wl->mutex);
258 
259 	if (unlikely(wl->state != WLCORE_STATE_ON))
260 		goto out;
261 
262 	/* Tx went out in the meantime - everything is ok */
263 	if (unlikely(wl->tx_allocated_blocks == 0))
264 		goto out;
265 
266 	/*
267 	 * if a ROC is in progress, we might not have any Tx for a long
268 	 * time (e.g. pending Tx on the non-ROC channels)
269 	 */
270 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
271 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
272 			     wl->conf.tx.tx_watchdog_timeout);
273 		wl12xx_rearm_tx_watchdog_locked(wl);
274 		goto out;
275 	}
276 
277 	/*
278 	 * if a scan is in progress, we might not have any Tx for a long
279 	 * time
280 	 */
281 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
282 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
283 			     wl->conf.tx.tx_watchdog_timeout);
284 		wl12xx_rearm_tx_watchdog_locked(wl);
285 		goto out;
286 	}
287 
288 	/*
289 	 * An AP might cache a frame for a long time for a sleeping station,
290 	 * so rearm the timer if there's an AP interface with stations. If
291 	 * Tx is genuinely stuck, we will hopefully discover it when all
292 	 * stations are removed due to inactivity.
293 	 */
294 	if (wl->active_sta_count) {
295 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
296 			     " %d stations",
297 			      wl->conf.tx.tx_watchdog_timeout,
298 			      wl->active_sta_count);
299 		wl12xx_rearm_tx_watchdog_locked(wl);
300 		goto out;
301 	}
302 
303 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
304 		     wl->conf.tx.tx_watchdog_timeout);
305 	wl12xx_queue_recovery_work(wl);
306 
307 out:
308 	mutex_unlock(&wl->mutex);
309 }
310 
311 static void wlcore_adjust_conf(struct wl1271 *wl)
312 {
313 
314 	if (fwlog_param) {
315 		if (!strcmp(fwlog_param, "continuous")) {
316 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
317 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
318 		} else if (!strcmp(fwlog_param, "dbgpins")) {
319 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
320 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
321 		} else if (!strcmp(fwlog_param, "disable")) {
322 			wl->conf.fwlog.mem_blocks = 0;
323 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
324 		} else {
325 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
326 		}
327 	}
328 
329 	if (bug_on_recovery != -1)
330 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
331 
332 	if (no_recovery != -1)
333 		wl->conf.recovery.no_recovery = (u8) no_recovery;
334 }
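
/*
 * Example (illustrative; it assumes the usual wlcore module parameter names
 * "fwlog", "bug_on_recovery" and "no_recovery" are registered for the
 * variables above):
 *
 *	modprobe wlcore fwlog=dbgpins bug_on_recovery=1
 *
 * would route the FW log to the debug pins and turn a FW recovery into a
 * BUG(), while leaving unset parameters at their platform defaults.
 */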
335 
336 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
337 					struct wl12xx_vif *wlvif,
338 					u8 hlid, u8 tx_pkts)
339 {
340 	bool fw_ps;
341 
342 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
343 
344 	/*
345 	 * Wake up from high-level PS if the STA is asleep with too few
346 	 * packets in FW, or if the STA is awake.
347 	 */
348 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
349 		wl12xx_ps_link_end(wl, wlvif, hlid);
350 
351 	/*
352 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
353 	 * Make an exception if this is the only connected link. In this
354 	 * case FW-memory congestion is less of a problem.
355 	 * Note that a single connected STA means 2*ap_count + 1 active links,
356 	 * since we must account for the global and broadcast AP links
357 	 * for each AP. The "fw_ps" check assures us the other link is a STA
358 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
359 	 */
360 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
361 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
362 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
363 }
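
/*
 * Worked example for the link arithmetic above: with a single AP role
 * (ap_count == 1), the global and broadcast AP links plus one connected STA
 * give 2 * 1 + 1 = 3 active links, so high-level PS is only started once a
 * second peer link exists (active_link_count > 3).
 */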
364 
365 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
366 					   struct wl12xx_vif *wlvif,
367 					   struct wl_fw_status *status)
368 {
369 	unsigned long cur_fw_ps_map;
370 	u8 hlid;
371 
372 	cur_fw_ps_map = status->link_ps_bitmap;
373 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
374 		wl1271_debug(DEBUG_PSM,
375 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
376 			     wl->ap_fw_ps_map, cur_fw_ps_map,
377 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
378 
379 		wl->ap_fw_ps_map = cur_fw_ps_map;
380 	}
381 
382 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
383 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
384 					    wl->links[hlid].allocated_pkts);
385 }
386 
387 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
388 {
389 	struct wl12xx_vif *wlvif;
390 	u32 old_tx_blk_count = wl->tx_blocks_available;
391 	int avail, freed_blocks;
392 	int i;
393 	int ret;
394 	struct wl1271_link *lnk;
395 
396 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
397 				   wl->raw_fw_status,
398 				   wl->fw_status_len, false);
399 	if (ret < 0)
400 		return ret;
401 
402 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
403 
404 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
405 		     "drv_rx_counter = %d, tx_results_counter = %d)",
406 		     status->intr,
407 		     status->fw_rx_counter,
408 		     status->drv_rx_counter,
409 		     status->tx_results_counter);
410 
411 	for (i = 0; i < NUM_TX_QUEUES; i++) {
412 		/* prevent wrap-around in freed-packets counter */
413 		wl->tx_allocated_pkts[i] -=
414 				(status->counters.tx_released_pkts[i] -
415 				wl->tx_pkts_freed[i]) & 0xff;
416 
417 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
418 	}
419 
420 
421 	for_each_set_bit(i, wl->links_map, wl->num_links) {
422 		u8 diff;
423 		lnk = &wl->links[i];
424 
425 		/* prevent wrap-around in freed-packets counter */
426 		diff = (status->counters.tx_lnk_free_pkts[i] -
427 		       lnk->prev_freed_pkts) & 0xff;
428 
429 		if (diff == 0)
430 			continue;
431 
432 		lnk->allocated_pkts -= diff;
433 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
434 
435 		/* accumulate the prev_freed_pkts counter */
436 		lnk->total_freed_pkts += diff;
437 	}
438 
439 	/* prevent wrap-around in total blocks counter */
440 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
441 		freed_blocks = status->total_released_blks -
442 			       wl->tx_blocks_freed;
443 	else
444 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
445 			       status->total_released_blks;
446 
447 	wl->tx_blocks_freed = status->total_released_blks;
448 
449 	wl->tx_allocated_blocks -= freed_blocks;
450 
451 	/*
452 	 * If the FW freed some blocks:
453 	 * If we still have allocated blocks - re-arm the timer, Tx is
454 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
455 	 */
456 	if (freed_blocks) {
457 		if (wl->tx_allocated_blocks)
458 			wl12xx_rearm_tx_watchdog_locked(wl);
459 		else
460 			cancel_delayed_work(&wl->tx_watchdog_work);
461 	}
462 
463 	avail = status->tx_total - wl->tx_allocated_blocks;
464 
465 	/*
466 	 * The FW might change the total number of TX memblocks before
467 	 * we get a notification about blocks being released. Thus, the
468 	 * available blocks calculation might yield a temporary result
469 	 * which is lower than the actual available blocks. Keeping in
470 	 * mind that only blocks that were allocated can be moved from
471 	 * TX to RX, tx_blocks_available should never decrease here.
472 	 */
473 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
474 				      avail);
475 
476 	/* if more blocks are available now, tx work can be scheduled */
477 	if (wl->tx_blocks_available > old_tx_blk_count)
478 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
479 
480 	/* for AP update num of allocated TX blocks per link and ps status */
481 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
482 		wl12xx_irq_update_links_status(wl, wlvif, status);
483 	}
484 
485 	/* update the host-chipset time offset */
486 	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
487 		(s64)(status->fw_localtime);
488 
489 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
490 
491 	return 0;
492 }
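
/*
 * Worked example for the "& 0xff" wrap-around handling above: the FW reports
 * the freed-packets counters as 8-bit values, so if the previous snapshot was
 * 0xfe and the new one is 0x03, (0x03 - 0xfe) & 0xff == 5 correctly yields
 * the five packets freed across the wrap.
 */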
493 
494 static void wl1271_flush_deferred_work(struct wl1271 *wl)
495 {
496 	struct sk_buff *skb;
497 
498 	/* Pass all received frames to the network stack */
499 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
500 		ieee80211_rx_ni(wl->hw, skb);
501 
502 	/* Return sent skbs to the network stack */
503 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
504 		ieee80211_tx_status_ni(wl->hw, skb);
505 }
506 
507 static void wl1271_netstack_work(struct work_struct *work)
508 {
509 	struct wl1271 *wl =
510 		container_of(work, struct wl1271, netstack_work);
511 
512 	do {
513 		wl1271_flush_deferred_work(wl);
514 	} while (skb_queue_len(&wl->deferred_rx_queue));
515 }
516 
517 #define WL1271_IRQ_MAX_LOOPS 256
518 
519 static int wlcore_irq_locked(struct wl1271 *wl)
520 {
521 	int ret = 0;
522 	u32 intr;
523 	int loopcount = WL1271_IRQ_MAX_LOOPS;
524 	bool done = false;
525 	unsigned int defer_count;
526 	unsigned long flags;
527 
528 	/*
529 	 * If an edge-triggered interrupt must be used, we cannot iterate
530 	 * more than once without introducing race conditions with the hardirq.
531 	 */
532 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
533 		loopcount = 1;
534 
535 	wl1271_debug(DEBUG_IRQ, "IRQ work");
536 
537 	if (unlikely(wl->state != WLCORE_STATE_ON))
538 		goto out;
539 
540 	ret = pm_runtime_get_sync(wl->dev);
541 	if (ret < 0) {
542 		pm_runtime_put_noidle(wl->dev);
543 		goto out;
544 	}
545 
546 	while (!done && loopcount--) {
547 		smp_mb__after_atomic();
548 
549 		ret = wlcore_fw_status(wl, wl->fw_status);
550 		if (ret < 0)
551 			goto err_ret;
552 
553 		wlcore_hw_tx_immediate_compl(wl);
554 
555 		intr = wl->fw_status->intr;
556 		intr &= WLCORE_ALL_INTR_MASK;
557 		if (!intr) {
558 			done = true;
559 			continue;
560 		}
561 
562 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
563 			wl1271_error("HW watchdog interrupt received! starting recovery.");
564 			wl->watchdog_recovery = true;
565 			ret = -EIO;
566 
567 			/* restarting the chip. ignore any other interrupt. */
568 			goto err_ret;
569 		}
570 
571 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
572 			wl1271_error("SW watchdog interrupt received! "
573 				     "starting recovery.");
574 			wl->watchdog_recovery = true;
575 			ret = -EIO;
576 
577 			/* restarting the chip. ignore any other interrupt. */
578 			goto err_ret;
579 		}
580 
581 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
582 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
583 
584 			ret = wlcore_rx(wl, wl->fw_status);
585 			if (ret < 0)
586 				goto err_ret;
587 
588 			/* Check if any tx blocks were freed */
589 			spin_lock_irqsave(&wl->wl_lock, flags);
590 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
591 			    wl1271_tx_total_queue_count(wl) > 0) {
592 				spin_unlock_irqrestore(&wl->wl_lock, flags);
593 				/*
594 				 * In order to avoid starvation of the TX path,
595 				 * call the work function directly.
596 				 */
597 				ret = wlcore_tx_work_locked(wl);
598 				if (ret < 0)
599 					goto err_ret;
600 			} else {
601 				spin_unlock_irqrestore(&wl->wl_lock, flags);
602 			}
603 
604 			/* check for tx results */
605 			ret = wlcore_hw_tx_delayed_compl(wl);
606 			if (ret < 0)
607 				goto err_ret;
608 
609 			/* Make sure the deferred queues don't get too long */
610 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
611 				      skb_queue_len(&wl->deferred_rx_queue);
612 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
613 				wl1271_flush_deferred_work(wl);
614 		}
615 
616 		if (intr & WL1271_ACX_INTR_EVENT_A) {
617 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
618 			ret = wl1271_event_handle(wl, 0);
619 			if (ret < 0)
620 				goto err_ret;
621 		}
622 
623 		if (intr & WL1271_ACX_INTR_EVENT_B) {
624 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
625 			ret = wl1271_event_handle(wl, 1);
626 			if (ret < 0)
627 				goto err_ret;
628 		}
629 
630 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
631 			wl1271_debug(DEBUG_IRQ,
632 				     "WL1271_ACX_INTR_INIT_COMPLETE");
633 
634 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
635 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
636 	}
637 
638 err_ret:
639 	pm_runtime_mark_last_busy(wl->dev);
640 	pm_runtime_put_autosuspend(wl->dev);
641 
642 out:
643 	return ret;
644 }
645 
646 static irqreturn_t wlcore_irq(int irq, void *cookie)
647 {
648 	int ret;
649 	unsigned long flags;
650 	struct wl1271 *wl = cookie;
651 
652 	/* signal a pending ELP wakeup completion, if any */
653 	spin_lock_irqsave(&wl->wl_lock, flags);
654 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
655 	if (wl->elp_compl) {
656 		complete(wl->elp_compl);
657 		wl->elp_compl = NULL;
658 	}
659 
660 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
661 		/* don't enqueue work right now; mark it as pending */
662 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
663 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
664 		disable_irq_nosync(wl->irq);
665 		pm_wakeup_event(wl->dev, 0);
666 		spin_unlock_irqrestore(&wl->wl_lock, flags);
667 		goto out_handled;
668 	}
669 	spin_unlock_irqrestore(&wl->wl_lock, flags);
670 
671 	/* TX might be handled here, avoid redundant work */
672 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
673 	cancel_work_sync(&wl->tx_work);
674 
675 	mutex_lock(&wl->mutex);
676 
677 	ret = wlcore_irq_locked(wl);
678 	if (ret)
679 		wl12xx_queue_recovery_work(wl);
680 
681 	spin_lock_irqsave(&wl->wl_lock, flags);
682 	/* In case TX was not handled here, queue TX work */
683 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
684 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
685 	    wl1271_tx_total_queue_count(wl) > 0)
686 		ieee80211_queue_work(wl->hw, &wl->tx_work);
687 	spin_unlock_irqrestore(&wl->wl_lock, flags);
688 
689 	mutex_unlock(&wl->mutex);
690 
691 out_handled:
692 	spin_lock_irqsave(&wl->wl_lock, flags);
693 	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
694 	spin_unlock_irqrestore(&wl->wl_lock, flags);
695 
696 	return IRQ_HANDLED;
697 }
698 
699 struct vif_counter_data {
700 	u8 counter;
701 
702 	struct ieee80211_vif *cur_vif;
703 	bool cur_vif_running;
704 };
705 
706 static void wl12xx_vif_count_iter(void *data, u8 *mac,
707 				  struct ieee80211_vif *vif)
708 {
709 	struct vif_counter_data *counter = data;
710 
711 	counter->counter++;
712 	if (counter->cur_vif == vif)
713 		counter->cur_vif_running = true;
714 }
715 
716 /* caller must not hold wl->mutex, as it might deadlock */
717 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
718 			       struct ieee80211_vif *cur_vif,
719 			       struct vif_counter_data *data)
720 {
721 	memset(data, 0, sizeof(*data));
722 	data->cur_vif = cur_vif;
723 
724 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
725 					    wl12xx_vif_count_iter, data);
726 }
727 
728 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
729 {
730 	const struct firmware *fw;
731 	const char *fw_name;
732 	enum wl12xx_fw_type fw_type;
733 	int ret;
734 
735 	if (plt) {
736 		fw_type = WL12XX_FW_TYPE_PLT;
737 		fw_name = wl->plt_fw_name;
738 	} else {
739 		/*
740 		 * we can't call wl12xx_get_vif_count() here because
741 		 * wl->mutex is taken, so use the cached last_vif_count value
742 		 */
743 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
744 			fw_type = WL12XX_FW_TYPE_MULTI;
745 			fw_name = wl->mr_fw_name;
746 		} else {
747 			fw_type = WL12XX_FW_TYPE_NORMAL;
748 			fw_name = wl->sr_fw_name;
749 		}
750 	}
751 
752 	if (wl->fw_type == fw_type)
753 		return 0;
754 
755 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
756 
757 	ret = request_firmware(&fw, fw_name, wl->dev);
758 
759 	if (ret < 0) {
760 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
761 		return ret;
762 	}
763 
764 	if (fw->size % 4) {
765 		wl1271_error("firmware size is not multiple of 32 bits: %zu",
766 			     fw->size);
767 		ret = -EILSEQ;
768 		goto out;
769 	}
770 
771 	vfree(wl->fw);
772 	wl->fw_type = WL12XX_FW_TYPE_NONE;
773 	wl->fw_len = fw->size;
774 	wl->fw = vmalloc(wl->fw_len);
775 
776 	if (!wl->fw) {
777 		wl1271_error("could not allocate memory for the firmware");
778 		ret = -ENOMEM;
779 		goto out;
780 	}
781 
782 	memcpy(wl->fw, fw->data, wl->fw_len);
783 	ret = 0;
784 	wl->fw_type = fw_type;
785 out:
786 	release_firmware(fw);
787 
788 	return ret;
789 }
790 
791 void wl12xx_queue_recovery_work(struct wl1271 *wl)
792 {
793 	/* Avoid a recursive recovery */
794 	if (wl->state == WLCORE_STATE_ON) {
795 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
796 				  &wl->flags));
797 
798 		wl->state = WLCORE_STATE_RESTARTING;
799 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
800 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
801 	}
802 }
803 
804 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
805 {
806 	size_t len;
807 
808 	/* Make sure we have enough room */
809 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
810 
811 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
812 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
813 	wl->fwlog_size += len;
814 
815 	return len;
816 }
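
/*
 * Note: the copy above is clamped so that wl->fwlog never grows beyond a
 * single page (PAGE_SIZE), the buffer exposed through the sysfs fwlog entry;
 * any excess is simply not copied, and the returned length tells the caller
 * how much was consumed.
 */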
817 
818 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
819 {
820 	u32 end_of_log = 0;
821 	int error;
822 
823 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
824 		return;
825 
826 	wl1271_info("Reading FW panic log");
827 
828 	/*
829 	 * Make sure the chip is awake and the logger isn't active.
830 	 * Do not send a stop fwlog command if the fw has hung or if
831 	 * dbgpins are used (due to some fw bug).
832 	 */
833 	error = pm_runtime_get_sync(wl->dev);
834 	if (error < 0) {
835 		pm_runtime_put_noidle(wl->dev);
836 		return;
837 	}
838 	if (!wl->watchdog_recovery &&
839 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
840 		wl12xx_cmd_stop_fwlog(wl);
841 
842 	/* Traverse the memory blocks linked list */
843 	do {
844 		end_of_log = wlcore_event_fw_logger(wl);
845 		if (end_of_log == 0) {
846 			msleep(100);
847 			end_of_log = wlcore_event_fw_logger(wl);
848 		}
849 	} while (end_of_log != 0);
850 }
851 
852 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
853 				   u8 hlid, struct ieee80211_sta *sta)
854 {
855 	struct wl1271_station *wl_sta;
856 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
857 
858 	wl_sta = (void *)sta->drv_priv;
859 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
860 
861 	/*
862 	 * increment the initial seq number on recovery to account for
863 	 * transmitted packets that we haven't yet seen in the FW status
864 	 */
865 	if (wlvif->encryption_type == KEY_GEM)
866 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
867 
868 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
869 		wl_sta->total_freed_pkts += sqn_recovery_padding;
870 }
871 
872 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
873 					struct wl12xx_vif *wlvif,
874 					u8 hlid, const u8 *addr)
875 {
876 	struct ieee80211_sta *sta;
877 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
878 
879 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
880 		    is_zero_ether_addr(addr)))
881 		return;
882 
883 	rcu_read_lock();
884 	sta = ieee80211_find_sta(vif, addr);
885 	if (sta)
886 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
887 	rcu_read_unlock();
888 }
889 
890 static void wlcore_print_recovery(struct wl1271 *wl)
891 {
892 	u32 pc = 0;
893 	u32 hint_sts = 0;
894 	int ret;
895 
896 	wl1271_info("Hardware recovery in progress. FW ver: %s",
897 		    wl->chip.fw_ver_str);
898 
899 	/* change partitions momentarily so we can read the FW pc */
900 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
901 	if (ret < 0)
902 		return;
903 
904 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
905 	if (ret < 0)
906 		return;
907 
908 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
909 	if (ret < 0)
910 		return;
911 
912 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
913 				pc, hint_sts, ++wl->recovery_count);
914 
915 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
916 }
917 
918 
919 static void wl1271_recovery_work(struct work_struct *work)
920 {
921 	struct wl1271 *wl =
922 		container_of(work, struct wl1271, recovery_work);
923 	struct wl12xx_vif *wlvif;
924 	struct ieee80211_vif *vif;
925 	int error;
926 
927 	mutex_lock(&wl->mutex);
928 
929 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
930 		goto out_unlock;
931 
932 	error = pm_runtime_get_sync(wl->dev);
933 	if (error < 0) {
934 		wl1271_warning("Enable for recovery failed");
935 		pm_runtime_put_noidle(wl->dev);
936 	}
937 	wlcore_disable_interrupts_nosync(wl);
938 
939 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
940 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
941 			wl12xx_read_fwlog_panic(wl);
942 		wlcore_print_recovery(wl);
943 	}
944 
945 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
946 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
947 
948 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
949 
950 	if (wl->conf.recovery.no_recovery) {
951 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
952 		goto out_unlock;
953 	}
954 
955 	/* Prevent spurious TX during FW restart */
956 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
957 
958 	/* reboot the chipset */
959 	while (!list_empty(&wl->wlvif_list)) {
960 		wlvif = list_first_entry(&wl->wlvif_list,
961 				       struct wl12xx_vif, list);
962 		vif = wl12xx_wlvif_to_vif(wlvif);
963 
964 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
965 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
966 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
967 						    vif->bss_conf.bssid);
968 		}
969 
970 		__wl1271_op_remove_interface(wl, vif, false);
971 	}
972 
973 	wlcore_op_stop_locked(wl);
974 	pm_runtime_mark_last_busy(wl->dev);
975 	pm_runtime_put_autosuspend(wl->dev);
976 
977 	ieee80211_restart_hw(wl->hw);
978 
979 	/*
980 	 * It's safe to enable TX now - the queues are stopped after a request
981 	 * to restart the HW.
982 	 */
983 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
984 
985 out_unlock:
986 	wl->watchdog_recovery = false;
987 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
988 	mutex_unlock(&wl->mutex);
989 }
990 
991 static int wlcore_fw_wakeup(struct wl1271 *wl)
992 {
993 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
994 }
995 
996 static int wl1271_setup(struct wl1271 *wl)
997 {
998 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
999 	if (!wl->raw_fw_status)
1000 		goto err;
1001 
1002 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1003 	if (!wl->fw_status)
1004 		goto err;
1005 
1006 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1007 	if (!wl->tx_res_if)
1008 		goto err;
1009 
1010 	return 0;
1011 err:
1012 	kfree(wl->fw_status);
1013 	kfree(wl->raw_fw_status);
1014 	return -ENOMEM;
1015 }
1016 
1017 static int wl12xx_set_power_on(struct wl1271 *wl)
1018 {
1019 	int ret;
1020 
1021 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1022 	ret = wl1271_power_on(wl);
1023 	if (ret < 0)
1024 		goto out;
1025 	msleep(WL1271_POWER_ON_SLEEP);
1026 	wl1271_io_reset(wl);
1027 	wl1271_io_init(wl);
1028 
1029 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1030 	if (ret < 0)
1031 		goto fail;
1032 
1033 	/* ELP module wake up */
1034 	ret = wlcore_fw_wakeup(wl);
1035 	if (ret < 0)
1036 		goto fail;
1037 
1038 out:
1039 	return ret;
1040 
1041 fail:
1042 	wl1271_power_off(wl);
1043 	return ret;
1044 }
1045 
1046 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1047 {
1048 	int ret = 0;
1049 
1050 	ret = wl12xx_set_power_on(wl);
1051 	if (ret < 0)
1052 		goto out;
1053 
1054 	/*
1055 	 * For wl127x based devices we could use the default block
1056 	 * size (512 bytes), but due to a bug in the sdio driver, we
1057 	 * need to set it explicitly after the chip is powered on.  To
1058 	 * simplify the code and since the performance impact is
1059 	 * negligible, we use the same block size for all different
1060 	 * chip types.
1061 	 *
1062 	 * Check if the bus supports blocksize alignment and, if it
1063 	 * doesn't, make sure we don't have the quirk.
1064 	 */
1065 	if (!wl1271_set_block_size(wl))
1066 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1067 
1068 	/* TODO: make sure the lower driver has set things up correctly */
1069 
1070 	ret = wl1271_setup(wl);
1071 	if (ret < 0)
1072 		goto out;
1073 
1074 	ret = wl12xx_fetch_firmware(wl, plt);
1075 	if (ret < 0) {
1076 		kfree(wl->fw_status);
1077 		kfree(wl->raw_fw_status);
1078 		kfree(wl->tx_res_if);
1079 	}
1080 
1081 out:
1082 	return ret;
1083 }
1084 
1085 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1086 {
1087 	int retries = WL1271_BOOT_RETRIES;
1088 	struct wiphy *wiphy = wl->hw->wiphy;
1089 
1090 	static const char* const PLT_MODE[] = {
1091 		"PLT_OFF",
1092 		"PLT_ON",
1093 		"PLT_FEM_DETECT",
1094 		"PLT_CHIP_AWAKE"
1095 	};
1096 
1097 	int ret;
1098 
1099 	mutex_lock(&wl->mutex);
1100 
1101 	wl1271_notice("power up");
1102 
1103 	if (wl->state != WLCORE_STATE_OFF) {
1104 		wl1271_error("cannot go into PLT state because not "
1105 			     "in off state: %d", wl->state);
1106 		ret = -EBUSY;
1107 		goto out;
1108 	}
1109 
1110 	/* Indicate to lower levels that we are now in PLT mode */
1111 	wl->plt = true;
1112 	wl->plt_mode = plt_mode;
1113 
1114 	while (retries) {
1115 		retries--;
1116 		ret = wl12xx_chip_wakeup(wl, true);
1117 		if (ret < 0)
1118 			goto power_off;
1119 
1120 		if (plt_mode != PLT_CHIP_AWAKE) {
1121 			ret = wl->ops->plt_init(wl);
1122 			if (ret < 0)
1123 				goto power_off;
1124 		}
1125 
1126 		wl->state = WLCORE_STATE_ON;
1127 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1128 			      PLT_MODE[plt_mode],
1129 			      wl->chip.fw_ver_str);
1130 
1131 		/* update hw/fw version info in wiphy struct */
1132 		wiphy->hw_version = wl->chip.id;
1133 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1134 			sizeof(wiphy->fw_version));
1135 
1136 		goto out;
1137 
1138 power_off:
1139 		wl1271_power_off(wl);
1140 	}
1141 
1142 	wl->plt = false;
1143 	wl->plt_mode = PLT_OFF;
1144 
1145 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1146 		     WL1271_BOOT_RETRIES);
1147 out:
1148 	mutex_unlock(&wl->mutex);
1149 
1150 	return ret;
1151 }
1152 
1153 int wl1271_plt_stop(struct wl1271 *wl)
1154 {
1155 	int ret = 0;
1156 
1157 	wl1271_notice("power down");
1158 
1159 	/*
1160 	 * Interrupts must be disabled before setting the state to OFF.
1161 	 * Otherwise, the interrupt handler might be called and exit without
1162 	 * reading the interrupt status.
1163 	 */
1164 	wlcore_disable_interrupts(wl);
1165 	mutex_lock(&wl->mutex);
1166 	if (!wl->plt) {
1167 		mutex_unlock(&wl->mutex);
1168 
1169 		/*
1170 		 * This will not necessarily enable interrupts as interrupts
1171 		 * may have been disabled when op_stop was called. It will,
1172 		 * however, balance the above call to disable_interrupts().
1173 		 */
1174 		wlcore_enable_interrupts(wl);
1175 
1176 		wl1271_error("cannot power down because not in PLT "
1177 			     "state: %d", wl->state);
1178 		ret = -EBUSY;
1179 		goto out;
1180 	}
1181 
1182 	mutex_unlock(&wl->mutex);
1183 
1184 	wl1271_flush_deferred_work(wl);
1185 	cancel_work_sync(&wl->netstack_work);
1186 	cancel_work_sync(&wl->recovery_work);
1187 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1188 
1189 	mutex_lock(&wl->mutex);
1190 	wl1271_power_off(wl);
1191 	wl->flags = 0;
1192 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1193 	wl->state = WLCORE_STATE_OFF;
1194 	wl->plt = false;
1195 	wl->plt_mode = PLT_OFF;
1196 	wl->rx_counter = 0;
1197 	mutex_unlock(&wl->mutex);
1198 
1199 out:
1200 	return ret;
1201 }
1202 
1203 static void wl1271_op_tx(struct ieee80211_hw *hw,
1204 			 struct ieee80211_tx_control *control,
1205 			 struct sk_buff *skb)
1206 {
1207 	struct wl1271 *wl = hw->priv;
1208 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1209 	struct ieee80211_vif *vif = info->control.vif;
1210 	struct wl12xx_vif *wlvif = NULL;
1211 	unsigned long flags;
1212 	int q, mapping;
1213 	u8 hlid;
1214 
1215 	if (!vif) {
1216 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1217 		ieee80211_free_txskb(hw, skb);
1218 		return;
1219 	}
1220 
1221 	wlvif = wl12xx_vif_to_data(vif);
1222 	mapping = skb_get_queue_mapping(skb);
1223 	q = wl1271_tx_get_queue(mapping);
1224 
1225 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1226 
1227 	spin_lock_irqsave(&wl->wl_lock, flags);
1228 
1229 	/*
1230 	 * drop the packet if the link is invalid or the queue is stopped
1231 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1232 	 * allow these packets through.
1233 	 */
1234 	if (hlid == WL12XX_INVALID_LINK_ID ||
1235 	    (!test_bit(hlid, wlvif->links_map)) ||
1236 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1237 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1238 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1239 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1240 		ieee80211_free_txskb(hw, skb);
1241 		goto out;
1242 	}
1243 
1244 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1245 		     hlid, q, skb->len);
1246 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1247 
1248 	wl->tx_queue_count[q]++;
1249 	wlvif->tx_queue_count[q]++;
1250 
1251 	/*
1252 	 * The workqueue is slow to process the tx_queue, so we need to stop
1253 	 * the queue here; otherwise the queue will get too long.
1254 	 */
1255 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1256 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1257 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1258 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1259 		wlcore_stop_queue_locked(wl, wlvif, q,
1260 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1261 	}
1262 
1263 	/*
1264 	 * The chip specific setup must run before the first TX packet -
1265 	 * before that, the tx_work will not be initialized!
1266 	 */
1267 
1268 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1269 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1270 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1271 
1272 out:
1273 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1274 }
1275 
1276 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1277 {
1278 	unsigned long flags;
1279 	int q;
1280 
1281 	/* no need to queue a new dummy packet if one is already pending */
1282 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1283 		return 0;
1284 
1285 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1286 
1287 	spin_lock_irqsave(&wl->wl_lock, flags);
1288 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1289 	wl->tx_queue_count[q]++;
1290 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1291 
1292 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1293 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1294 		return wlcore_tx_work_locked(wl);
1295 
1296 	/*
1297 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1298 	 * interrupt handler function
1299 	 */
1300 	return 0;
1301 }
1302 
1303 /*
1304  * The size of the dummy packet should be at least 1400 bytes. However, in
1305  * order to minimize the number of bus transactions, aligning it to 512-byte
1306  * boundaries could be beneficial, performance-wise.
1307  */
1308 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
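/* With the values above, ALIGN(1400, 512) rounds up to 1536 bytes. */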
1309 
1310 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1311 {
1312 	struct sk_buff *skb;
1313 	struct ieee80211_hdr_3addr *hdr;
1314 	unsigned int dummy_packet_size;
1315 
1316 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1317 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1318 
1319 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1320 	if (!skb) {
1321 		wl1271_warning("Failed to allocate a dummy packet skb");
1322 		return NULL;
1323 	}
1324 
1325 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1326 
1327 	hdr = skb_put_zero(skb, sizeof(*hdr));
1328 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1329 					 IEEE80211_STYPE_NULLFUNC |
1330 					 IEEE80211_FCTL_TODS);
1331 
1332 	skb_put_zero(skb, dummy_packet_size);
1333 
1334 	/* Dummy packets require the TID to be management */
1335 	skb->priority = WL1271_TID_MGMT;
1336 
1337 	/* Initialize all fields that might be used */
1338 	skb_set_queue_mapping(skb, 0);
1339 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1340 
1341 	return skb;
1342 }
1343 
1344 
1345 static int
1346 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1347 {
1348 	int num_fields = 0, in_field = 0, fields_size = 0;
1349 	int i, pattern_len = 0;
1350 
1351 	if (!p->mask) {
1352 		wl1271_warning("No mask in WoWLAN pattern");
1353 		return -EINVAL;
1354 	}
1355 
1356 	/*
1357 	 * The pattern is broken up into segments of bytes at different offsets
1358 	 * that need to be checked by the FW filter. Each segment is called
1359 	 * a field in the FW API. We verify that the total number of fields
1360 	 * required for this pattern won't exceed the FW limit (8), and that
1361 	 * the total fields buffer won't exceed the FW limit either.
1362 	 * Note that a pattern which crosses the Ethernet/IP header
1363 	 * boundary requires an additional field.
1364 	 */
1365 	for (i = 0; i < p->pattern_len; i++) {
1366 		if (test_bit(i, (unsigned long *)p->mask)) {
1367 			if (!in_field) {
1368 				in_field = 1;
1369 				pattern_len = 1;
1370 			} else {
1371 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1372 					num_fields++;
1373 					fields_size += pattern_len +
1374 						RX_FILTER_FIELD_OVERHEAD;
1375 					pattern_len = 1;
1376 				} else
1377 					pattern_len++;
1378 			}
1379 		} else {
1380 			if (in_field) {
1381 				in_field = 0;
1382 				fields_size += pattern_len +
1383 					RX_FILTER_FIELD_OVERHEAD;
1384 				num_fields++;
1385 			}
1386 		}
1387 	}
1388 
1389 	if (in_field) {
1390 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1391 		num_fields++;
1392 	}
1393 
1394 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1395 		wl1271_warning("RX Filter too complex. Too many segments");
1396 		return -EINVAL;
1397 	}
1398 
1399 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1400 		wl1271_warning("RX filter pattern is too big");
1401 		return -E2BIG;
1402 	}
1403 
1404 	return 0;
1405 }
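
/*
 * Worked example (illustrative): a mask with one contiguous run of bits at
 * offsets 2-5 and a second run straddling the
 * WL1271_RX_FILTER_ETH_HEADER_SIZE boundary yields three FW fields in the
 * loop above - one for the first run, and two for the straddling run, which
 * is split where it crosses from the Ethernet header into the IP header.
 */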
1406 
1407 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1408 {
1409 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1410 }
1411 
1412 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1413 {
1414 	int i;
1415 
1416 	if (filter == NULL)
1417 		return;
1418 
1419 	for (i = 0; i < filter->num_fields; i++)
1420 		kfree(filter->fields[i].pattern);
1421 
1422 	kfree(filter);
1423 }
1424 
1425 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1426 				 u16 offset, u8 flags,
1427 				 const u8 *pattern, u8 len)
1428 {
1429 	struct wl12xx_rx_filter_field *field;
1430 
1431 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1432 		wl1271_warning("Max fields per RX filter. can't alloc another");
1433 		return -EINVAL;
1434 	}
1435 
1436 	field = &filter->fields[filter->num_fields];
1437 
1438 	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1439 	if (!field->pattern) {
1440 		wl1271_warning("Failed to allocate RX filter pattern");
1441 		return -ENOMEM;
1442 	}
1443 
1444 	filter->num_fields++;
1445 
1446 	field->offset = cpu_to_le16(offset);
1447 	field->flags = flags;
1448 	field->len = len;
1449 
1450 	return 0;
1451 }
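
/*
 * Illustrative usage sketch (not part of the driver, error handling elided):
 * build a single-field filter that matches two bytes at the start of the IP
 * header and signals the host:
 *
 *	static const u8 pattern[] = { 0x45, 0x00 };
 *	struct wl12xx_rx_filter *filter = wl1271_rx_filter_alloc();
 *
 *	wl1271_rx_filter_alloc_field(filter, 0,
 *				     WL1271_RX_FILTER_FLAG_IP_HEADER,
 *				     pattern, sizeof(pattern));
 *	filter->action = FILTER_SIGNAL;
 *	...
 *	wl1271_rx_filter_free(filter);
 */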
1452 
1453 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1454 {
1455 	int i, fields_size = 0;
1456 
1457 	for (i = 0; i < filter->num_fields; i++)
1458 		fields_size += filter->fields[i].len +
1459 			sizeof(struct wl12xx_rx_filter_field) -
1460 			sizeof(u8 *);
1461 
1462 	return fields_size;
1463 }
1464 
1465 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1466 				    u8 *buf)
1467 {
1468 	int i;
1469 	struct wl12xx_rx_filter_field *field;
1470 
1471 	for (i = 0; i < filter->num_fields; i++) {
1472 		field = (struct wl12xx_rx_filter_field *)buf;
1473 
1474 		field->offset = filter->fields[i].offset;
1475 		field->flags = filter->fields[i].flags;
1476 		field->len = filter->fields[i].len;
1477 
1478 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1479 		buf += sizeof(struct wl12xx_rx_filter_field) -
1480 			sizeof(u8 *) + field->len;
1481 	}
1482 }
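
/*
 * Note on the serialized layout produced above: each field is written as its
 * fixed header (struct wl12xx_rx_filter_field minus the pattern pointer)
 * immediately followed by the pattern bytes, which is why both the size
 * calculation and the copy advance by
 * sizeof(struct wl12xx_rx_filter_field) - sizeof(u8 *) + len.
 */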
1483 
1484 /*
1485  * Allocates an RX filter and returns it through f.
1486  * The caller must free it using wl1271_rx_filter_free().
1487  */
1488 static int
1489 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1490 					   struct wl12xx_rx_filter **f)
1491 {
1492 	int i, j, ret = 0;
1493 	struct wl12xx_rx_filter *filter;
1494 	u16 offset;
1495 	u8 flags, len;
1496 
1497 	filter = wl1271_rx_filter_alloc();
1498 	if (!filter) {
1499 		wl1271_warning("Failed to alloc rx filter");
1500 		ret = -ENOMEM;
1501 		goto err;
1502 	}
1503 
1504 	i = 0;
1505 	while (i < p->pattern_len) {
1506 		if (!test_bit(i, (unsigned long *)p->mask)) {
1507 			i++;
1508 			continue;
1509 		}
1510 
1511 		for (j = i; j < p->pattern_len; j++) {
1512 			if (!test_bit(j, (unsigned long *)p->mask))
1513 				break;
1514 
1515 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1516 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1517 				break;
1518 		}
1519 
1520 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1521 			offset = i;
1522 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1523 		} else {
1524 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1525 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1526 		}
1527 
1528 		len = j - i;
1529 
1530 		ret = wl1271_rx_filter_alloc_field(filter,
1531 						   offset,
1532 						   flags,
1533 						   &p->pattern[i], len);
1534 		if (ret)
1535 			goto err;
1536 
1537 		i = j;
1538 	}
1539 
1540 	filter->action = FILTER_SIGNAL;
1541 
1542 	*f = filter;
1543 	return 0;
1544 
1545 err:
1546 	wl1271_rx_filter_free(filter);
1547 	*f = NULL;
1548 
1549 	return ret;
1550 }
1551 
1552 static int wl1271_configure_wowlan(struct wl1271 *wl,
1553 				   struct cfg80211_wowlan *wow)
1554 {
1555 	int i, ret;
1556 
1557 	if (!wow || wow->any || !wow->n_patterns) {
1558 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1559 							  FILTER_SIGNAL);
1560 		if (ret)
1561 			goto out;
1562 
1563 		ret = wl1271_rx_filter_clear_all(wl);
1564 		if (ret)
1565 			goto out;
1566 
1567 		return 0;
1568 	}
1569 
1570 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1571 		return -EINVAL;
1572 
1573 	/* Validate all incoming patterns before clearing current FW state */
1574 	for (i = 0; i < wow->n_patterns; i++) {
1575 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1576 		if (ret) {
1577 			wl1271_warning("Bad wowlan pattern %d", i);
1578 			return ret;
1579 		}
1580 	}
1581 
1582 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1583 	if (ret)
1584 		goto out;
1585 
1586 	ret = wl1271_rx_filter_clear_all(wl);
1587 	if (ret)
1588 		goto out;
1589 
1590 	/* Translate WoWLAN patterns into filters */
1591 	for (i = 0; i < wow->n_patterns; i++) {
1592 		struct cfg80211_pkt_pattern *p;
1593 		struct wl12xx_rx_filter *filter = NULL;
1594 
1595 		p = &wow->patterns[i];
1596 
1597 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1598 		if (ret) {
1599 			wl1271_warning("Failed to create an RX filter from "
1600 				       "wowlan pattern %d", i);
1601 			goto out;
1602 		}
1603 
1604 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1605 
1606 		wl1271_rx_filter_free(filter);
1607 		if (ret)
1608 			goto out;
1609 	}
1610 
1611 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1612 
1613 out:
1614 	return ret;
1615 }
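
/*
 * Summary of the policy configured above: when WoWLAN patterns are present,
 * each pattern is installed as a FILTER_SIGNAL filter and the default action
 * for non-matching traffic is set to FILTER_DROP, so only matching frames
 * reach (and wake) the host. Without patterns (or with "any"), all filters
 * are cleared and default filtering is disabled again.
 */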
1616 
1617 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1618 					struct wl12xx_vif *wlvif,
1619 					struct cfg80211_wowlan *wow)
1620 {
1621 	int ret = 0;
1622 
1623 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1624 		goto out;
1625 
1626 	ret = wl1271_configure_wowlan(wl, wow);
1627 	if (ret < 0)
1628 		goto out;
1629 
1630 	if ((wl->conf.conn.suspend_wake_up_event ==
1631 	     wl->conf.conn.wake_up_event) &&
1632 	    (wl->conf.conn.suspend_listen_interval ==
1633 	     wl->conf.conn.listen_interval))
1634 		goto out;
1635 
1636 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1637 				    wl->conf.conn.suspend_wake_up_event,
1638 				    wl->conf.conn.suspend_listen_interval);
1639 
1640 	if (ret < 0)
1641 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1642 out:
1643 	return ret;
1644 
1645 }
1646 
1647 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1648 					struct wl12xx_vif *wlvif,
1649 					struct cfg80211_wowlan *wow)
1650 {
1651 	int ret = 0;
1652 
1653 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1654 		goto out;
1655 
1656 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1657 	if (ret < 0)
1658 		goto out;
1659 
1660 	ret = wl1271_configure_wowlan(wl, wow);
1661 	if (ret < 0)
1662 		goto out;
1663 
1664 out:
1665 	return ret;
1666 
1667 }
1668 
1669 static int wl1271_configure_suspend(struct wl1271 *wl,
1670 				    struct wl12xx_vif *wlvif,
1671 				    struct cfg80211_wowlan *wow)
1672 {
1673 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1674 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1675 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1676 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1677 	return 0;
1678 }
1679 
1680 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1681 {
1682 	int ret = 0;
1683 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1684 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1685 
1686 	if ((!is_ap) && (!is_sta))
1687 		return;
1688 
1689 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1690 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1691 		return;
1692 
1693 	wl1271_configure_wowlan(wl, NULL);
1694 
1695 	if (is_sta) {
1696 		if ((wl->conf.conn.suspend_wake_up_event ==
1697 		     wl->conf.conn.wake_up_event) &&
1698 		    (wl->conf.conn.suspend_listen_interval ==
1699 		     wl->conf.conn.listen_interval))
1700 			return;
1701 
1702 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1703 				    wl->conf.conn.wake_up_event,
1704 				    wl->conf.conn.listen_interval);
1705 
1706 		if (ret < 0)
1707 			wl1271_error("resume: wake up conditions failed: %d",
1708 				     ret);
1709 
1710 	} else if (is_ap) {
1711 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1712 	}
1713 }
1714 
1715 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1716 					    struct cfg80211_wowlan *wow)
1717 {
1718 	struct wl1271 *wl = hw->priv;
1719 	struct wl12xx_vif *wlvif;
1720 	unsigned long flags;
1721 	int ret;
1722 
1723 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1724 	WARN_ON(!wow);
1725 
1726 	/* we want to perform the recovery before suspending */
1727 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1728 		wl1271_warning("postponing suspend to perform recovery");
1729 		return -EBUSY;
1730 	}
1731 
1732 	wl1271_tx_flush(wl);
1733 
1734 	mutex_lock(&wl->mutex);
1735 
1736 	ret = pm_runtime_get_sync(wl->dev);
1737 	if (ret < 0) {
1738 		pm_runtime_put_noidle(wl->dev);
1739 		mutex_unlock(&wl->mutex);
1740 		return ret;
1741 	}
1742 
1743 	wl->wow_enabled = true;
1744 	wl12xx_for_each_wlvif(wl, wlvif) {
1745 		if (wlcore_is_p2p_mgmt(wlvif))
1746 			continue;
1747 
1748 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1749 		if (ret < 0) {
1750 			goto out_sleep;
1751 		}
1752 	}
1753 
1754 	/* disable fast link flow control notifications from FW */
1755 	ret = wlcore_hw_interrupt_notify(wl, false);
1756 	if (ret < 0)
1757 		goto out_sleep;
1758 
1759 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1760 	ret = wlcore_hw_rx_ba_filter(wl,
1761 				     !!wl->conf.conn.suspend_rx_ba_activity);
1762 	if (ret < 0)
1763 		goto out_sleep;
1764 
1765 out_sleep:
1766 	pm_runtime_put_noidle(wl->dev);
1767 	mutex_unlock(&wl->mutex);
1768 
1769 	if (ret < 0) {
1770 		wl1271_warning("couldn't prepare device to suspend");
1771 		return ret;
1772 	}
1773 
1774 	/* flush any remaining work */
1775 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1776 
1777 	flush_work(&wl->tx_work);
1778 
1779 	/*
1780 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1781 	 * it on resume anyway.
1782 	 */
1783 	cancel_delayed_work(&wl->tx_watchdog_work);
1784 
1785 	/*
1786 	 * Set the suspended flag to avoid triggering new threaded_irq
1787 	 * work.
1788 	 */
1789 	spin_lock_irqsave(&wl->wl_lock, flags);
1790 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1791 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1792 
1793 	return pm_runtime_force_suspend(wl->dev);
1794 }
1795 
1796 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1797 {
1798 	struct wl1271 *wl = hw->priv;
1799 	struct wl12xx_vif *wlvif;
1800 	unsigned long flags;
1801 	bool run_irq_work = false, pending_recovery;
1802 	int ret;
1803 
1804 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1805 		     wl->wow_enabled);
1806 	WARN_ON(!wl->wow_enabled);
1807 
1808 	ret = pm_runtime_force_resume(wl->dev);
1809 	if (ret < 0) {
1810 		wl1271_error("ELP wakeup failure!");
1811 		goto out_sleep;
1812 	}
1813 
1814 	/*
1815 	 * Re-enable irq_work enqueuing, and call irq_work directly if
1816 	 * there is pending work.
1817 	 */
1818 	spin_lock_irqsave(&wl->wl_lock, flags);
1819 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1820 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1821 		run_irq_work = true;
1822 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1823 
1824 	mutex_lock(&wl->mutex);
1825 
1826 	/* test the recovery flag before calling any SDIO functions */
1827 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1828 				    &wl->flags);
1829 
1830 	if (run_irq_work) {
1831 		wl1271_debug(DEBUG_MAC80211,
1832 			     "run postponed irq_work directly");
1833 
1834 		/* don't talk to the HW if recovery is pending */
1835 		if (!pending_recovery) {
1836 			ret = wlcore_irq_locked(wl);
1837 			if (ret)
1838 				wl12xx_queue_recovery_work(wl);
1839 		}
1840 
1841 		wlcore_enable_interrupts(wl);
1842 	}
1843 
1844 	if (pending_recovery) {
1845 		wl1271_warning("queuing forgotten recovery on resume");
1846 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1847 		goto out_sleep;
1848 	}
1849 
1850 	ret = pm_runtime_get_sync(wl->dev);
1851 	if (ret < 0) {
1852 		pm_runtime_put_noidle(wl->dev);
1853 		goto out;
1854 	}
1855 
1856 	wl12xx_for_each_wlvif(wl, wlvif) {
1857 		if (wlcore_is_p2p_mgmt(wlvif))
1858 			continue;
1859 
1860 		wl1271_configure_resume(wl, wlvif);
1861 	}
1862 
1863 	ret = wlcore_hw_interrupt_notify(wl, true);
1864 	if (ret < 0)
1865 		goto out_sleep;
1866 
1867 	/* on resume, stop dropping RX BA frames and restore normal BA handling */
1868 	ret = wlcore_hw_rx_ba_filter(wl, false);
1869 	if (ret < 0)
1870 		goto out_sleep;
1871 
1872 out_sleep:
1873 	pm_runtime_mark_last_busy(wl->dev);
1874 	pm_runtime_put_autosuspend(wl->dev);
1875 
1876 out:
1877 	wl->wow_enabled = false;
1878 
1879 	/*
1880 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1881 	 * That way we avoid possible conditions where Tx-complete interrupts
1882 	 * fail to arrive and we perform a spurious recovery.
1883 	 */
1884 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1885 	mutex_unlock(&wl->mutex);
1886 
1887 	return 0;
1888 }
1889 
1890 static int wl1271_op_start(struct ieee80211_hw *hw)
1891 {
1892 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1893 
1894 	/*
1895 	 * We have to delay the booting of the hardware because
1896 	 * we need to know the local MAC address before downloading and
1897 	 * initializing the firmware. The MAC address cannot be changed
1898 	 * after boot, and without the proper MAC address, the firmware
1899 	 * will not function properly.
1900 	 *
1901 	 * The MAC address is first known when the corresponding interface
1902 	 * is added. That is where we will initialize the hardware.
1903 	 */
1904 
1905 	return 0;
1906 }
1907 
1908 static void wlcore_op_stop_locked(struct wl1271 *wl)
1909 {
1910 	int i;
1911 
1912 	if (wl->state == WLCORE_STATE_OFF) {
1913 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1914 					&wl->flags))
1915 			wlcore_enable_interrupts(wl);
1916 
1917 		return;
1918 	}
1919 
1920 	/*
1921 	 * this must be before the cancel_work calls below, so that the work
1922 	 * functions don't perform further work.
1923 	 */
1924 	wl->state = WLCORE_STATE_OFF;
1925 
1926 	/*
1927 	 * Use the nosync variant to disable interrupts, so the mutex can be
1928 	 * held while doing so without deadlocking.
1929 	 */
1930 	wlcore_disable_interrupts_nosync(wl);
1931 
1932 	mutex_unlock(&wl->mutex);
1933 
1934 	wlcore_synchronize_interrupts(wl);
1935 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1936 		cancel_work_sync(&wl->recovery_work);
1937 	wl1271_flush_deferred_work(wl);
1938 	cancel_delayed_work_sync(&wl->scan_complete_work);
1939 	cancel_work_sync(&wl->netstack_work);
1940 	cancel_work_sync(&wl->tx_work);
1941 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1942 
1943 	/* let's notify MAC80211 about the remaining pending TX frames */
1944 	mutex_lock(&wl->mutex);
1945 	wl12xx_tx_reset(wl);
1946 
1947 	wl1271_power_off(wl);
1948 	/*
1949 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1950 	 * an interrupt storm. Now that the power is down, it is safe to
1951 	 * re-enable interrupts to balance the disable depth
1952 	 */
1953 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1954 		wlcore_enable_interrupts(wl);
1955 
1956 	wl->band = NL80211_BAND_2GHZ;
1957 
1958 	wl->rx_counter = 0;
1959 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1960 	wl->channel_type = NL80211_CHAN_NO_HT;
1961 	wl->tx_blocks_available = 0;
1962 	wl->tx_allocated_blocks = 0;
1963 	wl->tx_results_count = 0;
1964 	wl->tx_packets_count = 0;
1965 	wl->time_offset = 0;
1966 	wl->ap_fw_ps_map = 0;
1967 	wl->ap_ps_map = 0;
1968 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1969 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1970 	memset(wl->links_map, 0, sizeof(wl->links_map));
1971 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1972 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1973 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1974 	wl->active_sta_count = 0;
1975 	wl->active_link_count = 0;
1976 
1977 	/* The system link is always allocated */
1978 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1979 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1980 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1981 
1982 	/*
1983 	 * this is performed after the cancel_work calls and the associated
1984 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1985 	 * get executed before all these vars have been reset.
1986 	 */
1987 	wl->flags = 0;
1988 
1989 	wl->tx_blocks_freed = 0;
1990 
1991 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1992 		wl->tx_pkts_freed[i] = 0;
1993 		wl->tx_allocated_pkts[i] = 0;
1994 	}
1995 
1996 	wl1271_debugfs_reset(wl);
1997 
1998 	kfree(wl->raw_fw_status);
1999 	wl->raw_fw_status = NULL;
2000 	kfree(wl->fw_status);
2001 	wl->fw_status = NULL;
2002 	kfree(wl->tx_res_if);
2003 	wl->tx_res_if = NULL;
2004 	kfree(wl->target_mem_map);
2005 	wl->target_mem_map = NULL;
2006 
2007 	/*
2008 	 * FW channels must be re-calibrated after recovery,
2009 	 * so save the current Reg-Domain channel configuration and clear it.
2010 	 */
2011 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2012 	       sizeof(wl->reg_ch_conf_pending));
2013 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2014 }
2015 
2016 static void wlcore_op_stop(struct ieee80211_hw *hw)
2017 {
2018 	struct wl1271 *wl = hw->priv;
2019 
2020 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2021 
2022 	mutex_lock(&wl->mutex);
2023 
2024 	wlcore_op_stop_locked(wl);
2025 
2026 	mutex_unlock(&wl->mutex);
2027 }
2028 
2029 static void wlcore_channel_switch_work(struct work_struct *work)
2030 {
2031 	struct delayed_work *dwork;
2032 	struct wl1271 *wl;
2033 	struct ieee80211_vif *vif;
2034 	struct wl12xx_vif *wlvif;
2035 	int ret;
2036 
2037 	dwork = to_delayed_work(work);
2038 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2039 	wl = wlvif->wl;
2040 
2041 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2042 
2043 	mutex_lock(&wl->mutex);
2044 
2045 	if (unlikely(wl->state != WLCORE_STATE_ON))
2046 		goto out;
2047 
2048 	/* check the channel switch is still ongoing */
2049 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2050 		goto out;
2051 
2052 	vif = wl12xx_wlvif_to_vif(wlvif);
2053 	ieee80211_chswitch_done(vif, false);
2054 
2055 	ret = pm_runtime_get_sync(wl->dev);
2056 	if (ret < 0) {
2057 		pm_runtime_put_noidle(wl->dev);
2058 		goto out;
2059 	}
2060 
2061 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2062 
2063 	pm_runtime_mark_last_busy(wl->dev);
2064 	pm_runtime_put_autosuspend(wl->dev);
2065 out:
2066 	mutex_unlock(&wl->mutex);
2067 }
2068 
2069 static void wlcore_connection_loss_work(struct work_struct *work)
2070 {
2071 	struct delayed_work *dwork;
2072 	struct wl1271 *wl;
2073 	struct ieee80211_vif *vif;
2074 	struct wl12xx_vif *wlvif;
2075 
2076 	dwork = to_delayed_work(work);
2077 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2078 	wl = wlvif->wl;
2079 
2080 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2081 
2082 	mutex_lock(&wl->mutex);
2083 
2084 	if (unlikely(wl->state != WLCORE_STATE_ON))
2085 		goto out;
2086 
2087 	/* Call mac80211 connection loss */
2088 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2089 		goto out;
2090 
2091 	vif = wl12xx_wlvif_to_vif(wlvif);
2092 	ieee80211_connection_loss(vif);
2093 out:
2094 	mutex_unlock(&wl->mutex);
2095 }
2096 
2097 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2098 {
2099 	struct delayed_work *dwork;
2100 	struct wl1271 *wl;
2101 	struct wl12xx_vif *wlvif;
2102 	unsigned long time_spare;
2103 	int ret;
2104 
2105 	dwork = to_delayed_work(work);
2106 	wlvif = container_of(dwork, struct wl12xx_vif,
2107 			     pending_auth_complete_work);
2108 	wl = wlvif->wl;
2109 
2110 	mutex_lock(&wl->mutex);
2111 
2112 	if (unlikely(wl->state != WLCORE_STATE_ON))
2113 		goto out;
2114 
2115 	/*
2116 	 * Make sure a second really passed since the last auth reply. Maybe
2117 	 * a second auth reply arrived while we were stuck on the mutex.
2118 	 * Check for a little less than the timeout to protect from scheduler
2119 	 * irregularities.
2120 	 */
2121 	time_spare = jiffies +
2122 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2123 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2124 		goto out;
2125 
2126 	ret = pm_runtime_get_sync(wl->dev);
2127 	if (ret < 0) {
2128 		pm_runtime_put_noidle(wl->dev);
2129 		goto out;
2130 	}
2131 
2132 	/* cancel the ROC if active */
2133 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2134 
2135 	pm_runtime_mark_last_busy(wl->dev);
2136 	pm_runtime_put_autosuspend(wl->dev);
2137 out:
2138 	mutex_unlock(&wl->mutex);
2139 }
2140 
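/*
 * Note: the rate-policy and KLV-template indices below are handed out from
 * small bitmaps.  find_first_zero_bit() picks the first free slot,
 * __set_bit() claims it, and the matching free routine clears the bit and
 * parks the index at the out-of-range maximum so a stale index is easy to
 * spot.  A typical caller looks roughly like this (sketch only, error
 * handling omitted):
 *
 *	u8 idx;
 *
 *	if (!wl12xx_allocate_rate_policy(wl, &idx)) {
 *		... use idx when talking to the firmware ...
 *		wl12xx_free_rate_policy(wl, &idx);
 *	}
 */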
2141 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2142 {
2143 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2144 					WL12XX_MAX_RATE_POLICIES);
2145 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2146 		return -EBUSY;
2147 
2148 	__set_bit(policy, wl->rate_policies_map);
2149 	*idx = policy;
2150 	return 0;
2151 }
2152 
2153 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2154 {
2155 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2156 		return;
2157 
2158 	__clear_bit(*idx, wl->rate_policies_map);
2159 	*idx = WL12XX_MAX_RATE_POLICIES;
2160 }
2161 
2162 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2163 {
2164 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2165 					WLCORE_MAX_KLV_TEMPLATES);
2166 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2167 		return -EBUSY;
2168 
2169 	__set_bit(policy, wl->klv_templates_map);
2170 	*idx = policy;
2171 	return 0;
2172 }
2173 
2174 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2175 {
2176 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2177 		return;
2178 
2179 	__clear_bit(*idx, wl->klv_templates_map);
2180 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2181 }
2182 
2183 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2184 {
2185 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2186 
2187 	switch (wlvif->bss_type) {
2188 	case BSS_TYPE_AP_BSS:
2189 		if (wlvif->p2p)
2190 			return WL1271_ROLE_P2P_GO;
2191 		else if (ieee80211_vif_is_mesh(vif))
2192 			return WL1271_ROLE_MESH_POINT;
2193 		else
2194 			return WL1271_ROLE_AP;
2195 
2196 	case BSS_TYPE_STA_BSS:
2197 		if (wlvif->p2p)
2198 			return WL1271_ROLE_P2P_CL;
2199 		else
2200 			return WL1271_ROLE_STA;
2201 
2202 	case BSS_TYPE_IBSS:
2203 		return WL1271_ROLE_IBSS;
2204 
2205 	default:
2206 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2207 	}
2208 	return WL12XX_INVALID_ROLE_TYPE;
2209 }
2210 
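/*
 * Note: wl12xx_init_vif_data() essentially resets everything up to the
 * 'persistent' member of struct wl12xx_vif, maps the mac80211 interface
 * type onto a bss_type, pre-allocates the per-role rate policies (plus the
 * KLV template for STA/IBSS), and copies the globally configured band,
 * channel, power level and channel type into the per-interface state.
 */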
2211 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2212 {
2213 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2214 	int i;
2215 
2216 	/* clear everything but the persistent data */
2217 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2218 
2219 	switch (ieee80211_vif_type_p2p(vif)) {
2220 	case NL80211_IFTYPE_P2P_CLIENT:
2221 		wlvif->p2p = 1;
2222 		/* fall-through */
2223 	case NL80211_IFTYPE_STATION:
2224 	case NL80211_IFTYPE_P2P_DEVICE:
2225 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2226 		break;
2227 	case NL80211_IFTYPE_ADHOC:
2228 		wlvif->bss_type = BSS_TYPE_IBSS;
2229 		break;
2230 	case NL80211_IFTYPE_P2P_GO:
2231 		wlvif->p2p = 1;
2232 		/* fall-through */
2233 	case NL80211_IFTYPE_AP:
2234 	case NL80211_IFTYPE_MESH_POINT:
2235 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2236 		break;
2237 	default:
2238 		wlvif->bss_type = MAX_BSS_TYPE;
2239 		return -EOPNOTSUPP;
2240 	}
2241 
2242 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2243 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2244 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2245 
2246 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2247 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2248 		/* init sta/ibss data */
2249 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2250 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2251 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2252 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2253 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2254 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2255 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2256 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2257 	} else {
2258 		/* init ap data */
2259 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2260 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2261 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2262 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2263 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2264 			wl12xx_allocate_rate_policy(wl,
2265 						&wlvif->ap.ucast_rate_idx[i]);
2266 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2267 		/*
2268 		 * TODO: check if basic_rate shouldn't be
2269 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2270 		 * instead (the same thing for STA above).
2271 		 */
2272 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2273 		/* TODO: this seems to be used only for STA, check it */
2274 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2275 	}
2276 
2277 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2278 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2279 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2280 
2281 	/*
2282 	 * mac80211 configures some values globally, while we treat them
2283 	 * per-interface. thus, on init, we have to copy them from wl
2284 	 */
2285 	wlvif->band = wl->band;
2286 	wlvif->channel = wl->channel;
2287 	wlvif->power_level = wl->power_level;
2288 	wlvif->channel_type = wl->channel_type;
2289 
2290 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2291 		  wl1271_rx_streaming_enable_work);
2292 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2293 		  wl1271_rx_streaming_disable_work);
2294 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2295 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2296 			  wlcore_channel_switch_work);
2297 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2298 			  wlcore_connection_loss_work);
2299 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2300 			  wlcore_pending_auth_complete_work);
2301 	INIT_LIST_HEAD(&wlvif->list);
2302 
2303 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2304 	return 0;
2305 }
2306 
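/*
 * Note: wl12xx_init_fw() retries the wakeup/boot/hw-init sequence up to
 * WL1271_BOOT_RETRIES times, powering the chip off between attempts.  The
 * irq_disable unwind path temporarily drops wl->mutex so that any pending
 * IRQ work can drain; see the comment inside the loop below.
 */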
2307 static int wl12xx_init_fw(struct wl1271 *wl)
2308 {
2309 	int retries = WL1271_BOOT_RETRIES;
2310 	bool booted = false;
2311 	struct wiphy *wiphy = wl->hw->wiphy;
2312 	int ret;
2313 
2314 	while (retries) {
2315 		retries--;
2316 		ret = wl12xx_chip_wakeup(wl, false);
2317 		if (ret < 0)
2318 			goto power_off;
2319 
2320 		ret = wl->ops->boot(wl);
2321 		if (ret < 0)
2322 			goto power_off;
2323 
2324 		ret = wl1271_hw_init(wl);
2325 		if (ret < 0)
2326 			goto irq_disable;
2327 
2328 		booted = true;
2329 		break;
2330 
2331 irq_disable:
2332 		mutex_unlock(&wl->mutex);
2333 		/* Unlocking the mutex in the middle of handling is
2334 		   inherently unsafe. In this case we deem it safe to do,
2335 		   because we need to let any possibly pending IRQ out of
2336 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2337 		   work function will not do anything.) Also, any other
2338 		   possible concurrent operations will fail due to the
2339 		   current state, hence the wl1271 struct should be safe. */
2340 		wlcore_disable_interrupts(wl);
2341 		wl1271_flush_deferred_work(wl);
2342 		cancel_work_sync(&wl->netstack_work);
2343 		mutex_lock(&wl->mutex);
2344 power_off:
2345 		wl1271_power_off(wl);
2346 	}
2347 
2348 	if (!booted) {
2349 		wl1271_error("firmware boot failed despite %d retries",
2350 			     WL1271_BOOT_RETRIES);
2351 		goto out;
2352 	}
2353 
2354 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2355 
2356 	/* update hw/fw version info in wiphy struct */
2357 	wiphy->hw_version = wl->chip.id;
2358 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2359 		sizeof(wiphy->fw_version));
2360 
2361 	/*
2362 	 * Now we know if 11a is supported (info from the NVS), so disable
2363 	 * 11a channels if not supported
2364 	 */
2365 	if (!wl->enable_11a)
2366 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2367 
2368 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2369 		     wl->enable_11a ? "" : "not ");
2370 
2371 	wl->state = WLCORE_STATE_ON;
2372 out:
2373 	return ret;
2374 }
2375 
2376 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2377 {
2378 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2379 }
2380 
2381 /*
2382  * Check whether a fw switch (i.e. moving from one loaded
2383  * fw to another) is needed. This function is also responsible
2384  * for updating wl->last_vif_count, so it must be called before
2385  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2386  * will be used).
2387  */
2388 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2389 				  struct vif_counter_data vif_counter_data,
2390 				  bool add)
2391 {
2392 	enum wl12xx_fw_type current_fw = wl->fw_type;
2393 	u8 vif_count = vif_counter_data.counter;
2394 
2395 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2396 		return false;
2397 
2398 	/* increase the vif count if this is a new vif */
2399 	if (add && !vif_counter_data.cur_vif_running)
2400 		vif_count++;
2401 
2402 	wl->last_vif_count = vif_count;
2403 
2404 	/* no need for fw change if the device is OFF */
2405 	if (wl->state == WLCORE_STATE_OFF)
2406 		return false;
2407 
2408 	/* no need for fw change if a single fw is used */
2409 	if (!wl->mr_fw_name)
2410 		return false;
2411 
2412 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2413 		return true;
2414 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2415 		return true;
2416 
2417 	return false;
2418 }
2419 
2420 /*
2421  * Enter "forced psm". Make sure the sta is in psm against the ap,
2422  * to make the fw switch a bit more disconnection-persistent.
2423  */
2424 static void wl12xx_force_active_psm(struct wl1271 *wl)
2425 {
2426 	struct wl12xx_vif *wlvif;
2427 
2428 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2429 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2430 	}
2431 }
2432 
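/*
 * Note on hw queue allocation: each data interface owns a block of
 * NUM_TX_QUEUES consecutive mac80211 hw queues, so hw_queue_base is always
 * a multiple of NUM_TX_QUEUES and the bitmap below tracks which of the
 * WLCORE_NUM_MAC_ADDRESSES blocks are already taken.  The cab (content
 * after beacon) queues are packed after all the data queues, one per
 * possible interface.
 */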
2433 struct wlcore_hw_queue_iter_data {
2434 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2435 	/* current vif */
2436 	struct ieee80211_vif *vif;
2437 	/* is the current vif among those iterated */
2438 	bool cur_running;
2439 };
2440 
2441 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2442 				 struct ieee80211_vif *vif)
2443 {
2444 	struct wlcore_hw_queue_iter_data *iter_data = data;
2445 
2446 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2447 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2448 		return;
2449 
2450 	if (iter_data->cur_running || vif == iter_data->vif) {
2451 		iter_data->cur_running = true;
2452 		return;
2453 	}
2454 
2455 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2456 }
2457 
2458 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2459 					 struct wl12xx_vif *wlvif)
2460 {
2461 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2462 	struct wlcore_hw_queue_iter_data iter_data = {};
2463 	int i, q_base;
2464 
2465 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2466 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2467 		return 0;
2468 	}
2469 
2470 	iter_data.vif = vif;
2471 
2472 	/* mark all bits taken by active interfaces */
2473 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2474 					IEEE80211_IFACE_ITER_RESUME_ALL,
2475 					wlcore_hw_queue_iter, &iter_data);
2476 
2477 	/* the current vif is already running in mac80211 (resume/recovery) */
2478 	if (iter_data.cur_running) {
2479 		wlvif->hw_queue_base = vif->hw_queue[0];
2480 		wl1271_debug(DEBUG_MAC80211,
2481 			     "using pre-allocated hw queue base %d",
2482 			     wlvif->hw_queue_base);
2483 
2484 		/* the interface type might have changed */
2485 		goto adjust_cab_queue;
2486 	}
2487 
2488 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2489 				     WLCORE_NUM_MAC_ADDRESSES);
2490 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2491 		return -EBUSY;
2492 
2493 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2494 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2495 		     wlvif->hw_queue_base);
2496 
2497 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2498 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2499 		/* register hw queues in mac80211 */
2500 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2501 	}
2502 
2503 adjust_cab_queue:
2504 	/* the last places are reserved for cab queues per interface */
2505 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2506 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2507 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2508 	else
2509 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2510 
2511 	return 0;
2512 }
2513 
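/*
 * Note: the firmware is only booted here, on the first interface add,
 * because the MAC address has to be known first (see the comment in
 * wl1271_op_start()).  Adding an interface may also require switching
 * between the single-role and multi-role firmware images; in that case the
 * code triggers an intended recovery so the hardware comes back up with
 * the appropriate image.
 */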
2514 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2515 				   struct ieee80211_vif *vif)
2516 {
2517 	struct wl1271 *wl = hw->priv;
2518 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2519 	struct vif_counter_data vif_count;
2520 	int ret = 0;
2521 	u8 role_type;
2522 
2523 	if (wl->plt) {
2524 		wl1271_error("Adding Interface not allowed while in PLT mode");
2525 		return -EBUSY;
2526 	}
2527 
2528 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2529 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2530 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2531 
2532 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2533 		     ieee80211_vif_type_p2p(vif), vif->addr);
2534 
2535 	wl12xx_get_vif_count(hw, vif, &vif_count);
2536 
2537 	mutex_lock(&wl->mutex);
2538 
2539 	/*
2540 	 * in some rare HW recovery corner cases it's possible to
2541 	 * get here before __wl1271_op_remove_interface is complete, so
2542 	 * opt out if that is the case.
2543 	 */
2544 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2545 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2546 		ret = -EBUSY;
2547 		goto out;
2548 	}
2549 
2550 
2551 	ret = wl12xx_init_vif_data(wl, vif);
2552 	if (ret < 0)
2553 		goto out;
2554 
2555 	wlvif->wl = wl;
2556 	role_type = wl12xx_get_role_type(wl, wlvif);
2557 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2558 		ret = -EINVAL;
2559 		goto out;
2560 	}
2561 
2562 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2563 	if (ret < 0)
2564 		goto out;
2565 
2566 	/*
2567 	 * TODO: after the nvs issue will be solved, move this block
2568 	 * to start(), and make sure here the driver is ON.
2569 	 */
2570 	if (wl->state == WLCORE_STATE_OFF) {
2571 		/*
2572 		 * we still need this in order to configure the fw
2573 		 * while uploading the nvs
2574 		 */
2575 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2576 
2577 		ret = wl12xx_init_fw(wl);
2578 		if (ret < 0)
2579 			goto out;
2580 	}
2581 
2582 	/*
2583 	 * Call runtime PM only after possible wl12xx_init_fw() above
2584 	 * is done. Otherwise we do not have interrupts enabled.
2585 	 */
2586 	ret = pm_runtime_get_sync(wl->dev);
2587 	if (ret < 0) {
2588 		pm_runtime_put_noidle(wl->dev);
2589 		goto out_unlock;
2590 	}
2591 
2592 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2593 		wl12xx_force_active_psm(wl);
2594 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2595 		mutex_unlock(&wl->mutex);
2596 		wl1271_recovery_work(&wl->recovery_work);
2597 		return 0;
2598 	}
2599 
2600 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2601 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2602 					     role_type, &wlvif->role_id);
2603 		if (ret < 0)
2604 			goto out;
2605 
2606 		ret = wl1271_init_vif_specific(wl, vif);
2607 		if (ret < 0)
2608 			goto out;
2609 
2610 	} else {
2611 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2612 					     &wlvif->dev_role_id);
2613 		if (ret < 0)
2614 			goto out;
2615 
2616 		/* needed mainly for configuring rate policies */
2617 		ret = wl1271_sta_hw_init(wl, wlvif);
2618 		if (ret < 0)
2619 			goto out;
2620 	}
2621 
2622 	list_add(&wlvif->list, &wl->wlvif_list);
2623 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2624 
2625 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2626 		wl->ap_count++;
2627 	else
2628 		wl->sta_count++;
2629 out:
2630 	pm_runtime_mark_last_busy(wl->dev);
2631 	pm_runtime_put_autosuspend(wl->dev);
2632 out_unlock:
2633 	mutex_unlock(&wl->mutex);
2634 
2635 	return ret;
2636 }
2637 
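/*
 * Note: teardown order matters here - the firmware roles are disabled
 * first (under a runtime PM reference, and skipped entirely if a recovery
 * is in progress), then the per-vif resources are released, and only then
 * is wl->mutex dropped so the vif's work items can be cancelled
 * synchronously without deadlocking.  The mutex is re-taken before
 * returning, as the callers expect.
 */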
2638 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2639 					 struct ieee80211_vif *vif,
2640 					 bool reset_tx_queues)
2641 {
2642 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2643 	int i, ret;
2644 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2645 
2646 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2647 
2648 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2649 		return;
2650 
2651 	/* because of hardware recovery, we may get here twice */
2652 	if (wl->state == WLCORE_STATE_OFF)
2653 		return;
2654 
2655 	wl1271_info("down");
2656 
2657 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2658 	    wl->scan_wlvif == wlvif) {
2659 		struct cfg80211_scan_info info = {
2660 			.aborted = true,
2661 		};
2662 
2663 		/*
2664 		 * Rearm the tx watchdog just before idling scan. This
2665 		 * prevents just-finished scans from triggering the watchdog
2666 		 */
2667 		wl12xx_rearm_tx_watchdog_locked(wl);
2668 
2669 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2670 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2671 		wl->scan_wlvif = NULL;
2672 		wl->scan.req = NULL;
2673 		ieee80211_scan_completed(wl->hw, &info);
2674 	}
2675 
2676 	if (wl->sched_vif == wlvif)
2677 		wl->sched_vif = NULL;
2678 
2679 	if (wl->roc_vif == vif) {
2680 		wl->roc_vif = NULL;
2681 		ieee80211_remain_on_channel_expired(wl->hw);
2682 	}
2683 
2684 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2685 		/* disable active roles */
2686 		ret = pm_runtime_get_sync(wl->dev);
2687 		if (ret < 0) {
2688 			pm_runtime_put_noidle(wl->dev);
2689 			goto deinit;
2690 		}
2691 
2692 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2693 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2694 			if (wl12xx_dev_role_started(wlvif))
2695 				wl12xx_stop_dev(wl, wlvif);
2696 		}
2697 
2698 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2699 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2700 			if (ret < 0) {
2701 				pm_runtime_put_noidle(wl->dev);
2702 				goto deinit;
2703 			}
2704 		} else {
2705 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2706 			if (ret < 0) {
2707 				pm_runtime_put_noidle(wl->dev);
2708 				goto deinit;
2709 			}
2710 		}
2711 
2712 		pm_runtime_mark_last_busy(wl->dev);
2713 		pm_runtime_put_autosuspend(wl->dev);
2714 	}
2715 deinit:
2716 	wl12xx_tx_reset_wlvif(wl, wlvif);
2717 
2718 	/* clear all hlids (except system_hlid) */
2719 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2720 
2721 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2722 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2723 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2724 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2725 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2726 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2727 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2728 	} else {
2729 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2730 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2731 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2732 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2733 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2734 			wl12xx_free_rate_policy(wl,
2735 						&wlvif->ap.ucast_rate_idx[i]);
2736 		wl1271_free_ap_keys(wl, wlvif);
2737 	}
2738 
2739 	dev_kfree_skb(wlvif->probereq);
2740 	wlvif->probereq = NULL;
2741 	if (wl->last_wlvif == wlvif)
2742 		wl->last_wlvif = NULL;
2743 	list_del(&wlvif->list);
2744 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2745 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2746 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2747 
2748 	if (is_ap)
2749 		wl->ap_count--;
2750 	else
2751 		wl->sta_count--;
2752 
2753 	/*
2754 	 * Last AP went down while stations remain: configure the sleep auth
2755 	 * according to the STA setting. Don't do this on unintended recovery.
2756 	 */
2757 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2758 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2759 		goto unlock;
2760 
2761 	if (wl->ap_count == 0 && is_ap) {
2762 		/* mask ap events */
2763 		wl->event_mask &= ~wl->ap_event_mask;
2764 		wl1271_event_unmask(wl);
2765 	}
2766 
2767 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2768 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2769 		/* Configure for power according to debugfs */
2770 		if (sta_auth != WL1271_PSM_ILLEGAL)
2771 			wl1271_acx_sleep_auth(wl, sta_auth);
2772 		/* Configure for ELP power saving */
2773 		else
2774 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2775 	}
2776 
2777 unlock:
2778 	mutex_unlock(&wl->mutex);
2779 
2780 	del_timer_sync(&wlvif->rx_streaming_timer);
2781 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2782 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2783 	cancel_work_sync(&wlvif->rc_update_work);
2784 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2785 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2786 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2787 
2788 	mutex_lock(&wl->mutex);
2789 }
2790 
2791 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2792 				       struct ieee80211_vif *vif)
2793 {
2794 	struct wl1271 *wl = hw->priv;
2795 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2796 	struct wl12xx_vif *iter;
2797 	struct vif_counter_data vif_count;
2798 
2799 	wl12xx_get_vif_count(hw, vif, &vif_count);
2800 	mutex_lock(&wl->mutex);
2801 
2802 	if (wl->state == WLCORE_STATE_OFF ||
2803 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2804 		goto out;
2805 
2806 	/*
2807 	 * wl->vif can be null here if someone shuts down the interface
2808 	 * just when hardware recovery has been started.
2809 	 */
2810 	wl12xx_for_each_wlvif(wl, iter) {
2811 		if (iter != wlvif)
2812 			continue;
2813 
2814 		__wl1271_op_remove_interface(wl, vif, true);
2815 		break;
2816 	}
2817 	WARN_ON(iter != wlvif);
2818 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2819 		wl12xx_force_active_psm(wl);
2820 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2821 		wl12xx_queue_recovery_work(wl);
2822 	}
2823 out:
2824 	mutex_unlock(&wl->mutex);
2825 }
2826 
2827 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2828 				      struct ieee80211_vif *vif,
2829 				      enum nl80211_iftype new_type, bool p2p)
2830 {
2831 	struct wl1271 *wl = hw->priv;
2832 	int ret;
2833 
2834 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2835 	wl1271_op_remove_interface(hw, vif);
2836 
2837 	vif->type = new_type;
2838 	vif->p2p = p2p;
2839 	ret = wl1271_op_add_interface(hw, vif);
2840 
2841 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2842 	return ret;
2843 }
2844 
2845 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2846 {
2847 	int ret;
2848 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2849 
2850 	/*
2851 	 * One of the side effects of the JOIN command is that it clears
2852 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2853 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2854 	 * Currently the only valid scenario for JOIN during association
2855 	 * is on roaming, in which case we will also be given new keys.
2856 	 * Keep the below message for now, unless it starts bothering
2857 	 * users who really like to roam a lot :)
2858 	 */
2859 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2860 		wl1271_info("JOIN while associated.");
2861 
2862 	/* clear encryption type */
2863 	wlvif->encryption_type = KEY_NONE;
2864 
2865 	if (is_ibss)
2866 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2867 	else {
2868 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2869 			/*
2870 			 * TODO: this is an ugly workaround for wl12xx fw
2871 			 * bug - we are not able to tx/rx after the first
2872 			 * start_sta, so make dummy start+stop calls,
2873 			 * and then call start_sta again.
2874 			 * this should be fixed in the fw.
2875 			 */
2876 			wl12xx_cmd_role_start_sta(wl, wlvif);
2877 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2878 		}
2879 
2880 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2881 	}
2882 
2883 	return ret;
2884 }
2885 
2886 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2887 			    int offset)
2888 {
2889 	u8 ssid_len;
2890 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2891 					 skb->len - offset);
2892 
2893 	if (!ptr) {
2894 		wl1271_error("No SSID in IEs!");
2895 		return -ENOENT;
2896 	}
2897 
2898 	ssid_len = ptr[1];
2899 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2900 		wl1271_error("SSID is too long!");
2901 		return -EINVAL;
2902 	}
2903 
2904 	wlvif->ssid_len = ssid_len;
2905 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2906 	return 0;
2907 }
2908 
2909 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2910 {
2911 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2912 	struct sk_buff *skb;
2913 	int ieoffset;
2914 
2915 	/* we currently only support setting the ssid from the ap probe req */
2916 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2917 		return -EINVAL;
2918 
2919 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2920 	if (!skb)
2921 		return -EINVAL;
2922 
2923 	ieoffset = offsetof(struct ieee80211_mgmt,
2924 			    u.probe_req.variable);
2925 	wl1271_ssid_set(wlvif, skb, ieoffset);
2926 	dev_kfree_skb(skb);
2927 
2928 	return 0;
2929 }
2930 
2931 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2932 			    struct ieee80211_bss_conf *bss_conf,
2933 			    u32 sta_rate_set)
2934 {
2935 	int ieoffset;
2936 	int ret;
2937 
2938 	wlvif->aid = bss_conf->aid;
2939 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2940 	wlvif->beacon_int = bss_conf->beacon_int;
2941 	wlvif->wmm_enabled = bss_conf->qos;
2942 
2943 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2944 
2945 	/*
2946 	 * with wl1271, we don't need to update the
2947 	 * beacon_int and dtim_period, because the firmware
2948 	 * updates them by itself when the first beacon is
2949 	 * received after a join.
2950 	 */
2951 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2952 	if (ret < 0)
2953 		return ret;
2954 
2955 	/*
2956 	 * Get a template for hardware connection maintenance
2957 	 */
2958 	dev_kfree_skb(wlvif->probereq);
2959 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2960 							wlvif,
2961 							NULL);
2962 	ieoffset = offsetof(struct ieee80211_mgmt,
2963 			    u.probe_req.variable);
2964 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2965 
2966 	/* enable the connection monitoring feature */
2967 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2968 	if (ret < 0)
2969 		return ret;
2970 
2971 	/*
2972 	 * The join command disables the keep-alive mode, shuts down its process,
2973 	 * and also clears the template config, so we need to reset it all after
2974 	 * the join. The acx_aid starts the keep-alive process, and the order
2975 	 * of the commands below is relevant.
2976 	 */
2977 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2978 	if (ret < 0)
2979 		return ret;
2980 
2981 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2982 	if (ret < 0)
2983 		return ret;
2984 
2985 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2986 	if (ret < 0)
2987 		return ret;
2988 
2989 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2990 					   wlvif->sta.klv_template_id,
2991 					   ACX_KEEP_ALIVE_TPL_VALID);
2992 	if (ret < 0)
2993 		return ret;
2994 
2995 	/*
2996 	 * The default fw psm configuration is AUTO, while mac80211 default
2997 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2998 	 */
2999 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3000 	if (ret < 0)
3001 		return ret;
3002 
3003 	if (sta_rate_set) {
3004 		wlvif->rate_set =
3005 			wl1271_tx_enabled_rates_get(wl,
3006 						    sta_rate_set,
3007 						    wlvif->band);
3008 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3009 		if (ret < 0)
3010 			return ret;
3011 	}
3012 
3013 	return ret;
3014 }
3015 
3016 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3017 {
3018 	int ret;
3019 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3020 
3021 	/* make sure we are connected (sta) */
3022 	if (sta &&
3023 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3024 		return false;
3025 
3026 	/* make sure we are joined (ibss) */
3027 	if (!sta &&
3028 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3029 		return false;
3030 
3031 	if (sta) {
3032 		/* use defaults when not associated */
3033 		wlvif->aid = 0;
3034 
3035 		/* free probe-request template */
3036 		dev_kfree_skb(wlvif->probereq);
3037 		wlvif->probereq = NULL;
3038 
3039 		/* disable connection monitor features */
3040 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3041 		if (ret < 0)
3042 			return ret;
3043 
3044 		/* Disable the keep-alive feature */
3045 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3046 		if (ret < 0)
3047 			return ret;
3048 
3049 		/* disable beacon filtering */
3050 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3051 		if (ret < 0)
3052 			return ret;
3053 	}
3054 
3055 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3056 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3057 
3058 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3059 		ieee80211_chswitch_done(vif, false);
3060 		cancel_delayed_work(&wlvif->channel_switch_work);
3061 	}
3062 
3063 	/* invalidate keep-alive template */
3064 	wl1271_acx_keep_alive_config(wl, wlvif,
3065 				     wlvif->sta.klv_template_id,
3066 				     ACX_KEEP_ALIVE_TPL_INVALID);
3067 
3068 	return 0;
3069 }
3070 
3071 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3072 {
3073 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3074 	wlvif->rate_set = wlvif->basic_rate_set;
3075 }
3076 
3077 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3078 				   bool idle)
3079 {
3080 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3081 
3082 	if (idle == cur_idle)
3083 		return;
3084 
3085 	if (idle) {
3086 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3087 	} else {
3088 		/* The current firmware only supports sched_scan in idle */
3089 		if (wl->sched_vif == wlvif)
3090 			wl->ops->sched_scan_stop(wl, wlvif);
3091 
3092 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3093 	}
3094 }
3095 
3096 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3097 			     struct ieee80211_conf *conf, u32 changed)
3098 {
3099 	int ret;
3100 
3101 	if (wlcore_is_p2p_mgmt(wlvif))
3102 		return 0;
3103 
3104 	if (conf->power_level != wlvif->power_level) {
3105 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3106 		if (ret < 0)
3107 			return ret;
3108 
3109 		wlvif->power_level = conf->power_level;
3110 	}
3111 
3112 	return 0;
3113 }
3114 
3115 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3116 {
3117 	struct wl1271 *wl = hw->priv;
3118 	struct wl12xx_vif *wlvif;
3119 	struct ieee80211_conf *conf = &hw->conf;
3120 	int ret = 0;
3121 
3122 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3123 		     " changed 0x%x",
3124 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3125 		     conf->power_level,
3126 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3127 			 changed);
3128 
3129 	mutex_lock(&wl->mutex);
3130 
3131 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3132 		wl->power_level = conf->power_level;
3133 
3134 	if (unlikely(wl->state != WLCORE_STATE_ON))
3135 		goto out;
3136 
3137 	ret = pm_runtime_get_sync(wl->dev);
3138 	if (ret < 0) {
3139 		pm_runtime_put_noidle(wl->dev);
3140 		goto out;
3141 	}
3142 
3143 	/* configure each interface */
3144 	wl12xx_for_each_wlvif(wl, wlvif) {
3145 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3146 		if (ret < 0)
3147 			goto out_sleep;
3148 	}
3149 
3150 out_sleep:
3151 	pm_runtime_mark_last_busy(wl->dev);
3152 	pm_runtime_put_autosuspend(wl->dev);
3153 
3154 out:
3155 	mutex_unlock(&wl->mutex);
3156 
3157 	return ret;
3158 }
3159 
3160 struct wl1271_filter_params {
3161 	bool enabled;
3162 	int mc_list_length;
3163 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3164 };
3165 
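/*
 * Note: prepare_multicast may run in atomic context - hence the GFP_ATOMIC
 * allocation - so the multicast list is copied into a wl1271_filter_params
 * and handed back to mac80211 packed into the u64 "multicast" cookie.  It
 * is decoded and kfree()d again in wl1271_op_configure_filter() below.
 */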
3166 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3167 				       struct netdev_hw_addr_list *mc_list)
3168 {
3169 	struct wl1271_filter_params *fp;
3170 	struct netdev_hw_addr *ha;
3171 
3172 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3173 	if (!fp) {
3174 		wl1271_error("Out of memory setting filters.");
3175 		return 0;
3176 	}
3177 
3178 	/* update multicast filtering parameters */
3179 	fp->mc_list_length = 0;
3180 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3181 		fp->enabled = false;
3182 	} else {
3183 		fp->enabled = true;
3184 		netdev_hw_addr_list_for_each(ha, mc_list) {
3185 			memcpy(fp->mc_list[fp->mc_list_length],
3186 					ha->addr, ETH_ALEN);
3187 			fp->mc_list_length++;
3188 		}
3189 	}
3190 
3191 	return (u64)(unsigned long)fp;
3192 }
3193 
3194 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3195 				  FIF_FCSFAIL | \
3196 				  FIF_BCN_PRBRESP_PROMISC | \
3197 				  FIF_CONTROL | \
3198 				  FIF_OTHER_BSS)
3199 
3200 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3201 				       unsigned int changed,
3202 				       unsigned int *total, u64 multicast)
3203 {
3204 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3205 	struct wl1271 *wl = hw->priv;
3206 	struct wl12xx_vif *wlvif;
3207 
3208 	int ret;
3209 
3210 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3211 		     " total %x", changed, *total);
3212 
3213 	mutex_lock(&wl->mutex);
3214 
3215 	*total &= WL1271_SUPPORTED_FILTERS;
3216 	changed &= WL1271_SUPPORTED_FILTERS;
3217 
3218 	if (unlikely(wl->state != WLCORE_STATE_ON))
3219 		goto out;
3220 
3221 	ret = pm_runtime_get_sync(wl->dev);
3222 	if (ret < 0) {
3223 		pm_runtime_put_noidle(wl->dev);
3224 		goto out;
3225 	}
3226 
3227 	wl12xx_for_each_wlvif(wl, wlvif) {
3228 		if (wlcore_is_p2p_mgmt(wlvif))
3229 			continue;
3230 
3231 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3232 			if (*total & FIF_ALLMULTI)
3233 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3234 								   false,
3235 								   NULL, 0);
3236 			else if (fp)
3237 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3238 							fp->enabled,
3239 							fp->mc_list,
3240 							fp->mc_list_length);
3241 			if (ret < 0)
3242 				goto out_sleep;
3243 		}
3244 
3245 		/*
3246 		 * If the interface is in AP mode and was created with allmulticast,
3247 		 * disable the firmware filters so that all multicast packets are
3248 		 * passed. This is mandatory for mDNS-based discovery protocols.
3249 		 */
3250 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3251 			if (*total & FIF_ALLMULTI) {
3252 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3253 							false,
3254 							NULL, 0);
3255 				if (ret < 0)
3256 					goto out_sleep;
3257 			}
3258 		}
3259 	}
3260 
3261 	/*
3262 	 * the fw doesn't provide an api to configure the filters. instead,
3263 	 * the filters configuration is based on the active roles / ROC
3264 	 * state.
3265 	 */
3266 
3267 out_sleep:
3268 	pm_runtime_mark_last_busy(wl->dev);
3269 	pm_runtime_put_autosuspend(wl->dev);
3270 
3271 out:
3272 	mutex_unlock(&wl->mutex);
3273 	kfree(fp);
3274 }
3275 
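/*
 * Note: keys that mac80211 sets before the AP role has actually been
 * started cannot be programmed into the firmware yet.  They are recorded
 * here and replayed by wl1271_ap_init_hwenc() once the AP is up; recorded
 * entries are freed again in wl1271_free_ap_keys().
 */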
3276 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3277 				u8 id, u8 key_type, u8 key_size,
3278 				const u8 *key, u8 hlid, u32 tx_seq_32,
3279 				u16 tx_seq_16, bool is_pairwise)
3280 {
3281 	struct wl1271_ap_key *ap_key;
3282 	int i;
3283 
3284 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3285 
3286 	if (key_size > MAX_KEY_SIZE)
3287 		return -EINVAL;
3288 
3289 	/*
3290 	 * Find next free entry in ap_keys. Also check we are not replacing
3291 	 * an existing key.
3292 	 */
3293 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3294 		if (wlvif->ap.recorded_keys[i] == NULL)
3295 			break;
3296 
3297 		if (wlvif->ap.recorded_keys[i]->id == id) {
3298 			wl1271_warning("trying to record key replacement");
3299 			return -EINVAL;
3300 		}
3301 	}
3302 
3303 	if (i == MAX_NUM_KEYS)
3304 		return -EBUSY;
3305 
3306 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3307 	if (!ap_key)
3308 		return -ENOMEM;
3309 
3310 	ap_key->id = id;
3311 	ap_key->key_type = key_type;
3312 	ap_key->key_size = key_size;
3313 	memcpy(ap_key->key, key, key_size);
3314 	ap_key->hlid = hlid;
3315 	ap_key->tx_seq_32 = tx_seq_32;
3316 	ap_key->tx_seq_16 = tx_seq_16;
3317 	ap_key->is_pairwise = is_pairwise;
3318 
3319 	wlvif->ap.recorded_keys[i] = ap_key;
3320 	return 0;
3321 }
3322 
3323 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3324 {
3325 	int i;
3326 
3327 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3328 		kfree(wlvif->ap.recorded_keys[i]);
3329 		wlvif->ap.recorded_keys[i] = NULL;
3330 	}
3331 }
3332 
3333 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3334 {
3335 	int i, ret = 0;
3336 	struct wl1271_ap_key *key;
3337 	bool wep_key_added = false;
3338 
3339 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3340 		u8 hlid;
3341 		if (wlvif->ap.recorded_keys[i] == NULL)
3342 			break;
3343 
3344 		key = wlvif->ap.recorded_keys[i];
3345 		hlid = key->hlid;
3346 		if (hlid == WL12XX_INVALID_LINK_ID)
3347 			hlid = wlvif->ap.bcast_hlid;
3348 
3349 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3350 					    key->id, key->key_type,
3351 					    key->key_size, key->key,
3352 					    hlid, key->tx_seq_32,
3353 					    key->tx_seq_16, key->is_pairwise);
3354 		if (ret < 0)
3355 			goto out;
3356 
3357 		if (key->key_type == KEY_WEP)
3358 			wep_key_added = true;
3359 	}
3360 
3361 	if (wep_key_added) {
3362 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3363 						     wlvif->ap.bcast_hlid);
3364 		if (ret < 0)
3365 			goto out;
3366 	}
3367 
3368 out:
3369 	wl1271_free_ap_keys(wl, wlvif);
3370 	return ret;
3371 }
3372 
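/*
 * Note: for AP roles the key is addressed by hlid (the per-station link id,
 * or the broadcast hlid for group keys) and may be recorded for later if
 * the AP has not started yet.  For STA/IBSS roles the key is addressed by
 * MAC address, with the broadcast address standing in for group keys, and
 * unicast key removal is silently ignored since the firmware clears those
 * keys on the next CMD_JOIN anyway.
 */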
3373 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3374 		       u16 action, u8 id, u8 key_type,
3375 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3376 		       u16 tx_seq_16, struct ieee80211_sta *sta,
3377 		       bool is_pairwise)
3378 {
3379 	int ret;
3380 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3381 
3382 	if (is_ap) {
3383 		struct wl1271_station *wl_sta;
3384 		u8 hlid;
3385 
3386 		if (sta) {
3387 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3388 			hlid = wl_sta->hlid;
3389 		} else {
3390 			hlid = wlvif->ap.bcast_hlid;
3391 		}
3392 
3393 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3394 			/*
3395 			 * We do not support removing keys after AP shutdown.
3396 			 * Pretend we do to make mac80211 happy.
3397 			 */
3398 			if (action != KEY_ADD_OR_REPLACE)
3399 				return 0;
3400 
3401 			ret = wl1271_record_ap_key(wl, wlvif, id,
3402 					     key_type, key_size,
3403 					     key, hlid, tx_seq_32,
3404 					     tx_seq_16, is_pairwise);
3405 		} else {
3406 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3407 					     id, key_type, key_size,
3408 					     key, hlid, tx_seq_32,
3409 					     tx_seq_16, is_pairwise);
3410 		}
3411 
3412 		if (ret < 0)
3413 			return ret;
3414 	} else {
3415 		const u8 *addr;
3416 		static const u8 bcast_addr[ETH_ALEN] = {
3417 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3418 		};
3419 
3420 		addr = sta ? sta->addr : bcast_addr;
3421 
3422 		if (is_zero_ether_addr(addr)) {
3423 			/* We don't support TX-only encryption */
3424 			return -EOPNOTSUPP;
3425 		}
3426 
3427 		/* The wl1271 does not allow removing unicast keys - they
3428 		   will be cleared automatically on the next CMD_JOIN. Ignore
3429 		   the request silently, as we don't want mac80211 to emit
3430 		   an error message. */
3431 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3432 			return 0;
3433 
3434 		/* don't remove key if hlid was already deleted */
3435 		if (action == KEY_REMOVE &&
3436 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3437 			return 0;
3438 
3439 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3440 					     id, key_type, key_size,
3441 					     key, addr, tx_seq_32,
3442 					     tx_seq_16);
3443 		if (ret < 0)
3444 			return ret;
3445 
3446 	}
3447 
3448 	return 0;
3449 }
3450 
3451 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3452 			     struct ieee80211_vif *vif,
3453 			     struct ieee80211_sta *sta,
3454 			     struct ieee80211_key_conf *key_conf)
3455 {
3456 	struct wl1271 *wl = hw->priv;
3457 	int ret;
3458 	bool might_change_spare =
3459 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3460 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3461 
3462 	if (might_change_spare) {
3463 		/*
3464 		 * stop the queues and flush to ensure the next packets are
3465 		 * in sync with FW spare block accounting
3466 		 */
3467 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3468 		wl1271_tx_flush(wl);
3469 	}
3470 
3471 	mutex_lock(&wl->mutex);
3472 
3473 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3474 		ret = -EAGAIN;
3475 		goto out_wake_queues;
3476 	}
3477 
3478 	ret = pm_runtime_get_sync(wl->dev);
3479 	if (ret < 0) {
3480 		pm_runtime_put_noidle(wl->dev);
3481 		goto out_wake_queues;
3482 	}
3483 
3484 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3485 
3486 	pm_runtime_mark_last_busy(wl->dev);
3487 	pm_runtime_put_autosuspend(wl->dev);
3488 
3489 out_wake_queues:
3490 	if (might_change_spare)
3491 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3492 
3493 	mutex_unlock(&wl->mutex);
3494 
3495 	return ret;
3496 }
3497 
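/*
 * Note: the TX security sequence counter passed to the firmware is derived
 * from the link's total_freed_pkts counter, split into its upper 32 and
 * lower 16 bits, and the mac80211 cipher suite is mapped onto the firmware
 * key types (WEP/TKIP/AES/GEM/IGTK) before wl1271_set_key() above is
 * called.  The symbol is exported so chip-specific code outside this file
 * can reuse it.
 */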
3498 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3499 		   struct ieee80211_vif *vif,
3500 		   struct ieee80211_sta *sta,
3501 		   struct ieee80211_key_conf *key_conf)
3502 {
3503 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3504 	int ret;
3505 	u32 tx_seq_32 = 0;
3506 	u16 tx_seq_16 = 0;
3507 	u8 key_type;
3508 	u8 hlid;
3509 	bool is_pairwise;
3510 
3511 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3512 
3513 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3514 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3515 		     key_conf->cipher, key_conf->keyidx,
3516 		     key_conf->keylen, key_conf->flags);
3517 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3518 
3519 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3520 		if (sta) {
3521 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3522 			hlid = wl_sta->hlid;
3523 		} else {
3524 			hlid = wlvif->ap.bcast_hlid;
3525 		}
3526 	else
3527 		hlid = wlvif->sta.hlid;
3528 
3529 	if (hlid != WL12XX_INVALID_LINK_ID) {
3530 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3531 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3532 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3533 	}
3534 
3535 	switch (key_conf->cipher) {
3536 	case WLAN_CIPHER_SUITE_WEP40:
3537 	case WLAN_CIPHER_SUITE_WEP104:
3538 		key_type = KEY_WEP;
3539 
3540 		key_conf->hw_key_idx = key_conf->keyidx;
3541 		break;
3542 	case WLAN_CIPHER_SUITE_TKIP:
3543 		key_type = KEY_TKIP;
3544 		key_conf->hw_key_idx = key_conf->keyidx;
3545 		break;
3546 	case WLAN_CIPHER_SUITE_CCMP:
3547 		key_type = KEY_AES;
3548 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3549 		break;
3550 	case WL1271_CIPHER_SUITE_GEM:
3551 		key_type = KEY_GEM;
3552 		break;
3553 	case WLAN_CIPHER_SUITE_AES_CMAC:
3554 		key_type = KEY_IGTK;
3555 		break;
3556 	default:
3557 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3558 
3559 		return -EOPNOTSUPP;
3560 	}
3561 
3562 	is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3563 
3564 	switch (cmd) {
3565 	case SET_KEY:
3566 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3567 				 key_conf->keyidx, key_type,
3568 				 key_conf->keylen, key_conf->key,
3569 				 tx_seq_32, tx_seq_16, sta, is_pairwise);
3570 		if (ret < 0) {
3571 			wl1271_error("Could not add or replace key");
3572 			return ret;
3573 		}
3574 
3575 		/*
3576 		 * reconfiguring arp response if the unicast (or common)
3577 		 * encryption key type was changed
3578 		 */
3579 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3580 		    (sta || key_type == KEY_WEP) &&
3581 		    wlvif->encryption_type != key_type) {
3582 			wlvif->encryption_type = key_type;
3583 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3584 			if (ret < 0) {
3585 				wl1271_warning("build arp rsp failed: %d", ret);
3586 				return ret;
3587 			}
3588 		}
3589 		break;
3590 
3591 	case DISABLE_KEY:
3592 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3593 				     key_conf->keyidx, key_type,
3594 				     key_conf->keylen, key_conf->key,
3595 				     0, 0, sta, is_pairwise);
3596 		if (ret < 0) {
3597 			wl1271_error("Could not remove key");
3598 			return ret;
3599 		}
3600 		break;
3601 
3602 	default:
3603 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3604 		return -EOPNOTSUPP;
3605 	}
3606 
3607 	return ret;
3608 }
3609 EXPORT_SYMBOL_GPL(wlcore_set_key);
3610 
3611 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3612 					  struct ieee80211_vif *vif,
3613 					  int key_idx)
3614 {
3615 	struct wl1271 *wl = hw->priv;
3616 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3617 	int ret;
3618 
3619 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3620 		     key_idx);
3621 
3622 	/* we don't handle unsetting of default key */
3623 	if (key_idx == -1)
3624 		return;
3625 
3626 	mutex_lock(&wl->mutex);
3627 
3628 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3629 		ret = -EAGAIN;
3630 		goto out_unlock;
3631 	}
3632 
3633 	ret = pm_runtime_get_sync(wl->dev);
3634 	if (ret < 0) {
3635 		pm_runtime_put_noidle(wl->dev);
3636 		goto out_unlock;
3637 	}
3638 
3639 	wlvif->default_key = key_idx;
3640 
3641 	/* the default WEP key needs to be configured at least once */
3642 	if (wlvif->encryption_type == KEY_WEP) {
3643 		ret = wl12xx_cmd_set_default_wep_key(wl,
3644 				key_idx,
3645 				wlvif->sta.hlid);
3646 		if (ret < 0)
3647 			goto out_sleep;
3648 	}
3649 
3650 out_sleep:
3651 	pm_runtime_mark_last_busy(wl->dev);
3652 	pm_runtime_put_autosuspend(wl->dev);
3653 
3654 out_unlock:
3655 	mutex_unlock(&wl->mutex);
3656 }
3657 
3658 void wlcore_regdomain_config(struct wl1271 *wl)
3659 {
3660 	int ret;
3661 
3662 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3663 		return;
3664 
3665 	mutex_lock(&wl->mutex);
3666 
3667 	if (unlikely(wl->state != WLCORE_STATE_ON))
3668 		goto out;
3669 
3670 	ret = pm_runtime_get_sync(wl->dev);
3671 	if (ret < 0) {
3672 		pm_runtime_put_autosuspend(wl->dev);
3673 		goto out;
3674 	}
3675 
3676 	ret = wlcore_cmd_regdomain_config_locked(wl);
3677 	if (ret < 0) {
3678 		wl12xx_queue_recovery_work(wl);
3679 		goto out;
3680 	}
3681 
3682 	pm_runtime_mark_last_busy(wl->dev);
3683 	pm_runtime_put_autosuspend(wl->dev);
3684 out:
3685 	mutex_unlock(&wl->mutex);
3686 }
3687 
3688 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3689 			     struct ieee80211_vif *vif,
3690 			     struct ieee80211_scan_request *hw_req)
3691 {
3692 	struct cfg80211_scan_request *req = &hw_req->req;
3693 	struct wl1271 *wl = hw->priv;
3694 	int ret;
3695 	u8 *ssid = NULL;
3696 	size_t len = 0;
3697 
3698 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3699 
3700 	if (req->n_ssids) {
3701 		ssid = req->ssids[0].ssid;
3702 		len = req->ssids[0].ssid_len;
3703 	}
3704 
3705 	mutex_lock(&wl->mutex);
3706 
3707 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3708 		/*
3709 		 * We cannot return -EBUSY here because cfg80211 will expect
3710 		 * a call to ieee80211_scan_completed if we do - in this case
3711 		 * there won't be any call.
3712 		 */
3713 		ret = -EAGAIN;
3714 		goto out;
3715 	}
3716 
3717 	ret = pm_runtime_get_sync(wl->dev);
3718 	if (ret < 0) {
3719 		pm_runtime_put_noidle(wl->dev);
3720 		goto out;
3721 	}
3722 
3723 	/* fail if there is any role in ROC */
3724 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3725 		/* don't allow scanning right now */
3726 		ret = -EBUSY;
3727 		goto out_sleep;
3728 	}
3729 
3730 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3731 out_sleep:
3732 	pm_runtime_mark_last_busy(wl->dev);
3733 	pm_runtime_put_autosuspend(wl->dev);
3734 out:
3735 	mutex_unlock(&wl->mutex);
3736 
3737 	return ret;
3738 }
3739 
3740 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3741 				     struct ieee80211_vif *vif)
3742 {
3743 	struct wl1271 *wl = hw->priv;
3744 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3745 	struct cfg80211_scan_info info = {
3746 		.aborted = true,
3747 	};
3748 	int ret;
3749 
3750 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3751 
3752 	mutex_lock(&wl->mutex);
3753 
3754 	if (unlikely(wl->state != WLCORE_STATE_ON))
3755 		goto out;
3756 
3757 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3758 		goto out;
3759 
3760 	ret = pm_runtime_get_sync(wl->dev);
3761 	if (ret < 0) {
3762 		pm_runtime_put_noidle(wl->dev);
3763 		goto out;
3764 	}
3765 
3766 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3767 		ret = wl->ops->scan_stop(wl, wlvif);
3768 		if (ret < 0)
3769 			goto out_sleep;
3770 	}
3771 
3772 	/*
3773 	 * Rearm the tx watchdog just before idling scan. This
3774 	 * prevents just-finished scans from triggering the watchdog
3775 	 */
3776 	wl12xx_rearm_tx_watchdog_locked(wl);
3777 
3778 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3779 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3780 	wl->scan_wlvif = NULL;
3781 	wl->scan.req = NULL;
3782 	ieee80211_scan_completed(wl->hw, &info);
3783 
3784 out_sleep:
3785 	pm_runtime_mark_last_busy(wl->dev);
3786 	pm_runtime_put_autosuspend(wl->dev);
3787 out:
3788 	mutex_unlock(&wl->mutex);
3789 
3790 	cancel_delayed_work_sync(&wl->scan_complete_work);
3791 }
3792 
3793 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3794 				      struct ieee80211_vif *vif,
3795 				      struct cfg80211_sched_scan_request *req,
3796 				      struct ieee80211_scan_ies *ies)
3797 {
3798 	struct wl1271 *wl = hw->priv;
3799 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3800 	int ret;
3801 
3802 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3803 
3804 	mutex_lock(&wl->mutex);
3805 
3806 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3807 		ret = -EAGAIN;
3808 		goto out;
3809 	}
3810 
3811 	ret = pm_runtime_get_sync(wl->dev);
3812 	if (ret < 0) {
3813 		pm_runtime_put_noidle(wl->dev);
3814 		goto out;
3815 	}
3816 
3817 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3818 	if (ret < 0)
3819 		goto out_sleep;
3820 
3821 	wl->sched_vif = wlvif;
3822 
3823 out_sleep:
3824 	pm_runtime_mark_last_busy(wl->dev);
3825 	pm_runtime_put_autosuspend(wl->dev);
3826 out:
3827 	mutex_unlock(&wl->mutex);
3828 	return ret;
3829 }
3830 
3831 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3832 				     struct ieee80211_vif *vif)
3833 {
3834 	struct wl1271 *wl = hw->priv;
3835 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3836 	int ret;
3837 
3838 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3839 
3840 	mutex_lock(&wl->mutex);
3841 
3842 	if (unlikely(wl->state != WLCORE_STATE_ON))
3843 		goto out;
3844 
3845 	ret = pm_runtime_get_sync(wl->dev);
3846 	if (ret < 0) {
3847 		pm_runtime_put_noidle(wl->dev);
3848 		goto out;
3849 	}
3850 
3851 	wl->ops->sched_scan_stop(wl, wlvif);
3852 
3853 	pm_runtime_mark_last_busy(wl->dev);
3854 	pm_runtime_put_autosuspend(wl->dev);
3855 out:
3856 	mutex_unlock(&wl->mutex);
3857 
3858 	return 0;
3859 }
3860 
3861 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3862 {
3863 	struct wl1271 *wl = hw->priv;
3864 	int ret = 0;
3865 
3866 	mutex_lock(&wl->mutex);
3867 
3868 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3869 		ret = -EAGAIN;
3870 		goto out;
3871 	}
3872 
3873 	ret = pm_runtime_get_sync(wl->dev);
3874 	if (ret < 0) {
3875 		pm_runtime_put_noidle(wl->dev);
3876 		goto out;
3877 	}
3878 
3879 	ret = wl1271_acx_frag_threshold(wl, value);
3880 	if (ret < 0)
3881 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3882 
3883 	pm_runtime_mark_last_busy(wl->dev);
3884 	pm_runtime_put_autosuspend(wl->dev);
3885 
3886 out:
3887 	mutex_unlock(&wl->mutex);
3888 
3889 	return ret;
3890 }
3891 
3892 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3893 {
3894 	struct wl1271 *wl = hw->priv;
3895 	struct wl12xx_vif *wlvif;
3896 	int ret = 0;
3897 
3898 	mutex_lock(&wl->mutex);
3899 
3900 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3901 		ret = -EAGAIN;
3902 		goto out;
3903 	}
3904 
3905 	ret = pm_runtime_get_sync(wl->dev);
3906 	if (ret < 0) {
3907 		pm_runtime_put_noidle(wl->dev);
3908 		goto out;
3909 	}
3910 
3911 	wl12xx_for_each_wlvif(wl, wlvif) {
3912 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3913 		if (ret < 0)
3914 			wl1271_warning("set rts threshold failed: %d", ret);
3915 	}
3916 	pm_runtime_mark_last_busy(wl->dev);
3917 	pm_runtime_put_autosuspend(wl->dev);
3918 
3919 out:
3920 	mutex_unlock(&wl->mutex);
3921 
3922 	return ret;
3923 }
3924 
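/*
 * Remove the information element with the given EID from a management
 * frame held in the skb (searching from ieoffset onwards) and trim the
 * skb accordingly.
 */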
3925 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3926 {
3927 	int len;
3928 	const u8 *next, *end = skb->data + skb->len;
3929 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3930 					skb->len - ieoffset);
3931 	if (!ie)
3932 		return;
3933 	len = ie[1] + 2;
3934 	next = ie + len;
3935 	memmove(ie, next, end - next);
3936 	skb_trim(skb, skb->len - len);
3937 }
3938 
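/*
 * Same as wl12xx_remove_ie(), but matches a vendor-specific IE by
 * OUI and OUI type.
 */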
3939 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3940 					    unsigned int oui, u8 oui_type,
3941 					    int ieoffset)
3942 {
3943 	int len;
3944 	const u8 *next, *end = skb->data + skb->len;
3945 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3946 					       skb->data + ieoffset,
3947 					       skb->len - ieoffset);
3948 	if (!ie)
3949 		return;
3950 	len = ie[1] + 2;
3951 	next = ie + len;
3952 	memmove(ie, next, end - next);
3953 	skb_trim(skb, skb->len - len);
3954 }
3955 
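/*
 * Upload the probe response provided by mac80211 as the AP probe
 * response template.
 */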
3956 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3957 					 struct ieee80211_vif *vif)
3958 {
3959 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3960 	struct sk_buff *skb;
3961 	int ret;
3962 
3963 	skb = ieee80211_proberesp_get(wl->hw, vif);
3964 	if (!skb)
3965 		return -EOPNOTSUPP;
3966 
3967 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3968 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3969 				      skb->data,
3970 				      skb->len, 0,
3971 				      rates);
3972 	dev_kfree_skb(skb);
3973 
3974 	if (ret < 0)
3975 		goto out;
3976 
3977 	wl1271_debug(DEBUG_AP, "probe response updated");
3978 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3979 
3980 out:
3981 	return ret;
3982 }
3983 
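/*
 * Legacy way of setting the AP probe response template: when the vif has
 * no SSID configured (hidden SSID), splice the SSID from bss_conf into
 * the probe response data before uploading the template.
 */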
3984 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3985 					     struct ieee80211_vif *vif,
3986 					     u8 *probe_rsp_data,
3987 					     size_t probe_rsp_len,
3988 					     u32 rates)
3989 {
3990 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3991 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3992 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3993 	int ssid_ie_offset, ie_offset, templ_len;
3994 	const u8 *ptr;
3995 
3996 	/* no need to change probe response if the SSID is set correctly */
3997 	if (wlvif->ssid_len > 0)
3998 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3999 					       CMD_TEMPL_AP_PROBE_RESPONSE,
4000 					       probe_rsp_data,
4001 					       probe_rsp_len, 0,
4002 					       rates);
4003 
4004 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4005 		wl1271_error("probe_rsp template too big");
4006 		return -EINVAL;
4007 	}
4008 
4009 	/* start searching from IE offset */
4010 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4011 
4012 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4013 			       probe_rsp_len - ie_offset);
4014 	if (!ptr) {
4015 		wl1271_error("No SSID in beacon!");
4016 		return -EINVAL;
4017 	}
4018 
4019 	ssid_ie_offset = ptr - probe_rsp_data;
4020 	ptr += (ptr[1] + 2);
4021 
4022 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4023 
4024 	/* insert SSID from bss_conf */
4025 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4026 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4027 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4028 	       bss_conf->ssid, bss_conf->ssid_len);
4029 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4030 
4031 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4032 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4033 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4034 
4035 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4036 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4037 				       probe_rsp_templ,
4038 				       templ_len, 0,
4039 				       rates);
4040 }
4041 
4042 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4043 				       struct ieee80211_vif *vif,
4044 				       struct ieee80211_bss_conf *bss_conf,
4045 				       u32 changed)
4046 {
4047 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4048 	int ret = 0;
4049 
4050 	if (changed & BSS_CHANGED_ERP_SLOT) {
4051 		if (bss_conf->use_short_slot)
4052 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4053 		else
4054 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4055 		if (ret < 0) {
4056 			wl1271_warning("Set slot time failed %d", ret);
4057 			goto out;
4058 		}
4059 	}
4060 
4061 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4062 		if (bss_conf->use_short_preamble)
4063 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4064 		else
4065 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4066 	}
4067 
4068 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4069 		if (bss_conf->use_cts_prot)
4070 			ret = wl1271_acx_cts_protect(wl, wlvif,
4071 						     CTSPROTECT_ENABLE);
4072 		else
4073 			ret = wl1271_acx_cts_protect(wl, wlvif,
4074 						     CTSPROTECT_DISABLE);
4075 		if (ret < 0) {
4076 			wl1271_warning("Set ctsprotect failed %d", ret);
4077 			goto out;
4078 		}
4079 	}
4080 
4081 out:
4082 	return ret;
4083 }
4084 
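/*
 * Upload the current beacon as the beacon template and, unless userspace
 * has set a probe response explicitly, derive a probe response template
 * from the same beacon data (with the TIM and P2P IEs removed).
 */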
4085 static int wlcore_set_beacon_template(struct wl1271 *wl,
4086 				      struct ieee80211_vif *vif,
4087 				      bool is_ap)
4088 {
4089 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4090 	struct ieee80211_hdr *hdr;
4091 	u32 min_rate;
4092 	int ret;
4093 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4094 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4095 	u16 tmpl_id;
4096 
4097 	if (!beacon) {
4098 		ret = -EINVAL;
4099 		goto out;
4100 	}
4101 
4102 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4103 
4104 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4105 	if (ret < 0) {
4106 		dev_kfree_skb(beacon);
4107 		goto out;
4108 	}
4109 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4110 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4111 		CMD_TEMPL_BEACON;
4112 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4113 				      beacon->data,
4114 				      beacon->len, 0,
4115 				      min_rate);
4116 	if (ret < 0) {
4117 		dev_kfree_skb(beacon);
4118 		goto out;
4119 	}
4120 
4121 	wlvif->wmm_enabled =
4122 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4123 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4124 					beacon->data + ieoffset,
4125 					beacon->len - ieoffset);
4126 
4127 	/*
4128 	 * In case a probe-resp template was already set explicitly
4129 	 * by userspace, don't derive one from the beacon data.
4130 	 */
4131 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4132 		goto end_bcn;
4133 
4134 	/* remove TIM ie from probe response */
4135 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4136 
4137 	/*
4138 	 * remove p2p ie from probe response.
4139 	 * the fw responds to probe requests that don't include
4140 	 * the p2p ie. probe requests with a p2p ie will be passed up,
4141 	 * and will be answered by the supplicant (the spec
4142 	 * forbids including the p2p ie when responding to probe
4143 	 * requests that didn't include it).
4144 	 */
4145 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4146 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4147 
4148 	hdr = (struct ieee80211_hdr *) beacon->data;
4149 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4150 					 IEEE80211_STYPE_PROBE_RESP);
4151 	if (is_ap)
4152 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4153 							   beacon->data,
4154 							   beacon->len,
4155 							   min_rate);
4156 	else
4157 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4158 					      CMD_TEMPL_PROBE_RESPONSE,
4159 					      beacon->data,
4160 					      beacon->len, 0,
4161 					      min_rate);
4162 end_bcn:
4163 	dev_kfree_skb(beacon);
4164 	if (ret < 0)
4165 		goto out;
4166 
4167 out:
4168 	return ret;
4169 }
4170 
4171 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4172 					  struct ieee80211_vif *vif,
4173 					  struct ieee80211_bss_conf *bss_conf,
4174 					  u32 changed)
4175 {
4176 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4177 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4178 	int ret = 0;
4179 
4180 	if (changed & BSS_CHANGED_BEACON_INT) {
4181 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4182 			bss_conf->beacon_int);
4183 
4184 		wlvif->beacon_int = bss_conf->beacon_int;
4185 	}
4186 
4187 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4188 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4189 
4190 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4191 	}
4192 
4193 	if (changed & BSS_CHANGED_BEACON) {
4194 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4195 		if (ret < 0)
4196 			goto out;
4197 
4198 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4199 				       &wlvif->flags)) {
4200 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4201 			if (ret < 0)
4202 				goto out;
4203 		}
4204 	}
4205 out:
4206 	if (ret != 0)
4207 		wl1271_error("beacon info change failed: %d", ret);
4208 	return ret;
4209 }
4210 
4211 /* AP mode changes */
4212 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4213 				       struct ieee80211_vif *vif,
4214 				       struct ieee80211_bss_conf *bss_conf,
4215 				       u32 changed)
4216 {
4217 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4218 	int ret = 0;
4219 
4220 	if (changed & BSS_CHANGED_BASIC_RATES) {
4221 		u32 rates = bss_conf->basic_rates;
4222 
4223 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4224 								 wlvif->band);
4225 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4226 							wlvif->basic_rate_set);
4227 
4228 		ret = wl1271_init_ap_rates(wl, wlvif);
4229 		if (ret < 0) {
4230 			wl1271_error("AP rate policy change failed %d", ret);
4231 			goto out;
4232 		}
4233 
4234 		ret = wl1271_ap_init_templates(wl, vif);
4235 		if (ret < 0)
4236 			goto out;
4237 
4238 		/* No need to set probe resp template for mesh */
4239 		if (!ieee80211_vif_is_mesh(vif)) {
4240 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4241 							    wlvif->basic_rate,
4242 							    vif);
4243 			if (ret < 0)
4244 				goto out;
4245 		}
4246 
4247 		ret = wlcore_set_beacon_template(wl, vif, true);
4248 		if (ret < 0)
4249 			goto out;
4250 	}
4251 
4252 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4253 	if (ret < 0)
4254 		goto out;
4255 
4256 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4257 		if (bss_conf->enable_beacon) {
4258 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4259 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4260 				if (ret < 0)
4261 					goto out;
4262 
4263 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4264 				if (ret < 0)
4265 					goto out;
4266 
4267 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4268 				wl1271_debug(DEBUG_AP, "started AP");
4269 			}
4270 		} else {
4271 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4272 				/*
4273 				 * The AP might be in ROC in case we have just
4274 				 * sent an auth reply. Handle it.
4275 				 */
4276 				if (test_bit(wlvif->role_id, wl->roc_map))
4277 					wl12xx_croc(wl, wlvif->role_id);
4278 
4279 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4280 				if (ret < 0)
4281 					goto out;
4282 
4283 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4284 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4285 					  &wlvif->flags);
4286 				wl1271_debug(DEBUG_AP, "stopped AP");
4287 			}
4288 		}
4289 	}
4290 
4291 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4292 	if (ret < 0)
4293 		goto out;
4294 
4295 	/* Handle HT information change */
4296 	if ((changed & BSS_CHANGED_HT) &&
4297 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4298 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4299 					bss_conf->ht_operation_mode);
4300 		if (ret < 0) {
4301 			wl1271_warning("Set ht information failed %d", ret);
4302 			goto out;
4303 		}
4304 	}
4305 
4306 out:
4307 	return;
4308 }
4309 
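/*
 * Apply a new BSSID: update the rate sets, stop any scheduled scan on
 * this vif, rebuild the null-data templates and mark the vif as in use.
 */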
4310 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4311 			    struct ieee80211_bss_conf *bss_conf,
4312 			    u32 sta_rate_set)
4313 {
4314 	u32 rates;
4315 	int ret;
4316 
4317 	wl1271_debug(DEBUG_MAC80211,
4318 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4319 	     bss_conf->bssid, bss_conf->aid,
4320 	     bss_conf->beacon_int,
4321 	     bss_conf->basic_rates, sta_rate_set);
4322 
4323 	wlvif->beacon_int = bss_conf->beacon_int;
4324 	rates = bss_conf->basic_rates;
4325 	wlvif->basic_rate_set =
4326 		wl1271_tx_enabled_rates_get(wl, rates,
4327 					    wlvif->band);
4328 	wlvif->basic_rate =
4329 		wl1271_tx_min_rate_get(wl,
4330 				       wlvif->basic_rate_set);
4331 
4332 	if (sta_rate_set)
4333 		wlvif->rate_set =
4334 			wl1271_tx_enabled_rates_get(wl,
4335 						sta_rate_set,
4336 						wlvif->band);
4337 
4338 	/* we only support sched_scan while not connected */
4339 	if (wl->sched_vif == wlvif)
4340 		wl->ops->sched_scan_stop(wl, wlvif);
4341 
4342 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4343 	if (ret < 0)
4344 		return ret;
4345 
4346 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4347 	if (ret < 0)
4348 		return ret;
4349 
4350 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4351 	if (ret < 0)
4352 		return ret;
4353 
4354 	wlcore_set_ssid(wl, wlvif);
4355 
4356 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4357 
4358 	return 0;
4359 }
4360 
4361 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4362 {
4363 	int ret;
4364 
4365 	/* revert back to minimum rates for the current band */
4366 	wl1271_set_band_rate(wl, wlvif);
4367 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4368 
4369 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4370 	if (ret < 0)
4371 		return ret;
4372 
4373 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4374 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4375 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4376 		if (ret < 0)
4377 			return ret;
4378 	}
4379 
4380 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4381 	return 0;
4382 }
4383 /* STA/IBSS mode changes */
4384 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4385 					struct ieee80211_vif *vif,
4386 					struct ieee80211_bss_conf *bss_conf,
4387 					u32 changed)
4388 {
4389 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4390 	bool do_join = false;
4391 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4392 	bool ibss_joined = false;
4393 	u32 sta_rate_set = 0;
4394 	int ret;
4395 	struct ieee80211_sta *sta;
4396 	bool sta_exists = false;
4397 	struct ieee80211_sta_ht_cap sta_ht_cap;
4398 
4399 	if (is_ibss) {
4400 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4401 						     changed);
4402 		if (ret < 0)
4403 			goto out;
4404 	}
4405 
4406 	if (changed & BSS_CHANGED_IBSS) {
4407 		if (bss_conf->ibss_joined) {
4408 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4409 			ibss_joined = true;
4410 		} else {
4411 			wlcore_unset_assoc(wl, wlvif);
4412 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4413 		}
4414 	}
4415 
4416 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4417 		do_join = true;
4418 
4419 	/* Need to update the SSID (for filtering etc) */
4420 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4421 		do_join = true;
4422 
4423 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4424 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4425 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4426 
4427 		do_join = true;
4428 	}
4429 
4430 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4431 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4432 
4433 	if (changed & BSS_CHANGED_CQM) {
4434 		bool enable = false;
4435 		if (bss_conf->cqm_rssi_thold)
4436 			enable = true;
4437 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4438 						  bss_conf->cqm_rssi_thold,
4439 						  bss_conf->cqm_rssi_hyst);
4440 		if (ret < 0)
4441 			goto out;
4442 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4443 	}
4444 
4445 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4446 		       BSS_CHANGED_ASSOC)) {
4447 		rcu_read_lock();
4448 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4449 		if (sta) {
4450 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4451 
4452 			/* save the supp_rates of the ap */
4453 			sta_rate_set = sta->supp_rates[wlvif->band];
4454 			if (sta->ht_cap.ht_supported)
4455 				sta_rate_set |=
4456 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4457 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4458 			sta_ht_cap = sta->ht_cap;
4459 			sta_exists = true;
4460 		}
4461 
4462 		rcu_read_unlock();
4463 	}
4464 
4465 	if (changed & BSS_CHANGED_BSSID) {
4466 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4467 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4468 					       sta_rate_set);
4469 			if (ret < 0)
4470 				goto out;
4471 
4472 			/* Need to update the BSSID (for filtering etc) */
4473 			do_join = true;
4474 		} else {
4475 			ret = wlcore_clear_bssid(wl, wlvif);
4476 			if (ret < 0)
4477 				goto out;
4478 		}
4479 	}
4480 
4481 	if (changed & BSS_CHANGED_IBSS) {
4482 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4483 			     bss_conf->ibss_joined);
4484 
4485 		if (bss_conf->ibss_joined) {
4486 			u32 rates = bss_conf->basic_rates;
4487 			wlvif->basic_rate_set =
4488 				wl1271_tx_enabled_rates_get(wl, rates,
4489 							    wlvif->band);
4490 			wlvif->basic_rate =
4491 				wl1271_tx_min_rate_get(wl,
4492 						       wlvif->basic_rate_set);
4493 
4494 			/* by default, use 11b + OFDM rates */
4495 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4496 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4497 			if (ret < 0)
4498 				goto out;
4499 		}
4500 	}
4501 
4502 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4503 		/* enable beacon filtering */
4504 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4505 		if (ret < 0)
4506 			goto out;
4507 	}
4508 
4509 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4510 	if (ret < 0)
4511 		goto out;
4512 
4513 	if (do_join) {
4514 		ret = wlcore_join(wl, wlvif);
4515 		if (ret < 0) {
4516 			wl1271_warning("cmd join failed %d", ret);
4517 			goto out;
4518 		}
4519 	}
4520 
4521 	if (changed & BSS_CHANGED_ASSOC) {
4522 		if (bss_conf->assoc) {
4523 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4524 					       sta_rate_set);
4525 			if (ret < 0)
4526 				goto out;
4527 
4528 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4529 				wl12xx_set_authorized(wl, wlvif);
4530 		} else {
4531 			wlcore_unset_assoc(wl, wlvif);
4532 		}
4533 	}
4534 
4535 	if (changed & BSS_CHANGED_PS) {
4536 		if ((bss_conf->ps) &&
4537 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4538 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4539 			int ps_mode;
4540 			char *ps_mode_str;
4541 
4542 			if (wl->conf.conn.forced_ps) {
4543 				ps_mode = STATION_POWER_SAVE_MODE;
4544 				ps_mode_str = "forced";
4545 			} else {
4546 				ps_mode = STATION_AUTO_PS_MODE;
4547 				ps_mode_str = "auto";
4548 			}
4549 
4550 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4551 
4552 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4553 			if (ret < 0)
4554 				wl1271_warning("enter %s ps failed %d",
4555 					       ps_mode_str, ret);
4556 		} else if (!bss_conf->ps &&
4557 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4558 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4559 
4560 			ret = wl1271_ps_set_mode(wl, wlvif,
4561 						 STATION_ACTIVE_MODE);
4562 			if (ret < 0)
4563 				wl1271_warning("exit auto ps failed %d", ret);
4564 		}
4565 	}
4566 
4567 	/* Handle new association with HT. Do this after join. */
4568 	if (sta_exists) {
4569 		bool enabled =
4570 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4571 
4572 		ret = wlcore_hw_set_peer_cap(wl,
4573 					     &sta_ht_cap,
4574 					     enabled,
4575 					     wlvif->rate_set,
4576 					     wlvif->sta.hlid);
4577 		if (ret < 0) {
4578 			wl1271_warning("Set ht cap failed %d", ret);
4579 			goto out;
4580 
4581 		}
4582 
4583 		if (enabled) {
4584 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4585 						bss_conf->ht_operation_mode);
4586 			if (ret < 0) {
4587 				wl1271_warning("Set ht information failed %d",
4588 					       ret);
4589 				goto out;
4590 			}
4591 		}
4592 	}
4593 
4594 	/* Handle arp filtering. Done after join. */
4595 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4596 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4597 		__be32 addr = bss_conf->arp_addr_list[0];
4598 		wlvif->sta.qos = bss_conf->qos;
4599 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4600 
4601 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4602 			wlvif->ip_addr = addr;
4603 			/*
4604 			 * The template should have been configured only upon
4605 			 * association. However, it seems that the correct IP
4606 			 * isn't being set (when sending), so we have to
4607 			 * reconfigure the template upon every IP change.
4608 			 */
4609 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4610 			if (ret < 0) {
4611 				wl1271_warning("build arp rsp failed: %d", ret);
4612 				goto out;
4613 			}
4614 
4615 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4616 				(ACX_ARP_FILTER_ARP_FILTERING |
4617 				 ACX_ARP_FILTER_AUTO_ARP),
4618 				addr);
4619 		} else {
4620 			wlvif->ip_addr = 0;
4621 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4622 		}
4623 
4624 		if (ret < 0)
4625 			goto out;
4626 	}
4627 
4628 out:
4629 	return;
4630 }
4631 
4632 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4633 				       struct ieee80211_vif *vif,
4634 				       struct ieee80211_bss_conf *bss_conf,
4635 				       u32 changed)
4636 {
4637 	struct wl1271 *wl = hw->priv;
4638 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4639 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4640 	int ret;
4641 
4642 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4643 		     wlvif->role_id, (int)changed);
4644 
4645 	/*
4646 	 * make sure to cancel pending disconnections if our association
4647 	 * state changed
4648 	 */
4649 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4650 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4651 
4652 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4653 	    !bss_conf->enable_beacon)
4654 		wl1271_tx_flush(wl);
4655 
4656 	mutex_lock(&wl->mutex);
4657 
4658 	if (unlikely(wl->state != WLCORE_STATE_ON))
4659 		goto out;
4660 
4661 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4662 		goto out;
4663 
4664 	ret = pm_runtime_get_sync(wl->dev);
4665 	if (ret < 0) {
4666 		pm_runtime_put_noidle(wl->dev);
4667 		goto out;
4668 	}
4669 
4670 	if ((changed & BSS_CHANGED_TXPOWER) &&
4671 	    bss_conf->txpower != wlvif->power_level) {
4672 
4673 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4674 		if (ret < 0)
4675 			goto out;
4676 
4677 		wlvif->power_level = bss_conf->txpower;
4678 	}
4679 
4680 	if (is_ap)
4681 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4682 	else
4683 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4684 
4685 	pm_runtime_mark_last_busy(wl->dev);
4686 	pm_runtime_put_autosuspend(wl->dev);
4687 
4688 out:
4689 	mutex_unlock(&wl->mutex);
4690 }
4691 
4692 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4693 				 struct ieee80211_chanctx_conf *ctx)
4694 {
4695 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4696 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4697 		     cfg80211_get_chandef_type(&ctx->def));
4698 	return 0;
4699 }
4700 
4701 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4702 				     struct ieee80211_chanctx_conf *ctx)
4703 {
4704 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4705 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4706 		     cfg80211_get_chandef_type(&ctx->def));
4707 }
4708 
4709 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4710 				     struct ieee80211_chanctx_conf *ctx,
4711 				     u32 changed)
4712 {
4713 	struct wl1271 *wl = hw->priv;
4714 	struct wl12xx_vif *wlvif;
4715 	int ret;
4716 	int channel = ieee80211_frequency_to_channel(
4717 		ctx->def.chan->center_freq);
4718 
4719 	wl1271_debug(DEBUG_MAC80211,
4720 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4721 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4722 
4723 	mutex_lock(&wl->mutex);
4724 
4725 	ret = pm_runtime_get_sync(wl->dev);
4726 	if (ret < 0) {
4727 		pm_runtime_put_noidle(wl->dev);
4728 		goto out;
4729 	}
4730 
4731 	wl12xx_for_each_wlvif(wl, wlvif) {
4732 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4733 
4734 		rcu_read_lock();
4735 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4736 			rcu_read_unlock();
4737 			continue;
4738 		}
4739 		rcu_read_unlock();
4740 
4741 		/* start radar if needed */
4742 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4743 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4744 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4745 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4746 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4747 			wlcore_hw_set_cac(wl, wlvif, true);
4748 			wlvif->radar_enabled = true;
4749 		}
4750 	}
4751 
4752 	pm_runtime_mark_last_busy(wl->dev);
4753 	pm_runtime_put_autosuspend(wl->dev);
4754 out:
4755 	mutex_unlock(&wl->mutex);
4756 }
4757 
4758 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4759 					struct ieee80211_vif *vif,
4760 					struct ieee80211_chanctx_conf *ctx)
4761 {
4762 	struct wl1271 *wl = hw->priv;
4763 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4764 	int channel = ieee80211_frequency_to_channel(
4765 		ctx->def.chan->center_freq);
4766 	int ret = -EINVAL;
4767 
4768 	wl1271_debug(DEBUG_MAC80211,
4769 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4770 		     wlvif->role_id, channel,
4771 		     cfg80211_get_chandef_type(&ctx->def),
4772 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4773 
4774 	mutex_lock(&wl->mutex);
4775 
4776 	if (unlikely(wl->state != WLCORE_STATE_ON))
4777 		goto out;
4778 
4779 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4780 		goto out;
4781 
4782 	ret = pm_runtime_get_sync(wl->dev);
4783 	if (ret < 0) {
4784 		pm_runtime_put_noidle(wl->dev);
4785 		goto out;
4786 	}
4787 
4788 	wlvif->band = ctx->def.chan->band;
4789 	wlvif->channel = channel;
4790 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4791 
4792 	/* update default rates according to the band */
4793 	wl1271_set_band_rate(wl, wlvif);
4794 
4795 	if (ctx->radar_enabled &&
4796 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4797 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4798 		wlcore_hw_set_cac(wl, wlvif, true);
4799 		wlvif->radar_enabled = true;
4800 	}
4801 
4802 	pm_runtime_mark_last_busy(wl->dev);
4803 	pm_runtime_put_autosuspend(wl->dev);
4804 out:
4805 	mutex_unlock(&wl->mutex);
4806 
4807 	return 0;
4808 }
4809 
4810 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4811 					   struct ieee80211_vif *vif,
4812 					   struct ieee80211_chanctx_conf *ctx)
4813 {
4814 	struct wl1271 *wl = hw->priv;
4815 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4816 	int ret;
4817 
4818 	wl1271_debug(DEBUG_MAC80211,
4819 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4820 		     wlvif->role_id,
4821 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4822 		     cfg80211_get_chandef_type(&ctx->def));
4823 
4824 	wl1271_tx_flush(wl);
4825 
4826 	mutex_lock(&wl->mutex);
4827 
4828 	if (unlikely(wl->state != WLCORE_STATE_ON))
4829 		goto out;
4830 
4831 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4832 		goto out;
4833 
4834 	ret = pm_runtime_get_sync(wl->dev);
4835 	if (ret < 0) {
4836 		pm_runtime_put_noidle(wl->dev);
4837 		goto out;
4838 	}
4839 
4840 	if (wlvif->radar_enabled) {
4841 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4842 		wlcore_hw_set_cac(wl, wlvif, false);
4843 		wlvif->radar_enabled = false;
4844 	}
4845 
4846 	pm_runtime_mark_last_busy(wl->dev);
4847 	pm_runtime_put_autosuspend(wl->dev);
4848 out:
4849 	mutex_unlock(&wl->mutex);
4850 }
4851 
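/*
 * Move an AP role to a new channel context: stop radar detection on the
 * old channel, adopt the new band/channel/type and restart CAC if the
 * new context has radar enabled. Beaconing must already be disabled.
 */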
4852 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4853 				    struct wl12xx_vif *wlvif,
4854 				    struct ieee80211_chanctx_conf *new_ctx)
4855 {
4856 	int channel = ieee80211_frequency_to_channel(
4857 		new_ctx->def.chan->center_freq);
4858 
4859 	wl1271_debug(DEBUG_MAC80211,
4860 		     "switch vif (role %d) %d -> %d chan_type: %d",
4861 		     wlvif->role_id, wlvif->channel, channel,
4862 		     cfg80211_get_chandef_type(&new_ctx->def));
4863 
4864 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4865 		return 0;
4866 
4867 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4868 
4869 	if (wlvif->radar_enabled) {
4870 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4871 		wlcore_hw_set_cac(wl, wlvif, false);
4872 		wlvif->radar_enabled = false;
4873 	}
4874 
4875 	wlvif->band = new_ctx->def.chan->band;
4876 	wlvif->channel = channel;
4877 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4878 
4879 	/* start radar if needed */
4880 	if (new_ctx->radar_enabled) {
4881 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4882 		wlcore_hw_set_cac(wl, wlvif, true);
4883 		wlvif->radar_enabled = true;
4884 	}
4885 
4886 	return 0;
4887 }
4888 
4889 static int
4890 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4891 			     struct ieee80211_vif_chanctx_switch *vifs,
4892 			     int n_vifs,
4893 			     enum ieee80211_chanctx_switch_mode mode)
4894 {
4895 	struct wl1271 *wl = hw->priv;
4896 	int i, ret;
4897 
4898 	wl1271_debug(DEBUG_MAC80211,
4899 		     "mac80211 switch chanctx n_vifs %d mode %d",
4900 		     n_vifs, mode);
4901 
4902 	mutex_lock(&wl->mutex);
4903 
4904 	ret = pm_runtime_get_sync(wl->dev);
4905 	if (ret < 0) {
4906 		pm_runtime_put_noidle(wl->dev);
4907 		goto out;
4908 	}
4909 
4910 	for (i = 0; i < n_vifs; i++) {
4911 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4912 
4913 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4914 		if (ret)
4915 			goto out_sleep;
4916 	}
4917 out_sleep:
4918 	pm_runtime_mark_last_busy(wl->dev);
4919 	pm_runtime_put_autosuspend(wl->dev);
4920 out:
4921 	mutex_unlock(&wl->mutex);
4922 
4923 	return 0;
4924 }
4925 
4926 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4927 			     struct ieee80211_vif *vif, u16 queue,
4928 			     const struct ieee80211_tx_queue_params *params)
4929 {
4930 	struct wl1271 *wl = hw->priv;
4931 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4932 	u8 ps_scheme;
4933 	int ret = 0;
4934 
4935 	if (wlcore_is_p2p_mgmt(wlvif))
4936 		return 0;
4937 
4938 	mutex_lock(&wl->mutex);
4939 
4940 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4941 
4942 	if (params->uapsd)
4943 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4944 	else
4945 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4946 
4947 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4948 		goto out;
4949 
4950 	ret = pm_runtime_get_sync(wl->dev);
4951 	if (ret < 0) {
4952 		pm_runtime_put_noidle(wl->dev);
4953 		goto out;
4954 	}
4955 
4956 	/*
4957 	 * the txop is passed by mac80211 in units of 32us,
4958 	 * but the ACX command expects it in microseconds
4959 	 */
4960 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4961 				params->cw_min, params->cw_max,
4962 				params->aifs, params->txop << 5);
4963 	if (ret < 0)
4964 		goto out_sleep;
4965 
4966 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4967 				 CONF_CHANNEL_TYPE_EDCF,
4968 				 wl1271_tx_get_queue(queue),
4969 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4970 				 0, 0);
4971 
4972 out_sleep:
4973 	pm_runtime_mark_last_busy(wl->dev);
4974 	pm_runtime_put_autosuspend(wl->dev);
4975 
4976 out:
4977 	mutex_unlock(&wl->mutex);
4978 
4979 	return ret;
4980 }
4981 
4982 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4983 			     struct ieee80211_vif *vif)
4984 {
4985 
4986 	struct wl1271 *wl = hw->priv;
4987 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4988 	u64 mactime = ULLONG_MAX;
4989 	int ret;
4990 
4991 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4992 
4993 	mutex_lock(&wl->mutex);
4994 
4995 	if (unlikely(wl->state != WLCORE_STATE_ON))
4996 		goto out;
4997 
4998 	ret = pm_runtime_get_sync(wl->dev);
4999 	if (ret < 0) {
5000 		pm_runtime_put_noidle(wl->dev);
5001 		goto out;
5002 	}
5003 
5004 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5005 	if (ret < 0)
5006 		goto out_sleep;
5007 
5008 out_sleep:
5009 	pm_runtime_mark_last_busy(wl->dev);
5010 	pm_runtime_put_autosuspend(wl->dev);
5011 
5012 out:
5013 	mutex_unlock(&wl->mutex);
5014 	return mactime;
5015 }
5016 
5017 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5018 				struct survey_info *survey)
5019 {
5020 	struct ieee80211_conf *conf = &hw->conf;
5021 
5022 	if (idx != 0)
5023 		return -ENOENT;
5024 
5025 	survey->channel = conf->chandef.chan;
5026 	survey->filled = 0;
5027 	return 0;
5028 }
5029 
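/* Allocate an HLID (host link ID) for a new station in AP mode. */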
5030 static int wl1271_allocate_sta(struct wl1271 *wl,
5031 			     struct wl12xx_vif *wlvif,
5032 			     struct ieee80211_sta *sta)
5033 {
5034 	struct wl1271_station *wl_sta;
5035 	int ret;
5036 
5037 
5038 	if (wl->active_sta_count >= wl->max_ap_stations) {
5039 		wl1271_warning("could not allocate HLID - too many stations");
5040 		return -EBUSY;
5041 	}
5042 
5043 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5044 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5045 	if (ret < 0) {
5046 		wl1271_warning("could not allocate HLID - too many links");
5047 		return -EBUSY;
5048 	}
5049 
5050 	/* use the previous security seq, if this is a recovery/resume */
5051 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5052 
5053 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5054 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5055 	wl->active_sta_count++;
5056 	return 0;
5057 }
5058 
5059 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5060 {
5061 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5062 		return;
5063 
5064 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5065 	__clear_bit(hlid, &wl->ap_ps_map);
5066 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5067 
5068 	/*
5069 	 * save the last used PN in the private part of ieee80211_sta,
5070 	 * in case of recovery/suspend
5071 	 */
5072 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5073 
5074 	wl12xx_free_link(wl, wlvif, &hlid);
5075 	wl->active_sta_count--;
5076 
5077 	/*
5078 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5079 	 * chance to return STA-buffered packets before complaining.
5080 	 */
5081 	if (wl->active_sta_count == 0)
5082 		wl12xx_rearm_tx_watchdog_locked(wl);
5083 }
5084 
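/* Add a peer in AP mode: allocate an HLID and register it with the FW. */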
5085 static int wl12xx_sta_add(struct wl1271 *wl,
5086 			  struct wl12xx_vif *wlvif,
5087 			  struct ieee80211_sta *sta)
5088 {
5089 	struct wl1271_station *wl_sta;
5090 	int ret = 0;
5091 	u8 hlid;
5092 
5093 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5094 
5095 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5096 	if (ret < 0)
5097 		return ret;
5098 
5099 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5100 	hlid = wl_sta->hlid;
5101 
5102 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5103 	if (ret < 0)
5104 		wl1271_free_sta(wl, wlvif, hlid);
5105 
5106 	return ret;
5107 }
5108 
5109 static int wl12xx_sta_remove(struct wl1271 *wl,
5110 			     struct wl12xx_vif *wlvif,
5111 			     struct ieee80211_sta *sta)
5112 {
5113 	struct wl1271_station *wl_sta;
5114 	int ret = 0, id;
5115 
5116 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5117 
5118 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5119 	id = wl_sta->hlid;
5120 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5121 		return -EINVAL;
5122 
5123 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5124 	if (ret < 0)
5125 		return ret;
5126 
5127 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5128 	return ret;
5129 }
5130 
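/* Start a ROC on this vif's role, unless some role is already in ROC. */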
5131 static void wlcore_roc_if_possible(struct wl1271 *wl,
5132 				   struct wl12xx_vif *wlvif)
5133 {
5134 	if (find_first_bit(wl->roc_map,
5135 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5136 		return;
5137 
5138 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5139 		return;
5140 
5141 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5142 }
5143 
5144 /*
5145  * when wl_sta is NULL, we treat this call as if coming from a
5146  * pending auth reply.
5147  * wl->mutex must be taken and the FW must be awake when the call
5148  * takes place.
5149  */
5150 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5151 			      struct wl1271_station *wl_sta, bool in_conn)
5152 {
5153 	if (in_conn) {
5154 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5155 			return;
5156 
5157 		if (!wlvif->ap_pending_auth_reply &&
5158 		    !wlvif->inconn_count)
5159 			wlcore_roc_if_possible(wl, wlvif);
5160 
5161 		if (wl_sta) {
5162 			wl_sta->in_connection = true;
5163 			wlvif->inconn_count++;
5164 		} else {
5165 			wlvif->ap_pending_auth_reply = true;
5166 		}
5167 	} else {
5168 		if (wl_sta && !wl_sta->in_connection)
5169 			return;
5170 
5171 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5172 			return;
5173 
5174 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5175 			return;
5176 
5177 		if (wl_sta) {
5178 			wl_sta->in_connection = false;
5179 			wlvif->inconn_count--;
5180 		} else {
5181 			wlvif->ap_pending_auth_reply = false;
5182 		}
5183 
5184 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5185 		    test_bit(wlvif->role_id, wl->roc_map))
5186 			wl12xx_croc(wl, wlvif->role_id);
5187 	}
5188 }
5189 
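/*
 * Dispatch mac80211 station state transitions: add/remove/authorize
 * peers in AP mode, and track authorization and ROC state for the
 * STA role.
 */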
5190 static int wl12xx_update_sta_state(struct wl1271 *wl,
5191 				   struct wl12xx_vif *wlvif,
5192 				   struct ieee80211_sta *sta,
5193 				   enum ieee80211_sta_state old_state,
5194 				   enum ieee80211_sta_state new_state)
5195 {
5196 	struct wl1271_station *wl_sta;
5197 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5198 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5199 	int ret;
5200 
5201 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5202 
5203 	/* Add station (AP mode) */
5204 	if (is_ap &&
5205 	    old_state == IEEE80211_STA_NOTEXIST &&
5206 	    new_state == IEEE80211_STA_NONE) {
5207 		ret = wl12xx_sta_add(wl, wlvif, sta);
5208 		if (ret)
5209 			return ret;
5210 
5211 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5212 	}
5213 
5214 	/* Remove station (AP mode) */
5215 	if (is_ap &&
5216 	    old_state == IEEE80211_STA_NONE &&
5217 	    new_state == IEEE80211_STA_NOTEXIST) {
5218 		/* must not fail */
5219 		wl12xx_sta_remove(wl, wlvif, sta);
5220 
5221 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5222 	}
5223 
5224 	/* Authorize station (AP mode) */
5225 	if (is_ap &&
5226 	    new_state == IEEE80211_STA_AUTHORIZED) {
5227 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5228 		if (ret < 0)
5229 			return ret;
5230 
5231 		/* reconfigure rates */
5232 		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5233 		if (ret < 0)
5234 			return ret;
5235 
5236 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5237 						     wl_sta->hlid);
5238 		if (ret)
5239 			return ret;
5240 
5241 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5242 	}
5243 
5244 	/* Authorize station */
5245 	if (is_sta &&
5246 	    new_state == IEEE80211_STA_AUTHORIZED) {
5247 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5248 		ret = wl12xx_set_authorized(wl, wlvif);
5249 		if (ret)
5250 			return ret;
5251 	}
5252 
5253 	if (is_sta &&
5254 	    old_state == IEEE80211_STA_AUTHORIZED &&
5255 	    new_state == IEEE80211_STA_ASSOC) {
5256 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5257 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5258 	}
5259 
5260 	/* save seq number on disassoc (suspend) */
5261 	if (is_sta &&
5262 	    old_state == IEEE80211_STA_ASSOC &&
5263 	    new_state == IEEE80211_STA_AUTH) {
5264 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5265 		wlvif->total_freed_pkts = 0;
5266 	}
5267 
5268 	/* restore seq number on assoc (resume) */
5269 	if (is_sta &&
5270 	    old_state == IEEE80211_STA_AUTH &&
5271 	    new_state == IEEE80211_STA_ASSOC) {
5272 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5273 	}
5274 
5275 	/* clear ROCs on failure or authorization */
5276 	if (is_sta &&
5277 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5278 	     new_state == IEEE80211_STA_NOTEXIST)) {
5279 		if (test_bit(wlvif->role_id, wl->roc_map))
5280 			wl12xx_croc(wl, wlvif->role_id);
5281 	}
5282 
5283 	if (is_sta &&
5284 	    old_state == IEEE80211_STA_NOTEXIST &&
5285 	    new_state == IEEE80211_STA_NONE) {
5286 		if (find_first_bit(wl->roc_map,
5287 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5288 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5289 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5290 				   wlvif->band, wlvif->channel);
5291 		}
5292 	}
5293 	return 0;
5294 }
5295 
5296 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5297 			       struct ieee80211_vif *vif,
5298 			       struct ieee80211_sta *sta,
5299 			       enum ieee80211_sta_state old_state,
5300 			       enum ieee80211_sta_state new_state)
5301 {
5302 	struct wl1271 *wl = hw->priv;
5303 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5304 	int ret;
5305 
5306 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5307 		     sta->aid, old_state, new_state);
5308 
5309 	mutex_lock(&wl->mutex);
5310 
5311 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5312 		ret = -EBUSY;
5313 		goto out;
5314 	}
5315 
5316 	ret = pm_runtime_get_sync(wl->dev);
5317 	if (ret < 0) {
5318 		pm_runtime_put_noidle(wl->dev);
5319 		goto out;
5320 	}
5321 
5322 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5323 
5324 	pm_runtime_mark_last_busy(wl->dev);
5325 	pm_runtime_put_autosuspend(wl->dev);
5326 out:
5327 	mutex_unlock(&wl->mutex);
5328 	if (new_state < old_state)
5329 		return 0;
5330 	return ret;
5331 }
5332 
5333 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5334 				  struct ieee80211_vif *vif,
5335 				  struct ieee80211_ampdu_params *params)
5336 {
5337 	struct wl1271 *wl = hw->priv;
5338 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5339 	int ret;
5340 	u8 hlid, *ba_bitmap;
5341 	struct ieee80211_sta *sta = params->sta;
5342 	enum ieee80211_ampdu_mlme_action action = params->action;
5343 	u16 tid = params->tid;
5344 	u16 *ssn = &params->ssn;
5345 
5346 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5347 		     tid);
5348 
5349 	/* sanity check - the fields in FW are only 8bits wide */
5350 	/* sanity check - the fields in FW are only 8 bits wide */
5351 		return -ENOTSUPP;
5352 
5353 	mutex_lock(&wl->mutex);
5354 
5355 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5356 		ret = -EAGAIN;
5357 		goto out;
5358 	}
5359 
5360 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5361 		hlid = wlvif->sta.hlid;
5362 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5363 		struct wl1271_station *wl_sta;
5364 
5365 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5366 		hlid = wl_sta->hlid;
5367 	} else {
5368 		ret = -EINVAL;
5369 		goto out;
5370 	}
5371 
5372 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5373 
5374 	ret = pm_runtime_get_sync(wl->dev);
5375 	if (ret < 0) {
5376 		pm_runtime_put_noidle(wl->dev);
5377 		goto out;
5378 	}
5379 
5380 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5381 		     tid, action);
5382 
5383 	switch (action) {
5384 	case IEEE80211_AMPDU_RX_START:
5385 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5386 			ret = -ENOTSUPP;
5387 			break;
5388 		}
5389 
5390 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5391 			ret = -EBUSY;
5392 			wl1271_error("exceeded max RX BA sessions");
5393 			break;
5394 		}
5395 
5396 		if (*ba_bitmap & BIT(tid)) {
5397 			ret = -EINVAL;
5398 			wl1271_error("cannot enable RX BA session on active "
5399 				     "tid: %d", tid);
5400 			break;
5401 		}
5402 
5403 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5404 				hlid,
5405 				params->buf_size);
5406 
5407 		if (!ret) {
5408 			*ba_bitmap |= BIT(tid);
5409 			wl->ba_rx_session_count++;
5410 		}
5411 		break;
5412 
5413 	case IEEE80211_AMPDU_RX_STOP:
5414 		if (!(*ba_bitmap & BIT(tid))) {
5415 			/*
5416 			 * this happens on reconfig - so only output a debug
5417 			 * message for now, and don't fail the function.
5418 			 */
5419 			wl1271_debug(DEBUG_MAC80211,
5420 				     "no active RX BA session on tid: %d",
5421 				     tid);
5422 			ret = 0;
5423 			break;
5424 		}
5425 
5426 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5427 							 hlid, 0);
5428 		if (!ret) {
5429 			*ba_bitmap &= ~BIT(tid);
5430 			wl->ba_rx_session_count--;
5431 		}
5432 		break;
5433 
5434 	/*
5435 	 * The BA initiator session is managed by the FW independently.
5436 	 * Fall through here on purpose for all TX AMPDU actions.
5437 	 */
5438 	case IEEE80211_AMPDU_TX_START:
5439 	case IEEE80211_AMPDU_TX_STOP_CONT:
5440 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5441 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5442 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5443 		ret = -EINVAL;
5444 		break;
5445 
5446 	default:
5447 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5448 		ret = -EINVAL;
5449 	}
5450 
5451 	pm_runtime_mark_last_busy(wl->dev);
5452 	pm_runtime_put_autosuspend(wl->dev);
5453 
5454 out:
5455 	mutex_unlock(&wl->mutex);
5456 
5457 	return ret;
5458 }
5459 
5460 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5461 				   struct ieee80211_vif *vif,
5462 				   const struct cfg80211_bitrate_mask *mask)
5463 {
5464 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5465 	struct wl1271 *wl = hw->priv;
5466 	int i, ret = 0;
5467 
5468 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5469 		mask->control[NL80211_BAND_2GHZ].legacy,
5470 		mask->control[NL80211_BAND_5GHZ].legacy);
5471 
5472 	mutex_lock(&wl->mutex);
5473 
5474 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5475 		wlvif->bitrate_masks[i] =
5476 			wl1271_tx_enabled_rates_get(wl,
5477 						    mask->control[i].legacy,
5478 						    i);
5479 
5480 	if (unlikely(wl->state != WLCORE_STATE_ON))
5481 		goto out;
5482 
5483 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5484 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5485 
5486 		ret = pm_runtime_get_sync(wl->dev);
5487 		if (ret < 0) {
5488 			pm_runtime_put_noidle(wl->dev);
5489 			goto out;
5490 		}
5491 
5492 		wl1271_set_band_rate(wl, wlvif);
5493 		wlvif->basic_rate =
5494 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5495 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5496 
5497 		pm_runtime_mark_last_busy(wl->dev);
5498 		pm_runtime_put_autosuspend(wl->dev);
5499 	}
5500 out:
5501 	mutex_unlock(&wl->mutex);
5502 
5503 	return ret;
5504 }
5505 
5506 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5507 				     struct ieee80211_vif *vif,
5508 				     struct ieee80211_channel_switch *ch_switch)
5509 {
5510 	struct wl1271 *wl = hw->priv;
5511 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5512 	int ret;
5513 
5514 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5515 
5516 	wl1271_tx_flush(wl);
5517 
5518 	mutex_lock(&wl->mutex);
5519 
5520 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5521 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5522 			ieee80211_chswitch_done(vif, false);
5523 		goto out;
5524 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5525 		goto out;
5526 	}
5527 
5528 	ret = pm_runtime_get_sync(wl->dev);
5529 	if (ret < 0) {
5530 		pm_runtime_put_noidle(wl->dev);
5531 		goto out;
5532 	}
5533 
5534 	/* TODO: change mac80211 to pass vif as param */
5535 
5536 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5537 		unsigned long delay_usec;
5538 
5539 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5540 		if (ret)
5541 			goto out_sleep;
5542 
5543 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5544 
5545 		/* indicate failure 5 seconds after channel switch time */
5546 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5547 			ch_switch->count;
5548 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5549 					     usecs_to_jiffies(delay_usec) +
5550 					     msecs_to_jiffies(5000));
5551 	}
5552 
5553 out_sleep:
5554 	pm_runtime_mark_last_busy(wl->dev);
5555 	pm_runtime_put_autosuspend(wl->dev);
5556 
5557 out:
5558 	mutex_unlock(&wl->mutex);
5559 }
5560 
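/* Find an IE in our own beacon (NULL if no beacon or the IE is absent). */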
5561 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5562 					struct wl12xx_vif *wlvif,
5563 					u8 eid)
5564 {
5565 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5566 	struct sk_buff *beacon =
5567 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5568 
5569 	if (!beacon)
5570 		return NULL;
5571 
5572 	return cfg80211_find_ie(eid,
5573 				beacon->data + ieoffset,
5574 				beacon->len - ieoffset);
5575 }
5576 
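/* Read the CSA countdown from the Channel Switch IE of our own beacon. */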
5577 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5578 				u8 *csa_count)
5579 {
5580 	const u8 *ie;
5581 	const struct ieee80211_channel_sw_ie *ie_csa;
5582 
5583 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5584 	if (!ie)
5585 		return -EINVAL;
5586 
5587 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5588 	*csa_count = ie_csa->count;
5589 
5590 	return 0;
5591 }
5592 
5593 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5594 					    struct ieee80211_vif *vif,
5595 					    struct cfg80211_chan_def *chandef)
5596 {
5597 	struct wl1271 *wl = hw->priv;
5598 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5599 	struct ieee80211_channel_switch ch_switch = {
5600 		.block_tx = true,
5601 		.chandef = *chandef,
5602 	};
5603 	int ret;
5604 
5605 	wl1271_debug(DEBUG_MAC80211,
5606 		     "mac80211 channel switch beacon (role %d)",
5607 		     wlvif->role_id);
5608 
5609 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5610 	if (ret < 0) {
5611 		wl1271_error("error getting beacon (for CSA counter)");
5612 		return;
5613 	}
5614 
5615 	mutex_lock(&wl->mutex);
5616 
5617 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5618 		ret = -EBUSY;
5619 		goto out;
5620 	}
5621 
5622 	ret = pm_runtime_get_sync(wl->dev);
5623 	if (ret < 0) {
5624 		pm_runtime_put_noidle(wl->dev);
5625 		goto out;
5626 	}
5627 
5628 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5629 	if (ret)
5630 		goto out_sleep;
5631 
5632 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5633 
5634 out_sleep:
5635 	pm_runtime_mark_last_busy(wl->dev);
5636 	pm_runtime_put_autosuspend(wl->dev);
5637 out:
5638 	mutex_unlock(&wl->mutex);
5639 }
5640 
5641 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5642 			    u32 queues, bool drop)
5643 {
5644 	struct wl1271 *wl = hw->priv;
5645 
5646 	wl1271_tx_flush(wl);
5647 }
5648 
5649 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5650 				       struct ieee80211_vif *vif,
5651 				       struct ieee80211_channel *chan,
5652 				       int duration,
5653 				       enum ieee80211_roc_type type)
5654 {
5655 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5656 	struct wl1271 *wl = hw->priv;
5657 	int channel, active_roc, ret = 0;
5658 
5659 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5660 
5661 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5662 		     channel, wlvif->role_id);
5663 
5664 	mutex_lock(&wl->mutex);
5665 
5666 	if (unlikely(wl->state != WLCORE_STATE_ON))
5667 		goto out;
5668 
5669 	/* return EBUSY if we can't ROC right now */
5670 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5671 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5672 		wl1271_warning("active roc on role %d", active_roc);
5673 		ret = -EBUSY;
5674 		goto out;
5675 	}
5676 
5677 	ret = pm_runtime_get_sync(wl->dev);
5678 	if (ret < 0) {
5679 		pm_runtime_put_noidle(wl->dev);
5680 		goto out;
5681 	}
5682 
5683 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5684 	if (ret < 0)
5685 		goto out_sleep;
5686 
5687 	wl->roc_vif = vif;
5688 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5689 				     msecs_to_jiffies(duration));
5690 out_sleep:
5691 	pm_runtime_mark_last_busy(wl->dev);
5692 	pm_runtime_put_autosuspend(wl->dev);
5693 out:
5694 	mutex_unlock(&wl->mutex);
5695 	return ret;
5696 }
5697 
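/*
 * Complete a pending ROC. Must be called with wl->mutex held and the
 * FW awake (see wlcore_roc_completed()).
 */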
5698 static int __wlcore_roc_completed(struct wl1271 *wl)
5699 {
5700 	struct wl12xx_vif *wlvif;
5701 	int ret;
5702 
5703 	/* already completed */
5704 	if (unlikely(!wl->roc_vif))
5705 		return 0;
5706 
5707 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5708 
5709 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5710 		return -EBUSY;
5711 
5712 	ret = wl12xx_stop_dev(wl, wlvif);
5713 	if (ret < 0)
5714 		return ret;
5715 
5716 	wl->roc_vif = NULL;
5717 
5718 	return 0;
5719 }
5720 
5721 static int wlcore_roc_completed(struct wl1271 *wl)
5722 {
5723 	int ret;
5724 
5725 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5726 
5727 	mutex_lock(&wl->mutex);
5728 
5729 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5730 		ret = -EBUSY;
5731 		goto out;
5732 	}
5733 
5734 	ret = pm_runtime_get_sync(wl->dev);
5735 	if (ret < 0) {
5736 		pm_runtime_put_noidle(wl->dev);
5737 		goto out;
5738 	}
5739 
5740 	ret = __wlcore_roc_completed(wl);
5741 
5742 	pm_runtime_mark_last_busy(wl->dev);
5743 	pm_runtime_put_autosuspend(wl->dev);
5744 out:
5745 	mutex_unlock(&wl->mutex);
5746 
5747 	return ret;
5748 }
5749 
5750 static void wlcore_roc_complete_work(struct work_struct *work)
5751 {
5752 	struct delayed_work *dwork;
5753 	struct wl1271 *wl;
5754 	int ret;
5755 
5756 	dwork = to_delayed_work(work);
5757 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5758 
5759 	ret = wlcore_roc_completed(wl);
5760 	if (!ret)
5761 		ieee80211_remain_on_channel_expired(wl->hw);
5762 }
5763 
5764 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5765 					      struct ieee80211_vif *vif)
5766 {
5767 	struct wl1271 *wl = hw->priv;
5768 
5769 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5770 
5771 	/* TODO: per-vif */
5772 	wl1271_tx_flush(wl);
5773 
5774 	/*
5775 	 * we can't just flush_work here, because it might deadlock
5776 	 * (as we might get called from the same workqueue)
5777 	 */
5778 	cancel_delayed_work_sync(&wl->roc_complete_work);
5779 	wlcore_roc_completed(wl);
5780 
5781 	return 0;
5782 }
5783 
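/*
 * mac80211 rate-control update callback. It runs in atomic context, so
 * on a bandwidth change the new values are only recorded in the vif and
 * the actual reconfiguration is deferred to wlvif->rc_update_work.
 */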
5784 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5785 				    struct ieee80211_vif *vif,
5786 				    struct ieee80211_sta *sta,
5787 				    u32 changed)
5788 {
5789 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5790 
5791 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5792 
5793 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5794 		return;
5795 
5796 	/* this callback is atomic, so schedule a new work */
5797 	wlvif->rc_update_bw = sta->bandwidth;
5798 	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5799 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5800 }
5801 
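/*
 * Fill in station statistics for mac80211: wake the chip and query the
 * firmware for the average RSSI (ACX), reporting it as the signal level
 * in dBm.
 */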
5802 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5803 				     struct ieee80211_vif *vif,
5804 				     struct ieee80211_sta *sta,
5805 				     struct station_info *sinfo)
5806 {
5807 	struct wl1271 *wl = hw->priv;
5808 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5809 	s8 rssi_dbm;
5810 	int ret;
5811 
5812 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5813 
5814 	mutex_lock(&wl->mutex);
5815 
5816 	if (unlikely(wl->state != WLCORE_STATE_ON))
5817 		goto out;
5818 
5819 	ret = pm_runtime_get_sync(wl->dev);
5820 	if (ret < 0) {
5821 		pm_runtime_put_noidle(wl->dev);
5822 		goto out_sleep;
5823 	}
5824 
5825 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5826 	if (ret < 0)
5827 		goto out_sleep;
5828 
5829 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5830 	sinfo->signal = rssi_dbm;
5831 
5832 out_sleep:
5833 	pm_runtime_mark_last_busy(wl->dev);
5834 	pm_runtime_put_autosuspend(wl->dev);
5835 
5836 out:
5837 	mutex_unlock(&wl->mutex);
5838 }
5839 
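/*
 * Report the expected throughput for a station, taken from the rate the
 * firmware reported for the station's link and converted from Mbps to
 * Kbps as mac80211 expects.
 */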
5840 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5841 					     struct ieee80211_sta *sta)
5842 {
5843 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5844 	struct wl1271 *wl = hw->priv;
5845 	u8 hlid = wl_sta->hlid;
5846 
5847 	/* return in units of Kbps */
5848 	return (wl->links[hlid].fw_rate_mbps * 1000);
5849 }
5850 
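/*
 * mac80211 tx_frames_pending callback: frames count as pending if they
 * sit in the driver TX queues or are still held by the firmware.
 */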
5851 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5852 {
5853 	struct wl1271 *wl = hw->priv;
5854 	bool ret = false;
5855 
5856 	mutex_lock(&wl->mutex);
5857 
5858 	if (unlikely(wl->state != WLCORE_STATE_ON))
5859 		goto out;
5860 
5861 	/* packets are considered pending if in the TX queue or the FW */
5862 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5863 out:
5864 	mutex_unlock(&wl->mutex);
5865 
5866 	return ret;
5867 }
5868 
5869 /* can't be const, mac80211 writes to this */
5870 static struct ieee80211_rate wl1271_rates[] = {
5871 	{ .bitrate = 10,
5872 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5873 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5874 	{ .bitrate = 20,
5875 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5876 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5877 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5878 	{ .bitrate = 55,
5879 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5880 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5881 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5882 	{ .bitrate = 110,
5883 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5884 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5885 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5886 	{ .bitrate = 60,
5887 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5888 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5889 	{ .bitrate = 90,
5890 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5891 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5892 	{ .bitrate = 120,
5893 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5894 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5895 	{ .bitrate = 180,
5896 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5897 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5898 	{ .bitrate = 240,
5899 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5900 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5901 	{ .bitrate = 360,
5902 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5903 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5904 	{ .bitrate = 480,
5905 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5906 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5907 	{ .bitrate = 540,
5908 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5909 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5910 };
5911 
5912 /* can't be const, mac80211 writes to this */
5913 static struct ieee80211_channel wl1271_channels[] = {
5914 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5915 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5916 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5917 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5918 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5919 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5920 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5921 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5922 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5923 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5924 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5925 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5926 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5927 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5928 };
5929 
5930 /* can't be const, mac80211 writes to this */
5931 static struct ieee80211_supported_band wl1271_band_2ghz = {
5932 	.channels = wl1271_channels,
5933 	.n_channels = ARRAY_SIZE(wl1271_channels),
5934 	.bitrates = wl1271_rates,
5935 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5936 };
5937 
5938 /* 5 GHz data rates for WL1273 */
5939 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5940 	{ .bitrate = 60,
5941 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5942 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5943 	{ .bitrate = 90,
5944 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5945 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5946 	{ .bitrate = 120,
5947 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5948 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5949 	{ .bitrate = 180,
5950 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5951 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5952 	{ .bitrate = 240,
5953 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5954 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5955 	{ .bitrate = 360,
5956 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5957 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5958 	{ .bitrate = 480,
5959 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5960 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5961 	{ .bitrate = 540,
5962 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5963 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5964 };
5965 
5966 /* 5 GHz band channels for WL1273 */
5967 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5968 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5969 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5970 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5971 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5972 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5973 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5974 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5975 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5976 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5977 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5987 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5988 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5989 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5990 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5991 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5992 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5993 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5994 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5995 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5996 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5997 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5998 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5999 };
6000 
6001 static struct ieee80211_supported_band wl1271_band_5ghz = {
6002 	.channels = wl1271_channels_5ghz,
6003 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6004 	.bitrates = wl1271_rates_5ghz,
6005 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6006 };
6007 
6008 static const struct ieee80211_ops wl1271_ops = {
6009 	.start = wl1271_op_start,
6010 	.stop = wlcore_op_stop,
6011 	.add_interface = wl1271_op_add_interface,
6012 	.remove_interface = wl1271_op_remove_interface,
6013 	.change_interface = wl12xx_op_change_interface,
6014 #ifdef CONFIG_PM
6015 	.suspend = wl1271_op_suspend,
6016 	.resume = wl1271_op_resume,
6017 #endif
6018 	.config = wl1271_op_config,
6019 	.prepare_multicast = wl1271_op_prepare_multicast,
6020 	.configure_filter = wl1271_op_configure_filter,
6021 	.tx = wl1271_op_tx,
6022 	.set_key = wlcore_op_set_key,
6023 	.hw_scan = wl1271_op_hw_scan,
6024 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6025 	.sched_scan_start = wl1271_op_sched_scan_start,
6026 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6027 	.bss_info_changed = wl1271_op_bss_info_changed,
6028 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6029 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6030 	.conf_tx = wl1271_op_conf_tx,
6031 	.get_tsf = wl1271_op_get_tsf,
6032 	.get_survey = wl1271_op_get_survey,
6033 	.sta_state = wl12xx_op_sta_state,
6034 	.ampdu_action = wl1271_op_ampdu_action,
6035 	.tx_frames_pending = wl1271_tx_frames_pending,
6036 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6037 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6038 	.channel_switch = wl12xx_op_channel_switch,
6039 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6040 	.flush = wlcore_op_flush,
6041 	.remain_on_channel = wlcore_op_remain_on_channel,
6042 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6043 	.add_chanctx = wlcore_op_add_chanctx,
6044 	.remove_chanctx = wlcore_op_remove_chanctx,
6045 	.change_chanctx = wlcore_op_change_chanctx,
6046 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6047 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6048 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6049 	.sta_rc_update = wlcore_op_sta_rc_update,
6050 	.sta_statistics = wlcore_op_sta_statistics,
6051 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6052 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6053 };
6054 
6055 
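/*
 * Translate a HW RX rate value into the rate index of the given band's
 * bitrate table, using the wl->band_rate_to_idx mapping. Invalid or
 * unsupported rates are logged and mapped to index 0.
 */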
6056 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6057 {
6058 	u8 idx;
6059 
6060 	BUG_ON(band >= 2);
6061 
6062 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6063 		wl1271_error("Illegal RX rate from HW: %d", rate);
6064 		return 0;
6065 	}
6066 
6067 	idx = wl->band_rate_to_idx[band][rate];
6068 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6069 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6070 		return 0;
6071 	}
6072 
6073 	return idx;
6074 }
6075 
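/*
 * Derive the wiphy's MAC addresses from an OUI + NIC base: each of the
 * num_mac_addr addresses increments the NIC part by one. If the chip
 * provides one address less than WLCORE_NUM_MAC_ADDRESSES, the last
 * slot reuses the first address with the locally-administered bit set.
 */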
6076 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6077 {
6078 	int i;
6079 
6080 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6081 		     oui, nic);
6082 
6083 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6084 		wl1271_warning("NIC part of the MAC address wraps around!");
6085 
6086 	for (i = 0; i < wl->num_mac_addr; i++) {
6087 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6088 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6089 		wl->addresses[i].addr[2] = (u8) oui;
6090 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6091 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6092 		wl->addresses[i].addr[5] = (u8) nic;
6093 		nic++;
6094 	}
6095 
6096 	/* we may be at most one address short */
6097 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6098 
6099 	/*
6100 	 * copy the first address, turn on its LAA bit and use the
6101 	 * result as the last address.
6102 	 */
6103 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6104 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6105 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6106 		       sizeof(wl->addresses[0]));
6107 		/* LAA bit */
6108 		wl->addresses[idx].addr[0] |= BIT(1);
6109 	}
6110 
6111 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6112 	wl->hw->wiphy->addresses = wl->addresses;
6113 }
6114 
6115 static int wl12xx_get_hw_info(struct wl1271 *wl)
6116 {
6117 	int ret;
6118 
6119 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6120 	if (ret < 0)
6121 		goto out;
6122 
6123 	wl->fuse_oui_addr = 0;
6124 	wl->fuse_nic_addr = 0;
6125 
6126 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6127 	if (ret < 0)
6128 		goto out;
6129 
6130 	if (wl->ops->get_mac)
6131 		ret = wl->ops->get_mac(wl);
6132 
6133 out:
6134 	return ret;
6135 }
6136 
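/*
 * Register the hw with mac80211. The MAC address base is taken from the
 * NVS file when present, falling back to the fuse address when the NVS
 * address is zero, and to the TI OUI with a random NIC part when the
 * de:ad:be:ef:00:00 placeholder from a default NVS file is found and no
 * fuse address is available either.
 */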
6137 static int wl1271_register_hw(struct wl1271 *wl)
6138 {
6139 	int ret;
6140 	u32 oui_addr = 0, nic_addr = 0;
6141 	struct platform_device *pdev = wl->pdev;
6142 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6143 
6144 	if (wl->mac80211_registered)
6145 		return 0;
6146 
6147 	if (wl->nvs_len >= 12) {
6148 		/* NOTE: The wl->nvs->nvs element must come first; to
6149 		 * simplify the casting, we assume it is at the
6150 		 * beginning of the wl->nvs structure.
6151 		 */
6152 		u8 *nvs_ptr = (u8 *)wl->nvs;
6153 
6154 		oui_addr =
6155 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6156 		nic_addr =
6157 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6158 	}
6159 
6160 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
6161 	if (oui_addr == 0 && nic_addr == 0) {
6162 		oui_addr = wl->fuse_oui_addr;
6163 		/* fuse has the BD_ADDR; the WLAN addresses are the next two */
6164 		nic_addr = wl->fuse_nic_addr + 1;
6165 	}
6166 
6167 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6168 		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6169 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6170 			wl1271_warning("This default nvs file can be removed from the file system");
6171 		} else {
6172 			wl1271_warning("Your device performance is not optimized.");
6173 			wl1271_warning("Please use the calibrator tool to configure your device.");
6174 		}
6175 
6176 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6177 			wl1271_warning("Fuse mac address is zero. using random mac");
6178 			/* Use TI oui and a random nic */
6179 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6180 			nic_addr = get_random_int();
6181 		} else {
6182 			oui_addr = wl->fuse_oui_addr;
6183 			/* fuse has the BD_ADDR; the WLAN addresses are the next two */
6184 			nic_addr = wl->fuse_nic_addr + 1;
6185 		}
6186 	}
6187 
6188 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6189 
6190 	ret = ieee80211_register_hw(wl->hw);
6191 	if (ret < 0) {
6192 		wl1271_error("unable to register mac80211 hw: %d", ret);
6193 		goto out;
6194 	}
6195 
6196 	wl->mac80211_registered = true;
6197 
6198 	wl1271_debugfs_init(wl);
6199 
6200 	wl1271_notice("loaded");
6201 
6202 out:
6203 	return ret;
6204 }
6205 
6206 static void wl1271_unregister_hw(struct wl1271 *wl)
6207 {
6208 	if (wl->plt)
6209 		wl1271_plt_stop(wl);
6210 
6211 	ieee80211_unregister_hw(wl->hw);
6212 	wl->mac80211_registered = false;
6213 
6214 }
6215 
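/*
 * One-time setup of the ieee80211_hw/wiphy capabilities: TX headroom,
 * hardware flags, cipher suites, supported interface modes, scan
 * limits, per-device copies of the 2.4 and 5 GHz band structures,
 * queue layout and probe-response offload.
 */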
6216 static int wl1271_init_ieee80211(struct wl1271 *wl)
6217 {
6218 	int i;
6219 	static const u32 cipher_suites[] = {
6220 		WLAN_CIPHER_SUITE_WEP40,
6221 		WLAN_CIPHER_SUITE_WEP104,
6222 		WLAN_CIPHER_SUITE_TKIP,
6223 		WLAN_CIPHER_SUITE_CCMP,
6224 		WL1271_CIPHER_SUITE_GEM,
6225 		WLAN_CIPHER_SUITE_AES_CMAC,
6226 	};
6227 
6228 	/* The tx descriptor buffer */
6229 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6230 
6231 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6232 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6233 
6234 	/* unit us */
6235 	/* FIXME: find a proper value */
6236 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6237 
6238 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6239 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6240 	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6241 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6242 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6243 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6244 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6245 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6246 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6247 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6248 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6249 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6250 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6251 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6252 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6253 
6254 	wl->hw->wiphy->cipher_suites = cipher_suites;
6255 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6256 
6257 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6258 					 BIT(NL80211_IFTYPE_AP) |
6259 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6260 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6261 #ifdef CONFIG_MAC80211_MESH
6262 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6263 #endif
6264 					 BIT(NL80211_IFTYPE_P2P_GO);
6265 
6266 	wl->hw->wiphy->max_scan_ssids = 1;
6267 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6268 	wl->hw->wiphy->max_match_sets = 16;
6269 	/*
6270 	 * Maximum length of elements in scanning probe request templates
6271 	 * should be the maximum length possible for a template, without
6272 	 * the IEEE80211 header of the template
6273 	 */
6274 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6275 			sizeof(struct ieee80211_header);
6276 
6277 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6278 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6279 		sizeof(struct ieee80211_header);
6280 
6281 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6282 
6283 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6284 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6285 				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6286 				WIPHY_FLAG_IBSS_RSN;
6287 
6288 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6289 
6290 	/* make sure all our channels fit in the scanned_ch bitmask */
6291 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6292 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6293 		     WL1271_MAX_CHANNELS);
6294 	/*
6295 	 * clear channel flags from the previous usage
6296 	 * and restore max_power & max_antenna_gain values.
6297 	 */
6298 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6299 		wl1271_band_2ghz.channels[i].flags = 0;
6300 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6301 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6302 	}
6303 
6304 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6305 		wl1271_band_5ghz.channels[i].flags = 0;
6306 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6307 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6308 	}
6309 
6310 	/*
6311 	 * We keep local copies of the band structs because we need to
6312 	 * modify them on a per-device basis.
6313 	 */
6314 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6315 	       sizeof(wl1271_band_2ghz));
6316 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6317 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6318 	       sizeof(*wl->ht_cap));
6319 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6320 	       sizeof(wl1271_band_5ghz));
6321 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6322 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6323 	       sizeof(*wl->ht_cap));
6324 
6325 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6326 		&wl->bands[NL80211_BAND_2GHZ];
6327 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6328 		&wl->bands[NL80211_BAND_5GHZ];
6329 
6330 	/*
6331 	 * allow 4 queues per mac address we support +
6332 	 * 1 cab queue per mac + one global offchannel Tx queue
6333 	 */
6334 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6335 
6336 	/* the last queue is the offchannel queue */
6337 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6338 	wl->hw->max_rates = 1;
6339 
6340 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6341 
6342 	/* the FW answers probe-requests in AP-mode */
6343 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6344 	wl->hw->wiphy->probe_resp_offload =
6345 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6346 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6347 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6348 
6349 	/* allowed interface combinations */
6350 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6351 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6352 
6353 	/* register vendor commands */
6354 	wlcore_set_vendor_commands(wl->hw->wiphy);
6355 
6356 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6357 
6358 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6359 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6360 
6361 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6362 
6363 	return 0;
6364 }
6365 
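/*
 * Allocate the ieee80211_hw and the wl1271 state it embeds, including
 * the lower-driver private area, work items, the freezable workqueue,
 * the aggregation buffer, the dummy packet, the FW log page and the
 * mailbox buffer. Returns the hw on success or an ERR_PTR() on failure.
 */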
6366 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6367 				     u32 mbox_size)
6368 {
6369 	struct ieee80211_hw *hw;
6370 	struct wl1271 *wl;
6371 	int i, j, ret;
6372 	unsigned int order;
6373 
6374 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6375 	if (!hw) {
6376 		wl1271_error("could not alloc ieee80211_hw");
6377 		ret = -ENOMEM;
6378 		goto err_hw_alloc;
6379 	}
6380 
6381 	wl = hw->priv;
6382 	memset(wl, 0, sizeof(*wl));
6383 
6384 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6385 	if (!wl->priv) {
6386 		wl1271_error("could not alloc wl priv");
6387 		ret = -ENOMEM;
6388 		goto err_priv_alloc;
6389 	}
6390 
6391 	INIT_LIST_HEAD(&wl->wlvif_list);
6392 
6393 	wl->hw = hw;
6394 
6395 	/*
6396 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6397 	 * We don't allocate any additional resources here, so that's fine.
6398 	 */
6399 	for (i = 0; i < NUM_TX_QUEUES; i++)
6400 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6401 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6402 
6403 	skb_queue_head_init(&wl->deferred_rx_queue);
6404 	skb_queue_head_init(&wl->deferred_tx_queue);
6405 
6406 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6407 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6408 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6409 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6410 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6411 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6412 
6413 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6414 	if (!wl->freezable_wq) {
6415 		ret = -ENOMEM;
6416 		goto err_hw;
6417 	}
6418 
6419 	wl->channel = 0;
6420 	wl->rx_counter = 0;
6421 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6422 	wl->band = NL80211_BAND_2GHZ;
6423 	wl->channel_type = NL80211_CHAN_NO_HT;
6424 	wl->flags = 0;
6425 	wl->sg_enabled = true;
6426 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6427 	wl->recovery_count = 0;
6428 	wl->hw_pg_ver = -1;
6429 	wl->ap_ps_map = 0;
6430 	wl->ap_fw_ps_map = 0;
6431 	wl->quirks = 0;
6432 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6433 	wl->active_sta_count = 0;
6434 	wl->active_link_count = 0;
6435 	wl->fwlog_size = 0;
6436 
6437 	/* The system link is always allocated */
6438 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6439 
6440 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6441 	for (i = 0; i < wl->num_tx_desc; i++)
6442 		wl->tx_frames[i] = NULL;
6443 
6444 	spin_lock_init(&wl->wl_lock);
6445 
6446 	wl->state = WLCORE_STATE_OFF;
6447 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6448 	mutex_init(&wl->mutex);
6449 	mutex_init(&wl->flush_mutex);
6450 	init_completion(&wl->nvs_loading_complete);
6451 
6452 	order = get_order(aggr_buf_size);
6453 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6454 	if (!wl->aggr_buf) {
6455 		ret = -ENOMEM;
6456 		goto err_wq;
6457 	}
6458 	wl->aggr_buf_size = aggr_buf_size;
6459 
6460 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6461 	if (!wl->dummy_packet) {
6462 		ret = -ENOMEM;
6463 		goto err_aggr;
6464 	}
6465 
6466 	/* Allocate one page for the FW log */
6467 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6468 	if (!wl->fwlog) {
6469 		ret = -ENOMEM;
6470 		goto err_dummy_packet;
6471 	}
6472 
6473 	wl->mbox_size = mbox_size;
6474 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6475 	if (!wl->mbox) {
6476 		ret = -ENOMEM;
6477 		goto err_fwlog;
6478 	}
6479 
6480 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6481 	if (!wl->buffer_32) {
6482 		ret = -ENOMEM;
6483 		goto err_mbox;
6484 	}
6485 
6486 	return hw;
6487 
6488 err_mbox:
6489 	kfree(wl->mbox);
6490 
6491 err_fwlog:
6492 	free_page((unsigned long)wl->fwlog);
6493 
6494 err_dummy_packet:
6495 	dev_kfree_skb(wl->dummy_packet);
6496 
6497 err_aggr:
6498 	free_pages((unsigned long)wl->aggr_buf, order);
6499 
6500 err_wq:
6501 	destroy_workqueue(wl->freezable_wq);
6502 
6503 err_hw:
6504 	wl1271_debugfs_exit(wl);
6505 	kfree(wl->priv);
6506 
6507 err_priv_alloc:
6508 	ieee80211_free_hw(hw);
6509 
6510 err_hw_alloc:
6511 
6512 	return ERR_PTR(ret);
6513 }
6514 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6515 
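/*
 * Counterpart of wlcore_alloc_hw(): unblock any fwlog readers, free all
 * the buffers allocated there and release the ieee80211_hw.
 */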
6516 int wlcore_free_hw(struct wl1271 *wl)
6517 {
6518 	/* Unblock any fwlog readers */
6519 	mutex_lock(&wl->mutex);
6520 	wl->fwlog_size = -1;
6521 	mutex_unlock(&wl->mutex);
6522 
6523 	wlcore_sysfs_free(wl);
6524 
6525 	kfree(wl->buffer_32);
6526 	kfree(wl->mbox);
6527 	free_page((unsigned long)wl->fwlog);
6528 	dev_kfree_skb(wl->dummy_packet);
6529 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6530 
6531 	wl1271_debugfs_exit(wl);
6532 
6533 	vfree(wl->fw);
6534 	wl->fw = NULL;
6535 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6536 	kfree(wl->nvs);
6537 	wl->nvs = NULL;
6538 
6539 	kfree(wl->raw_fw_status);
6540 	kfree(wl->fw_status);
6541 	kfree(wl->tx_res_if);
6542 	destroy_workqueue(wl->freezable_wq);
6543 
6544 	kfree(wl->priv);
6545 	ieee80211_free_hw(wl->hw);
6546 
6547 	return 0;
6548 }
6549 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6550 
6551 #ifdef CONFIG_PM
6552 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6553 	.flags = WIPHY_WOWLAN_ANY,
6554 	.n_patterns = WL1271_MAX_RX_FILTERS,
6555 	.pattern_min_len = 1,
6556 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6557 };
6558 #endif
6559 
6560 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6561 {
6562 	return IRQ_WAKE_THREAD;
6563 }
6564 
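/*
 * Completion callback for the asynchronous NVS firmware request. It
 * copies the NVS data (if any), lets the lower driver set itself up,
 * grabs the IRQ resources, powers the chip on to read the hw info,
 * requests the threaded IRQ and wake IRQ, identifies the chip and
 * finally registers with mac80211 and sysfs.
 */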
6565 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6566 {
6567 	struct wl1271 *wl = context;
6568 	struct platform_device *pdev = wl->pdev;
6569 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6570 	struct resource *res;
6571 
6572 	int ret;
6573 	irq_handler_t hardirq_fn = NULL;
6574 
6575 	if (fw) {
6576 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6577 		if (!wl->nvs) {
6578 			wl1271_error("Could not allocate nvs data");
6579 			goto out;
6580 		}
6581 		wl->nvs_len = fw->size;
6582 	} else if (pdev_data->family->nvs_name) {
6583 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6584 			     pdev_data->family->nvs_name);
6585 		wl->nvs = NULL;
6586 		wl->nvs_len = 0;
6587 	} else {
6588 		wl->nvs = NULL;
6589 		wl->nvs_len = 0;
6590 	}
6591 
6592 	ret = wl->ops->setup(wl);
6593 	if (ret < 0)
6594 		goto out_free_nvs;
6595 
6596 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6597 
6598 	/* adjust some runtime configuration parameters */
6599 	wlcore_adjust_conf(wl);
6600 
6601 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6602 	if (!res) {
6603 		wl1271_error("Could not get IRQ resource");
6604 		goto out_free_nvs;
6605 	}
6606 
6607 	wl->irq = res->start;
6608 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6609 	wl->if_ops = pdev_data->if_ops;
6610 
6611 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6612 		hardirq_fn = wlcore_hardirq;
6613 	else
6614 		wl->irq_flags |= IRQF_ONESHOT;
6615 
6616 	ret = wl12xx_set_power_on(wl);
6617 	if (ret < 0)
6618 		goto out_free_nvs;
6619 
6620 	ret = wl12xx_get_hw_info(wl);
6621 	if (ret < 0) {
6622 		wl1271_error("couldn't get hw info");
6623 		wl1271_power_off(wl);
6624 		goto out_free_nvs;
6625 	}
6626 
6627 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6628 				   wl->irq_flags, pdev->name, wl);
6629 	if (ret < 0) {
6630 		wl1271_error("interrupt configuration failed");
6631 		wl1271_power_off(wl);
6632 		goto out_free_nvs;
6633 	}
6634 
6635 #ifdef CONFIG_PM
6636 	device_init_wakeup(wl->dev, true);
6637 
6638 	ret = enable_irq_wake(wl->irq);
6639 	if (!ret) {
6640 		wl->irq_wake_enabled = true;
6641 		if (pdev_data->pwr_in_suspend)
6642 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6643 	}
6644 
6645 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6646 	if (res) {
6647 		wl->wakeirq = res->start;
6648 		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6649 		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6650 		if (ret)
6651 			wl->wakeirq = -ENODEV;
6652 	} else {
6653 		wl->wakeirq = -ENODEV;
6654 	}
6655 #endif
6656 	disable_irq(wl->irq);
6657 	wl1271_power_off(wl);
6658 
6659 	ret = wl->ops->identify_chip(wl);
6660 	if (ret < 0)
6661 		goto out_irq;
6662 
6663 	ret = wl1271_init_ieee80211(wl);
6664 	if (ret)
6665 		goto out_irq;
6666 
6667 	ret = wl1271_register_hw(wl);
6668 	if (ret)
6669 		goto out_irq;
6670 
6671 	ret = wlcore_sysfs_init(wl);
6672 	if (ret)
6673 		goto out_unreg;
6674 
6675 	wl->initialized = true;
6676 	goto out;
6677 
6678 out_unreg:
6679 	wl1271_unregister_hw(wl);
6680 
6681 out_irq:
6682 	if (wl->wakeirq >= 0)
6683 		dev_pm_clear_wake_irq(wl->dev);
6684 	device_init_wakeup(wl->dev, false);
6685 	free_irq(wl->irq, wl);
6686 
6687 out_free_nvs:
6688 	kfree(wl->nvs);
6689 
6690 out:
6691 	release_firmware(fw);
6692 	complete_all(&wl->nvs_loading_complete);
6693 }
6694 
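/*
 * Runtime PM suspend: put the chip into ELP. Skipped in PLT mode or
 * when ELP is not the configured sleep mode, and refused (-EBUSY) while
 * any in-use vif is not yet in power-save.
 */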
6695 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6696 {
6697 	struct wl1271 *wl = dev_get_drvdata(dev);
6698 	struct wl12xx_vif *wlvif;
6699 	int error;
6700 
6701 	/* We do not enter elp sleep in PLT mode */
6702 	if (wl->plt)
6703 		return 0;
6704 
6705 	/* Nothing to do if no ELP mode requested */
6706 	if (wl->sleep_auth != WL1271_PSM_ELP)
6707 		return 0;
6708 
6709 	wl12xx_for_each_wlvif(wl, wlvif) {
6710 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6711 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6712 			return -EBUSY;
6713 	}
6714 
6715 	wl1271_debug(DEBUG_PSM, "chip to elp");
6716 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6717 	if (error < 0) {
6718 		wl12xx_queue_recovery_work(wl);
6719 
6720 		return error;
6721 	}
6722 
6723 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6724 
6725 	return 0;
6726 }
6727 
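/*
 * Runtime PM resume: wake the chip from ELP by writing the wakeup bit
 * and waiting for the ELP completion (unless an IRQ is already being
 * handled). A timed-out wakeup or a failed write schedules firmware
 * recovery.
 */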
6728 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6729 {
6730 	struct wl1271 *wl = dev_get_drvdata(dev);
6731 	DECLARE_COMPLETION_ONSTACK(compl);
6732 	unsigned long flags;
6733 	int ret;
6734 	unsigned long start_time = jiffies;
6735 	bool pending = false;
6736 	bool recovery = false;
6737 
6738 	/* Nothing to do if no ELP mode requested */
6739 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6740 		return 0;
6741 
6742 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6743 
6744 	spin_lock_irqsave(&wl->wl_lock, flags);
6745 	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
6746 		pending = true;
6747 	else
6748 		wl->elp_compl = &compl;
6749 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6750 
6751 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6752 	if (ret < 0) {
6753 		recovery = true;
6754 		goto err;
6755 	}
6756 
6757 	if (!pending) {
6758 		ret = wait_for_completion_timeout(&compl,
6759 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6760 		if (ret == 0) {
6761 			wl1271_warning("ELP wakeup timeout!");
6762 
6763 			/* Return no error to runtime PM; recovery will handle it */
6764 			ret = 0;
6765 			recovery = true;
6766 			goto err;
6767 		}
6768 	}
6769 
6770 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6771 
6772 	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6773 		     jiffies_to_msecs(jiffies - start_time));
6774 
6775 	return 0;
6776 
6777 err:
6778 	spin_lock_irqsave(&wl->wl_lock, flags);
6779 	wl->elp_compl = NULL;
6780 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6781 
6782 	if (recovery) {
6783 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6784 		wl12xx_queue_recovery_work(wl);
6785 	}
6786 
6787 	return ret;
6788 }
6789 
6790 static const struct dev_pm_ops wlcore_pm_ops = {
6791 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6792 			   wlcore_runtime_resume,
6793 			   NULL)
6794 };
6795 
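/*
 * Common probe path for the wl12xx/wl18xx glue drivers: store the
 * platform device, kick off the asynchronous NVS request (the rest of
 * the initialization continues in wlcore_nvs_cb()) and enable runtime
 * PM with autosuspend.
 */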
6796 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6797 {
6798 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6799 	const char *nvs_name;
6800 	int ret = 0;
6801 
6802 	if (!wl->ops || !wl->ptable || !pdev_data)
6803 		return -EINVAL;
6804 
6805 	wl->dev = &pdev->dev;
6806 	wl->pdev = pdev;
6807 	platform_set_drvdata(pdev, wl);
6808 
6809 	if (pdev_data->family && pdev_data->family->nvs_name) {
6810 		nvs_name = pdev_data->family->nvs_name;
6811 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6812 					      nvs_name, &pdev->dev, GFP_KERNEL,
6813 					      wl, wlcore_nvs_cb);
6814 		if (ret < 0) {
6815 			wl1271_error("request_firmware_nowait failed for %s: %d",
6816 				     nvs_name, ret);
6817 			complete_all(&wl->nvs_loading_complete);
6818 		}
6819 	} else {
6820 		wlcore_nvs_cb(NULL, wl);
6821 	}
6822 
6823 	wl->dev->driver->pm = &wlcore_pm_ops;
6824 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6825 	pm_runtime_use_autosuspend(wl->dev);
6826 	pm_runtime_enable(wl->dev);
6827 
6828 	return ret;
6829 }
6830 EXPORT_SYMBOL_GPL(wlcore_probe);
6831 
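/*
 * Common remove path: wait for the NVS callback to finish, drop the
 * wake IRQ and wakeup support, unregister from mac80211, disable
 * runtime PM, free the IRQ and release all resources via
 * wlcore_free_hw().
 */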
6832 int wlcore_remove(struct platform_device *pdev)
6833 {
6834 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6835 	struct wl1271 *wl = platform_get_drvdata(pdev);
6836 	int error;
6837 
6838 	error = pm_runtime_get_sync(wl->dev);
6839 	if (error < 0)
6840 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6841 
6842 	wl->dev->driver->pm = NULL;
6843 
6844 	if (pdev_data->family && pdev_data->family->nvs_name)
6845 		wait_for_completion(&wl->nvs_loading_complete);
6846 	if (!wl->initialized)
6847 		return 0;
6848 
6849 	if (wl->wakeirq >= 0) {
6850 		dev_pm_clear_wake_irq(wl->dev);
6851 		wl->wakeirq = -ENODEV;
6852 	}
6853 
6854 	device_init_wakeup(wl->dev, false);
6855 
6856 	if (wl->irq_wake_enabled)
6857 		disable_irq_wake(wl->irq);
6858 
6859 	wl1271_unregister_hw(wl);
6860 
6861 	pm_runtime_put_sync(wl->dev);
6862 	pm_runtime_dont_use_autosuspend(wl->dev);
6863 	pm_runtime_disable(wl->dev);
6864 
6865 	free_irq(wl->irq, wl);
6866 	wlcore_free_hw(wl);
6867 
6868 	return 0;
6869 }
6870 EXPORT_SYMBOL_GPL(wlcore_remove);
6871 
6872 u32 wl12xx_debug_level = DEBUG_NONE;
6873 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6874 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6875 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6876 
6877 module_param_named(fwlog, fwlog_param, charp, 0);
6878 MODULE_PARM_DESC(fwlog,
6879 		 "FW logger options: continuous, dbgpins or disable");
6880 
6881 module_param(fwlog_mem_blocks, int, 0600);
6882 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6883 
6884 module_param(bug_on_recovery, int, 0600);
6885 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6886 
6887 module_param(no_recovery, int, 0600);
6888 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6889 
6890 MODULE_LICENSE("GPL");
6891 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6892 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6893