1 /*
2  * Atheros CARL9170 driver
3  *
4  * mac80211 interaction code
5  *
6  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7  * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; see the file COPYING.  If not, see
21  * http://www.gnu.org/licenses/.
22  *
23  * This file incorporates work covered by the following copyright and
24  * permission notice:
25  *    Copyright (c) 2007-2008 Atheros Communications, Inc.
26  *
27  *    Permission to use, copy, modify, and/or distribute this software for any
28  *    purpose with or without fee is hereby granted, provided that the above
29  *    copyright notice and this permission notice appear in all copies.
30  *
31  *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32  *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33  *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34  *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35  *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36  *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37  *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38  */
39 
40 #include <linux/slab.h>
41 #include <linux/module.h>
42 #include <linux/etherdevice.h>
43 #include <linux/random.h>
44 #include <net/mac80211.h>
45 #include <net/cfg80211.h>
46 #include "hw.h"
47 #include "carl9170.h"
48 #include "cmd.h"
49 
/* nohwcrypt=1 forces all en-/decryption into software (see
 * carl9170_op_start / carl9170_op_set_key). */
static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");

/* Non-static: also read by other files of this driver to
 * disable HT/A-MPDU support. */
int modparam_noht;
module_param_named(noht, modparam_noht, int, 0444);
MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
57 
/* Build one ieee80211_rate entry: the hardware rate code goes into
 * the low nibble of hw_value, the tx power index into the high nibble. */
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
	.bitrate	= (_bitrate),			\
	.flags		= (_flags),			\
	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
}

/* Legacy rates in units of 100 kbit/s: four CCK (11b) entries first,
 * followed by the eight OFDM (11a/g) entries. */
struct ieee80211_rate __carl9170_ratetable[] = {
	RATE(10, 0, 0, 0),
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0xb, 0, 0),
	RATE(90, 0xf, 0, 0),
	RATE(120, 0xa, 0, 0),
	RATE(180, 0xe, 0, 0),
	RATE(240, 0x9, 0, 0),
	RATE(360, 0xd, 1, 0),
	RATE(480, 0x8, 2, 0),
	RATE(540, 0xc, 3, 0),
};
#undef RATE

/* 2.4 GHz uses the whole table (CCK + OFDM); 5 GHz skips the
 * four CCK entries at the start. */
#define carl9170_g_ratetable	(__carl9170_ratetable + 0)
#define carl9170_g_ratetable_size	12
#define carl9170_a_ratetable	(__carl9170_ratetable + 4)
#define carl9170_a_ratetable_size	8
84 
85 /*
86  * NB: The hw_value is used as an index into the carl9170_phy_freq_params
87  *     array in phy.c so that we don't have to do frequency lookups!
88  */
89 #define CHAN(_freq, _idx) {		\
90 	.center_freq	= (_freq),	\
91 	.hw_value	= (_idx),	\
92 	.max_power	= 18, /* XXX */	\
93 }
94 
95 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
96 	CHAN(2412,  0),
97 	CHAN(2417,  1),
98 	CHAN(2422,  2),
99 	CHAN(2427,  3),
100 	CHAN(2432,  4),
101 	CHAN(2437,  5),
102 	CHAN(2442,  6),
103 	CHAN(2447,  7),
104 	CHAN(2452,  8),
105 	CHAN(2457,  9),
106 	CHAN(2462, 10),
107 	CHAN(2467, 11),
108 	CHAN(2472, 12),
109 	CHAN(2484, 13),
110 };
111 
112 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
113 	CHAN(4920, 14),
114 	CHAN(4940, 15),
115 	CHAN(4960, 16),
116 	CHAN(4980, 17),
117 	CHAN(5040, 18),
118 	CHAN(5060, 19),
119 	CHAN(5080, 20),
120 	CHAN(5180, 21),
121 	CHAN(5200, 22),
122 	CHAN(5220, 23),
123 	CHAN(5240, 24),
124 	CHAN(5260, 25),
125 	CHAN(5280, 26),
126 	CHAN(5300, 27),
127 	CHAN(5320, 28),
128 	CHAN(5500, 29),
129 	CHAN(5520, 30),
130 	CHAN(5540, 31),
131 	CHAN(5560, 32),
132 	CHAN(5580, 33),
133 	CHAN(5600, 34),
134 	CHAN(5620, 35),
135 	CHAN(5640, 36),
136 	CHAN(5660, 37),
137 	CHAN(5680, 38),
138 	CHAN(5700, 39),
139 	CHAN(5745, 40),
140 	CHAN(5765, 41),
141 	CHAN(5785, 42),
142 	CHAN(5805, 43),
143 	CHAN(5825, 44),
144 	CHAN(5170, 45),
145 	CHAN(5190, 46),
146 	CHAN(5210, 47),
147 	CHAN(5230, 48),
148 };
149 #undef CHAN
150 
/* HT capabilities shared by both bands: 20/40 MHz with short GI on 40,
 * max A-MSDU size, 64k A-MPDUs. rx_mask enables two spatial streams
 * (MCS 0-15) plus bit 32 (rx_mask[4] & 0x1), i.e. the MCS 32 duplicate
 * mode; rx_highest of 300 corresponds to 2x40 MHz short-GI streams. */
#define CARL9170_HT_CAP							\
{									\
	.ht_supported	= true,						\
	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
			  IEEE80211_HT_CAP_SGI_40 |			\
			  IEEE80211_HT_CAP_DSSSCCK40 |			\
			  IEEE80211_HT_CAP_SM_PS,			\
	.ampdu_factor	= IEEE80211_HT_MAX_AMPDU_64K,			\
	.ampdu_density	= IEEE80211_HT_MPDU_DENSITY_8,			\
	.mcs		= {						\
		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
		.rx_highest = cpu_to_le16(300),				\
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
	},								\
}

static struct ieee80211_supported_band carl9170_band_2GHz = {
	.channels	= carl9170_2ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_2ghz_chantable),
	.bitrates	= carl9170_g_ratetable,
	.n_bitrates	= carl9170_g_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};

static struct ieee80211_supported_band carl9170_band_5GHz = {
	.channels	= carl9170_5ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_5ghz_chantable),
	.bitrates	= carl9170_a_ratetable,
	.n_bitrates	= carl9170_a_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};
183 
/*
 * Garbage-collect all tid sessions that were marked for shutdown:
 * unlink them from the active ampdu list, wait for the RCU grace
 * period, then release their queued frames and the tid structures.
 */
static void carl9170_ampdu_gc(struct ar9170 *ar)
{
	struct carl9170_sta_tid *tid_info;
	LIST_HEAD(tid_gc);

	rcu_read_lock();
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		/* the list lock serializes updaters; readers walk the
		 * list under rcu_read_lock only. */
		spin_lock_bh(&ar->tx_ampdu_list_lock);
		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
			tid_info->state = CARL9170_TID_STATE_KILLED;
			list_del_rcu(&tid_info->list);
			ar->tx_ampdu_list_len--;
			/* defer disposal until after synchronize_rcu() */
			list_add_tail(&tid_info->tmp_list, &tid_gc);
		}
		spin_unlock_bh(&ar->tx_ampdu_list_lock);

	}
	/* NOTE(review): the loop ran to completion, so tid_info points at
	 * the list head here - presumably this intentionally resets the
	 * ampdu scheduler's iterator; confirm against the tx path. */
	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
	rcu_read_unlock();

	/* no reader may still hold a reference to a collected tid */
	synchronize_rcu();

	while (!list_empty(&tid_gc)) {
		struct sk_buff *skb;
		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
					    tmp_list);

		/* report all frames that never made it out as not-acked */
		while ((skb = __skb_dequeue(&tid_info->queue)))
			carl9170_tx_status(ar, skb, false);

		list_del_init(&tid_info->tmp_list);
		kfree(tid_info);
	}
}
218 
219 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
220 {
221 	if (drop_queued) {
222 		int i;
223 
224 		/*
225 		 * We can only drop frames which have not been uploaded
226 		 * to the device yet.
227 		 */
228 
229 		for (i = 0; i < ar->hw->queues; i++) {
230 			struct sk_buff *skb;
231 
232 			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
233 				struct ieee80211_tx_info *info;
234 
235 				info = IEEE80211_SKB_CB(skb);
236 				if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 					atomic_dec(&ar->tx_ampdu_upload);
238 
239 				carl9170_tx_status(ar, skb, false);
240 			}
241 		}
242 	}
243 
244 	/* Wait for all other outstanding frames to timeout. */
245 	if (atomic_read(&ar->tx_total_queued))
246 		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
247 }
248 
/*
 * Suspend every running BA (aggregation) session and reclaim all
 * frames that were still queued for aggregation; the reclaimed
 * frames are reported back to mac80211 as not-acked.
 */
static void carl9170_flush_ba(struct ar9170 *ar)
{
	struct sk_buff_head free;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb;

	__skb_queue_head_init(&free);

	rcu_read_lock();
	spin_lock_bh(&ar->tx_ampdu_list_lock);
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
			tid_info->state = CARL9170_TID_STATE_SUSPEND;

			/* move the tid's backlog onto the local list;
			 * the status callbacks run only after all
			 * locks have been dropped. */
			spin_lock(&tid_info->lock);
			while ((skb = __skb_dequeue(&tid_info->queue)))
				__skb_queue_tail(&free, skb);
			spin_unlock(&tid_info->lock);
		}
	}
	spin_unlock_bh(&ar->tx_ampdu_list_lock);
	rcu_read_unlock();

	while ((skb = __skb_dequeue(&free)))
		carl9170_tx_status(ar, skb, false);
}
275 
/*
 * Throw away all volatile tx state: queued frames, BA sessions, the
 * tx-status backlog, queue statistics, the firmware memory bitmap
 * and any cached beacons. All callers in this file hold ar->mutex.
 */
static void carl9170_zap_queues(struct ar9170 *ar)
{
	struct carl9170_vif_info *cvif;
	unsigned int i;

	carl9170_ampdu_gc(ar);

	carl9170_flush_ba(ar);
	carl9170_flush(ar, true);

	/* drop every frame still waiting for a tx status report */
	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);
		while (!skb_queue_empty(&ar->tx_status[i])) {
			struct sk_buff *skb;

			skb = skb_peek(&ar->tx_status[i]);
			/* Take an extra reference and drop the queue
			 * lock around _tx_drop - presumably because
			 * _tx_drop may take the same lock and/or free
			 * the skb; confirm in tx.c. */
			carl9170_tx_get_skb(skb);
			spin_unlock_bh(&ar->tx_status[i].lock);
			carl9170_tx_drop(ar, skb);
			spin_lock_bh(&ar->tx_status[i].lock);
			carl9170_tx_put_skb(skb);
		}
		spin_unlock_bh(&ar->tx_status[i].lock);
	}

	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);

	/* reinitialize queues statistics */
	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
	for (i = 0; i < ar->hw->queues; i++)
		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;

	/* mark all firmware memory blocks as free */
	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
		ar->mem_bitmap[i] = 0;

	/* discard the cached beacon of every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(cvif->beacon);
		cvif->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);
	}
	rcu_read_unlock();

	atomic_set(&ar->tx_ampdu_upload, 0);
	atomic_set(&ar->tx_ampdu_scheduler, 0);
	atomic_set(&ar->tx_total_pending, 0);
	atomic_set(&ar->tx_total_queued, 0);
	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
}
328 
/* Initialize one EDCA parameter record (AIFS, contention window
 * bounds and TXOP limit) in a single statement. */
#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
do {									\
	queue.aifs = ai_fs;						\
	queue.cw_min = cwmin;						\
	queue.cw_max = cwmax;						\
	queue.txop = _txop;						\
} while (0)
336 
/*
 * mac80211 start callback: reset all driver state, open the USB
 * transport, program the MAC and QoS defaults, enable rx DMA and
 * clear the hardware key cache. On success the device transitions
 * from CARL9170_IDLE to CARL9170_STARTED and the tx queues are woken.
 */
static int carl9170_op_start(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	int err, i;

	mutex_lock(&ar->mutex);

	carl9170_zap_queues(ar);

	/* reset QoS defaults */
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3,     7, 47);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7,    15, 94);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023,  0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023,  0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);

	/* -1 forces a reprogram on the first ampdu setup */
	ar->current_factor = ar->current_density = -1;
	/* "The first key is unique." */
	ar->usedkeys = 1;
	ar->filter_state = 0;
	ar->ps.last_action = jiffies;
	ar->ps.last_slept = jiffies;
	ar->erp_mode = CARL9170_ERP_AUTO;

	/* Set "disable hw crypto offload" whenever the module parameter
	 * nohwcrypt is true or if the firmware does not support it.
	 */
	ar->disable_offload = modparam_nohwcrypt |
		ar->fw.disable_offload_fw;
	ar->rx_software_decryption = ar->disable_offload;

	for (i = 0; i < ar->hw->queues; i++) {
		ar->queue_stop_timeout[i] = jiffies;
		ar->max_queue_stop_timeout[i] = 0;
	}

	atomic_set(&ar->mem_allocs, 0);

	err = carl9170_usb_open(ar);
	if (err)
		goto out;

	err = carl9170_init_mac(ar);
	if (err)
		goto out;

	err = carl9170_set_qos(ar);
	if (err)
		goto out;

	/* program the default rx filter, if the firmware supports one */
	if (ar->fw.rx_filter) {
		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
		if (err)
			goto out;
	}

	/* start the rx DMA engine */
	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
				 AR9170_DMA_TRIGGER_RXQ);
	if (err)
		goto out;

	/* Clear key-cache */
	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  0, NULL, 0);
		if (err)
			goto out;

		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  1, NULL, 0);
		if (err)
			goto out;

		if (i < AR9170_CAM_MAX_USER) {
			err = carl9170_disable_key(ar, i);
			if (err)
				goto out;
		}
	}

	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);

	/* kick off the periodic survey/noisefloor statistics worker */
	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));

	ieee80211_wake_queues(ar->hw);
	err = 0;

out:
	mutex_unlock(&ar->mutex);
	return err;
}
430 
/*
 * Synchronously cancel all deferred work items. Must not be called
 * with ar->mutex held: several of the handlers (e.g. ps_work,
 * stat_work) take that mutex themselves.
 */
static void carl9170_cancel_worker(struct ar9170 *ar)
{
	cancel_delayed_work_sync(&ar->stat_work);
	cancel_delayed_work_sync(&ar->tx_janitor);
#ifdef CONFIG_CARL9170_LEDS
	cancel_delayed_work_sync(&ar->led_work);
#endif /* CONFIG_CARL9170_LEDS */
	cancel_work_sync(&ar->ps_work);
	cancel_work_sync(&ar->ping_work);
	cancel_work_sync(&ar->ampdu_work);
}
442 
/*
 * mac80211 stop callback: move the device back to CARL9170_IDLE,
 * quiesce DMA and the USB transport, drop all queued tx state and
 * finally cancel the deferred workers (outside ar->mutex).
 */
static void carl9170_op_stop(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	ieee80211_stop_queues(ar->hw);

	mutex_lock(&ar->mutex);
	if (IS_ACCEPTING_CMD(ar)) {
		/* stop beacon updates before tearing down the transport */
		RCU_INIT_POINTER(ar->beacon_iter, NULL);

		carl9170_led_set_state(ar, 0);

		/* stop DMA */
		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
		carl9170_usb_stop(ar);
	}

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	carl9170_cancel_worker(ar);
}
467 
/*
 * Deferred handler for carl9170_restart(): try a firmware/USB-level
 * restart and, on success, ask mac80211 to reinitialize the stack
 * state via ieee80211_restart_hw(). If that fails (or a full reset
 * was requested), fall back to a low-level usb reset.
 */
static void carl9170_restart_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 restart_work);
	int err = -EIO;

	ar->usedkeys = 0;
	ar->filter_state = 0;
	carl9170_cancel_worker(ar);

	mutex_lock(&ar->mutex);
	if (!ar->force_usb_reset) {
		err = carl9170_usb_restart(ar);
		if (net_ratelimit()) {
			if (err)
				dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
			else
				dev_info(&ar->udev->dev, "device restarted successfully.\n");
		}
	}
	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	if (!err && !ar->force_usb_reset) {
		ar->restart_counter++;
		/* allow carl9170_restart() to queue new restarts again */
		atomic_set(&ar->pending_restarts, 0);

		ieee80211_restart_hw(ar->hw);
	} else {
		/*
		 * The reset was unsuccessful and the device seems to
		 * be dead. But there's still one option: a low-level
		 * usb subsystem reset...
		 */

		carl9170_usb_reset(ar);
	}
}
506 
/*
 * Request an asynchronous device restart with reason @r. Safe to
 * call from contexts which cannot perform the restart themselves;
 * the real work happens in carl9170_restart_work().
 */
void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
{
	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	/*
	 * Sometimes, an error can trigger several different reset events.
	 * By ignoring these *surplus* reset events, the device won't be
	 * killed again, right after it has recovered.
	 */
	if (atomic_inc_return(&ar->pending_restarts) > 1) {
		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
		return;
	}

	ieee80211_stop_queues(ar->hw);

	dev_err(&ar->udev->dev, "restart device (%d)\n", r);

	/* NOTE(review): because of the ||, last_reason is recorded even
	 * when one of the two sanity checks fires (both cannot fire for
	 * the same r) - confirm whether && was intended here. */
	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
	    !WARN_ON(r >= __CARL9170_RR_LAST))
		ar->last_reason = r;

	if (!ar->registered)
		return;

	if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
		ar->force_usb_reset = true;

	ieee80211_queue_work(ar->hw, &ar->restart_work);

	/*
	 * At this point, the device instance might have vanished/disabled.
	 * So, don't put any code which access the ar9170 struct
	 * without proper protection.
	 */
}
543 
544 static void carl9170_ping_work(struct work_struct *work)
545 {
546 	struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
547 	int err;
548 
549 	if (!IS_STARTED(ar))
550 		return;
551 
552 	mutex_lock(&ar->mutex);
553 	err = carl9170_echo_test(ar, 0xdeadbeef);
554 	if (err)
555 		carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
556 	mutex_unlock(&ar->mutex);
557 }
558 
559 static int carl9170_init_interface(struct ar9170 *ar,
560 				   struct ieee80211_vif *vif)
561 {
562 	struct ath_common *common = &ar->common;
563 	int err;
564 
565 	if (!vif) {
566 		WARN_ON_ONCE(IS_STARTED(ar));
567 		return 0;
568 	}
569 
570 	memcpy(common->macaddr, vif->addr, ETH_ALEN);
571 
572 	/* We have to fall back to software crypto, whenever
573 	 * the user choose to participates in an IBSS. HW
574 	 * offload for IBSS RSN is not supported by this driver.
575 	 *
576 	 * NOTE: If the previous main interface has already
577 	 * disabled hw crypto offload, we have to keep this
578 	 * previous disable_offload setting as it was.
579 	 * Altough ideally, we should notify mac80211 and tell
580 	 * it to forget about any HW crypto offload for now.
581 	 */
582 	ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
583 	    (vif->type != NL80211_IFTYPE_AP));
584 
585 	/* The driver used to have P2P GO+CLIENT support,
586 	 * but since this was dropped and we don't know if
587 	 * there are any gremlins lurking in the shadows,
588 	 * so best we keep HW offload disabled for P2P.
589 	 */
590 	ar->disable_offload |= vif->p2p;
591 
592 	ar->rx_software_decryption = ar->disable_offload;
593 
594 	err = carl9170_set_operating_mode(ar);
595 	return err;
596 }
597 
/*
 * mac80211 add_interface callback. The AR9170 MAC supports only ONE
 * real operating mode, so the first interface on ar->vif_list is the
 * "main" interface that dictates the hardware mode; all later ones
 * are "slaves" which merely occupy an entry in the hardware ACK table.
 *
 * NOTE(review): old_main is initialized to NULL and never reassigned
 * anywhere in this function, so both old_main branches below are
 * currently dead - presumably leftovers from the removed P2P GO+CLIENT
 * support; verify against the driver history before touching them.
 */
static int carl9170_op_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif, *old_main = NULL;
	struct ar9170 *ar = hw->priv;
	int vif_id = -1, err = 0;

	mutex_lock(&ar->mutex);
	rcu_read_lock();
	if (vif_priv->active) {
		/*
		 * Skip the interface structure initialization,
		 * if the vif survived the _restart call.
		 */
		vif_id = vif_priv->id;
		vif_priv->enable_beacon = false;

		/* drop any beacon cached from before the restart */
		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(vif_priv->beacon);
		vif_priv->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);

		goto init;
	}

	/* Because the AR9170 HW's MAC doesn't provide full support for
	 * multiple, independent interfaces [of different operation modes].
	 * We have to select ONE main interface [main mode of HW], but we
	 * can have multiple slaves [AKA: entry in the ACK-table].
	 *
	 * The first (from HEAD/TOP) interface in the ar->vif_list is
	 * always the main intf. All following intfs in this list
	 * are considered to be slave intfs.
	 */
	main_vif = carl9170_get_main_vif(ar);

	/* check whether the new interface type is compatible with the
	 * mode the current main interface has put the hardware in */
	if (main_vif) {
		switch (main_vif->type) {
		case NL80211_IFTYPE_STATION:
			if (vif->type == NL80211_IFTYPE_STATION)
				break;

			err = -EBUSY;
			rcu_read_unlock();

			goto unlock;

		case NL80211_IFTYPE_MESH_POINT:
		case NL80211_IFTYPE_AP:
			if ((vif->type == NL80211_IFTYPE_STATION) ||
			    (vif->type == NL80211_IFTYPE_WDS) ||
			    (vif->type == NL80211_IFTYPE_AP) ||
			    (vif->type == NL80211_IFTYPE_MESH_POINT))
				break;

			err = -EBUSY;
			rcu_read_unlock();
			goto unlock;

		default:
			rcu_read_unlock();
			goto unlock;
		}
	}

	/* grab a free slot in the firmware's interface table */
	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);

	if (vif_id < 0) {
		rcu_read_unlock();

		err = -ENOSPC;
		goto unlock;
	}

	BUG_ON(ar->vif_priv[vif_id].id != vif_id);

	vif_priv->active = true;
	vif_priv->id = vif_id;
	vif_priv->enable_beacon = false;
	ar->vifs++;
	if (old_main) {
		/* We end up in here, if the main interface is being replaced.
		 * Put the new main interface at the HEAD of the list and the
		 * previous inteface will automatically become second in line.
		 */
		list_add_rcu(&vif_priv->list, &ar->vif_list);
	} else {
		/* Add new inteface. If the list is empty, it will become the
		 * main inteface, otherwise it will be slave.
		 */
		list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
	}
	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);

init:
	main_vif = carl9170_get_main_vif(ar);

	if (main_vif == vif) {
		/* the new vif is the main interface: it drives beaconing
		 * and the hardware operating mode */
		rcu_assign_pointer(ar->beacon_iter, vif_priv);
		rcu_read_unlock();

		if (old_main) {
			struct carl9170_vif_info *old_main_priv =
				(void *) old_main->drv_priv;
			/* downgrade old main intf to slave intf.
			 * NOTE: We are no longer under rcu_read_lock.
			 * But we are still holding ar->mutex, so the
			 * vif data [id, addr] is safe.
			 */
			err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
						       old_main->addr);
			if (err)
				goto unlock;
		}

		err = carl9170_init_interface(ar, vif);
		if (err)
			goto unlock;
	} else {
		/* slave interface: only register its MAC address with
		 * the hardware ACK table */
		rcu_read_unlock();
		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);

		if (err)
			goto unlock;
	}

	/* reset the firmware's per-vif tx sequence counter */
	if (ar->fw.tx_seq_table) {
		err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
					 0);
		if (err)
			goto unlock;
	}

unlock:
	if (err && (vif_id >= 0)) {
		/* roll back everything that was set up above */
		vif_priv->active = false;
		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
		ar->vifs--;
		RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
		list_del_rcu(&vif_priv->list);
		mutex_unlock(&ar->mutex);
		synchronize_rcu();
	} else {
		/* powersave must stay off while multiple vifs are active */
		if (ar->vifs > 1)
			ar->ps.off_override |= PS_OFF_VIF;

		mutex_unlock(&ar->mutex);
	}

	return err;
}
750 
/*
 * mac80211 remove_interface callback: unlink the vif from the driver's
 * list, release its firmware resources and - if the main interface was
 * removed - promote the next interface in line to become the new main.
 */
static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	unsigned int id;

	mutex_lock(&ar->mutex);

	if (WARN_ON_ONCE(!vif_priv->active))
		goto unlock;

	ar->vifs--;

	rcu_read_lock();
	main_vif = carl9170_get_main_vif(ar);

	id = vif_priv->id;

	vif_priv->active = false;
	WARN_ON(vif_priv->enable_beacon);
	vif_priv->enable_beacon = false;
	list_del_rcu(&vif_priv->list);
	RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);

	if (vif == main_vif) {
		rcu_read_unlock();

		if (ar->vifs) {
			/* promote the next vif on the list to main */
			WARN_ON(carl9170_init_interface(ar,
					carl9170_get_main_vif(ar)));
		} else {
			carl9170_set_operating_mode(ar);
		}
	} else {
		rcu_read_unlock();

		/* clear the slave's entry in the hardware ACK table */
		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
	}

	carl9170_update_beacon(ar, false);
	carl9170_flush_cab(ar, id);

	/* release the vif's cached beacon */
	spin_lock_bh(&ar->beacon_lock);
	dev_kfree_skb_any(vif_priv->beacon);
	vif_priv->beacon = NULL;
	spin_unlock_bh(&ar->beacon_lock);

	bitmap_release_region(&ar->vif_bitmap, id, 0);

	carl9170_set_beacon_timers(ar);

	/* powersave may be re-enabled once only one vif is left */
	if (ar->vifs == 1)
		ar->ps.off_override &= ~PS_OFF_VIF;

unlock:
	mutex_unlock(&ar->mutex);

	/* wait out all readers which may still see the unlinked vif */
	synchronize_rcu();
}
812 
/* Schedule a powersave state re-evaluation (see carl9170_ps_work). */
void carl9170_ps_check(struct ar9170 *ar)
{
	ieee80211_queue_work(ar->hw, &ar->ps_work);
}
817 
818 /* caller must hold ar->mutex */
819 static int carl9170_ps_update(struct ar9170 *ar)
820 {
821 	bool ps = false;
822 	int err = 0;
823 
824 	if (!ar->ps.off_override)
825 		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
826 
827 	if (ps != ar->ps.state) {
828 		err = carl9170_powersave(ar, ps);
829 		if (err)
830 			return err;
831 
832 		if (ar->ps.state && !ps) {
833 			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
834 				ar->ps.last_action);
835 		}
836 
837 		if (ps)
838 			ar->ps.last_slept = jiffies;
839 
840 		ar->ps.last_action = jiffies;
841 		ar->ps.state = ps;
842 	}
843 
844 	return 0;
845 }
846 
/* Deferred handler for carl9170_ps_check(): applies the powersave
 * state under ar->mutex while the device is started. */
static void carl9170_ps_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ps_work);
	mutex_lock(&ar->mutex);
	if (IS_STARTED(ar))
		WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
	mutex_unlock(&ar->mutex);
}
856 
857 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
858 {
859 	int err;
860 
861 	if (noise) {
862 		err = carl9170_get_noisefloor(ar);
863 		if (err)
864 			return err;
865 	}
866 
867 	if (ar->fw.hw_counters) {
868 		err = carl9170_collect_tally(ar);
869 		if (err)
870 			return err;
871 	}
872 
873 	if (flush)
874 		memset(&ar->tally, 0, sizeof(ar->tally));
875 
876 	return 0;
877 }
878 
879 static void carl9170_stat_work(struct work_struct *work)
880 {
881 	struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
882 	int err;
883 
884 	mutex_lock(&ar->mutex);
885 	err = carl9170_update_survey(ar, false, true);
886 	mutex_unlock(&ar->mutex);
887 
888 	if (err)
889 		return;
890 
891 	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
892 		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
893 }
894 
/*
 * mac80211 config callback: apply powersave, channel and tx power
 * changes. LISTEN_INTERVAL and SMPS changes are currently accepted
 * but not acted upon (TODO markers below).
 */
static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);
	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		err = carl9170_ps_update(ar);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		enum nl80211_channel_type channel_type =
			cfg80211_get_chandef_type(&hw->conf.chandef);

		/* adjust slot time for 5 GHz */
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;

		/* close out the old channel's survey data ... */
		err = carl9170_update_survey(ar, true, false);
		if (err)
			goto out;

		err = carl9170_set_channel(ar, hw->conf.chandef.chan,
					   channel_type);
		if (err)
			goto out;

		/* ... and start collecting for the new channel */
		err = carl9170_update_survey(ar, false, true);
		if (err)
			goto out;

		err = carl9170_set_dyn_sifs_ack(ar);
		if (err)
			goto out;

		err = carl9170_set_rts_cts_rate(ar);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;
}
958 
959 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
960 					 struct netdev_hw_addr_list *mc_list)
961 {
962 	struct netdev_hw_addr *ha;
963 	u64 mchash;
964 
965 	/* always get broadcast frames */
966 	mchash = 1ULL << (0xff >> 2);
967 
968 	netdev_hw_addr_list_for_each(ha, mc_list)
969 		mchash |= 1ULL << (ha->addr[5] >> 2);
970 
971 	return mchash;
972 }
973 
/*
 * mac80211 configure_filter callback: program the multicast hash,
 * the sniffer/operating mode and - if the firmware supports it -
 * the rx filter bits that match the requested FIF_* flags.
 */
static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
					 unsigned int changed_flags,
					 unsigned int *new_flags,
					 u64 multicast)
{
	struct ar9170 *ar = hw->priv;

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;

	if (!IS_ACCEPTING_CMD(ar))
		return;

	mutex_lock(&ar->mutex);

	ar->filter_state = *new_flags;
	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	if (*new_flags & FIF_ALLMULTI)
		multicast = ~0ULL;

	/* only reprogram the hardware when the hash actually changed */
	if (multicast != ar->cur_mc_hash)
		WARN_ON(carl9170_update_multicast(ar, multicast));

	if (changed_flags & FIF_OTHER_BSS) {
		ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);

		WARN_ON(carl9170_set_operating_mode(ar));
	}

	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
		/* build the set of frame classes the firmware should DROP;
		 * each FIF_* flag the stack wants clears the matching bit */
		u32 rx_filter = 0;

		if (!ar->fw.ba_filter)
			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;

		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
			rx_filter |= CARL9170_RX_FILTER_BAD;

		if (!(*new_flags & FIF_CONTROL))
			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;

		if (!(*new_flags & FIF_PSPOLL))
			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;

		if (!(*new_flags & FIF_OTHER_BSS)) {
			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
		}

		WARN_ON(carl9170_rx_filter(ar, rx_filter));
	}

	mutex_unlock(&ar->mutex);
}
1032 
1033 
/*
 * mac80211 bss_info_changed callback. Beacon-related changes are
 * handled for every interface; BSSID/association/slot-time/basic-rate
 * changes are only applied when they come from the main interface.
 */
static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *bss_conf,
					 u32 changed)
{
	struct ar9170 *ar = hw->priv;
	struct ath_common *common = &ar->common;
	int err = 0;
	struct carl9170_vif_info *vif_priv;
	struct ieee80211_vif *main_vif;

	mutex_lock(&ar->mutex);
	vif_priv = (void *) vif->drv_priv;
	main_vif = carl9170_get_main_vif(ar);
	if (WARN_ON(!main_vif))
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		struct carl9170_vif_info *iter;
		int i = 0;

		/* count how many active interfaces currently beacon */
		vif_priv->enable_beacon = bss_conf->enable_beacon;
		rcu_read_lock();
		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
			if (iter->active && iter->enable_beacon)
				i++;

		}
		rcu_read_unlock();

		ar->beacon_enabled = i;
	}

	if (changed & BSS_CHANGED_BEACON) {
		err = carl9170_update_beacon(ar, false);
		if (err)
			goto out;
	}

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
		       BSS_CHANGED_BEACON_INT)) {

		/* slave interfaces inherit the main vif's beacon timing */
		if (main_vif != vif) {
			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
		}

		/*
		 * Reject beacon_int * dtim_period products that reach
		 * half of CARL9170_QUEUE_STUCK_TIMEOUT: broadcast/CAB
		 * traffic is only released once per DTIM, so a longer
		 * period could make the queue watchdog raise false
		 * stuck-queue alarms.
		 */
		if (vif->type != NL80211_IFTYPE_STATION &&
		    (bss_conf->beacon_int * bss_conf->dtim_period >=
		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
			err = -EINVAL;
			goto out;
		}

		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_HT) {
		/* TODO */
		err = 0;
		if (err)
			goto out;
	}

	if (main_vif != vif)
		goto out;

	/*
	 * The following settings can only be changed by the
	 * master interface.
	 */

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		err = carl9170_set_operating_mode(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ASSOC) {
		ar->common.curaid = bss_conf->aid;
		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = carl9170_set_mac_rates(ar);
		if (err)
			goto out;
	}

out:
	WARN_ON_ONCE(err && IS_STARTED(ar));
	mutex_unlock(&ar->mutex);
}
1142 
1143 static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1144 			       struct ieee80211_vif *vif)
1145 {
1146 	struct ar9170 *ar = hw->priv;
1147 	struct carl9170_tsf_rsp tsf;
1148 	int err;
1149 
1150 	mutex_lock(&ar->mutex);
1151 	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1152 				0, NULL, sizeof(tsf), &tsf);
1153 	mutex_unlock(&ar->mutex);
1154 	if (WARN_ON(err))
1155 		return 0;
1156 
1157 	return le64_to_cpu(tsf.tsf_64);
1158 }
1159 
/*
 * mac80211 set_key callback: install (SET_KEY) or remove (DISABLE_KEY)
 * a cipher key in the hardware.  Slots 0-63 hold pairwise keys, slot
 * 64 + keyidx holds the matching group key.  Any error return makes
 * mac80211 fall back to software encryption for this key.
 */
static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	if (ar->disable_offload || !vif)
		return -EOPNOTSUPP;

	/* Fall back to software encryption whenever the driver is connected
	 * to more than one network.
	 *
	 * This is very unfortunate, because some machines cannot handle
	 * the high throughput speed in 802.11n networks.
	 */

	if (!is_main_vif(ar, vif)) {
		/* err_softw drops the mutex, so take it here to keep
		 * the lock balanced */
		mutex_lock(&ar->mutex);
		goto err_softw;
	}

	/*
	 * While the hardware supports *catch-all* key, for offloading
	 * group-key en-/de-cryption. The way of how the hardware
	 * decides which keyId maps to which key, remains a mystery...
	 */
	if ((vif->type != NL80211_IFTYPE_STATION &&
	     vif->type != NL80211_IFTYPE_ADHOC) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	/* map the mac80211 cipher suite onto the hardware's algorithm id */
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		ktype = AR9170_ENC_ALG_WEP64;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		ktype = AR9170_ENC_ALG_WEP128;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		/* have mac80211 encrypt CCMP management frames in software
		 * (IEEE80211_KEY_FLAG_SW_MGMT_TX) */
		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (!IS_STARTED(ar)) {
			err = -EOPNOTSUPP;
			goto out;
		}

		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
			/* group keys: fixed slot derived from the keyidx,
			 * programmed without a station address */
			sta = NULL;

			i = 64 + key->keyidx;
		} else {
			/* pairwise keys: grab the first free slot out of
			 * the 64-entry bitmap */
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64)
				goto err_softw;
		}

		key->hw_key_idx = i;

		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
					  ktype, 0, key->key,
					  min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			/* second upload carries the TKIP MIC key material */
			err = carl9170_upload_key(ar, i, sta ? sta->addr :
						  NULL, ktype, 1,
						  key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * hardware is not capable generating MMIC
			 * of fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		/* only pairwise slots are tracked in the bitmap */
		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (!IS_STARTED(ar)) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		if (key->hw_key_idx < 64) {
			/* pairwise slot: just release it in the bitmap;
			 * carl9170_disable_key below clears the hardware */
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			/* group slot: explicitly wipe the key material */
			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
						  AR9170_ENC_ALG_NONE, 0,
						  NULL, 0);
			if (err)
				goto out;

			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
				err = carl9170_upload_key(ar, key->hw_key_idx,
							  NULL,
							  AR9170_ENC_ALG_NONE,
							  1, NULL, 0);
				if (err)
					goto out;
			}

		}

		err = carl9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;

err_softw:
	/* no free hardware slot (or secondary vif): switch the receive
	 * path to software decryption and let mac80211 handle the key */
	if (!ar->rx_software_decryption) {
		ar->rx_software_decryption = true;
		carl9170_set_operating_mode(ar);
	}
	mutex_unlock(&ar->mutex);
	return -ENOSPC;
}
1301 
/*
 * mac80211 sta_add callback: initialize the per-station driver state.
 * Never fails; a station whose A-MPDU density the hardware cannot
 * handle is simply left with ht_sta == false (no HT transmission).
 */
static int carl9170_op_sta_add(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
{
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;

	atomic_set(&sta_info->pending_frames, 0);

	if (sta->ht_cap.ht_supported) {
		if (sta->ht_cap.ampdu_density > 6) {
			/*
			 * The hardware does NOT support the 16us AMPDU
			 * density setting (the only value above 6).
			 * No HT-Xmit for this station.
			 */

			return 0;
		}

		/* no TX aggregation sessions exist for this station yet */
		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

		/* cache 2^(3 + ampdu_factor) as the station's maximum
		 * A-MPDU length; the unit is defined by the tx
		 * aggregation path — TODO confirm against tx.c */
		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
		sta_info->ht_sta = true;
	}

	return 0;
}
1330 
/*
 * mac80211 sta_remove callback: flag every TX aggregation session of
 * the departing station for shutdown and run the A-MPDU garbage
 * collector to reap them.  Always returns 0.
 */
static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;
	bool cleanup = false;

	if (sta->ht_cap.ht_supported) {

		sta_info->ht_sta = false;

		rcu_read_lock();
		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
			struct carl9170_sta_tid *tid_info;

			/* detach the session from the station first, so
			 * no new frames get queued onto it */
			tid_info = rcu_dereference(sta_info->agg[i]);
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

			if (!tid_info)
				continue;

			/* only downgrade the state — never resurrect a
			 * session that is already past SHUTDOWN */
			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
			cleanup = true;
		}
		rcu_read_unlock();

		/* the gc performs the actual list removal and freeing */
		if (cleanup)
			carl9170_ampdu_gc(ar);
	}

	return 0;
}
1368 
1369 static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1370 			       struct ieee80211_vif *vif, u16 queue,
1371 			       const struct ieee80211_tx_queue_params *param)
1372 {
1373 	struct ar9170 *ar = hw->priv;
1374 	int ret;
1375 
1376 	mutex_lock(&ar->mutex);
1377 	memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1378 	ret = carl9170_set_qos(ar);
1379 	mutex_unlock(&ar->mutex);
1380 	return ret;
1381 }
1382 
1383 static void carl9170_ampdu_work(struct work_struct *work)
1384 {
1385 	struct ar9170 *ar = container_of(work, struct ar9170,
1386 					 ampdu_work);
1387 
1388 	if (!IS_STARTED(ar))
1389 		return;
1390 
1391 	mutex_lock(&ar->mutex);
1392 	carl9170_ampdu_gc(ar);
1393 	mutex_unlock(&ar->mutex);
1394 }
1395 
/*
 * mac80211 ampdu_action callback: manage TX A-MPDU (aggregation)
 * sessions.  RX aggregation is handled entirely by the hardware.
 */
static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_ampdu_params *params)
{
	struct ieee80211_sta *sta = params->sta;
	enum ieee80211_ampdu_mlme_action action = params->action;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct carl9170_sta_tid *tid_info;

	if (modparam_noht)
		return -EOPNOTSUPP;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		/* sta_add left ht_sta unset for unsupported peers */
		if (!sta_info->ht_sta)
			return -EOPNOTSUPP;

		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
				   GFP_ATOMIC);
		if (!tid_info)
			return -ENOMEM;

		/* seed all sequence bookkeeping (hsn/bsn/snx) with the
		 * session's starting sequence number — consumed by the
		 * tx aggregation path */
		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
		tid_info->state = CARL9170_TID_STATE_PROGRESS;
		tid_info->tid = tid;
		tid_info->max = sta_info->ampdu_max_len;
		tid_info->sta = sta;
		tid_info->vif = vif;

		INIT_LIST_HEAD(&tid_info->list);
		INIT_LIST_HEAD(&tid_info->tmp_list);
		skb_queue_head_init(&tid_info->queue);
		spin_lock_init(&tid_info->lock);

		/* publish the session: global ampdu list + per-sta slot */
		spin_lock_bh(&ar->tx_ampdu_list_lock);
		ar->tx_ampdu_list_len++;
		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
		rcu_assign_pointer(sta_info->agg[tid], tid_info);
		spin_unlock_bh(&ar->tx_ampdu_list_lock);

		return IEEE80211_AMPDU_TX_START_IMMEDIATE;

	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		/* only flag the session here; the actual teardown is
		 * done asynchronously by carl9170_ampdu_work */
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);
		if (tid_info) {
			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
		}

		RCU_INIT_POINTER(sta_info->agg[tid], NULL);
		rcu_read_unlock();

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
		break;

	case IEEE80211_AMPDU_TX_OPERATIONAL:
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);

		sta_info->stats[tid].clear = true;
		sta_info->stats[tid].req = false;

		/* reset the block-ack window and open the session for
		 * the transmit path */
		if (tid_info) {
			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
			tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		rcu_read_unlock();

		if (WARN_ON_ONCE(!tid_info))
			return -EFAULT;

		break;

	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Handled by hardware */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
1489 
1490 #ifdef CONFIG_CARL9170_WPC
1491 static int carl9170_register_wps_button(struct ar9170 *ar)
1492 {
1493 	struct input_dev *input;
1494 	int err;
1495 
1496 	if (!(ar->features & CARL9170_WPS_BUTTON))
1497 		return 0;
1498 
1499 	input = input_allocate_device();
1500 	if (!input)
1501 		return -ENOMEM;
1502 
1503 	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1504 		 wiphy_name(ar->hw->wiphy));
1505 
1506 	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1507 		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1508 
1509 	input->name = ar->wps.name;
1510 	input->phys = ar->wps.phys;
1511 	input->id.bustype = BUS_USB;
1512 	input->dev.parent = &ar->hw->wiphy->dev;
1513 
1514 	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1515 
1516 	err = input_register_device(input);
1517 	if (err) {
1518 		input_free_device(input);
1519 		return err;
1520 	}
1521 
1522 	ar->wps.pbc = input;
1523 	return 0;
1524 }
1525 #endif /* CONFIG_CARL9170_WPC */
1526 
1527 #ifdef CONFIG_CARL9170_HWRNG
/*
 * Refill the random-number cache by repeatedly reading the hardware
 * RNG register (AR9170_RAND_REG_NUM) through the firmware's
 * register-read command, RW 32-bit words per exchange.
 */
static int carl9170_rng_get(struct ar9170 *ar)
{

#define RW	(CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
#define RB	(CARL9170_MAX_CMD_PAYLOAD_LEN)

	/* request payload: the same register address, RW times over */
	static const __le32 rng_load[RW] = {
		[0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};

	u32 buf[RW];

	unsigned int i, off = 0, transfer, count;
	int err;

	/* NOTE(review): RB is defined as CARL9170_MAX_CMD_PAYLOAD_LEN,
	 * so this check can never trigger; kept as written */
	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);

	if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
		return -EAGAIN;

	count = ARRAY_SIZE(ar->rng.cache);
	while (count) {
		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) rng_load,
					RB, (u8 *) buf);
		if (err)
			return err;

		/* copy as many words as still fit into the cache; if the
		 * cache elements are narrower than u32 the values are
		 * implicitly truncated — see carl9170_rng_read */
		transfer = min_t(unsigned int, count, RW);
		for (i = 0; i < transfer; i++)
			ar->rng.cache[off + i] = buf[i];

		off += transfer;
		count -= transfer;
	}

	/* cache is full again; restart consumption from the start */
	ar->rng.cache_idx = 0;

#undef RW
#undef RB
	return 0;
}
1569 
/*
 * hwrng data_read callback: hand out one cached word, refilling the
 * cache from the hardware whenever it runs dry.  Returns the number
 * of valid bytes written to *data, or a negative error.
 */
static int carl9170_rng_read(struct hwrng *rng, u32 *data)
{
	struct ar9170 *ar = (struct ar9170 *)rng->priv;
	int ret = -EIO;

	mutex_lock(&ar->mutex);
	if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
		ret = carl9170_rng_get(ar);
		if (ret) {
			mutex_unlock(&ar->mutex);
			return ret;
		}
	}

	*data = ar->rng.cache[ar->rng.cache_idx++];
	mutex_unlock(&ar->mutex);

	/* NOTE(review): reports sizeof(u16) valid bytes although *data is
	 * a u32 — presumably ar->rng.cache stores 16-bit words; confirm
	 * against the declaration of struct ar9170's rng.cache */
	return sizeof(u16);
}
1589 
1590 static void carl9170_unregister_hwrng(struct ar9170 *ar)
1591 {
1592 	if (ar->rng.initialized) {
1593 		hwrng_unregister(&ar->rng.rng);
1594 		ar->rng.initialized = false;
1595 	}
1596 }
1597 
1598 static int carl9170_register_hwrng(struct ar9170 *ar)
1599 {
1600 	int err;
1601 
1602 	snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1603 		 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1604 	ar->rng.rng.name = ar->rng.name;
1605 	ar->rng.rng.data_read = carl9170_rng_read;
1606 	ar->rng.rng.priv = (unsigned long)ar;
1607 
1608 	if (WARN_ON(ar->rng.initialized))
1609 		return -EALREADY;
1610 
1611 	err = hwrng_register(&ar->rng.rng);
1612 	if (err) {
1613 		dev_err(&ar->udev->dev, "Failed to register the random "
1614 			"number generator (%d)\n", err);
1615 		return err;
1616 	}
1617 
1618 	ar->rng.initialized = true;
1619 
1620 	err = carl9170_rng_get(ar);
1621 	if (err) {
1622 		carl9170_unregister_hwrng(ar);
1623 		return err;
1624 	}
1625 
1626 	return 0;
1627 }
1628 #endif /* CONFIG_CARL9170_HWRNG */
1629 
/*
 * mac80211 get_survey callback: report the cached per-channel survey
 * data.  @idx is matched against the channels' hw_value; returning
 * -ENOENT ends mac80211's iteration over the channel indices.
 */
static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
				struct survey_info *survey)
{
	struct ar9170 *ar = hw->priv;
	struct ieee80211_channel *chan;
	struct ieee80211_supported_band *band;
	int err, b, i;

	chan = ar->channel;
	if (!chan)
		return -ENODEV;

	/* for the currently tuned channel, refresh the counters from
	 * the hardware before reporting them */
	if (idx == chan->hw_value) {
		mutex_lock(&ar->mutex);
		err = carl9170_update_survey(ar, false, true);
		mutex_unlock(&ar->mutex);
		if (err)
			return err;
	}

	/* translate idx back into the matching channel structure */
	for (b = 0; b < NUM_NL80211_BANDS; b++) {
		band = ar->hw->wiphy->bands[b];

		if (!band)
			continue;

		for (i = 0; i < band->n_channels; i++) {
			if (band->channels[i].hw_value == idx) {
				chan = &band->channels[i];
				goto found;
			}
		}
	}
	return -ENOENT;

found:
	memcpy(survey, &ar->survey[idx], sizeof(*survey));

	survey->channel = chan;
	survey->filled = SURVEY_INFO_NOISE_DBM;

	if (ar->channel == chan)
		survey->filled |= SURVEY_INFO_IN_USE;

	/* channel-time statistics are only valid when the firmware
	 * provides hardware counters */
	if (ar->fw.hw_counters) {
		survey->filled |= SURVEY_INFO_TIME |
				  SURVEY_INFO_TIME_BUSY |
				  SURVEY_INFO_TIME_TX;
	}

	return 0;
}
1682 
1683 static void carl9170_op_flush(struct ieee80211_hw *hw,
1684 			      struct ieee80211_vif *vif,
1685 			      u32 queues, bool drop)
1686 {
1687 	struct ar9170 *ar = hw->priv;
1688 	unsigned int vid;
1689 
1690 	mutex_lock(&ar->mutex);
1691 	for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1692 		carl9170_flush_cab(ar, vid);
1693 
1694 	carl9170_flush(ar, drop);
1695 	mutex_unlock(&ar->mutex);
1696 }
1697 
1698 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1699 				 struct ieee80211_low_level_stats *stats)
1700 {
1701 	struct ar9170 *ar = hw->priv;
1702 
1703 	memset(stats, 0, sizeof(*stats));
1704 	stats->dot11ACKFailureCount = ar->tx_ack_failures;
1705 	stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1706 	return 0;
1707 }
1708 
1709 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1710 				   struct ieee80211_vif *vif,
1711 				   enum sta_notify_cmd cmd,
1712 				   struct ieee80211_sta *sta)
1713 {
1714 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1715 
1716 	switch (cmd) {
1717 	case STA_NOTIFY_SLEEP:
1718 		sta_info->sleeping = true;
1719 		if (atomic_read(&sta_info->pending_frames))
1720 			ieee80211_sta_block_awake(hw, sta, true);
1721 		break;
1722 
1723 	case STA_NOTIFY_AWAKE:
1724 		sta_info->sleeping = false;
1725 		break;
1726 	}
1727 }
1728 
1729 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1730 {
1731 	struct ar9170 *ar = hw->priv;
1732 
1733 	return !!atomic_read(&ar->tx_total_queued);
1734 }
1735 
/* mac80211 callback table implemented by this driver */
static const struct ieee80211_ops carl9170_ops = {
	.start			= carl9170_op_start,
	.stop			= carl9170_op_stop,
	.tx			= carl9170_op_tx,
	.flush			= carl9170_op_flush,
	.add_interface		= carl9170_op_add_interface,
	.remove_interface	= carl9170_op_remove_interface,
	.config			= carl9170_op_config,
	.prepare_multicast	= carl9170_op_prepare_multicast,
	.configure_filter	= carl9170_op_configure_filter,
	.conf_tx		= carl9170_op_conf_tx,
	.bss_info_changed	= carl9170_op_bss_info_changed,
	.get_tsf		= carl9170_op_get_tsf,
	.set_key		= carl9170_op_set_key,
	.sta_add		= carl9170_op_sta_add,
	.sta_remove		= carl9170_op_sta_remove,
	.sta_notify		= carl9170_op_sta_notify,
	.get_survey		= carl9170_op_get_survey,
	.get_stats		= carl9170_op_get_stats,
	.ampdu_action		= carl9170_op_ampdu_action,
	.tx_frames_pending	= carl9170_tx_frames_pending,
};
1758 
/*
 * Allocate the ieee80211_hw + ar9170 context and initialize all pure
 * software state: locks, queues, workers and static hw capabilities.
 *
 * @priv_size: number of private bytes to reserve, passed straight
 *	through to ieee80211_alloc_hw().
 *
 * Returns the new ar9170 context, or ERR_PTR(-ENOMEM).
 */
void *carl9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	struct sk_buff *skb;
	int i;

	/*
	 * this buffer is used for rx stream reconstruction.
	 * Under heavy load this device (or the transport layer?)
	 * tends to split the streams into separate rx descriptors.
	 */

	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
	if (!skb)
		goto err_nomem;

	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
	if (!hw)
		goto err_nomem;

	ar = hw->priv;
	ar->hw = hw;
	ar->rx_failover = skb;

	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
	ar->rx_has_plcp = false;

	/*
	 * Here's a hidden pitfall!
	 *
	 * All 4 AC queues work perfectly well under _legacy_ operation.
	 * However as soon as aggregation is enabled, the traffic flow
	 * gets very bumpy. Therefore we have to _switch_ to a
	 * software AC with a single HW queue.
	 */
	hw->queues = __AR9170_NUM_TXQ;

	mutex_init(&ar->mutex);
	spin_lock_init(&ar->beacon_lock);
	spin_lock_init(&ar->cmd_lock);
	spin_lock_init(&ar->tx_stats_lock);
	spin_lock_init(&ar->tx_ampdu_list_lock);
	spin_lock_init(&ar->mem_lock);
	spin_lock_init(&ar->state_lock);
	atomic_set(&ar->pending_restarts, 0);
	ar->vifs = 0;
	for (i = 0; i < ar->hw->queues; i++) {
		skb_queue_head_init(&ar->tx_status[i]);
		skb_queue_head_init(&ar->tx_pending[i]);

		INIT_LIST_HEAD(&ar->bar_list[i]);
		spin_lock_init(&ar->bar_list_lock[i]);
	}
	INIT_WORK(&ar->ps_work, carl9170_ps_work);
	INIT_WORK(&ar->ping_work, carl9170_ping_work);
	INIT_WORK(&ar->restart_work, carl9170_restart_work);
	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
	INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
	INIT_LIST_HEAD(&ar->tx_ampdu_list);
	/* the round-robin iterator initially points at the list head */
	rcu_assign_pointer(ar->tx_ampdu_iter,
			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);

	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
	INIT_LIST_HEAD(&ar->vif_list);
	init_completion(&ar->tx_flush);

	/* firmware decides which modes we support */
	hw->wiphy->interface_modes = 0;

	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);

	if (!modparam_noht) {
		/*
		 * see the comment above, why we allow the user
		 * to disable HT by a module parameter.
		 */
		ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	}

	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
	hw->sta_data_size = sizeof(struct carl9170_sta_info);
	hw->vif_data_size = sizeof(struct carl9170_vif_info);

	hw->max_rates = CARL9170_TX_MAX_RATES;
	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	return ar;

err_nomem:
	/* kfree_skb(NULL) is safe, so this covers both failure points */
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
1866 
/*
 * Download the complete EEPROM image from the device into ar->eeprom,
 * RB bytes (RW 32-bit words) per firmware register-read command.
 */
static int carl9170_read_eeprom(struct ar9170 *ar)
{
#define RW	8	/* number of words to read at once */
#define RB	(sizeof(u32) * RW)
	u8 *eeprom = (void *)&ar->eeprom;
	__le32 offsets[RW];
	int i, j, err;

	/* the transfer below works in whole 32-bit words */
	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);

	/* request plus header must fit into a single command */
	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
#ifndef __CHECKER__
	/* don't want to handle trailing remains */
	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
#endif

	for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
		/* each request lists the RW register offsets to read */
		for (j = 0; j < RW; j++)
			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
						 RB * i + 4 * j);

		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) &offsets,
					RB, eeprom + RB * i);
		if (err)
			return err;
	}

#undef RW
#undef RB
	return 0;
}
1899 
1900 static int carl9170_parse_eeprom(struct ar9170 *ar)
1901 {
1902 	struct ath_regulatory *regulatory = &ar->common.regulatory;
1903 	unsigned int rx_streams, tx_streams, tx_params = 0;
1904 	int bands = 0;
1905 	int chans = 0;
1906 
1907 	if (ar->eeprom.length == cpu_to_le16(0xffff))
1908 		return -ENODATA;
1909 
1910 	rx_streams = hweight8(ar->eeprom.rx_mask);
1911 	tx_streams = hweight8(ar->eeprom.tx_mask);
1912 
1913 	if (rx_streams != tx_streams) {
1914 		tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1915 
1916 		WARN_ON(!(tx_streams >= 1 && tx_streams <=
1917 			IEEE80211_HT_MCS_TX_MAX_STREAMS));
1918 
1919 		tx_params = (tx_streams - 1) <<
1920 			    IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1921 
1922 		carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1923 		carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1924 	}
1925 
1926 	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1927 		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
1928 			&carl9170_band_2GHz;
1929 		chans += carl9170_band_2GHz.n_channels;
1930 		bands++;
1931 	}
1932 	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1933 		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
1934 			&carl9170_band_5GHz;
1935 		chans += carl9170_band_5GHz.n_channels;
1936 		bands++;
1937 	}
1938 
1939 	if (!bands)
1940 		return -EINVAL;
1941 
1942 	ar->survey = kcalloc(chans, sizeof(struct survey_info), GFP_KERNEL);
1943 	if (!ar->survey)
1944 		return -ENOMEM;
1945 	ar->num_channels = chans;
1946 
1947 	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1948 
1949 	/* second part of wiphy init */
1950 	SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1951 
1952 	return 0;
1953 }
1954 
1955 static void carl9170_reg_notifier(struct wiphy *wiphy,
1956 				  struct regulatory_request *request)
1957 {
1958 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1959 	struct ar9170 *ar = hw->priv;
1960 
1961 	ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1962 }
1963 
/*
 * Second-stage initialization: read and parse the EEPROM, set up the
 * regulatory state, and register with mac80211 plus all optional
 * sub-modules (debugfs, LEDs, WPS button, hwrng).
 *
 * NOTE(review): the early error returns leave ar->mem_bitmap (and
 * possibly ar->survey) allocated; presumably the caller releases them
 * through carl9170_free() — verify against the bus glue.
 */
int carl9170_register(struct ar9170 *ar)
{
	struct ath_regulatory *regulatory = &ar->common.regulatory;
	int err = 0, i;

	/* registering twice would leak the first bitmap */
	if (WARN_ON(ar->mem_bitmap))
		return -EINVAL;

	ar->mem_bitmap = kcalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG),
				 sizeof(unsigned long),
				 GFP_KERNEL);

	if (!ar->mem_bitmap)
		return -ENOMEM;

	/* try to read EEPROM, init MAC addr */
	err = carl9170_read_eeprom(ar);
	if (err)
		return err;

	err = carl9170_parse_eeprom(ar);
	if (err)
		return err;

	err = ath_regd_init(regulatory, ar->hw->wiphy,
			    carl9170_reg_notifier);
	if (err)
		return err;

	/* the user explicitly disabled HT via the module parameter */
	if (modparam_noht) {
		carl9170_band_2GHz.ht_cap.ht_supported = false;
		carl9170_band_5GHz.ht_cap.ht_supported = false;
	}

	for (i = 0; i < ar->fw.vif_num; i++) {
		ar->vif_priv[i].id = i;
		ar->vif_priv[i].vif = NULL;
	}

	err = ieee80211_register_hw(ar->hw);
	if (err)
		return err;

	/* mac80211 interface is now registered */
	ar->registered = true;

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_register(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

	err = carl9170_led_init(ar);
	if (err)
		goto err_unreg;

#ifdef CONFIG_CARL9170_LEDS
	err = carl9170_led_register(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_WPC
	err = carl9170_register_wps_button(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
	err = carl9170_register_hwrng(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_HWRNG */

	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
		 wiphy_name(ar->hw->wiphy));

	return 0;

err_unreg:
	/* from here on carl9170_unregister handles the partial state */
	carl9170_unregister(ar);
	return err;
}
2048 
/*
 * Undo carl9170_register(): tear down the optional sub-modules, stop
 * all workers and detach from mac80211.  Safe to call repeatedly; the
 * 'registered' flag makes additional calls a no-op.
 */
void carl9170_unregister(struct ar9170 *ar)
{
	if (!ar->registered)
		return;

	ar->registered = false;

#ifdef CONFIG_CARL9170_LEDS
	carl9170_led_unregister(ar);
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_unregister(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

#ifdef CONFIG_CARL9170_WPC
	if (ar->wps.pbc) {
		input_unregister_device(ar->wps.pbc);
		ar->wps.pbc = NULL;
	}
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
	carl9170_unregister_hwrng(ar);
#endif /* CONFIG_CARL9170_HWRNG */

	/* stop the deferred work before mac80211 goes away */
	carl9170_cancel_worker(ar);
	cancel_work_sync(&ar->restart_work);

	ieee80211_unregister_hw(ar->hw);
}
2080 
/*
 * Final cleanup counterpart to carl9170_alloc().  Must only be called
 * after carl9170_unregister() and once the device is torn down — the
 * WARN_ONs guard against both misuses.
 */
void carl9170_free(struct ar9170 *ar)
{
	WARN_ON(ar->registered);
	WARN_ON(IS_INITIALIZED(ar));

	kfree_skb(ar->rx_failover);
	ar->rx_failover = NULL;

	kfree(ar->mem_bitmap);
	ar->mem_bitmap = NULL;

	kfree(ar->survey);
	ar->survey = NULL;

	mutex_destroy(&ar->mutex);

	/* frees ar itself; nothing may touch it afterwards */
	ieee80211_free_hw(ar->hw);
}
2099