1 /*
2  * Atheros CARL9170 driver
3  *
4  * mac80211 interaction code
5  *
6  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7  * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; see the file COPYING.  If not, see
21  * http://www.gnu.org/licenses/.
22  *
23  * This file incorporates work covered by the following copyright and
24  * permission notice:
25  *    Copyright (c) 2007-2008 Atheros Communications, Inc.
26  *
27  *    Permission to use, copy, modify, and/or distribute this software for any
28  *    purpose with or without fee is hereby granted, provided that the above
29  *    copyright notice and this permission notice appear in all copies.
30  *
31  *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32  *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33  *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34  *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35  *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36  *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37  *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38  */
39 
40 #include <linux/slab.h>
41 #include <linux/module.h>
42 #include <linux/etherdevice.h>
43 #include <linux/random.h>
44 #include <net/mac80211.h>
45 #include <net/cfg80211.h>
46 #include "hw.h"
47 #include "carl9170.h"
48 #include "cmd.h"
49 
50 static bool modparam_nohwcrypt;
51 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444);
52 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
53 
54 int modparam_noht;
55 module_param_named(noht, modparam_noht, int, 0444);
56 MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
57 
58 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
59 	.bitrate	= (_bitrate),			\
60 	.flags		= (_flags),			\
61 	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
62 }
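/*
 * A note on the encoding above: hw_value packs the hardware PHY rate code
 * into the low nibble and the tx power index into bits [7:4]. For example,
 * RATE(360, 0xd, 1, 0) below yields hw_value = 0xd | (1 << 4) = 0x1d.
 */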
63 
64 struct ieee80211_rate __carl9170_ratetable[] = {
65 	RATE(10, 0, 0, 0),
66 	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
67 	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
68 	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
69 	RATE(60, 0xb, 0, 0),
70 	RATE(90, 0xf, 0, 0),
71 	RATE(120, 0xa, 0, 0),
72 	RATE(180, 0xe, 0, 0),
73 	RATE(240, 0x9, 0, 0),
74 	RATE(360, 0xd, 1, 0),
75 	RATE(480, 0x8, 2, 0),
76 	RATE(540, 0xc, 3, 0),
77 };
78 #undef RATE
79 
80 #define carl9170_g_ratetable	(__carl9170_ratetable + 0)
81 #define carl9170_g_ratetable_size	12
82 #define carl9170_a_ratetable	(__carl9170_ratetable + 4)
83 #define carl9170_a_ratetable_size	8
84 
85 /*
86  * NB: The hw_value is used as an index into the carl9170_phy_freq_params
87  *     array in phy.c so that we don't have to do frequency lookups!
88  */
89 #define CHAN(_freq, _idx) {		\
90 	.center_freq	= (_freq),	\
91 	.hw_value	= (_idx),	\
92 	.max_power	= 18, /* XXX */	\
93 }
94 
95 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
96 	CHAN(2412,  0),
97 	CHAN(2417,  1),
98 	CHAN(2422,  2),
99 	CHAN(2427,  3),
100 	CHAN(2432,  4),
101 	CHAN(2437,  5),
102 	CHAN(2442,  6),
103 	CHAN(2447,  7),
104 	CHAN(2452,  8),
105 	CHAN(2457,  9),
106 	CHAN(2462, 10),
107 	CHAN(2467, 11),
108 	CHAN(2472, 12),
109 	CHAN(2484, 13),
110 };
111 
112 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
113 	CHAN(4920, 14),
114 	CHAN(4940, 15),
115 	CHAN(4960, 16),
116 	CHAN(4980, 17),
117 	CHAN(5040, 18),
118 	CHAN(5060, 19),
119 	CHAN(5080, 20),
120 	CHAN(5180, 21),
121 	CHAN(5200, 22),
122 	CHAN(5220, 23),
123 	CHAN(5240, 24),
124 	CHAN(5260, 25),
125 	CHAN(5280, 26),
126 	CHAN(5300, 27),
127 	CHAN(5320, 28),
128 	CHAN(5500, 29),
129 	CHAN(5520, 30),
130 	CHAN(5540, 31),
131 	CHAN(5560, 32),
132 	CHAN(5580, 33),
133 	CHAN(5600, 34),
134 	CHAN(5620, 35),
135 	CHAN(5640, 36),
136 	CHAN(5660, 37),
137 	CHAN(5680, 38),
138 	CHAN(5700, 39),
139 	CHAN(5745, 40),
140 	CHAN(5765, 41),
141 	CHAN(5785, 42),
142 	CHAN(5805, 43),
143 	CHAN(5825, 44),
144 	CHAN(5170, 45),
145 	CHAN(5190, 46),
146 	CHAN(5210, 47),
147 	CHAN(5230, 48),
148 };
149 #undef CHAN
150 
151 #define CARL9170_HT_CAP							\
152 {									\
153 	.ht_supported	= true,						\
154 	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
155 			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
156 			  IEEE80211_HT_CAP_SGI_40 |			\
157 			  IEEE80211_HT_CAP_DSSSCCK40 |			\
158 			  IEEE80211_HT_CAP_SM_PS,			\
159 	.ampdu_factor	= IEEE80211_HT_MAX_AMPDU_64K,			\
160 	.ampdu_density	= IEEE80211_HT_MPDU_DENSITY_8,			\
161 	.mcs		= {						\
162 		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
163 		.rx_highest = cpu_to_le16(300),				\
164 		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
165 	},								\
166 }
167 
168 static struct ieee80211_supported_band carl9170_band_2GHz = {
169 	.channels	= carl9170_2ghz_chantable,
170 	.n_channels	= ARRAY_SIZE(carl9170_2ghz_chantable),
171 	.bitrates	= carl9170_g_ratetable,
172 	.n_bitrates	= carl9170_g_ratetable_size,
173 	.ht_cap		= CARL9170_HT_CAP,
174 };
175 
176 static struct ieee80211_supported_band carl9170_band_5GHz = {
177 	.channels	= carl9170_5ghz_chantable,
178 	.n_channels	= ARRAY_SIZE(carl9170_5ghz_chantable),
179 	.bitrates	= carl9170_a_ratetable,
180 	.n_bitrates	= carl9170_a_ratetable_size,
181 	.ht_cap		= CARL9170_HT_CAP,
182 };
183 
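/*
 * Garbage-collect all aggregation sessions that have been marked as
 * SHUTDOWN: unhook them from the active tx_ampdu_list under the list lock,
 * wait for an RCU grace period so that no concurrent reader still sees
 * them, then drop any frames that were still queued and free the
 * per-TID state.
 */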
184 static void carl9170_ampdu_gc(struct ar9170 *ar)
185 {
186 	struct carl9170_sta_tid *tid_info;
187 	LIST_HEAD(tid_gc);
188 
189 	rcu_read_lock();
190 	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
191 		spin_lock_bh(&ar->tx_ampdu_list_lock);
192 		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
193 			tid_info->state = CARL9170_TID_STATE_KILLED;
194 			list_del_rcu(&tid_info->list);
195 			ar->tx_ampdu_list_len--;
196 			list_add_tail(&tid_info->tmp_list, &tid_gc);
197 		}
198 		spin_unlock_bh(&ar->tx_ampdu_list_lock);
199 
200 	}
201 	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
202 	rcu_read_unlock();
203 
204 	synchronize_rcu();
205 
206 	while (!list_empty(&tid_gc)) {
207 		struct sk_buff *skb;
208 		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
209 					    tmp_list);
210 
211 		while ((skb = __skb_dequeue(&tid_info->queue)))
212 			carl9170_tx_status(ar, skb, false);
213 
214 		list_del_init(&tid_info->tmp_list);
215 		kfree(tid_info);
216 	}
217 }
218 
219 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
220 {
221 	if (drop_queued) {
222 		int i;
223 
224 		/*
225 		 * We can only drop frames which have not been uploaded
226 		 * to the device yet.
227 		 */
228 
229 		for (i = 0; i < ar->hw->queues; i++) {
230 			struct sk_buff *skb;
231 
232 			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
233 				struct ieee80211_tx_info *info;
234 
235 				info = IEEE80211_SKB_CB(skb);
236 				if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 					atomic_dec(&ar->tx_ampdu_upload);
238 
239 				carl9170_tx_status(ar, skb, false);
240 			}
241 		}
242 	}
243 
	/* Wait for all other outstanding frames to time out. */
245 	if (atomic_read(&ar->tx_total_queued))
246 		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
247 }
248 
249 static void carl9170_flush_ba(struct ar9170 *ar)
250 {
251 	struct sk_buff_head free;
252 	struct carl9170_sta_tid *tid_info;
253 	struct sk_buff *skb;
254 
255 	__skb_queue_head_init(&free);
256 
257 	rcu_read_lock();
258 	spin_lock_bh(&ar->tx_ampdu_list_lock);
259 	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
260 		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
261 			tid_info->state = CARL9170_TID_STATE_SUSPEND;
262 
263 			spin_lock(&tid_info->lock);
264 			while ((skb = __skb_dequeue(&tid_info->queue)))
265 				__skb_queue_tail(&free, skb);
266 			spin_unlock(&tid_info->lock);
267 		}
268 	}
269 	spin_unlock_bh(&ar->tx_ampdu_list_lock);
270 	rcu_read_unlock();
271 
272 	while ((skb = __skb_dequeue(&free)))
273 		carl9170_tx_status(ar, skb, false);
274 }
275 
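/*
 * Bring all transmit bookkeeping back to a clean slate: reclaim dead
 * aggregation sessions, drop everything that is still queued or awaiting
 * status, reset the per-queue statistics and the memory bitmap and throw
 * away any cached beacons. Used by the start, stop and restart paths.
 */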
276 static void carl9170_zap_queues(struct ar9170 *ar)
277 {
278 	struct carl9170_vif_info *cvif;
279 	unsigned int i;
280 
281 	carl9170_ampdu_gc(ar);
282 
283 	carl9170_flush_ba(ar);
284 	carl9170_flush(ar, true);
285 
286 	for (i = 0; i < ar->hw->queues; i++) {
287 		spin_lock_bh(&ar->tx_status[i].lock);
288 		while (!skb_queue_empty(&ar->tx_status[i])) {
289 			struct sk_buff *skb;
290 
291 			skb = skb_peek(&ar->tx_status[i]);
292 			carl9170_tx_get_skb(skb);
293 			spin_unlock_bh(&ar->tx_status[i].lock);
294 			carl9170_tx_drop(ar, skb);
295 			spin_lock_bh(&ar->tx_status[i].lock);
296 			carl9170_tx_put_skb(skb);
297 		}
298 		spin_unlock_bh(&ar->tx_status[i].lock);
299 	}
300 
301 	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
302 	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
303 	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
304 
	/* reinitialize queue statistics */
306 	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
307 	for (i = 0; i < ar->hw->queues; i++)
308 		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
309 
310 	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
311 		ar->mem_bitmap[i] = 0;
312 
313 	rcu_read_lock();
314 	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
315 		spin_lock_bh(&ar->beacon_lock);
316 		dev_kfree_skb_any(cvif->beacon);
317 		cvif->beacon = NULL;
318 		spin_unlock_bh(&ar->beacon_lock);
319 	}
320 	rcu_read_unlock();
321 
322 	atomic_set(&ar->tx_ampdu_upload, 0);
323 	atomic_set(&ar->tx_ampdu_scheduler, 0);
324 	atomic_set(&ar->tx_total_pending, 0);
325 	atomic_set(&ar->tx_total_queued, 0);
326 	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
327 }
328 
329 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
330 do {									\
331 	queue.aifs = ai_fs;						\
332 	queue.cw_min = cwmin;						\
333 	queue.cw_max = cwmax;						\
334 	queue.txop = _txop;						\
335 } while (0)
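/*
 * The arguments are the usual EDCA parameters: AIFSN, CWmin, CWmax and
 * TXOP. mac80211 expresses TXOP in units of 32 us, so e.g. the AC_VO
 * default of (2, 3, 7, 47) used below amounts to a 1.504 ms TXOP.
 */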
336 
337 static int carl9170_op_start(struct ieee80211_hw *hw)
338 {
339 	struct ar9170 *ar = hw->priv;
340 	int err, i;
341 
342 	mutex_lock(&ar->mutex);
343 
344 	carl9170_zap_queues(ar);
345 
346 	/* reset QoS defaults */
347 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3,     7, 47);
348 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7,    15, 94);
349 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023,  0);
350 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023,  0);
351 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
352 
353 	ar->current_factor = ar->current_density = -1;
354 	/* "The first key is unique." */
355 	ar->usedkeys = 1;
356 	ar->filter_state = 0;
357 	ar->ps.last_action = jiffies;
358 	ar->ps.last_slept = jiffies;
359 	ar->erp_mode = CARL9170_ERP_AUTO;
360 
361 	/* Set "disable hw crypto offload" whenever the module parameter
362 	 * nohwcrypt is true or if the firmware does not support it.
363 	 */
364 	ar->disable_offload = modparam_nohwcrypt |
365 		ar->fw.disable_offload_fw;
366 	ar->rx_software_decryption = ar->disable_offload;
367 
368 	for (i = 0; i < ar->hw->queues; i++) {
369 		ar->queue_stop_timeout[i] = jiffies;
370 		ar->max_queue_stop_timeout[i] = 0;
371 	}
372 
373 	atomic_set(&ar->mem_allocs, 0);
374 
375 	err = carl9170_usb_open(ar);
376 	if (err)
377 		goto out;
378 
379 	err = carl9170_init_mac(ar);
380 	if (err)
381 		goto out;
382 
383 	err = carl9170_set_qos(ar);
384 	if (err)
385 		goto out;
386 
387 	if (ar->fw.rx_filter) {
388 		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
389 			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
390 		if (err)
391 			goto out;
392 	}
393 
394 	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
395 				 AR9170_DMA_TRIGGER_RXQ);
396 	if (err)
397 		goto out;
398 
399 	/* Clear key-cache */
400 	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
401 		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
402 					  0, NULL, 0);
403 		if (err)
404 			goto out;
405 
406 		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
407 					  1, NULL, 0);
408 		if (err)
409 			goto out;
410 
411 		if (i < AR9170_CAM_MAX_USER) {
412 			err = carl9170_disable_key(ar, i);
413 			if (err)
414 				goto out;
415 		}
416 	}
417 
418 	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
419 
420 	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
421 		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
422 
423 	ieee80211_wake_queues(ar->hw);
424 	err = 0;
425 
426 out:
427 	mutex_unlock(&ar->mutex);
428 	return err;
429 }
430 
431 static void carl9170_cancel_worker(struct ar9170 *ar)
432 {
433 	cancel_delayed_work_sync(&ar->stat_work);
434 	cancel_delayed_work_sync(&ar->tx_janitor);
435 #ifdef CONFIG_CARL9170_LEDS
436 	cancel_delayed_work_sync(&ar->led_work);
437 #endif /* CONFIG_CARL9170_LEDS */
438 	cancel_work_sync(&ar->ps_work);
439 	cancel_work_sync(&ar->ping_work);
440 	cancel_work_sync(&ar->ampdu_work);
441 }
442 
443 static void carl9170_op_stop(struct ieee80211_hw *hw)
444 {
445 	struct ar9170 *ar = hw->priv;
446 
447 	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
448 
449 	ieee80211_stop_queues(ar->hw);
450 
451 	mutex_lock(&ar->mutex);
452 	if (IS_ACCEPTING_CMD(ar)) {
453 		RCU_INIT_POINTER(ar->beacon_iter, NULL);
454 
455 		carl9170_led_set_state(ar, 0);
456 
457 		/* stop DMA */
458 		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
459 		carl9170_usb_stop(ar);
460 	}
461 
462 	carl9170_zap_queues(ar);
463 	mutex_unlock(&ar->mutex);
464 
465 	carl9170_cancel_worker(ar);
466 }
467 
468 static void carl9170_restart_work(struct work_struct *work)
469 {
470 	struct ar9170 *ar = container_of(work, struct ar9170,
471 					 restart_work);
472 	int err = -EIO;
473 
474 	ar->usedkeys = 0;
475 	ar->filter_state = 0;
476 	carl9170_cancel_worker(ar);
477 
478 	mutex_lock(&ar->mutex);
479 	if (!ar->force_usb_reset) {
480 		err = carl9170_usb_restart(ar);
481 		if (net_ratelimit()) {
482 			if (err)
483 				dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
484 			else
485 				dev_info(&ar->udev->dev, "device restarted successfully.\n");
486 		}
487 	}
488 	carl9170_zap_queues(ar);
489 	mutex_unlock(&ar->mutex);
490 
491 	if (!err && !ar->force_usb_reset) {
492 		ar->restart_counter++;
493 		atomic_set(&ar->pending_restarts, 0);
494 
495 		ieee80211_restart_hw(ar->hw);
496 	} else {
497 		/*
498 		 * The reset was unsuccessful and the device seems to
499 		 * be dead. But there's still one option: a low-level
500 		 * usb subsystem reset...
501 		 */
502 
503 		carl9170_usb_reset(ar);
504 	}
505 }
506 
507 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
508 {
509 	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
510 
511 	/*
512 	 * Sometimes, an error can trigger several different reset events.
513 	 * By ignoring these *surplus* reset events, the device won't be
514 	 * killed again, right after it has recovered.
515 	 */
516 	if (atomic_inc_return(&ar->pending_restarts) > 1) {
517 		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
518 		return;
519 	}
520 
521 	ieee80211_stop_queues(ar->hw);
522 
523 	dev_err(&ar->udev->dev, "restart device (%d)\n", r);
524 
525 	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
526 	    !WARN_ON(r >= __CARL9170_RR_LAST))
527 		ar->last_reason = r;
528 
529 	if (!ar->registered)
530 		return;
531 
532 	if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
533 		ar->force_usb_reset = true;
534 
535 	ieee80211_queue_work(ar->hw, &ar->restart_work);
536 
537 	/*
	 * At this point, the device instance might have vanished or been
	 * disabled. So don't put any code here which accesses the ar9170
	 * struct without proper protection.
541 	 */
542 }
543 
544 static void carl9170_ping_work(struct work_struct *work)
545 {
546 	struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
547 	int err;
548 
549 	if (!IS_STARTED(ar))
550 		return;
551 
552 	mutex_lock(&ar->mutex);
553 	err = carl9170_echo_test(ar, 0xdeadbeef);
554 	if (err)
555 		carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
556 	mutex_unlock(&ar->mutex);
557 }
558 
559 static int carl9170_init_interface(struct ar9170 *ar,
560 				   struct ieee80211_vif *vif)
561 {
562 	struct ath_common *common = &ar->common;
563 	int err;
564 
565 	if (!vif) {
566 		WARN_ON_ONCE(IS_STARTED(ar));
567 		return 0;
568 	}
569 
570 	memcpy(common->macaddr, vif->addr, ETH_ALEN);
571 
	/* We have to fall back to software crypto, whenever
	 * the user chooses to participate in an IBSS. HW
	 * offload for IBSS RSN is not supported by this driver.
	 *
	 * NOTE: If the previous main interface has already
	 * disabled hw crypto offload, we have to keep this
	 * previous disable_offload setting as it was.
	 * Although ideally, we should notify mac80211 and tell
	 * it to forget about any HW crypto offload for now.
	 */
582 	ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
583 	    (vif->type != NL80211_IFTYPE_AP));
584 
	/* While the driver supports HW offload in a single
	 * P2P client configuration, it doesn't support HW
	 * offload in the favourite, concurrent P2P GO+CLIENT
	 * configuration. Hence, HW offload will always be
	 * disabled for P2P.
	 */
591 	ar->disable_offload |= vif->p2p;
592 
593 	ar->rx_software_decryption = ar->disable_offload;
594 
595 	err = carl9170_set_operating_mode(ar);
596 	return err;
597 }
598 
599 static int carl9170_op_add_interface(struct ieee80211_hw *hw,
600 				     struct ieee80211_vif *vif)
601 {
602 	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
603 	struct ieee80211_vif *main_vif, *old_main = NULL;
604 	struct ar9170 *ar = hw->priv;
605 	int vif_id = -1, err = 0;
606 
607 	mutex_lock(&ar->mutex);
608 	rcu_read_lock();
609 	if (vif_priv->active) {
		/*
		 * Skip the interface structure initialization if the vif
		 * survived the _restart call.
		 */
614 		vif_id = vif_priv->id;
615 		vif_priv->enable_beacon = false;
616 
617 		spin_lock_bh(&ar->beacon_lock);
618 		dev_kfree_skb_any(vif_priv->beacon);
619 		vif_priv->beacon = NULL;
620 		spin_unlock_bh(&ar->beacon_lock);
621 
622 		goto init;
623 	}
624 
	/* Because the AR9170 HW's MAC doesn't provide full support for
	 * multiple, independent interfaces [of different operation modes],
	 * we have to select ONE main interface [main mode of HW], but we
	 * can have multiple slaves [AKA: entries in the ACK-table].
	 *
	 * The first (from HEAD/TOP) interface in the ar->vif_list is
	 * always the main interface. All following interfaces in this list
	 * are considered to be slave interfaces.
	 */
634 	main_vif = carl9170_get_main_vif(ar);
635 
636 	if (main_vif) {
637 		switch (main_vif->type) {
638 		case NL80211_IFTYPE_STATION:
639 			if (vif->type == NL80211_IFTYPE_STATION)
640 				break;
641 
			/* P2P GO [master] use-case
			 * Because the P2P GO station is selected dynamically
			 * by all participating peers of a WIFI Direct network,
			 * the driver has to be able to change the main
			 * interface operating mode on the fly.
			 */
648 			if (main_vif->p2p && vif->p2p &&
649 			    vif->type == NL80211_IFTYPE_AP) {
650 				old_main = main_vif;
651 				break;
652 			}
653 
654 			err = -EBUSY;
655 			rcu_read_unlock();
656 
657 			goto unlock;
658 
659 		case NL80211_IFTYPE_MESH_POINT:
660 		case NL80211_IFTYPE_AP:
661 			if ((vif->type == NL80211_IFTYPE_STATION) ||
662 			    (vif->type == NL80211_IFTYPE_WDS) ||
663 			    (vif->type == NL80211_IFTYPE_AP) ||
664 			    (vif->type == NL80211_IFTYPE_MESH_POINT))
665 				break;
666 
667 			err = -EBUSY;
668 			rcu_read_unlock();
669 			goto unlock;
670 
671 		default:
672 			rcu_read_unlock();
673 			goto unlock;
674 		}
675 	}
676 
677 	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
678 
679 	if (vif_id < 0) {
680 		rcu_read_unlock();
681 
682 		err = -ENOSPC;
683 		goto unlock;
684 	}
685 
686 	BUG_ON(ar->vif_priv[vif_id].id != vif_id);
687 
688 	vif_priv->active = true;
689 	vif_priv->id = vif_id;
690 	vif_priv->enable_beacon = false;
691 	ar->vifs++;
692 	if (old_main) {
		/* We end up in here, if the main interface is being replaced.
		 * Put the new main interface at the HEAD of the list and the
		 * previous interface will automatically become second in line.
		 */
697 		list_add_rcu(&vif_priv->list, &ar->vif_list);
698 	} else {
		/* Add the new interface. If the list is empty, it will become
		 * the main interface; otherwise it will be a slave.
		 */
702 		list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
703 	}
704 	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
705 
706 init:
707 	main_vif = carl9170_get_main_vif(ar);
708 
709 	if (main_vif == vif) {
710 		rcu_assign_pointer(ar->beacon_iter, vif_priv);
711 		rcu_read_unlock();
712 
713 		if (old_main) {
714 			struct carl9170_vif_info *old_main_priv =
715 				(void *) old_main->drv_priv;
716 			/* downgrade old main intf to slave intf.
717 			 * NOTE: We are no longer under rcu_read_lock.
718 			 * But we are still holding ar->mutex, so the
719 			 * vif data [id, addr] is safe.
720 			 */
721 			err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
722 						       old_main->addr);
723 			if (err)
724 				goto unlock;
725 		}
726 
727 		err = carl9170_init_interface(ar, vif);
728 		if (err)
729 			goto unlock;
730 	} else {
731 		rcu_read_unlock();
732 		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
733 
734 		if (err)
735 			goto unlock;
736 	}
737 
738 	if (ar->fw.tx_seq_table) {
739 		err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
740 					 0);
741 		if (err)
742 			goto unlock;
743 	}
744 
745 unlock:
746 	if (err && (vif_id >= 0)) {
747 		vif_priv->active = false;
748 		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
749 		ar->vifs--;
750 		RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
751 		list_del_rcu(&vif_priv->list);
752 		mutex_unlock(&ar->mutex);
753 		synchronize_rcu();
754 	} else {
755 		if (ar->vifs > 1)
756 			ar->ps.off_override |= PS_OFF_VIF;
757 
758 		mutex_unlock(&ar->mutex);
759 	}
760 
761 	return err;
762 }
763 
764 static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
765 					 struct ieee80211_vif *vif)
766 {
767 	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
768 	struct ieee80211_vif *main_vif;
769 	struct ar9170 *ar = hw->priv;
770 	unsigned int id;
771 
772 	mutex_lock(&ar->mutex);
773 
774 	if (WARN_ON_ONCE(!vif_priv->active))
775 		goto unlock;
776 
777 	ar->vifs--;
778 
779 	rcu_read_lock();
780 	main_vif = carl9170_get_main_vif(ar);
781 
782 	id = vif_priv->id;
783 
784 	vif_priv->active = false;
785 	WARN_ON(vif_priv->enable_beacon);
786 	vif_priv->enable_beacon = false;
787 	list_del_rcu(&vif_priv->list);
788 	RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
789 
790 	if (vif == main_vif) {
791 		rcu_read_unlock();
792 
793 		if (ar->vifs) {
794 			WARN_ON(carl9170_init_interface(ar,
795 					carl9170_get_main_vif(ar)));
796 		} else {
797 			carl9170_set_operating_mode(ar);
798 		}
799 	} else {
800 		rcu_read_unlock();
801 
802 		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
803 	}
804 
805 	carl9170_update_beacon(ar, false);
806 	carl9170_flush_cab(ar, id);
807 
808 	spin_lock_bh(&ar->beacon_lock);
809 	dev_kfree_skb_any(vif_priv->beacon);
810 	vif_priv->beacon = NULL;
811 	spin_unlock_bh(&ar->beacon_lock);
812 
813 	bitmap_release_region(&ar->vif_bitmap, id, 0);
814 
815 	carl9170_set_beacon_timers(ar);
816 
817 	if (ar->vifs == 1)
818 		ar->ps.off_override &= ~PS_OFF_VIF;
819 
820 unlock:
821 	mutex_unlock(&ar->mutex);
822 
823 	synchronize_rcu();
824 }
825 
826 void carl9170_ps_check(struct ar9170 *ar)
827 {
828 	ieee80211_queue_work(ar->hw, &ar->ps_work);
829 }
830 
831 /* caller must hold ar->mutex */
832 static int carl9170_ps_update(struct ar9170 *ar)
833 {
834 	bool ps = false;
835 	int err = 0;
836 
837 	if (!ar->ps.off_override)
838 		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
839 
840 	if (ps != ar->ps.state) {
841 		err = carl9170_powersave(ar, ps);
842 		if (err)
843 			return err;
844 
845 		if (ar->ps.state && !ps) {
846 			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
847 				ar->ps.last_action);
848 		}
849 
850 		if (ps)
851 			ar->ps.last_slept = jiffies;
852 
853 		ar->ps.last_action = jiffies;
854 		ar->ps.state = ps;
855 	}
856 
857 	return 0;
858 }
859 
860 static void carl9170_ps_work(struct work_struct *work)
861 {
862 	struct ar9170 *ar = container_of(work, struct ar9170,
863 					 ps_work);
864 	mutex_lock(&ar->mutex);
865 	if (IS_STARTED(ar))
866 		WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
867 	mutex_unlock(&ar->mutex);
868 }
869 
870 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
871 {
872 	int err;
873 
874 	if (noise) {
875 		err = carl9170_get_noisefloor(ar);
876 		if (err)
877 			return err;
878 	}
879 
880 	if (ar->fw.hw_counters) {
881 		err = carl9170_collect_tally(ar);
882 		if (err)
883 			return err;
884 	}
885 
886 	if (flush)
887 		memset(&ar->tally, 0, sizeof(ar->tally));
888 
889 	return 0;
890 }
891 
892 static void carl9170_stat_work(struct work_struct *work)
893 {
894 	struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
895 	int err;
896 
897 	mutex_lock(&ar->mutex);
898 	err = carl9170_update_survey(ar, false, true);
899 	mutex_unlock(&ar->mutex);
900 
901 	if (err)
902 		return;
903 
904 	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
905 		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
906 }
907 
908 static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
909 {
910 	struct ar9170 *ar = hw->priv;
911 	int err = 0;
912 
913 	mutex_lock(&ar->mutex);
914 	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
915 		/* TODO */
916 		err = 0;
917 	}
918 
919 	if (changed & IEEE80211_CONF_CHANGE_PS) {
920 		err = carl9170_ps_update(ar);
921 		if (err)
922 			goto out;
923 	}
924 
925 	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
926 		/* TODO */
927 		err = 0;
928 	}
929 
930 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
931 		enum nl80211_channel_type channel_type =
932 			cfg80211_get_chandef_type(&hw->conf.chandef);
933 
934 		/* adjust slot time for 5 GHz */
935 		err = carl9170_set_slot_time(ar);
936 		if (err)
937 			goto out;
938 
939 		err = carl9170_update_survey(ar, true, false);
940 		if (err)
941 			goto out;
942 
943 		err = carl9170_set_channel(ar, hw->conf.chandef.chan,
944 					   channel_type);
945 		if (err)
946 			goto out;
947 
948 		err = carl9170_update_survey(ar, false, true);
949 		if (err)
950 			goto out;
951 
952 		err = carl9170_set_dyn_sifs_ack(ar);
953 		if (err)
954 			goto out;
955 
956 		err = carl9170_set_rts_cts_rate(ar);
957 		if (err)
958 			goto out;
959 	}
960 
961 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
962 		err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
963 		if (err)
964 			goto out;
965 	}
966 
967 out:
968 	mutex_unlock(&ar->mutex);
969 	return err;
970 }
971 
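/*
 * The MAC's multicast filter is a 64-bit hash bitmap: every address is
 * mapped to bit (addr[5] >> 2). The broadcast address ff:ff:ff:ff:ff:ff
 * therefore lands on bit 0xff >> 2 = 63, which is why that bit is always
 * set before the requested multicast addresses are folded in.
 */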
972 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
973 					 struct netdev_hw_addr_list *mc_list)
974 {
975 	struct netdev_hw_addr *ha;
976 	u64 mchash;
977 
978 	/* always get broadcast frames */
979 	mchash = 1ULL << (0xff >> 2);
980 
981 	netdev_hw_addr_list_for_each(ha, mc_list)
982 		mchash |= 1ULL << (ha->addr[5] >> 2);
983 
984 	return mchash;
985 }
986 
987 static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
988 					 unsigned int changed_flags,
989 					 unsigned int *new_flags,
990 					 u64 multicast)
991 {
992 	struct ar9170 *ar = hw->priv;
993 
994 	/* mask supported flags */
995 	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
996 
997 	if (!IS_ACCEPTING_CMD(ar))
998 		return;
999 
1000 	mutex_lock(&ar->mutex);
1001 
1002 	ar->filter_state = *new_flags;
	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags later.
	 */
1007 
1008 	if (*new_flags & FIF_ALLMULTI)
1009 		multicast = ~0ULL;
1010 
1011 	if (multicast != ar->cur_mc_hash)
1012 		WARN_ON(carl9170_update_multicast(ar, multicast));
1013 
1014 	if (changed_flags & FIF_OTHER_BSS) {
1015 		ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);
1016 
1017 		WARN_ON(carl9170_set_operating_mode(ar));
1018 	}
1019 
1020 	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
1021 		u32 rx_filter = 0;
1022 
1023 		if (!ar->fw.ba_filter)
1024 			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1025 
1026 		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
1027 			rx_filter |= CARL9170_RX_FILTER_BAD;
1028 
1029 		if (!(*new_flags & FIF_CONTROL))
1030 			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1031 
1032 		if (!(*new_flags & FIF_PSPOLL))
1033 			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
1034 
1035 		if (!(*new_flags & FIF_OTHER_BSS)) {
1036 			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
1037 			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
1038 		}
1039 
1040 		WARN_ON(carl9170_rx_filter(ar, rx_filter));
1041 	}
1042 
1043 	mutex_unlock(&ar->mutex);
1044 }
1045 
1046 
1047 static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
1048 					 struct ieee80211_vif *vif,
1049 					 struct ieee80211_bss_conf *bss_conf,
1050 					 u32 changed)
1051 {
1052 	struct ar9170 *ar = hw->priv;
1053 	struct ath_common *common = &ar->common;
1054 	int err = 0;
1055 	struct carl9170_vif_info *vif_priv;
1056 	struct ieee80211_vif *main_vif;
1057 
1058 	mutex_lock(&ar->mutex);
1059 	vif_priv = (void *) vif->drv_priv;
1060 	main_vif = carl9170_get_main_vif(ar);
1061 	if (WARN_ON(!main_vif))
1062 		goto out;
1063 
1064 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
1065 		struct carl9170_vif_info *iter;
1066 		int i = 0;
1067 
1068 		vif_priv->enable_beacon = bss_conf->enable_beacon;
1069 		rcu_read_lock();
1070 		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1071 			if (iter->active && iter->enable_beacon)
1072 				i++;
1073 
1074 		}
1075 		rcu_read_unlock();
1076 
1077 		ar->beacon_enabled = i;
1078 	}
1079 
1080 	if (changed & BSS_CHANGED_BEACON) {
1081 		err = carl9170_update_beacon(ar, false);
1082 		if (err)
1083 			goto out;
1084 	}
1085 
1086 	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1087 		       BSS_CHANGED_BEACON_INT)) {
1088 
1089 		if (main_vif != vif) {
1090 			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1091 			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1092 		}
1093 
		/*
		 * Buffered multicast/broadcast (CAB) frames may legitimately
		 * wait for up to beacon_int * dtim_period before they are
		 * transmitted. Therefore a hard limit for the broadcast
		 * traffic should prevent false alarms from the stuck-queue
		 * watchdog.
		 */
1098 		if (vif->type != NL80211_IFTYPE_STATION &&
1099 		    (bss_conf->beacon_int * bss_conf->dtim_period >=
1100 		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1101 			err = -EINVAL;
1102 			goto out;
1103 		}
1104 
1105 		err = carl9170_set_beacon_timers(ar);
1106 		if (err)
1107 			goto out;
1108 	}
1109 
1110 	if (changed & BSS_CHANGED_HT) {
1111 		/* TODO */
1112 		err = 0;
1113 		if (err)
1114 			goto out;
1115 	}
1116 
1117 	if (main_vif != vif)
1118 		goto out;
1119 
1120 	/*
1121 	 * The following settings can only be changed by the
1122 	 * master interface.
1123 	 */
1124 
1125 	if (changed & BSS_CHANGED_BSSID) {
1126 		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1127 		err = carl9170_set_operating_mode(ar);
1128 		if (err)
1129 			goto out;
1130 	}
1131 
1132 	if (changed & BSS_CHANGED_ASSOC) {
1133 		ar->common.curaid = bss_conf->aid;
1134 		err = carl9170_set_beacon_timers(ar);
1135 		if (err)
1136 			goto out;
1137 	}
1138 
1139 	if (changed & BSS_CHANGED_ERP_SLOT) {
1140 		err = carl9170_set_slot_time(ar);
1141 		if (err)
1142 			goto out;
1143 	}
1144 
1145 	if (changed & BSS_CHANGED_BASIC_RATES) {
1146 		err = carl9170_set_mac_rates(ar);
1147 		if (err)
1148 			goto out;
1149 	}
1150 
1151 out:
1152 	WARN_ON_ONCE(err && IS_STARTED(ar));
1153 	mutex_unlock(&ar->mutex);
1154 }
1155 
1156 static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1157 			       struct ieee80211_vif *vif)
1158 {
1159 	struct ar9170 *ar = hw->priv;
1160 	struct carl9170_tsf_rsp tsf;
1161 	int err;
1162 
1163 	mutex_lock(&ar->mutex);
1164 	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1165 				0, NULL, sizeof(tsf), &tsf);
1166 	mutex_unlock(&ar->mutex);
1167 	if (WARN_ON(err))
1168 		return 0;
1169 
1170 	return le64_to_cpu(tsf.tsf_64);
1171 }
1172 
1173 static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1174 			       struct ieee80211_vif *vif,
1175 			       struct ieee80211_sta *sta,
1176 			       struct ieee80211_key_conf *key)
1177 {
1178 	struct ar9170 *ar = hw->priv;
1179 	int err = 0, i;
1180 	u8 ktype;
1181 
1182 	if (ar->disable_offload || !vif)
1183 		return -EOPNOTSUPP;
1184 
	/* Fall back to software encryption whenever the driver is connected
	 * to more than one network.
	 *
	 * This is very unfortunate, because some machines cannot handle
	 * the high throughput speed in 802.11n networks.
	 */
1191 
1192 	if (!is_main_vif(ar, vif)) {
1193 		mutex_lock(&ar->mutex);
1194 		goto err_softw;
1195 	}
1196 
	/*
	 * While the hardware supports a *catch-all* key for offloading
	 * group-key en-/de-cryption, the way the hardware decides which
	 * keyId maps to which key remains a mystery...
	 */
1202 	if ((vif->type != NL80211_IFTYPE_STATION &&
1203 	     vif->type != NL80211_IFTYPE_ADHOC) &&
1204 	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1205 		return -EOPNOTSUPP;
1206 
1207 	switch (key->cipher) {
1208 	case WLAN_CIPHER_SUITE_WEP40:
1209 		ktype = AR9170_ENC_ALG_WEP64;
1210 		break;
1211 	case WLAN_CIPHER_SUITE_WEP104:
1212 		ktype = AR9170_ENC_ALG_WEP128;
1213 		break;
1214 	case WLAN_CIPHER_SUITE_TKIP:
1215 		ktype = AR9170_ENC_ALG_TKIP;
1216 		break;
1217 	case WLAN_CIPHER_SUITE_CCMP:
1218 		ktype = AR9170_ENC_ALG_AESCCMP;
1219 		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1220 		break;
1221 	default:
1222 		return -EOPNOTSUPP;
1223 	}
1224 
1225 	mutex_lock(&ar->mutex);
1226 	if (cmd == SET_KEY) {
1227 		if (!IS_STARTED(ar)) {
1228 			err = -EOPNOTSUPP;
1229 			goto out;
1230 		}
1231 
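		/* Key-cache layout (as used by this driver): slots 0-63 hold
		 * pairwise keys and are tracked in ar->usedkeys, while group
		 * keys go into the slots starting at index 64, addressed
		 * directly by their keyidx.
		 */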
1232 		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1233 			sta = NULL;
1234 
1235 			i = 64 + key->keyidx;
1236 		} else {
1237 			for (i = 0; i < 64; i++)
1238 				if (!(ar->usedkeys & BIT(i)))
1239 					break;
1240 			if (i == 64)
1241 				goto err_softw;
1242 		}
1243 
1244 		key->hw_key_idx = i;
1245 
1246 		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1247 					  ktype, 0, key->key,
1248 					  min_t(u8, 16, key->keylen));
1249 		if (err)
1250 			goto out;
1251 
1252 		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1253 			err = carl9170_upload_key(ar, i, sta ? sta->addr :
1254 						  NULL, ktype, 1,
1255 						  key->key + 16, 16);
1256 			if (err)
1257 				goto out;
1258 
			/*
			 * The hardware is not capable of generating the
			 * MMIC for fragmented frames!
			 */
1263 			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1264 		}
1265 
1266 		if (i < 64)
1267 			ar->usedkeys |= BIT(i);
1268 
1269 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1270 	} else {
1271 		if (!IS_STARTED(ar)) {
1272 			/* The device is gone... together with the key ;-) */
1273 			err = 0;
1274 			goto out;
1275 		}
1276 
1277 		if (key->hw_key_idx < 64) {
1278 			ar->usedkeys &= ~BIT(key->hw_key_idx);
1279 		} else {
1280 			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1281 						  AR9170_ENC_ALG_NONE, 0,
1282 						  NULL, 0);
1283 			if (err)
1284 				goto out;
1285 
1286 			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1287 				err = carl9170_upload_key(ar, key->hw_key_idx,
1288 							  NULL,
1289 							  AR9170_ENC_ALG_NONE,
1290 							  1, NULL, 0);
1291 				if (err)
1292 					goto out;
1293 			}
1294 
1295 		}
1296 
1297 		err = carl9170_disable_key(ar, key->hw_key_idx);
1298 		if (err)
1299 			goto out;
1300 	}
1301 
1302 out:
1303 	mutex_unlock(&ar->mutex);
1304 	return err;
1305 
1306 err_softw:
1307 	if (!ar->rx_software_decryption) {
1308 		ar->rx_software_decryption = true;
1309 		carl9170_set_operating_mode(ar);
1310 	}
1311 	mutex_unlock(&ar->mutex);
1312 	return -ENOSPC;
1313 }
1314 
1315 static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1316 			       struct ieee80211_vif *vif,
1317 			       struct ieee80211_sta *sta)
1318 {
1319 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1320 	unsigned int i;
1321 
1322 	atomic_set(&sta_info->pending_frames, 0);
1323 
1324 	if (sta->ht_cap.ht_supported) {
1325 		if (sta->ht_cap.ampdu_density > 6) {
			/*
			 * The HW does not support a 16 us A-MPDU density.
			 * Don't enable HT-Xmit for this station.
			 */
1330 
1331 			return 0;
1332 		}
1333 
1334 		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
1335 			RCU_INIT_POINTER(sta_info->agg[i], NULL);
1336 
1337 		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1338 		sta_info->ht_sta = true;
1339 	}
1340 
1341 	return 0;
1342 }
1343 
1344 static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1345 				struct ieee80211_vif *vif,
1346 				struct ieee80211_sta *sta)
1347 {
1348 	struct ar9170 *ar = hw->priv;
1349 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1350 	unsigned int i;
1351 	bool cleanup = false;
1352 
1353 	if (sta->ht_cap.ht_supported) {
1354 
1355 		sta_info->ht_sta = false;
1356 
1357 		rcu_read_lock();
1358 		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
1359 			struct carl9170_sta_tid *tid_info;
1360 
1361 			tid_info = rcu_dereference(sta_info->agg[i]);
1362 			RCU_INIT_POINTER(sta_info->agg[i], NULL);
1363 
1364 			if (!tid_info)
1365 				continue;
1366 
1367 			spin_lock_bh(&ar->tx_ampdu_list_lock);
1368 			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1369 				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1370 			spin_unlock_bh(&ar->tx_ampdu_list_lock);
1371 			cleanup = true;
1372 		}
1373 		rcu_read_unlock();
1374 
1375 		if (cleanup)
1376 			carl9170_ampdu_gc(ar);
1377 	}
1378 
1379 	return 0;
1380 }
1381 
1382 static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1383 			       struct ieee80211_vif *vif, u16 queue,
1384 			       const struct ieee80211_tx_queue_params *param)
1385 {
1386 	struct ar9170 *ar = hw->priv;
1387 	int ret;
1388 
1389 	mutex_lock(&ar->mutex);
1390 	if (queue < ar->hw->queues) {
1391 		memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1392 		ret = carl9170_set_qos(ar);
1393 	} else {
1394 		ret = -EINVAL;
1395 	}
1396 
1397 	mutex_unlock(&ar->mutex);
1398 	return ret;
1399 }
1400 
1401 static void carl9170_ampdu_work(struct work_struct *work)
1402 {
1403 	struct ar9170 *ar = container_of(work, struct ar9170,
1404 					 ampdu_work);
1405 
1406 	if (!IS_STARTED(ar))
1407 		return;
1408 
1409 	mutex_lock(&ar->mutex);
1410 	carl9170_ampdu_gc(ar);
1411 	mutex_unlock(&ar->mutex);
1412 }
1413 
1414 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1415 				    struct ieee80211_vif *vif,
1416 				    struct ieee80211_ampdu_params *params)
1417 {
1418 	struct ieee80211_sta *sta = params->sta;
1419 	enum ieee80211_ampdu_mlme_action action = params->action;
1420 	u16 tid = params->tid;
1421 	u16 *ssn = &params->ssn;
1422 	struct ar9170 *ar = hw->priv;
1423 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1424 	struct carl9170_sta_tid *tid_info;
1425 
1426 	if (modparam_noht)
1427 		return -EOPNOTSUPP;
1428 
1429 	switch (action) {
1430 	case IEEE80211_AMPDU_TX_START:
1431 		if (!sta_info->ht_sta)
1432 			return -EOPNOTSUPP;
1433 
1434 		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1435 				   GFP_ATOMIC);
1436 		if (!tid_info)
1437 			return -ENOMEM;
1438 
1439 		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1440 		tid_info->state = CARL9170_TID_STATE_PROGRESS;
1441 		tid_info->tid = tid;
1442 		tid_info->max = sta_info->ampdu_max_len;
1443 		tid_info->sta = sta;
1444 		tid_info->vif = vif;
1445 
1446 		INIT_LIST_HEAD(&tid_info->list);
1447 		INIT_LIST_HEAD(&tid_info->tmp_list);
1448 		skb_queue_head_init(&tid_info->queue);
1449 		spin_lock_init(&tid_info->lock);
1450 
1451 		spin_lock_bh(&ar->tx_ampdu_list_lock);
1452 		ar->tx_ampdu_list_len++;
1453 		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1454 		rcu_assign_pointer(sta_info->agg[tid], tid_info);
1455 		spin_unlock_bh(&ar->tx_ampdu_list_lock);
1456 
1457 		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1458 		break;
1459 
1460 	case IEEE80211_AMPDU_TX_STOP_CONT:
1461 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
1462 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1463 		rcu_read_lock();
1464 		tid_info = rcu_dereference(sta_info->agg[tid]);
1465 		if (tid_info) {
1466 			spin_lock_bh(&ar->tx_ampdu_list_lock);
1467 			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1468 				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1469 			spin_unlock_bh(&ar->tx_ampdu_list_lock);
1470 		}
1471 
1472 		RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1473 		rcu_read_unlock();
1474 
1475 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1476 		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1477 		break;
1478 
1479 	case IEEE80211_AMPDU_TX_OPERATIONAL:
1480 		rcu_read_lock();
1481 		tid_info = rcu_dereference(sta_info->agg[tid]);
1482 
1483 		sta_info->stats[tid].clear = true;
1484 		sta_info->stats[tid].req = false;
1485 
1486 		if (tid_info) {
1487 			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1488 			tid_info->state = CARL9170_TID_STATE_IDLE;
1489 		}
1490 		rcu_read_unlock();
1491 
1492 		if (WARN_ON_ONCE(!tid_info))
1493 			return -EFAULT;
1494 
1495 		break;
1496 
1497 	case IEEE80211_AMPDU_RX_START:
1498 	case IEEE80211_AMPDU_RX_STOP:
1499 		/* Handled by hardware */
1500 		break;
1501 
1502 	default:
1503 		return -EOPNOTSUPP;
1504 	}
1505 
1506 	return 0;
1507 }
1508 
1509 #ifdef CONFIG_CARL9170_WPC
1510 static int carl9170_register_wps_button(struct ar9170 *ar)
1511 {
1512 	struct input_dev *input;
1513 	int err;
1514 
1515 	if (!(ar->features & CARL9170_WPS_BUTTON))
1516 		return 0;
1517 
1518 	input = input_allocate_device();
1519 	if (!input)
1520 		return -ENOMEM;
1521 
1522 	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1523 		 wiphy_name(ar->hw->wiphy));
1524 
1525 	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1526 		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1527 
1528 	input->name = ar->wps.name;
1529 	input->phys = ar->wps.phys;
1530 	input->id.bustype = BUS_USB;
1531 	input->dev.parent = &ar->hw->wiphy->dev;
1532 
1533 	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1534 
1535 	err = input_register_device(input);
1536 	if (err) {
1537 		input_free_device(input);
1538 		return err;
1539 	}
1540 
1541 	ar->wps.pbc = input;
1542 	return 0;
1543 }
1544 #endif /* CONFIG_CARL9170_WPC */
1545 
1546 #ifdef CONFIG_CARL9170_HWRNG
1547 static int carl9170_rng_get(struct ar9170 *ar)
1548 {
1549 
1550 #define RW	(CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1551 #define RB	(CARL9170_MAX_CMD_PAYLOAD_LEN)
1552 
1553 	static const __le32 rng_load[RW] = {
1554 		[0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1555 
1556 	u32 buf[RW];
1557 
1558 	unsigned int i, off = 0, transfer, count;
1559 	int err;
1560 
1561 	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1562 
1563 	if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
1564 		return -EAGAIN;
1565 
1566 	count = ARRAY_SIZE(ar->rng.cache);
1567 	while (count) {
1568 		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1569 					RB, (u8 *) rng_load,
1570 					RB, (u8 *) buf);
1571 		if (err)
1572 			return err;
1573 
1574 		transfer = min_t(unsigned int, count, RW);
1575 		for (i = 0; i < transfer; i++)
1576 			ar->rng.cache[off + i] = buf[i];
1577 
1578 		off += transfer;
1579 		count -= transfer;
1580 	}
1581 
1582 	ar->rng.cache_idx = 0;
1583 
1584 #undef RW
1585 #undef RB
1586 	return 0;
1587 }
1588 
1589 static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1590 {
1591 	struct ar9170 *ar = (struct ar9170 *)rng->priv;
1592 	int ret = -EIO;
1593 
1594 	mutex_lock(&ar->mutex);
1595 	if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1596 		ret = carl9170_rng_get(ar);
1597 		if (ret) {
1598 			mutex_unlock(&ar->mutex);
1599 			return ret;
1600 		}
1601 	}
1602 
1603 	*data = ar->rng.cache[ar->rng.cache_idx++];
1604 	mutex_unlock(&ar->mutex);
1605 
1606 	return sizeof(u16);
1607 }
1608 
1609 static void carl9170_unregister_hwrng(struct ar9170 *ar)
1610 {
1611 	if (ar->rng.initialized) {
1612 		hwrng_unregister(&ar->rng.rng);
1613 		ar->rng.initialized = false;
1614 	}
1615 }
1616 
1617 static int carl9170_register_hwrng(struct ar9170 *ar)
1618 {
1619 	int err;
1620 
1621 	snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1622 		 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1623 	ar->rng.rng.name = ar->rng.name;
1624 	ar->rng.rng.data_read = carl9170_rng_read;
1625 	ar->rng.rng.priv = (unsigned long)ar;
1626 
1627 	if (WARN_ON(ar->rng.initialized))
1628 		return -EALREADY;
1629 
1630 	err = hwrng_register(&ar->rng.rng);
1631 	if (err) {
1632 		dev_err(&ar->udev->dev, "Failed to register the random "
1633 			"number generator (%d)\n", err);
1634 		return err;
1635 	}
1636 
1637 	ar->rng.initialized = true;
1638 
1639 	err = carl9170_rng_get(ar);
1640 	if (err) {
1641 		carl9170_unregister_hwrng(ar);
1642 		return err;
1643 	}
1644 
1645 	return 0;
1646 }
1647 #endif /* CONFIG_CARL9170_HWRNG */
1648 
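/*
 * Survey data is stored in ar->survey[] indexed by the channel's hw_value
 * (see the CHAN() table above), so the requested idx is translated back
 * into a channel by scanning both bands for a matching hw_value.
 */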
1649 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1650 				struct survey_info *survey)
1651 {
1652 	struct ar9170 *ar = hw->priv;
1653 	struct ieee80211_channel *chan;
1654 	struct ieee80211_supported_band *band;
1655 	int err, b, i;
1656 
1657 	chan = ar->channel;
1658 	if (!chan)
1659 		return -ENODEV;
1660 
1661 	if (idx == chan->hw_value) {
1662 		mutex_lock(&ar->mutex);
1663 		err = carl9170_update_survey(ar, false, true);
1664 		mutex_unlock(&ar->mutex);
1665 		if (err)
1666 			return err;
1667 	}
1668 
1669 	for (b = 0; b < NUM_NL80211_BANDS; b++) {
1670 		band = ar->hw->wiphy->bands[b];
1671 
1672 		if (!band)
1673 			continue;
1674 
1675 		for (i = 0; i < band->n_channels; i++) {
1676 			if (band->channels[i].hw_value == idx) {
1677 				chan = &band->channels[i];
1678 				goto found;
1679 			}
1680 		}
1681 	}
1682 	return -ENOENT;
1683 
1684 found:
1685 	memcpy(survey, &ar->survey[idx], sizeof(*survey));
1686 
1687 	survey->channel = chan;
1688 	survey->filled = SURVEY_INFO_NOISE_DBM;
1689 
1690 	if (ar->channel == chan)
1691 		survey->filled |= SURVEY_INFO_IN_USE;
1692 
1693 	if (ar->fw.hw_counters) {
1694 		survey->filled |= SURVEY_INFO_TIME |
1695 				  SURVEY_INFO_TIME_BUSY |
1696 				  SURVEY_INFO_TIME_TX;
1697 	}
1698 
1699 	return 0;
1700 }
1701 
1702 static void carl9170_op_flush(struct ieee80211_hw *hw,
1703 			      struct ieee80211_vif *vif,
1704 			      u32 queues, bool drop)
1705 {
1706 	struct ar9170 *ar = hw->priv;
1707 	unsigned int vid;
1708 
1709 	mutex_lock(&ar->mutex);
1710 	for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1711 		carl9170_flush_cab(ar, vid);
1712 
1713 	carl9170_flush(ar, drop);
1714 	mutex_unlock(&ar->mutex);
1715 }
1716 
1717 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1718 				 struct ieee80211_low_level_stats *stats)
1719 {
1720 	struct ar9170 *ar = hw->priv;
1721 
1722 	memset(stats, 0, sizeof(*stats));
1723 	stats->dot11ACKFailureCount = ar->tx_ack_failures;
1724 	stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1725 	return 0;
1726 }
1727 
1728 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1729 				   struct ieee80211_vif *vif,
1730 				   enum sta_notify_cmd cmd,
1731 				   struct ieee80211_sta *sta)
1732 {
1733 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1734 
1735 	switch (cmd) {
1736 	case STA_NOTIFY_SLEEP:
1737 		sta_info->sleeping = true;
1738 		if (atomic_read(&sta_info->pending_frames))
1739 			ieee80211_sta_block_awake(hw, sta, true);
1740 		break;
1741 
1742 	case STA_NOTIFY_AWAKE:
1743 		sta_info->sleeping = false;
1744 		break;
1745 	}
1746 }
1747 
1748 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1749 {
1750 	struct ar9170 *ar = hw->priv;
1751 
1752 	return !!atomic_read(&ar->tx_total_queued);
1753 }
1754 
1755 static const struct ieee80211_ops carl9170_ops = {
1756 	.start			= carl9170_op_start,
1757 	.stop			= carl9170_op_stop,
1758 	.tx			= carl9170_op_tx,
1759 	.flush			= carl9170_op_flush,
1760 	.add_interface		= carl9170_op_add_interface,
1761 	.remove_interface	= carl9170_op_remove_interface,
1762 	.config			= carl9170_op_config,
1763 	.prepare_multicast	= carl9170_op_prepare_multicast,
1764 	.configure_filter	= carl9170_op_configure_filter,
1765 	.conf_tx		= carl9170_op_conf_tx,
1766 	.bss_info_changed	= carl9170_op_bss_info_changed,
1767 	.get_tsf		= carl9170_op_get_tsf,
1768 	.set_key		= carl9170_op_set_key,
1769 	.sta_add		= carl9170_op_sta_add,
1770 	.sta_remove		= carl9170_op_sta_remove,
1771 	.sta_notify		= carl9170_op_sta_notify,
1772 	.get_survey		= carl9170_op_get_survey,
1773 	.get_stats		= carl9170_op_get_stats,
1774 	.ampdu_action		= carl9170_op_ampdu_action,
1775 	.tx_frames_pending	= carl9170_tx_frames_pending,
1776 };
1777 
1778 void *carl9170_alloc(size_t priv_size)
1779 {
1780 	struct ieee80211_hw *hw;
1781 	struct ar9170 *ar;
1782 	struct sk_buff *skb;
1783 	int i;
1784 
1785 	/*
1786 	 * this buffer is used for rx stream reconstruction.
1787 	 * Under heavy load this device (or the transport layer?)
1788 	 * tends to split the streams into separate rx descriptors.
1789 	 */
1790 
1791 	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1792 	if (!skb)
1793 		goto err_nomem;
1794 
1795 	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1796 	if (!hw)
1797 		goto err_nomem;
1798 
1799 	ar = hw->priv;
1800 	ar->hw = hw;
1801 	ar->rx_failover = skb;
1802 
1803 	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1804 	ar->rx_has_plcp = false;
1805 
	/*
	 * Here's a hidden pitfall!
	 *
	 * All 4 AC queues work perfectly well under _legacy_ operation.
	 * However, as soon as aggregation is enabled, the traffic flow
	 * gets very bumpy. Therefore we have to _switch_ to a
	 * software AC with a single HW queue.
	 */
1814 	hw->queues = __AR9170_NUM_TXQ;
1815 
1816 	mutex_init(&ar->mutex);
1817 	spin_lock_init(&ar->beacon_lock);
1818 	spin_lock_init(&ar->cmd_lock);
1819 	spin_lock_init(&ar->tx_stats_lock);
1820 	spin_lock_init(&ar->tx_ampdu_list_lock);
1821 	spin_lock_init(&ar->mem_lock);
1822 	spin_lock_init(&ar->state_lock);
1823 	atomic_set(&ar->pending_restarts, 0);
1824 	ar->vifs = 0;
1825 	for (i = 0; i < ar->hw->queues; i++) {
1826 		skb_queue_head_init(&ar->tx_status[i]);
1827 		skb_queue_head_init(&ar->tx_pending[i]);
1828 
1829 		INIT_LIST_HEAD(&ar->bar_list[i]);
1830 		spin_lock_init(&ar->bar_list_lock[i]);
1831 	}
1832 	INIT_WORK(&ar->ps_work, carl9170_ps_work);
1833 	INIT_WORK(&ar->ping_work, carl9170_ping_work);
1834 	INIT_WORK(&ar->restart_work, carl9170_restart_work);
1835 	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1836 	INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1837 	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1838 	INIT_LIST_HEAD(&ar->tx_ampdu_list);
1839 	rcu_assign_pointer(ar->tx_ampdu_iter,
1840 			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1841 
1842 	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1843 	INIT_LIST_HEAD(&ar->vif_list);
1844 	init_completion(&ar->tx_flush);
1845 
1846 	/* firmware decides which modes we support */
1847 	hw->wiphy->interface_modes = 0;
1848 
1849 	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
1850 	ieee80211_hw_set(hw, MFP_CAPABLE);
1851 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
1852 	ieee80211_hw_set(hw, SUPPORTS_PS);
1853 	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
1854 	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
1855 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
1856 	ieee80211_hw_set(hw, SIGNAL_DBM);
1857 	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
1858 
1859 	if (!modparam_noht) {
		/*
		 * See the comment above for why we allow the user
		 * to disable HT via a module parameter.
		 */
1864 		ieee80211_hw_set(hw, AMPDU_AGGREGATION);
1865 	}
1866 
1867 	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1868 	hw->sta_data_size = sizeof(struct carl9170_sta_info);
1869 	hw->vif_data_size = sizeof(struct carl9170_vif_info);
1870 
1871 	hw->max_rates = CARL9170_TX_MAX_RATES;
1872 	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1873 
1874 	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1875 		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1876 
1877 	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
1878 
1879 	return ar;
1880 
1881 err_nomem:
1882 	kfree_skb(skb);
1883 	return ERR_PTR(-ENOMEM);
1884 }
1885 
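/*
 * The EEPROM image is fetched through the firmware's register-read
 * command: each transfer reads RW consecutive 32-bit words, so the whole
 * struct is pulled in RB-byte chunks until it is complete.
 */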
1886 static int carl9170_read_eeprom(struct ar9170 *ar)
1887 {
1888 #define RW	8	/* number of words to read at once */
1889 #define RB	(sizeof(u32) * RW)
1890 	u8 *eeprom = (void *)&ar->eeprom;
1891 	__le32 offsets[RW];
1892 	int i, j, err;
1893 
1894 	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1895 
1896 	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1897 #ifndef __CHECKER__
1898 	/* don't want to handle trailing remains */
1899 	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1900 #endif
1901 
1902 	for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1903 		for (j = 0; j < RW; j++)
1904 			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1905 						 RB * i + 4 * j);
1906 
1907 		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1908 					RB, (u8 *) &offsets,
1909 					RB, eeprom + RB * i);
1910 		if (err)
1911 			return err;
1912 	}
1913 
1914 #undef RW
1915 #undef RB
1916 	return 0;
1917 }
1918 
1919 static int carl9170_parse_eeprom(struct ar9170 *ar)
1920 {
1921 	struct ath_regulatory *regulatory = &ar->common.regulatory;
1922 	unsigned int rx_streams, tx_streams, tx_params = 0;
1923 	int bands = 0;
1924 	int chans = 0;
1925 
1926 	if (ar->eeprom.length == cpu_to_le16(0xffff))
1927 		return -ENODATA;
1928 
1929 	rx_streams = hweight8(ar->eeprom.rx_mask);
1930 	tx_streams = hweight8(ar->eeprom.tx_mask);
1931 
1932 	if (rx_streams != tx_streams) {
1933 		tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1934 
1935 		WARN_ON(!(tx_streams >= 1 && tx_streams <=
1936 			IEEE80211_HT_MCS_TX_MAX_STREAMS));
1937 
		/* keep the IEEE80211_HT_MCS_TX_RX_DIFF flag set above */
		tx_params |= (tx_streams - 1) <<
			     IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1940 
1941 		carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1942 		carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1943 	}
1944 
1945 	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1946 		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
1947 			&carl9170_band_2GHz;
1948 		chans += carl9170_band_2GHz.n_channels;
1949 		bands++;
1950 	}
1951 	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1952 		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
1953 			&carl9170_band_5GHz;
1954 		chans += carl9170_band_5GHz.n_channels;
1955 		bands++;
1956 	}
1957 
1958 	if (!bands)
1959 		return -EINVAL;
1960 
1961 	ar->survey = kcalloc(chans, sizeof(struct survey_info), GFP_KERNEL);
1962 	if (!ar->survey)
1963 		return -ENOMEM;
1964 	ar->num_channels = chans;
1965 
1966 	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1967 
1968 	/* second part of wiphy init */
1969 	SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1970 
1971 	return 0;
1972 }
1973 
1974 static void carl9170_reg_notifier(struct wiphy *wiphy,
1975 				  struct regulatory_request *request)
1976 {
1977 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1978 	struct ar9170 *ar = hw->priv;
1979 
1980 	ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1981 }
1982 
1983 int carl9170_register(struct ar9170 *ar)
1984 {
1985 	struct ath_regulatory *regulatory = &ar->common.regulatory;
1986 	int err = 0, i;
1987 
1988 	if (WARN_ON(ar->mem_bitmap))
1989 		return -EINVAL;
1990 
1991 	ar->mem_bitmap = kcalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG),
1992 				 sizeof(unsigned long),
1993 				 GFP_KERNEL);
1994 
1995 	if (!ar->mem_bitmap)
1996 		return -ENOMEM;
1997 
1998 	/* try to read EEPROM, init MAC addr */
1999 	err = carl9170_read_eeprom(ar);
2000 	if (err)
2001 		return err;
2002 
2003 	err = carl9170_parse_eeprom(ar);
2004 	if (err)
2005 		return err;
2006 
2007 	err = ath_regd_init(regulatory, ar->hw->wiphy,
2008 			    carl9170_reg_notifier);
2009 	if (err)
2010 		return err;
2011 
2012 	if (modparam_noht) {
2013 		carl9170_band_2GHz.ht_cap.ht_supported = false;
2014 		carl9170_band_5GHz.ht_cap.ht_supported = false;
2015 	}
2016 
2017 	for (i = 0; i < ar->fw.vif_num; i++) {
2018 		ar->vif_priv[i].id = i;
2019 		ar->vif_priv[i].vif = NULL;
2020 	}
2021 
2022 	err = ieee80211_register_hw(ar->hw);
2023 	if (err)
2024 		return err;
2025 
2026 	/* mac80211 interface is now registered */
2027 	ar->registered = true;
2028 
2029 	if (!ath_is_world_regd(regulatory))
2030 		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2031 
2032 #ifdef CONFIG_CARL9170_DEBUGFS
2033 	carl9170_debugfs_register(ar);
2034 #endif /* CONFIG_CARL9170_DEBUGFS */
2035 
2036 	err = carl9170_led_init(ar);
2037 	if (err)
2038 		goto err_unreg;
2039 
2040 #ifdef CONFIG_CARL9170_LEDS
2041 	err = carl9170_led_register(ar);
2042 	if (err)
2043 		goto err_unreg;
2044 #endif /* CONFIG_CARL9170_LEDS */
2045 
2046 #ifdef CONFIG_CARL9170_WPC
2047 	err = carl9170_register_wps_button(ar);
2048 	if (err)
2049 		goto err_unreg;
2050 #endif /* CONFIG_CARL9170_WPC */
2051 
2052 #ifdef CONFIG_CARL9170_HWRNG
2053 	err = carl9170_register_hwrng(ar);
2054 	if (err)
2055 		goto err_unreg;
2056 #endif /* CONFIG_CARL9170_HWRNG */
2057 
2058 	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2059 		 wiphy_name(ar->hw->wiphy));
2060 
2061 	return 0;
2062 
2063 err_unreg:
2064 	carl9170_unregister(ar);
2065 	return err;
2066 }
2067 
2068 void carl9170_unregister(struct ar9170 *ar)
2069 {
2070 	if (!ar->registered)
2071 		return;
2072 
2073 	ar->registered = false;
2074 
2075 #ifdef CONFIG_CARL9170_LEDS
2076 	carl9170_led_unregister(ar);
2077 #endif /* CONFIG_CARL9170_LEDS */
2078 
2079 #ifdef CONFIG_CARL9170_DEBUGFS
2080 	carl9170_debugfs_unregister(ar);
2081 #endif /* CONFIG_CARL9170_DEBUGFS */
2082 
2083 #ifdef CONFIG_CARL9170_WPC
2084 	if (ar->wps.pbc) {
2085 		input_unregister_device(ar->wps.pbc);
2086 		ar->wps.pbc = NULL;
2087 	}
2088 #endif /* CONFIG_CARL9170_WPC */
2089 
2090 #ifdef CONFIG_CARL9170_HWRNG
2091 	carl9170_unregister_hwrng(ar);
2092 #endif /* CONFIG_CARL9170_HWRNG */
2093 
2094 	carl9170_cancel_worker(ar);
2095 	cancel_work_sync(&ar->restart_work);
2096 
2097 	ieee80211_unregister_hw(ar->hw);
2098 }
2099 
2100 void carl9170_free(struct ar9170 *ar)
2101 {
2102 	WARN_ON(ar->registered);
2103 	WARN_ON(IS_INITIALIZED(ar));
2104 
2105 	kfree_skb(ar->rx_failover);
2106 	ar->rx_failover = NULL;
2107 
2108 	kfree(ar->mem_bitmap);
2109 	ar->mem_bitmap = NULL;
2110 
2111 	kfree(ar->survey);
2112 	ar->survey = NULL;
2113 
2114 	mutex_destroy(&ar->mutex);
2115 
2116 	ieee80211_free_hw(ar->hw);
2117 }
2118