1 /*
2  * Atheros CARL9170 driver
3  *
4  * mac80211 interaction code
5  *
6  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7  * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; see the file COPYING.  If not, see
21  * http://www.gnu.org/licenses/.
22  *
23  * This file incorporates work covered by the following copyright and
24  * permission notice:
25  *    Copyright (c) 2007-2008 Atheros Communications, Inc.
26  *
27  *    Permission to use, copy, modify, and/or distribute this software for any
28  *    purpose with or without fee is hereby granted, provided that the above
29  *    copyright notice and this permission notice appear in all copies.
30  *
31  *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32  *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33  *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34  *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35  *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36  *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37  *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38  */
39 
40 #include <linux/slab.h>
41 #include <linux/module.h>
42 #include <linux/etherdevice.h>
43 #include <linux/random.h>
44 #include <net/mac80211.h>
45 #include <net/cfg80211.h>
46 #include "hw.h"
47 #include "carl9170.h"
48 #include "cmd.h"
49 
50 static bool modparam_nohwcrypt;
51 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444);
52 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
53 
54 int modparam_noht;
55 module_param_named(noht, modparam_noht, int, 0444);
56 MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
57 
58 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
59 	.bitrate	= (_bitrate),			\
60 	.flags		= (_flags),			\
61 	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
62 }
63 
64 struct ieee80211_rate __carl9170_ratetable[] = {
65 	RATE(10, 0, 0, 0),
66 	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
67 	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
68 	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
69 	RATE(60, 0xb, 0, 0),
70 	RATE(90, 0xf, 0, 0),
71 	RATE(120, 0xa, 0, 0),
72 	RATE(180, 0xe, 0, 0),
73 	RATE(240, 0x9, 0, 0),
74 	RATE(360, 0xd, 1, 0),
75 	RATE(480, 0x8, 2, 0),
76 	RATE(540, 0xc, 3, 0),
77 };
78 #undef RATE
79 
80 #define carl9170_g_ratetable	(__carl9170_ratetable + 0)
81 #define carl9170_g_ratetable_size	12
82 #define carl9170_a_ratetable	(__carl9170_ratetable + 4)
83 #define carl9170_a_ratetable_size	8
84 
85 /*
86  * NB: The hw_value is used as an index into the carl9170_phy_freq_params
87  *     array in phy.c so that we don't have to do frequency lookups!
88  */
89 #define CHAN(_freq, _idx) {		\
90 	.center_freq	= (_freq),	\
91 	.hw_value	= (_idx),	\
92 	.max_power	= 18, /* XXX */	\
93 }
94 
95 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
96 	CHAN(2412,  0),
97 	CHAN(2417,  1),
98 	CHAN(2422,  2),
99 	CHAN(2427,  3),
100 	CHAN(2432,  4),
101 	CHAN(2437,  5),
102 	CHAN(2442,  6),
103 	CHAN(2447,  7),
104 	CHAN(2452,  8),
105 	CHAN(2457,  9),
106 	CHAN(2462, 10),
107 	CHAN(2467, 11),
108 	CHAN(2472, 12),
109 	CHAN(2484, 13),
110 };
111 
112 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
113 	CHAN(4920, 14),
114 	CHAN(4940, 15),
115 	CHAN(4960, 16),
116 	CHAN(4980, 17),
117 	CHAN(5040, 18),
118 	CHAN(5060, 19),
119 	CHAN(5080, 20),
120 	CHAN(5180, 21),
121 	CHAN(5200, 22),
122 	CHAN(5220, 23),
123 	CHAN(5240, 24),
124 	CHAN(5260, 25),
125 	CHAN(5280, 26),
126 	CHAN(5300, 27),
127 	CHAN(5320, 28),
128 	CHAN(5500, 29),
129 	CHAN(5520, 30),
130 	CHAN(5540, 31),
131 	CHAN(5560, 32),
132 	CHAN(5580, 33),
133 	CHAN(5600, 34),
134 	CHAN(5620, 35),
135 	CHAN(5640, 36),
136 	CHAN(5660, 37),
137 	CHAN(5680, 38),
138 	CHAN(5700, 39),
139 	CHAN(5745, 40),
140 	CHAN(5765, 41),
141 	CHAN(5785, 42),
142 	CHAN(5805, 43),
143 	CHAN(5825, 44),
144 	CHAN(5170, 45),
145 	CHAN(5190, 46),
146 	CHAN(5210, 47),
147 	CHAN(5230, 48),
148 };
149 #undef CHAN
150 
151 #define CARL9170_HT_CAP							\
152 {									\
153 	.ht_supported	= true,						\
154 	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
155 			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
156 			  IEEE80211_HT_CAP_SGI_40 |			\
157 			  IEEE80211_HT_CAP_DSSSCCK40 |			\
158 			  IEEE80211_HT_CAP_SM_PS,			\
159 	.ampdu_factor	= IEEE80211_HT_MAX_AMPDU_64K,			\
160 	.ampdu_density	= IEEE80211_HT_MPDU_DENSITY_8,			\
161 	.mcs		= {						\
162 		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
163 		.rx_highest = cpu_to_le16(300),				\
164 		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
165 	},								\
166 }
167 
168 static struct ieee80211_supported_band carl9170_band_2GHz = {
169 	.channels	= carl9170_2ghz_chantable,
170 	.n_channels	= ARRAY_SIZE(carl9170_2ghz_chantable),
171 	.bitrates	= carl9170_g_ratetable,
172 	.n_bitrates	= carl9170_g_ratetable_size,
173 	.ht_cap		= CARL9170_HT_CAP,
174 };
175 
176 static struct ieee80211_supported_band carl9170_band_5GHz = {
177 	.channels	= carl9170_5ghz_chantable,
178 	.n_channels	= ARRAY_SIZE(carl9170_5ghz_chantable),
179 	.bitrates	= carl9170_a_ratetable,
180 	.n_bitrates	= carl9170_a_ratetable_size,
181 	.ht_cap		= CARL9170_HT_CAP,
182 };
183 
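/*
 * Garbage-collect TX A-MPDU sessions: TIDs that were marked
 * CARL9170_TID_STATE_SHUTDOWN are unlinked from tx_ampdu_list, their
 * queued frames are completed as failed and the tid_info is freed
 * after an RCU grace period.
 */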
184 static void carl9170_ampdu_gc(struct ar9170 *ar)
185 {
186 	struct carl9170_sta_tid *tid_info;
187 	LIST_HEAD(tid_gc);
188 
189 	rcu_read_lock();
190 	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
191 		spin_lock_bh(&ar->tx_ampdu_list_lock);
192 		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
193 			tid_info->state = CARL9170_TID_STATE_KILLED;
194 			list_del_rcu(&tid_info->list);
195 			ar->tx_ampdu_list_len--;
196 			list_add_tail(&tid_info->tmp_list, &tid_gc);
197 		}
198 		spin_unlock_bh(&ar->tx_ampdu_list_lock);
199 
200 	}
201 	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
202 	rcu_read_unlock();
203 
204 	synchronize_rcu();
205 
206 	while (!list_empty(&tid_gc)) {
207 		struct sk_buff *skb;
208 		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
209 					    tmp_list);
210 
211 		while ((skb = __skb_dequeue(&tid_info->queue)))
212 			carl9170_tx_status(ar, skb, false);
213 
214 		list_del_init(&tid_info->tmp_list);
215 		kfree(tid_info);
216 	}
217 }
218 
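/*
 * Drain the TX path. If @drop_queued is set, frames that have not
 * been uploaded to the device yet are completed as dropped; any
 * remaining outstanding frames are waited for via the tx_flush
 * completion (one second timeout).
 */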
219 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
220 {
221 	if (drop_queued) {
222 		int i;
223 
224 		/*
225 		 * We can only drop frames which have not been uploaded
226 		 * to the device yet.
227 		 */
228 
229 		for (i = 0; i < ar->hw->queues; i++) {
230 			struct sk_buff *skb;
231 
232 			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
233 				struct ieee80211_tx_info *info;
234 
235 				info = IEEE80211_SKB_CB(skb);
236 				if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 					atomic_dec(&ar->tx_ampdu_upload);
238 
239 				carl9170_tx_status(ar, skb, false);
240 			}
241 		}
242 	}
243 
244 	/* Wait for all other outstanding frames to time out. */
245 	if (atomic_read(&ar->tx_total_queued))
246 		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
247 }
248 
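/*
 * Suspend all active TX A-MPDU sessions and complete their still
 * queued frames as failed.
 */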
249 static void carl9170_flush_ba(struct ar9170 *ar)
250 {
251 	struct sk_buff_head free;
252 	struct carl9170_sta_tid *tid_info;
253 	struct sk_buff *skb;
254 
255 	__skb_queue_head_init(&free);
256 
257 	rcu_read_lock();
258 	spin_lock_bh(&ar->tx_ampdu_list_lock);
259 	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
260 		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
261 			tid_info->state = CARL9170_TID_STATE_SUSPEND;
262 
263 			spin_lock(&tid_info->lock);
264 			while ((skb = __skb_dequeue(&tid_info->queue)))
265 				__skb_queue_tail(&free, skb);
266 			spin_unlock(&tid_info->lock);
267 		}
268 	}
269 	spin_unlock_bh(&ar->tx_ampdu_list_lock);
270 	rcu_read_unlock();
271 
272 	while ((skb = __skb_dequeue(&free)))
273 		carl9170_tx_status(ar, skb, false);
274 }
275 
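/*
 * Throw away every pending and uploaded frame, drop queued beacons
 * and reset the queue, memory-block and A-MPDU accounting.
 */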
276 static void carl9170_zap_queues(struct ar9170 *ar)
277 {
278 	struct carl9170_vif_info *cvif;
279 	unsigned int i;
280 
281 	carl9170_ampdu_gc(ar);
282 
283 	carl9170_flush_ba(ar);
284 	carl9170_flush(ar, true);
285 
286 	for (i = 0; i < ar->hw->queues; i++) {
287 		spin_lock_bh(&ar->tx_status[i].lock);
288 		while (!skb_queue_empty(&ar->tx_status[i])) {
289 			struct sk_buff *skb;
290 
291 			skb = skb_peek(&ar->tx_status[i]);
292 			carl9170_tx_get_skb(skb);
293 			spin_unlock_bh(&ar->tx_status[i].lock);
294 			carl9170_tx_drop(ar, skb);
295 			spin_lock_bh(&ar->tx_status[i].lock);
296 			carl9170_tx_put_skb(skb);
297 		}
298 		spin_unlock_bh(&ar->tx_status[i].lock);
299 	}
300 
301 	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
302 	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
303 	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
304 
305 	/* reinitialize queue statistics */
306 	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
307 	for (i = 0; i < ar->hw->queues; i++)
308 		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
309 
310 	bitmap_zero(ar->mem_bitmap, ar->fw.mem_blocks);
311 
312 	rcu_read_lock();
313 	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
314 		spin_lock_bh(&ar->beacon_lock);
315 		dev_kfree_skb_any(cvif->beacon);
316 		cvif->beacon = NULL;
317 		spin_unlock_bh(&ar->beacon_lock);
318 	}
319 	rcu_read_unlock();
320 
321 	atomic_set(&ar->tx_ampdu_upload, 0);
322 	atomic_set(&ar->tx_ampdu_scheduler, 0);
323 	atomic_set(&ar->tx_total_pending, 0);
324 	atomic_set(&ar->tx_total_queued, 0);
325 	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
326 }
327 
328 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
329 do {									\
330 	queue.aifs = ai_fs;						\
331 	queue.cw_min = cwmin;						\
332 	queue.cw_max = cwmax;						\
333 	queue.txop = _txop;						\
334 } while (0)
335 
336 static int carl9170_op_start(struct ieee80211_hw *hw)
337 {
338 	struct ar9170 *ar = hw->priv;
339 	int err, i;
340 
341 	mutex_lock(&ar->mutex);
342 
343 	carl9170_zap_queues(ar);
344 
345 	/* reset QoS defaults */
346 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3,     7, 47);
347 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7,    15, 94);
348 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023,  0);
349 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023,  0);
350 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
351 
352 	ar->current_factor = ar->current_density = -1;
353 	/* "The first key is unique." */
354 	ar->usedkeys = 1;
355 	ar->filter_state = 0;
356 	ar->ps.last_action = jiffies;
357 	ar->ps.last_slept = jiffies;
358 	ar->erp_mode = CARL9170_ERP_AUTO;
359 
360 	/* Set "disable hw crypto offload" whenever the module parameter
361 	 * nohwcrypt is set or the firmware does not support offloading.
362 	 */
363 	ar->disable_offload = modparam_nohwcrypt |
364 		ar->fw.disable_offload_fw;
365 	ar->rx_software_decryption = ar->disable_offload;
366 
367 	for (i = 0; i < ar->hw->queues; i++) {
368 		ar->queue_stop_timeout[i] = jiffies;
369 		ar->max_queue_stop_timeout[i] = 0;
370 	}
371 
372 	atomic_set(&ar->mem_allocs, 0);
373 
374 	err = carl9170_usb_open(ar);
375 	if (err)
376 		goto out;
377 
378 	err = carl9170_init_mac(ar);
379 	if (err)
380 		goto out;
381 
382 	err = carl9170_set_qos(ar);
383 	if (err)
384 		goto out;
385 
386 	if (ar->fw.rx_filter) {
387 		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
388 			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
389 		if (err)
390 			goto out;
391 	}
392 
393 	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
394 				 AR9170_DMA_TRIGGER_RXQ);
395 	if (err)
396 		goto out;
397 
398 	/* Clear key-cache */
399 	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
400 		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
401 					  0, NULL, 0);
402 		if (err)
403 			goto out;
404 
405 		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
406 					  1, NULL, 0);
407 		if (err)
408 			goto out;
409 
410 		if (i < AR9170_CAM_MAX_USER) {
411 			err = carl9170_disable_key(ar, i);
412 			if (err)
413 				goto out;
414 		}
415 	}
416 
417 	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
418 
419 	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
420 		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
421 
422 	ieee80211_wake_queues(ar->hw);
423 	err = 0;
424 
425 out:
426 	mutex_unlock(&ar->mutex);
427 	return err;
428 }
429 
430 static void carl9170_cancel_worker(struct ar9170 *ar)
431 {
432 	cancel_delayed_work_sync(&ar->stat_work);
433 	cancel_delayed_work_sync(&ar->tx_janitor);
434 #ifdef CONFIG_CARL9170_LEDS
435 	cancel_delayed_work_sync(&ar->led_work);
436 #endif /* CONFIG_CARL9170_LEDS */
437 	cancel_work_sync(&ar->ps_work);
438 	cancel_work_sync(&ar->ping_work);
439 	cancel_work_sync(&ar->ampdu_work);
440 }
441 
442 static void carl9170_op_stop(struct ieee80211_hw *hw)
443 {
444 	struct ar9170 *ar = hw->priv;
445 
446 	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
447 
448 	ieee80211_stop_queues(ar->hw);
449 
450 	mutex_lock(&ar->mutex);
451 	if (IS_ACCEPTING_CMD(ar)) {
452 		RCU_INIT_POINTER(ar->beacon_iter, NULL);
453 
454 		carl9170_led_set_state(ar, 0);
455 
456 		/* stop DMA */
457 		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
458 		carl9170_usb_stop(ar);
459 	}
460 
461 	carl9170_zap_queues(ar);
462 	mutex_unlock(&ar->mutex);
463 
464 	carl9170_cancel_worker(ar);
465 }
466 
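/*
 * Try to recover from a fatal error: restart the USB transport,
 * clean out all queues and let mac80211 reconfigure the hardware.
 * If that fails, fall back to a low-level USB reset.
 */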
467 static void carl9170_restart_work(struct work_struct *work)
468 {
469 	struct ar9170 *ar = container_of(work, struct ar9170,
470 					 restart_work);
471 	int err = -EIO;
472 
473 	ar->usedkeys = 0;
474 	ar->filter_state = 0;
475 	carl9170_cancel_worker(ar);
476 
477 	mutex_lock(&ar->mutex);
478 	if (!ar->force_usb_reset) {
479 		err = carl9170_usb_restart(ar);
480 		if (net_ratelimit()) {
481 			if (err)
482 				dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
483 			else
484 				dev_info(&ar->udev->dev, "device restarted successfully.\n");
485 		}
486 	}
487 	carl9170_zap_queues(ar);
488 	mutex_unlock(&ar->mutex);
489 
490 	if (!err && !ar->force_usb_reset) {
491 		ar->restart_counter++;
492 		atomic_set(&ar->pending_restarts, 0);
493 
494 		ieee80211_restart_hw(ar->hw);
495 	} else {
496 		/*
497 		 * The reset was unsuccessful and the device seems to
498 		 * be dead. But there's still one option: a low-level
499 		 * usb subsystem reset...
500 		 */
501 
502 		carl9170_usb_reset(ar);
503 	}
504 }
505 
506 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
507 {
508 	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
509 
510 	/*
511 	 * Sometimes, an error can trigger several different reset events.
512 	 * By ignoring these *surplus* reset events, the device won't be
513 	 * killed again right after it has recovered.
514 	 */
515 	if (atomic_inc_return(&ar->pending_restarts) > 1) {
516 		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
517 		return;
518 	}
519 
520 	ieee80211_stop_queues(ar->hw);
521 
522 	dev_err(&ar->udev->dev, "restart device (%d)\n", r);
523 
524 	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
525 	    !WARN_ON(r >= __CARL9170_RR_LAST))
526 		ar->last_reason = r;
527 
528 	if (!ar->registered)
529 		return;
530 
531 	if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
532 		ar->force_usb_reset = true;
533 
534 	ieee80211_queue_work(ar->hw, &ar->restart_work);
535 
536 	/*
537 	 * At this point, the device instance might have vanished or been
538 	 * disabled. So don't add any code here that accesses the ar9170
539 	 * struct without proper protection.
540 	 */
541 }
542 
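/*
 * Firmware watchdog: issue an echo command and schedule a restart
 * if the device has become unresponsive.
 */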
543 static void carl9170_ping_work(struct work_struct *work)
544 {
545 	struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
546 	int err;
547 
548 	if (!IS_STARTED(ar))
549 		return;
550 
551 	mutex_lock(&ar->mutex);
552 	err = carl9170_echo_test(ar, 0xdeadbeef);
553 	if (err)
554 		carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
555 	mutex_unlock(&ar->mutex);
556 }
557 
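/*
 * Program the (new) main interface: set the MAC address, decide
 * whether hardware crypto offload can stay enabled and update the
 * operating mode.
 */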
558 static int carl9170_init_interface(struct ar9170 *ar,
559 				   struct ieee80211_vif *vif)
560 {
561 	struct ath_common *common = &ar->common;
562 	int err;
563 
564 	if (!vif) {
565 		WARN_ON_ONCE(IS_STARTED(ar));
566 		return 0;
567 	}
568 
569 	memcpy(common->macaddr, vif->addr, ETH_ALEN);
570 
571 	/* We have to fall back to software crypto whenever
572 	 * the user chooses to participate in an IBSS. HW
573 	 * offload for IBSS RSN is not supported by this driver.
574 	 *
575 	 * NOTE: If the previous main interface has already
576 	 * disabled hw crypto offload, we have to keep this
577 	 * previous disable_offload setting as it was.
578 	 * Ideally, though, we should notify mac80211 and tell
579 	 * it to forget about any HW crypto offload for now.
580 	 */
581 	ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
582 	    (vif->type != NL80211_IFTYPE_AP));
583 
584 	/* The driver used to have P2P GO+CLIENT support.
585 	 * Since that was dropped and we don't know if any
586 	 * gremlins are lurking in the shadows, it is best
587 	 * to keep HW offload disabled for P2P.
588 	 */
589 	ar->disable_offload |= vif->p2p;
590 
591 	ar->rx_software_decryption = ar->disable_offload;
592 
593 	err = carl9170_set_operating_mode(ar);
594 	return err;
595 }
596 
597 static int carl9170_op_add_interface(struct ieee80211_hw *hw,
598 				     struct ieee80211_vif *vif)
599 {
600 	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
601 	struct ieee80211_vif *main_vif, *old_main = NULL;
602 	struct ar9170 *ar = hw->priv;
603 	int vif_id = -1, err = 0;
604 
605 	mutex_lock(&ar->mutex);
606 	rcu_read_lock();
607 	if (vif_priv->active) {
608 		/*
609 		 * Skip the interface structure initialization
610 		 * if the vif survived the _restart call.
611 		 */
612 		vif_id = vif_priv->id;
613 		vif_priv->enable_beacon = false;
614 
615 		spin_lock_bh(&ar->beacon_lock);
616 		dev_kfree_skb_any(vif_priv->beacon);
617 		vif_priv->beacon = NULL;
618 		spin_unlock_bh(&ar->beacon_lock);
619 
620 		goto init;
621 	}
622 
623 	/* The AR9170 HW's MAC doesn't provide full support for multiple,
624 	 * independent interfaces [of different operation modes], so we
625 	 * have to select ONE main interface [main mode of HW], but we
626 	 * can have multiple slaves [AKA: entries in the ACK-table].
627 	 *
628 	 * The first (from HEAD/TOP) interface in the ar->vif_list is
629 	 * always the main intf. All following intfs in this list
630 	 * are considered to be slave intfs.
631 	 */
632 	main_vif = carl9170_get_main_vif(ar);
633 
634 	if (main_vif) {
635 		switch (main_vif->type) {
636 		case NL80211_IFTYPE_STATION:
637 			if (vif->type == NL80211_IFTYPE_STATION)
638 				break;
639 
640 			err = -EBUSY;
641 			rcu_read_unlock();
642 
643 			goto unlock;
644 
645 		case NL80211_IFTYPE_MESH_POINT:
646 		case NL80211_IFTYPE_AP:
647 			if ((vif->type == NL80211_IFTYPE_STATION) ||
648 			    (vif->type == NL80211_IFTYPE_AP) ||
649 			    (vif->type == NL80211_IFTYPE_MESH_POINT))
650 				break;
651 
652 			err = -EBUSY;
653 			rcu_read_unlock();
654 			goto unlock;
655 
656 		default:
657 			rcu_read_unlock();
658 			goto unlock;
659 		}
660 	}
661 
662 	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
663 
664 	if (vif_id < 0) {
665 		rcu_read_unlock();
666 
667 		err = -ENOSPC;
668 		goto unlock;
669 	}
670 
671 	BUG_ON(ar->vif_priv[vif_id].id != vif_id);
672 
673 	vif_priv->active = true;
674 	vif_priv->id = vif_id;
675 	vif_priv->enable_beacon = false;
676 	ar->vifs++;
677 	if (old_main) {
678 		/* We end up here if the main interface is being replaced.
679 		 * Put the new main interface at the HEAD of the list; the
680 		 * previous interface will automatically become second in line.
681 		 */
682 		list_add_rcu(&vif_priv->list, &ar->vif_list);
683 	} else {
684 		/* Add the new interface. If the list is empty, it will become
685 		 * the main interface, otherwise it will be a slave.
686 		 */
687 		list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
688 	}
689 	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
690 
691 init:
692 	main_vif = carl9170_get_main_vif(ar);
693 
694 	if (main_vif == vif) {
695 		rcu_assign_pointer(ar->beacon_iter, vif_priv);
696 		rcu_read_unlock();
697 
698 		if (old_main) {
699 			struct carl9170_vif_info *old_main_priv =
700 				(void *) old_main->drv_priv;
701 			/* downgrade old main intf to slave intf.
702 			 * NOTE: We are no longer under rcu_read_lock.
703 			 * But we are still holding ar->mutex, so the
704 			 * vif data [id, addr] is safe.
705 			 */
706 			err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
707 						       old_main->addr);
708 			if (err)
709 				goto unlock;
710 		}
711 
712 		err = carl9170_init_interface(ar, vif);
713 		if (err)
714 			goto unlock;
715 	} else {
716 		rcu_read_unlock();
717 		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
718 
719 		if (err)
720 			goto unlock;
721 	}
722 
723 	if (ar->fw.tx_seq_table) {
724 		err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
725 					 0);
726 		if (err)
727 			goto unlock;
728 	}
729 
730 unlock:
731 	if (err && (vif_id >= 0)) {
732 		vif_priv->active = false;
733 		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
734 		ar->vifs--;
735 		RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
736 		list_del_rcu(&vif_priv->list);
737 		mutex_unlock(&ar->mutex);
738 		synchronize_rcu();
739 	} else {
740 		if (ar->vifs > 1)
741 			ar->ps.off_override |= PS_OFF_VIF;
742 
743 		mutex_unlock(&ar->mutex);
744 	}
745 
746 	return err;
747 }
748 
749 static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
750 					 struct ieee80211_vif *vif)
751 {
752 	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
753 	struct ieee80211_vif *main_vif;
754 	struct ar9170 *ar = hw->priv;
755 	unsigned int id;
756 
757 	mutex_lock(&ar->mutex);
758 
759 	if (WARN_ON_ONCE(!vif_priv->active))
760 		goto unlock;
761 
762 	ar->vifs--;
763 
764 	rcu_read_lock();
765 	main_vif = carl9170_get_main_vif(ar);
766 
767 	id = vif_priv->id;
768 
769 	vif_priv->active = false;
770 	WARN_ON(vif_priv->enable_beacon);
771 	vif_priv->enable_beacon = false;
772 	list_del_rcu(&vif_priv->list);
773 	RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
774 
775 	if (vif == main_vif) {
776 		rcu_read_unlock();
777 
778 		if (ar->vifs) {
779 			WARN_ON(carl9170_init_interface(ar,
780 					carl9170_get_main_vif(ar)));
781 		} else {
782 			carl9170_set_operating_mode(ar);
783 		}
784 	} else {
785 		rcu_read_unlock();
786 
787 		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
788 	}
789 
790 	carl9170_update_beacon(ar, false);
791 	carl9170_flush_cab(ar, id);
792 
793 	spin_lock_bh(&ar->beacon_lock);
794 	dev_kfree_skb_any(vif_priv->beacon);
795 	vif_priv->beacon = NULL;
796 	spin_unlock_bh(&ar->beacon_lock);
797 
798 	bitmap_release_region(&ar->vif_bitmap, id, 0);
799 
800 	carl9170_set_beacon_timers(ar);
801 
802 	if (ar->vifs == 1)
803 		ar->ps.off_override &= ~PS_OFF_VIF;
804 
805 unlock:
806 	mutex_unlock(&ar->mutex);
807 
808 	synchronize_rcu();
809 }
810 
811 void carl9170_ps_check(struct ar9170 *ar)
812 {
813 	ieee80211_queue_work(ar->hw, &ar->ps_work);
814 }
815 
816 /* caller must hold ar->mutex */
817 static int carl9170_ps_update(struct ar9170 *ar)
818 {
819 	bool ps = false;
820 	int err = 0;
821 
822 	if (!ar->ps.off_override)
823 		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
824 
825 	if (ps != ar->ps.state) {
826 		err = carl9170_powersave(ar, ps);
827 		if (err)
828 			return err;
829 
830 		if (ar->ps.state && !ps) {
831 			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
832 				ar->ps.last_action);
833 		}
834 
835 		if (ps)
836 			ar->ps.last_slept = jiffies;
837 
838 		ar->ps.last_action = jiffies;
839 		ar->ps.state = ps;
840 	}
841 
842 	return 0;
843 }
844 
845 static void carl9170_ps_work(struct work_struct *work)
846 {
847 	struct ar9170 *ar = container_of(work, struct ar9170,
848 					 ps_work);
849 	mutex_lock(&ar->mutex);
850 	if (IS_STARTED(ar))
851 		WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
852 	mutex_unlock(&ar->mutex);
853 }
854 
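/*
 * Refresh the survey data: optionally poll the noise floor and the
 * firmware's channel-use counters; @flush clears the accumulated
 * tally (used when switching channels).
 */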
855 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
856 {
857 	int err;
858 
859 	if (noise) {
860 		err = carl9170_get_noisefloor(ar);
861 		if (err)
862 			return err;
863 	}
864 
865 	if (ar->fw.hw_counters) {
866 		err = carl9170_collect_tally(ar);
867 		if (err)
868 			return err;
869 	}
870 
871 	if (flush)
872 		memset(&ar->tally, 0, sizeof(ar->tally));
873 
874 	return 0;
875 }
876 
877 static void carl9170_stat_work(struct work_struct *work)
878 {
879 	struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
880 	int err;
881 
882 	mutex_lock(&ar->mutex);
883 	err = carl9170_update_survey(ar, false, true);
884 	mutex_unlock(&ar->mutex);
885 
886 	if (err)
887 		return;
888 
889 	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
890 		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
891 }
892 
893 static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
894 {
895 	struct ar9170 *ar = hw->priv;
896 	int err = 0;
897 
898 	mutex_lock(&ar->mutex);
899 	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
900 		/* TODO */
901 		err = 0;
902 	}
903 
904 	if (changed & IEEE80211_CONF_CHANGE_PS) {
905 		err = carl9170_ps_update(ar);
906 		if (err)
907 			goto out;
908 	}
909 
910 	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
911 		/* TODO */
912 		err = 0;
913 	}
914 
915 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
916 		enum nl80211_channel_type channel_type =
917 			cfg80211_get_chandef_type(&hw->conf.chandef);
918 
919 		/* adjust slot time for 5 GHz */
920 		err = carl9170_set_slot_time(ar);
921 		if (err)
922 			goto out;
923 
924 		err = carl9170_update_survey(ar, true, false);
925 		if (err)
926 			goto out;
927 
928 		err = carl9170_set_channel(ar, hw->conf.chandef.chan,
929 					   channel_type);
930 		if (err)
931 			goto out;
932 
933 		err = carl9170_update_survey(ar, false, true);
934 		if (err)
935 			goto out;
936 
937 		err = carl9170_set_dyn_sifs_ack(ar);
938 		if (err)
939 			goto out;
940 
941 		err = carl9170_set_rts_cts_rate(ar);
942 		if (err)
943 			goto out;
944 	}
945 
946 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
947 		err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
948 		if (err)
949 			goto out;
950 	}
951 
952 out:
953 	mutex_unlock(&ar->mutex);
954 	return err;
955 }
956 
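/*
 * Build the 64-bit multicast hash: each group address sets the bit
 * selected by the upper six bits of its last octet; the broadcast
 * bit is always set.
 */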
957 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
958 					 struct netdev_hw_addr_list *mc_list)
959 {
960 	struct netdev_hw_addr *ha;
961 	u64 mchash;
962 
963 	/* always get broadcast frames */
964 	mchash = 1ULL << (0xff >> 2);
965 
966 	netdev_hw_addr_list_for_each(ha, mc_list)
967 		mchash |= 1ULL << (ha->addr[5] >> 2);
968 
969 	return mchash;
970 }
971 
972 static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
973 					 unsigned int changed_flags,
974 					 unsigned int *new_flags,
975 					 u64 multicast)
976 {
977 	struct ar9170 *ar = hw->priv;
978 
979 	/* mask supported flags */
980 	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
981 
982 	if (!IS_ACCEPTING_CMD(ar))
983 		return;
984 
985 	mutex_lock(&ar->mutex);
986 
987 	ar->filter_state = *new_flags;
988 	/*
989 	 * We can support more by setting the sniffer bit and
990 	 * then checking the error flags later.
991 	 */
992 
993 	if (*new_flags & FIF_ALLMULTI)
994 		multicast = ~0ULL;
995 
996 	if (multicast != ar->cur_mc_hash)
997 		WARN_ON(carl9170_update_multicast(ar, multicast));
998 
999 	if (changed_flags & FIF_OTHER_BSS) {
1000 		ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);
1001 
1002 		WARN_ON(carl9170_set_operating_mode(ar));
1003 	}
1004 
1005 	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
1006 		u32 rx_filter = 0;
1007 
1008 		if (!ar->fw.ba_filter)
1009 			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1010 
1011 		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
1012 			rx_filter |= CARL9170_RX_FILTER_BAD;
1013 
1014 		if (!(*new_flags & FIF_CONTROL))
1015 			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1016 
1017 		if (!(*new_flags & FIF_PSPOLL))
1018 			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
1019 
1020 		if (!(*new_flags & FIF_OTHER_BSS)) {
1021 			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
1022 			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
1023 		}
1024 
1025 		WARN_ON(carl9170_rx_filter(ar, rx_filter));
1026 	}
1027 
1028 	mutex_unlock(&ar->mutex);
1029 }
1030 
1031 
1032 static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
1033 					 struct ieee80211_vif *vif,
1034 					 struct ieee80211_bss_conf *bss_conf,
1035 					 u64 changed)
1036 {
1037 	struct ar9170 *ar = hw->priv;
1038 	struct ath_common *common = &ar->common;
1039 	int err = 0;
1040 	struct carl9170_vif_info *vif_priv;
1041 	struct ieee80211_vif *main_vif;
1042 
1043 	mutex_lock(&ar->mutex);
1044 	vif_priv = (void *) vif->drv_priv;
1045 	main_vif = carl9170_get_main_vif(ar);
1046 	if (WARN_ON(!main_vif))
1047 		goto out;
1048 
1049 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
1050 		struct carl9170_vif_info *iter;
1051 		int i = 0;
1052 
1053 		vif_priv->enable_beacon = bss_conf->enable_beacon;
1054 		rcu_read_lock();
1055 		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1056 			if (iter->active && iter->enable_beacon)
1057 				i++;
1058 
1059 		}
1060 		rcu_read_unlock();
1061 
1062 		ar->beacon_enabled = i;
1063 	}
1064 
1065 	if (changed & BSS_CHANGED_BEACON) {
1066 		err = carl9170_update_beacon(ar, false);
1067 		if (err)
1068 			goto out;
1069 	}
1070 
1071 	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1072 		       BSS_CHANGED_BEACON_INT)) {
1073 
1074 		if (main_vif != vif) {
1075 			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1076 			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1077 		}
1078 
1079 		/*
1080 		 * Broadcast traffic is only sent after DTIM beacons, so a hard
1081 		 * limit on the DTIM interval prevents false stuck-queue alarms.
1082 		 */
1083 		if (vif->type != NL80211_IFTYPE_STATION &&
1084 		    (bss_conf->beacon_int * bss_conf->dtim_period >=
1085 		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1086 			err = -EINVAL;
1087 			goto out;
1088 		}
1089 
1090 		err = carl9170_set_beacon_timers(ar);
1091 		if (err)
1092 			goto out;
1093 	}
1094 
1095 	if (changed & BSS_CHANGED_HT) {
1096 		/* TODO */
1097 		err = 0;
1098 		if (err)
1099 			goto out;
1100 	}
1101 
1102 	if (main_vif != vif)
1103 		goto out;
1104 
1105 	/*
1106 	 * The following settings can only be changed by the
1107 	 * main interface.
1108 	 */
1109 
1110 	if (changed & BSS_CHANGED_BSSID) {
1111 		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1112 		err = carl9170_set_operating_mode(ar);
1113 		if (err)
1114 			goto out;
1115 	}
1116 
1117 	if (changed & BSS_CHANGED_ASSOC) {
1118 		ar->common.curaid = vif->cfg.aid;
1119 		err = carl9170_set_beacon_timers(ar);
1120 		if (err)
1121 			goto out;
1122 	}
1123 
1124 	if (changed & BSS_CHANGED_ERP_SLOT) {
1125 		err = carl9170_set_slot_time(ar);
1126 		if (err)
1127 			goto out;
1128 	}
1129 
1130 	if (changed & BSS_CHANGED_BASIC_RATES) {
1131 		err = carl9170_set_mac_rates(ar);
1132 		if (err)
1133 			goto out;
1134 	}
1135 
1136 out:
1137 	WARN_ON_ONCE(err && IS_STARTED(ar));
1138 	mutex_unlock(&ar->mutex);
1139 }
1140 
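/* Read the current 64-bit TSF value from the firmware. */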
1141 static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1142 			       struct ieee80211_vif *vif)
1143 {
1144 	struct ar9170 *ar = hw->priv;
1145 	struct carl9170_tsf_rsp tsf;
1146 	int err;
1147 
1148 	mutex_lock(&ar->mutex);
1149 	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1150 				0, NULL, sizeof(tsf), &tsf);
1151 	mutex_unlock(&ar->mutex);
1152 	if (WARN_ON(err))
1153 		return 0;
1154 
1155 	return le64_to_cpu(tsf.tsf_64);
1156 }
1157 
1158 static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1159 			       struct ieee80211_vif *vif,
1160 			       struct ieee80211_sta *sta,
1161 			       struct ieee80211_key_conf *key)
1162 {
1163 	struct ar9170 *ar = hw->priv;
1164 	int err = 0, i;
1165 	u8 ktype;
1166 
1167 	if (ar->disable_offload || !vif)
1168 		return -EOPNOTSUPP;
1169 
1170 	/* Fall back to software encryption whenever the driver is connected
1171 	 * to more than one network.
1172 	 *
1173 	 * This is very unfortunate, because some machines cannot handle
1174 	 * the high throughput of 802.11n networks.
1175 	 */
1176 
1177 	if (!is_main_vif(ar, vif)) {
1178 		mutex_lock(&ar->mutex);
1179 		goto err_softw;
1180 	}
1181 
1182 	/*
1183 	 * While the hardware supports a *catch-all* key for offloading
1184 	 * group-key en-/decryption, the way the hardware decides which
1185 	 * keyId maps to which key remains a mystery...
1186 	 */
1187 	if ((vif->type != NL80211_IFTYPE_STATION &&
1188 	     vif->type != NL80211_IFTYPE_ADHOC) &&
1189 	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1190 		return -EOPNOTSUPP;
1191 
1192 	switch (key->cipher) {
1193 	case WLAN_CIPHER_SUITE_WEP40:
1194 		ktype = AR9170_ENC_ALG_WEP64;
1195 		break;
1196 	case WLAN_CIPHER_SUITE_WEP104:
1197 		ktype = AR9170_ENC_ALG_WEP128;
1198 		break;
1199 	case WLAN_CIPHER_SUITE_TKIP:
1200 		ktype = AR9170_ENC_ALG_TKIP;
1201 		break;
1202 	case WLAN_CIPHER_SUITE_CCMP:
1203 		ktype = AR9170_ENC_ALG_AESCCMP;
1204 		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1205 		break;
1206 	default:
1207 		return -EOPNOTSUPP;
1208 	}
1209 
1210 	mutex_lock(&ar->mutex);
1211 	if (cmd == SET_KEY) {
1212 		if (!IS_STARTED(ar)) {
1213 			err = -EOPNOTSUPP;
1214 			goto out;
1215 		}
1216 
1217 		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1218 			sta = NULL;
1219 
1220 			i = 64 + key->keyidx;
1221 		} else {
1222 			for (i = 0; i < 64; i++)
1223 				if (!(ar->usedkeys & BIT(i)))
1224 					break;
1225 			if (i == 64)
1226 				goto err_softw;
1227 		}
1228 
1229 		key->hw_key_idx = i;
1230 
1231 		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1232 					  ktype, 0, key->key,
1233 					  min_t(u8, 16, key->keylen));
1234 		if (err)
1235 			goto out;
1236 
1237 		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1238 			err = carl9170_upload_key(ar, i, sta ? sta->addr :
1239 						  NULL, ktype, 1,
1240 						  key->key + 16, 16);
1241 			if (err)
1242 				goto out;
1243 
1244 			/*
1245 			 * The hardware is not capable of generating the
1246 			 * MMIC for fragmented frames!
1247 			 */
1248 			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1249 		}
1250 
1251 		if (i < 64)
1252 			ar->usedkeys |= BIT(i);
1253 
1254 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1255 	} else {
1256 		if (!IS_STARTED(ar)) {
1257 			/* The device is gone... together with the key ;-) */
1258 			err = 0;
1259 			goto out;
1260 		}
1261 
1262 		if (key->hw_key_idx < 64) {
1263 			ar->usedkeys &= ~BIT(key->hw_key_idx);
1264 		} else {
1265 			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1266 						  AR9170_ENC_ALG_NONE, 0,
1267 						  NULL, 0);
1268 			if (err)
1269 				goto out;
1270 
1271 			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1272 				err = carl9170_upload_key(ar, key->hw_key_idx,
1273 							  NULL,
1274 							  AR9170_ENC_ALG_NONE,
1275 							  1, NULL, 0);
1276 				if (err)
1277 					goto out;
1278 			}
1279 
1280 		}
1281 
1282 		err = carl9170_disable_key(ar, key->hw_key_idx);
1283 		if (err)
1284 			goto out;
1285 	}
1286 
1287 out:
1288 	mutex_unlock(&ar->mutex);
1289 	return err;
1290 
1291 err_softw:
1292 	if (!ar->rx_software_decryption) {
1293 		ar->rx_software_decryption = true;
1294 		carl9170_set_operating_mode(ar);
1295 	}
1296 	mutex_unlock(&ar->mutex);
1297 	return -ENOSPC;
1298 }
1299 
1300 static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1301 			       struct ieee80211_vif *vif,
1302 			       struct ieee80211_sta *sta)
1303 {
1304 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1305 	unsigned int i;
1306 
1307 	atomic_set(&sta_info->pending_frames, 0);
1308 
1309 	if (sta->deflink.ht_cap.ht_supported) {
1310 		if (sta->deflink.ht_cap.ampdu_density > 6) {
1311 			/*
1312 			 * The HW cannot meet a 16us AMPDU density
1313 			 * requirement. No HT-Xmit for this station.
1314 			 */
1315 
1316 			return 0;
1317 		}
1318 
1319 		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
1320 			RCU_INIT_POINTER(sta_info->agg[i], NULL);
1321 
1322 		sta_info->ampdu_max_len = 1 << (3 + sta->deflink.ht_cap.ampdu_factor);
1323 		sta_info->ht_sta = true;
1324 	}
1325 
1326 	return 0;
1327 }
1328 
1329 static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1330 				struct ieee80211_vif *vif,
1331 				struct ieee80211_sta *sta)
1332 {
1333 	struct ar9170 *ar = hw->priv;
1334 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1335 	unsigned int i;
1336 	bool cleanup = false;
1337 
1338 	if (sta->deflink.ht_cap.ht_supported) {
1339 
1340 		sta_info->ht_sta = false;
1341 
1342 		rcu_read_lock();
1343 		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
1344 			struct carl9170_sta_tid *tid_info;
1345 
1346 			tid_info = rcu_dereference(sta_info->agg[i]);
1347 			RCU_INIT_POINTER(sta_info->agg[i], NULL);
1348 
1349 			if (!tid_info)
1350 				continue;
1351 
1352 			spin_lock_bh(&ar->tx_ampdu_list_lock);
1353 			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1354 				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1355 			spin_unlock_bh(&ar->tx_ampdu_list_lock);
1356 			cleanup = true;
1357 		}
1358 		rcu_read_unlock();
1359 
1360 		if (cleanup)
1361 			carl9170_ampdu_gc(ar);
1362 	}
1363 
1364 	return 0;
1365 }
1366 
1367 static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1368 			       struct ieee80211_vif *vif,
1369 			       unsigned int link_id, u16 queue,
1370 			       const struct ieee80211_tx_queue_params *param)
1371 {
1372 	struct ar9170 *ar = hw->priv;
1373 	int ret;
1374 
1375 	mutex_lock(&ar->mutex);
1376 	memcpy(&ar->edcf[ar9170_qmap(queue)], param, sizeof(*param));
1377 	ret = carl9170_set_qos(ar);
1378 	mutex_unlock(&ar->mutex);
1379 	return ret;
1380 }
1381 
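/* Deferred A-MPDU garbage collection; runs carl9170_ampdu_gc under ar->mutex. */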
1382 static void carl9170_ampdu_work(struct work_struct *work)
1383 {
1384 	struct ar9170 *ar = container_of(work, struct ar9170,
1385 					 ampdu_work);
1386 
1387 	if (!IS_STARTED(ar))
1388 		return;
1389 
1390 	mutex_lock(&ar->mutex);
1391 	carl9170_ampdu_gc(ar);
1392 	mutex_unlock(&ar->mutex);
1393 }
1394 
1395 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1396 				    struct ieee80211_vif *vif,
1397 				    struct ieee80211_ampdu_params *params)
1398 {
1399 	struct ieee80211_sta *sta = params->sta;
1400 	enum ieee80211_ampdu_mlme_action action = params->action;
1401 	u16 tid = params->tid;
1402 	u16 *ssn = &params->ssn;
1403 	struct ar9170 *ar = hw->priv;
1404 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1405 	struct carl9170_sta_tid *tid_info;
1406 
1407 	if (modparam_noht)
1408 		return -EOPNOTSUPP;
1409 
1410 	switch (action) {
1411 	case IEEE80211_AMPDU_TX_START:
1412 		if (!sta_info->ht_sta)
1413 			return -EOPNOTSUPP;
1414 
1415 		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1416 				   GFP_KERNEL);
1417 		if (!tid_info)
1418 			return -ENOMEM;
1419 
1420 		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1421 		tid_info->state = CARL9170_TID_STATE_PROGRESS;
1422 		tid_info->tid = tid;
1423 		tid_info->max = sta_info->ampdu_max_len;
1424 		tid_info->sta = sta;
1425 		tid_info->vif = vif;
1426 
1427 		INIT_LIST_HEAD(&tid_info->list);
1428 		INIT_LIST_HEAD(&tid_info->tmp_list);
1429 		skb_queue_head_init(&tid_info->queue);
1430 		spin_lock_init(&tid_info->lock);
1431 
1432 		spin_lock_bh(&ar->tx_ampdu_list_lock);
1433 		ar->tx_ampdu_list_len++;
1434 		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1435 		rcu_assign_pointer(sta_info->agg[tid], tid_info);
1436 		spin_unlock_bh(&ar->tx_ampdu_list_lock);
1437 
1438 		return IEEE80211_AMPDU_TX_START_IMMEDIATE;
1439 
1440 	case IEEE80211_AMPDU_TX_STOP_CONT:
1441 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
1442 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1443 		rcu_read_lock();
1444 		tid_info = rcu_dereference(sta_info->agg[tid]);
1445 		if (tid_info) {
1446 			spin_lock_bh(&ar->tx_ampdu_list_lock);
1447 			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1448 				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1449 			spin_unlock_bh(&ar->tx_ampdu_list_lock);
1450 		}
1451 
1452 		RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1453 		rcu_read_unlock();
1454 
1455 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1456 		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1457 		break;
1458 
1459 	case IEEE80211_AMPDU_TX_OPERATIONAL:
1460 		rcu_read_lock();
1461 		tid_info = rcu_dereference(sta_info->agg[tid]);
1462 
1463 		sta_info->stats[tid].clear = true;
1464 		sta_info->stats[tid].req = false;
1465 
1466 		if (tid_info) {
1467 			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1468 			tid_info->state = CARL9170_TID_STATE_IDLE;
1469 		}
1470 		rcu_read_unlock();
1471 
1472 		if (WARN_ON_ONCE(!tid_info))
1473 			return -EFAULT;
1474 
1475 		break;
1476 
1477 	case IEEE80211_AMPDU_RX_START:
1478 	case IEEE80211_AMPDU_RX_STOP:
1479 		/* Handled by hardware */
1480 		break;
1481 
1482 	default:
1483 		return -EOPNOTSUPP;
1484 	}
1485 
1486 	return 0;
1487 }
1488 
1489 #ifdef CONFIG_CARL9170_WPC
1490 static int carl9170_register_wps_button(struct ar9170 *ar)
1491 {
1492 	struct input_dev *input;
1493 	int err;
1494 
1495 	if (!(ar->features & CARL9170_WPS_BUTTON))
1496 		return 0;
1497 
1498 	input = devm_input_allocate_device(&ar->udev->dev);
1499 	if (!input)
1500 		return -ENOMEM;
1501 
1502 	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1503 		 wiphy_name(ar->hw->wiphy));
1504 
1505 	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1506 		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1507 
1508 	input->name = ar->wps.name;
1509 	input->phys = ar->wps.phys;
1510 	input->id.bustype = BUS_USB;
1511 	input->dev.parent = &ar->hw->wiphy->dev;
1512 
1513 	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1514 
1515 	err = input_register_device(input);
1516 	if (err)
1517 		return err;
1518 
1519 	ar->wps.pbc = input;
1520 	return 0;
1521 }
1522 #endif /* CONFIG_CARL9170_WPC */
1523 
1524 #ifdef CONFIG_CARL9170_HWRNG
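/*
 * Refill the RNG cache by repeatedly reading the hardware random
 * number register (AR9170_RAND_REG_NUM) through the firmware's
 * register-read command.
 */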
1525 static int carl9170_rng_get(struct ar9170 *ar)
1526 {
1527 
1528 #define RW	(CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1529 #define RB	(CARL9170_MAX_CMD_PAYLOAD_LEN)
1530 
1531 	static const __le32 rng_load[RW] = {
1532 		[0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1533 
1534 	u32 buf[RW];
1535 
1536 	unsigned int i, off = 0, transfer, count;
1537 	int err;
1538 
1539 	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1540 
1541 	if (!IS_ACCEPTING_CMD(ar))
1542 		return -EAGAIN;
1543 
1544 	count = ARRAY_SIZE(ar->rng.cache);
1545 	while (count) {
1546 		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1547 					RB, (u8 *) rng_load,
1548 					RB, (u8 *) buf);
1549 		if (err)
1550 			return err;
1551 
1552 		transfer = min_t(unsigned int, count, RW);
1553 		for (i = 0; i < transfer; i++)
1554 			ar->rng.cache[off + i] = buf[i];
1555 
1556 		off += transfer;
1557 		count -= transfer;
1558 	}
1559 
1560 	ar->rng.cache_idx = 0;
1561 
1562 #undef RW
1563 #undef RB
1564 	return 0;
1565 }
1566 
1567 static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1568 {
1569 	struct ar9170 *ar = (struct ar9170 *)rng->priv;
1570 	int ret = -EIO;
1571 
1572 	mutex_lock(&ar->mutex);
1573 	if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1574 		ret = carl9170_rng_get(ar);
1575 		if (ret) {
1576 			mutex_unlock(&ar->mutex);
1577 			return ret;
1578 		}
1579 	}
1580 
1581 	*data = ar->rng.cache[ar->rng.cache_idx++];
1582 	mutex_unlock(&ar->mutex);
1583 
1584 	return sizeof(u16);
1585 }
1586 
1587 static int carl9170_register_hwrng(struct ar9170 *ar)
1588 {
1589 	int err;
1590 
1591 	snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1592 		 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1593 	ar->rng.rng.name = ar->rng.name;
1594 	ar->rng.rng.data_read = carl9170_rng_read;
1595 	ar->rng.rng.priv = (unsigned long)ar;
1596 
1597 	err = devm_hwrng_register(&ar->udev->dev, &ar->rng.rng);
1598 	if (err) {
1599 		dev_err(&ar->udev->dev, "Failed to register the random "
1600 			"number generator (%d)\n", err);
1601 		return err;
1602 	}
1603 
1604 	return carl9170_rng_get(ar);
1605 }
1606 #endif /* CONFIG_CARL9170_HWRNG */
1607 
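/*
 * Return the survey entry for the channel whose hw_value matches
 * @idx; refresh the counters first if it is the currently tuned
 * channel.
 */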
1608 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1609 				struct survey_info *survey)
1610 {
1611 	struct ar9170 *ar = hw->priv;
1612 	struct ieee80211_channel *chan;
1613 	struct ieee80211_supported_band *band;
1614 	int err, b, i;
1615 
1616 	chan = ar->channel;
1617 	if (!chan)
1618 		return -ENODEV;
1619 
1620 	if (idx == chan->hw_value) {
1621 		mutex_lock(&ar->mutex);
1622 		err = carl9170_update_survey(ar, false, true);
1623 		mutex_unlock(&ar->mutex);
1624 		if (err)
1625 			return err;
1626 	}
1627 
1628 	for (b = 0; b < NUM_NL80211_BANDS; b++) {
1629 		band = ar->hw->wiphy->bands[b];
1630 
1631 		if (!band)
1632 			continue;
1633 
1634 		for (i = 0; i < band->n_channels; i++) {
1635 			if (band->channels[i].hw_value == idx) {
1636 				chan = &band->channels[i];
1637 				goto found;
1638 			}
1639 		}
1640 	}
1641 	return -ENOENT;
1642 
1643 found:
1644 	memcpy(survey, &ar->survey[idx], sizeof(*survey));
1645 
1646 	survey->channel = chan;
1647 	survey->filled = SURVEY_INFO_NOISE_DBM;
1648 
1649 	if (ar->channel == chan)
1650 		survey->filled |= SURVEY_INFO_IN_USE;
1651 
1652 	if (ar->fw.hw_counters) {
1653 		survey->filled |= SURVEY_INFO_TIME |
1654 				  SURVEY_INFO_TIME_BUSY |
1655 				  SURVEY_INFO_TIME_TX;
1656 	}
1657 
1658 	return 0;
1659 }
1660 
1661 static void carl9170_op_flush(struct ieee80211_hw *hw,
1662 			      struct ieee80211_vif *vif,
1663 			      u32 queues, bool drop)
1664 {
1665 	struct ar9170 *ar = hw->priv;
1666 	unsigned int vid;
1667 
1668 	mutex_lock(&ar->mutex);
1669 	for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1670 		carl9170_flush_cab(ar, vid);
1671 
1672 	carl9170_flush(ar, drop);
1673 	mutex_unlock(&ar->mutex);
1674 }
1675 
1676 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1677 				 struct ieee80211_low_level_stats *stats)
1678 {
1679 	struct ar9170 *ar = hw->priv;
1680 
1681 	memset(stats, 0, sizeof(*stats));
1682 	stats->dot11ACKFailureCount = ar->tx_ack_failures;
1683 	stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1684 	return 0;
1685 }
1686 
1687 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1688 				   struct ieee80211_vif *vif,
1689 				   enum sta_notify_cmd cmd,
1690 				   struct ieee80211_sta *sta)
1691 {
1692 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1693 
1694 	switch (cmd) {
1695 	case STA_NOTIFY_SLEEP:
1696 		sta_info->sleeping = true;
1697 		if (atomic_read(&sta_info->pending_frames))
1698 			ieee80211_sta_block_awake(hw, sta, true);
1699 		break;
1700 
1701 	case STA_NOTIFY_AWAKE:
1702 		sta_info->sleeping = false;
1703 		break;
1704 	}
1705 }
1706 
1707 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1708 {
1709 	struct ar9170 *ar = hw->priv;
1710 
1711 	return !!atomic_read(&ar->tx_total_queued);
1712 }
1713 
1714 static const struct ieee80211_ops carl9170_ops = {
1715 	.start			= carl9170_op_start,
1716 	.stop			= carl9170_op_stop,
1717 	.tx			= carl9170_op_tx,
1718 	.wake_tx_queue		= ieee80211_handle_wake_tx_queue,
1719 	.flush			= carl9170_op_flush,
1720 	.add_interface		= carl9170_op_add_interface,
1721 	.remove_interface	= carl9170_op_remove_interface,
1722 	.config			= carl9170_op_config,
1723 	.prepare_multicast	= carl9170_op_prepare_multicast,
1724 	.configure_filter	= carl9170_op_configure_filter,
1725 	.conf_tx		= carl9170_op_conf_tx,
1726 	.bss_info_changed	= carl9170_op_bss_info_changed,
1727 	.get_tsf		= carl9170_op_get_tsf,
1728 	.set_key		= carl9170_op_set_key,
1729 	.sta_add		= carl9170_op_sta_add,
1730 	.sta_remove		= carl9170_op_sta_remove,
1731 	.sta_notify		= carl9170_op_sta_notify,
1732 	.get_survey		= carl9170_op_get_survey,
1733 	.get_stats		= carl9170_op_get_stats,
1734 	.ampdu_action		= carl9170_op_ampdu_action,
1735 	.tx_frames_pending	= carl9170_tx_frames_pending,
1736 };
1737 
1738 void *carl9170_alloc(size_t priv_size)
1739 {
1740 	struct ieee80211_hw *hw;
1741 	struct ar9170 *ar;
1742 	struct sk_buff *skb;
1743 	int i;
1744 
1745 	/*
1746 	 * This buffer is used for rx stream reconstruction.
1747 	 * Under heavy load this device (or the transport layer?)
1748 	 * tends to split the streams into separate rx descriptors.
1749 	 */
1750 
1751 	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1752 	if (!skb)
1753 		goto err_nomem;
1754 
1755 	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1756 	if (!hw)
1757 		goto err_nomem;
1758 
1759 	ar = hw->priv;
1760 	ar->hw = hw;
1761 	ar->rx_failover = skb;
1762 
1763 	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1764 	ar->rx_has_plcp = false;
1765 
1766 	/*
1767 	 * Here's a hidden pitfall!
1768 	 *
1769 	 * All 4 AC queues work perfectly well under _legacy_ operation.
1770 	 * However, as soon as aggregation is enabled, the traffic flow
1771 	 * gets very bumpy. Therefore we have to _switch_ to a
1772 	 * software AC with a single HW queue.
1773 	 */
1774 	hw->queues = __AR9170_NUM_TXQ;
1775 
1776 	mutex_init(&ar->mutex);
1777 	spin_lock_init(&ar->beacon_lock);
1778 	spin_lock_init(&ar->cmd_lock);
1779 	spin_lock_init(&ar->tx_stats_lock);
1780 	spin_lock_init(&ar->tx_ampdu_list_lock);
1781 	spin_lock_init(&ar->mem_lock);
1782 	spin_lock_init(&ar->state_lock);
1783 	atomic_set(&ar->pending_restarts, 0);
1784 	ar->vifs = 0;
1785 	for (i = 0; i < ar->hw->queues; i++) {
1786 		skb_queue_head_init(&ar->tx_status[i]);
1787 		skb_queue_head_init(&ar->tx_pending[i]);
1788 
1789 		INIT_LIST_HEAD(&ar->bar_list[i]);
1790 		spin_lock_init(&ar->bar_list_lock[i]);
1791 	}
1792 	INIT_WORK(&ar->ps_work, carl9170_ps_work);
1793 	INIT_WORK(&ar->ping_work, carl9170_ping_work);
1794 	INIT_WORK(&ar->restart_work, carl9170_restart_work);
1795 	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1796 	INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1797 	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1798 	INIT_LIST_HEAD(&ar->tx_ampdu_list);
1799 	rcu_assign_pointer(ar->tx_ampdu_iter,
1800 			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1801 
1802 	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1803 	INIT_LIST_HEAD(&ar->vif_list);
1804 	init_completion(&ar->tx_flush);
1805 
1806 	/* firmware decides which modes we support */
1807 	hw->wiphy->interface_modes = 0;
1808 
1809 	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
1810 	ieee80211_hw_set(hw, MFP_CAPABLE);
1811 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
1812 	ieee80211_hw_set(hw, SUPPORTS_PS);
1813 	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
1814 	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
1815 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
1816 	ieee80211_hw_set(hw, SIGNAL_DBM);
1817 	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
1818 
1819 	if (!modparam_noht) {
1820 		/*
1821 		 * See the "hidden pitfall" comment above for why we let
1822 		 * the user disable HT with a module parameter.
1823 		 */
1824 		ieee80211_hw_set(hw, AMPDU_AGGREGATION);
1825 	}
1826 
1827 	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1828 	hw->sta_data_size = sizeof(struct carl9170_sta_info);
1829 	hw->vif_data_size = sizeof(struct carl9170_vif_info);
1830 
1831 	hw->max_rates = CARL9170_TX_MAX_RATES;
1832 	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1833 
1834 	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1835 		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1836 
1837 	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
1838 
1839 	return ar;
1840 
1841 err_nomem:
1842 	kfree_skb(skb);
1843 	return ERR_PTR(-ENOMEM);
1844 }
1845 
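/*
 * Fetch the EEPROM contents in small chunks through the firmware's
 * multi-register read command.
 */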
1846 static int carl9170_read_eeprom(struct ar9170 *ar)
1847 {
1848 #define RW	8	/* number of words to read at once */
1849 #define RB	(sizeof(u32) * RW)
1850 	u8 *eeprom = (void *)&ar->eeprom;
1851 	__le32 offsets[RW];
1852 	int i, j, err;
1853 
1854 	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1855 
1856 	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1857 #ifndef __CHECKER__
1858 	/* we don't want to handle a trailing remainder */
1859 	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1860 #endif
1861 
1862 	for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1863 		for (j = 0; j < RW; j++)
1864 			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1865 						 RB * i + 4 * j);
1866 
1867 		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1868 					RB, (u8 *) &offsets,
1869 					RB, eeprom + RB * i);
1870 		if (err)
1871 			return err;
1872 	}
1873 
1874 #undef RW
1875 #undef RB
1876 	return 0;
1877 }
1878 
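/*
 * Derive the supported bands, HT TX parameters, regulatory domain
 * and permanent MAC address from the EEPROM contents.
 */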
1879 static int carl9170_parse_eeprom(struct ar9170 *ar)
1880 {
1881 	struct ath_regulatory *regulatory = &ar->common.regulatory;
1882 	unsigned int rx_streams, tx_streams, tx_params = 0;
1883 	int bands = 0;
1884 	int chans = 0;
1885 
1886 	if (ar->eeprom.length == cpu_to_le16(0xffff))
1887 		return -ENODATA;
1888 
1889 	rx_streams = hweight8(ar->eeprom.rx_mask);
1890 	tx_streams = hweight8(ar->eeprom.tx_mask);
1891 
1892 	if (rx_streams != tx_streams) {
1893 		tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1894 
1895 		WARN_ON(!(tx_streams >= 1 && tx_streams <=
1896 			IEEE80211_HT_MCS_TX_MAX_STREAMS));
1897 
1898 		tx_params |= (tx_streams - 1) <<
1899 			    IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1900 
1901 		carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1902 		carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1903 	}
1904 
1905 	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1906 		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
1907 			&carl9170_band_2GHz;
1908 		chans += carl9170_band_2GHz.n_channels;
1909 		bands++;
1910 	}
1911 	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1912 		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
1913 			&carl9170_band_5GHz;
1914 		chans += carl9170_band_5GHz.n_channels;
1915 		bands++;
1916 	}
1917 
1918 	if (!bands)
1919 		return -EINVAL;
1920 
1921 	ar->survey = devm_kcalloc(&ar->udev->dev, chans,
1922 				  sizeof(struct survey_info), GFP_KERNEL);
1923 	if (!ar->survey)
1924 		return -ENOMEM;
1925 	ar->num_channels = chans;
1926 
1927 	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1928 
1929 	/* second part of wiphy init */
1930 	SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1931 
1932 	return 0;
1933 }
1934 
1935 static void carl9170_reg_notifier(struct wiphy *wiphy,
1936 				  struct regulatory_request *request)
1937 {
1938 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1939 	struct ar9170 *ar = hw->priv;
1940 
1941 	ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1942 }
1943 
1944 int carl9170_register(struct ar9170 *ar)
1945 {
1946 	struct ath_regulatory *regulatory = &ar->common.regulatory;
1947 	int err = 0, i;
1948 
1949 	ar->mem_bitmap = devm_bitmap_zalloc(&ar->udev->dev, ar->fw.mem_blocks, GFP_KERNEL);
1950 	if (!ar->mem_bitmap)
1951 		return -ENOMEM;
1952 
1953 	/* try to read EEPROM, init MAC addr */
1954 	err = carl9170_read_eeprom(ar);
1955 	if (err)
1956 		return err;
1957 
1958 	err = carl9170_parse_eeprom(ar);
1959 	if (err)
1960 		return err;
1961 
1962 	err = ath_regd_init(regulatory, ar->hw->wiphy,
1963 			    carl9170_reg_notifier);
1964 	if (err)
1965 		return err;
1966 
1967 	if (modparam_noht) {
1968 		carl9170_band_2GHz.ht_cap.ht_supported = false;
1969 		carl9170_band_5GHz.ht_cap.ht_supported = false;
1970 	}
1971 
1972 	for (i = 0; i < ar->fw.vif_num; i++) {
1973 		ar->vif_priv[i].id = i;
1974 		ar->vif_priv[i].vif = NULL;
1975 	}
1976 
1977 	err = ieee80211_register_hw(ar->hw);
1978 	if (err)
1979 		return err;
1980 
1981 	/* mac80211 interface is now registered */
1982 	ar->registered = true;
1983 
1984 	if (!ath_is_world_regd(regulatory))
1985 		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
1986 
1987 #ifdef CONFIG_CARL9170_DEBUGFS
1988 	carl9170_debugfs_register(ar);
1989 #endif /* CONFIG_CARL9170_DEBUGFS */
1990 
1991 	err = carl9170_led_init(ar);
1992 	if (err)
1993 		goto err_unreg;
1994 
1995 #ifdef CONFIG_CARL9170_LEDS
1996 	err = carl9170_led_register(ar);
1997 	if (err)
1998 		goto err_unreg;
1999 #endif /* CONFIG_CARL9170_LEDS */
2000 
2001 #ifdef CONFIG_CARL9170_WPC
2002 	err = carl9170_register_wps_button(ar);
2003 	if (err)
2004 		goto err_unreg;
2005 #endif /* CONFIG_CARL9170_WPC */
2006 
2007 #ifdef CONFIG_CARL9170_HWRNG
2008 	err = carl9170_register_hwrng(ar);
2009 	if (err)
2010 		goto err_unreg;
2011 #endif /* CONFIG_CARL9170_HWRNG */
2012 
2013 	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2014 		 wiphy_name(ar->hw->wiphy));
2015 
2016 	return 0;
2017 
2018 err_unreg:
2019 	carl9170_unregister(ar);
2020 	return err;
2021 }
2022 
2023 void carl9170_unregister(struct ar9170 *ar)
2024 {
2025 	if (!ar->registered)
2026 		return;
2027 
2028 	ar->registered = false;
2029 
2030 #ifdef CONFIG_CARL9170_LEDS
2031 	carl9170_led_unregister(ar);
2032 #endif /* CONFIG_CARL9170_LEDS */
2033 
2034 #ifdef CONFIG_CARL9170_DEBUGFS
2035 	carl9170_debugfs_unregister(ar);
2036 #endif /* CONFIG_CARL9170_DEBUGFS */
2037 
2038 	carl9170_cancel_worker(ar);
2039 	cancel_work_sync(&ar->restart_work);
2040 
2041 	ieee80211_unregister_hw(ar->hw);
2042 }
2043 
2044 void carl9170_free(struct ar9170 *ar)
2045 {
2046 	WARN_ON(ar->registered);
2047 	WARN_ON(IS_INITIALIZED(ar));
2048 
2049 	kfree_skb(ar->rx_failover);
2050 	ar->rx_failover = NULL;
2051 
2052 	mutex_destroy(&ar->mutex);
2053 
2054 	ieee80211_free_hw(ar->hw);
2055 }
2056