xref: /openbmc/linux/drivers/net/wireless/ath/ath9k/init.c (revision cac4220b2e93e6344f987581d52d5bd71ff2cc0e)
1 /*
2  * Copyright (c) 2008-2009 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/slab.h>
18 
19 #include "ath9k.h"
20 
21 static char *dev_info = "ath9k";
22 
23 MODULE_AUTHOR("Atheros Communications");
24 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
25 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
26 MODULE_LICENSE("Dual BSD/GPL");
27 
28 static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
29 module_param_named(debug, ath9k_debug, uint, 0);
30 MODULE_PARM_DESC(debug, "Debugging mask");
31 
32 int modparam_nohwcrypt;
33 module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
34 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
35 
36 int led_blink;
37 module_param_named(blink, led_blink, int, 0444);
38 MODULE_PARM_DESC(blink, "Enable LED blink on activity");
39 
40 /* We use the hw_value as an index into our private channel structure */
41 
42 #define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
43 	.center_freq = (_freq), \
44 	.hw_value = (_idx), \
45 	.max_power = 20, \
46 }
47 
48 #define CHAN5G(_freq, _idx) { \
49 	.band = IEEE80211_BAND_5GHZ, \
50 	.center_freq = (_freq), \
51 	.hw_value = (_idx), \
52 	.max_power = 20, \
53 }
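/*
 * As an example, CHAN2G(2412, 0) expands to
 * { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 0,
 *   .max_power = 20 }; the hw_value index (0 here) is what later lets the
 * driver map a mac80211 channel back to its private per-channel state.
 */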
54 
55 /* Some 2 GHz radios are actually tunable from 2312-2732 MHz
56  * in 5 MHz steps; to keep this table static we only list the
57  * channels for which we know all cards have calibration data.
58  */
59 static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
60 	CHAN2G(2412, 0), /* Channel 1 */
61 	CHAN2G(2417, 1), /* Channel 2 */
62 	CHAN2G(2422, 2), /* Channel 3 */
63 	CHAN2G(2427, 3), /* Channel 4 */
64 	CHAN2G(2432, 4), /* Channel 5 */
65 	CHAN2G(2437, 5), /* Channel 6 */
66 	CHAN2G(2442, 6), /* Channel 7 */
67 	CHAN2G(2447, 7), /* Channel 8 */
68 	CHAN2G(2452, 8), /* Channel 9 */
69 	CHAN2G(2457, 9), /* Channel 10 */
70 	CHAN2G(2462, 10), /* Channel 11 */
71 	CHAN2G(2467, 11), /* Channel 12 */
72 	CHAN2G(2472, 12), /* Channel 13 */
73 	CHAN2G(2484, 13), /* Channel 14 */
74 };
75 
76 /* Some 5 GHz radios are actually tunable from XXXX-YYYY
77  * in 5 MHz steps; to keep this table static we only list the
78  * channels for which we know all cards have calibration data.
79  */
80 static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
81 	/* _We_ call this UNII 1 */
82 	CHAN5G(5180, 14), /* Channel 36 */
83 	CHAN5G(5200, 15), /* Channel 40 */
84 	CHAN5G(5220, 16), /* Channel 44 */
85 	CHAN5G(5240, 17), /* Channel 48 */
86 	/* _We_ call this UNII 2 */
87 	CHAN5G(5260, 18), /* Channel 52 */
88 	CHAN5G(5280, 19), /* Channel 56 */
89 	CHAN5G(5300, 20), /* Channel 60 */
90 	CHAN5G(5320, 21), /* Channel 64 */
91 	/* _We_ call this "Middle band" */
92 	CHAN5G(5500, 22), /* Channel 100 */
93 	CHAN5G(5520, 23), /* Channel 104 */
94 	CHAN5G(5540, 24), /* Channel 108 */
95 	CHAN5G(5560, 25), /* Channel 112 */
96 	CHAN5G(5580, 26), /* Channel 116 */
97 	CHAN5G(5600, 27), /* Channel 120 */
98 	CHAN5G(5620, 28), /* Channel 124 */
99 	CHAN5G(5640, 29), /* Channel 128 */
100 	CHAN5G(5660, 30), /* Channel 132 */
101 	CHAN5G(5680, 31), /* Channel 136 */
102 	CHAN5G(5700, 32), /* Channel 140 */
103 	/* _We_ call this UNII 3 */
104 	CHAN5G(5745, 33), /* Channel 149 */
105 	CHAN5G(5765, 34), /* Channel 153 */
106 	CHAN5G(5785, 35), /* Channel 157 */
107 	CHAN5G(5805, 36), /* Channel 161 */
108 	CHAN5G(5825, 37), /* Channel 165 */
109 };
110 
111 /* Atheros hardware rate code addition for short preamble */
112 #define SHPCHECK(__hw_rate, __flags) \
113 	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
114 
115 #define RATE(_bitrate, _hw_rate, _flags) {              \
116 	.bitrate        = (_bitrate),                   \
117 	.flags          = (_flags),                     \
118 	.hw_value       = (_hw_rate),                   \
119 	.hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
120 }
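/*
 * Example expansion: RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE) yields
 * .bitrate = 20 (2 Mbps in 100 kbps units), .hw_value = 0x1a and
 * .hw_value_short = 0x1a | 0x04 = 0x1e, since SHPCHECK() sets bit 2 of the
 * hardware rate code only for rates flagged as short-preamble capable.
 */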
121 
122 static struct ieee80211_rate ath9k_legacy_rates[] = {
123 	RATE(10, 0x1b, 0),
124 	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
125 	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
126 	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
127 	RATE(60, 0x0b, 0),
128 	RATE(90, 0x0f, 0),
129 	RATE(120, 0x0a, 0),
130 	RATE(180, 0x0e, 0),
131 	RATE(240, 0x09, 0),
132 	RATE(360, 0x0d, 0),
133 	RATE(480, 0x08, 0),
134 	RATE(540, 0x0c, 0),
135 };
136 
137 static void ath9k_deinit_softc(struct ath_softc *sc);
138 
139 /*
140  * Reads and writes share the same lock. We do this to serialize
141  * register access on Atheros 802.11n PCI devices only, as the FIFO
142  * on these devices can only sanely accept two requests at a time.
143  */
144 
145 static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
146 {
147 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
148 	struct ath_common *common = ath9k_hw_common(ah);
149 	struct ath_softc *sc = (struct ath_softc *) common->priv;
150 
151 	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
152 		unsigned long flags;
153 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
154 		iowrite32(val, sc->mem + reg_offset);
155 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
156 	} else
157 		iowrite32(val, sc->mem + reg_offset);
158 }
159 
160 static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
161 {
162 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
163 	struct ath_common *common = ath9k_hw_common(ah);
164 	struct ath_softc *sc = (struct ath_softc *) common->priv;
165 	u32 val;
166 
167 	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
168 		unsigned long flags;
169 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
170 		val = ioread32(sc->mem + reg_offset);
171 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
172 	} else
173 		val = ioread32(sc->mem + reg_offset);
174 	return val;
175 }
176 
177 static const struct ath_ops ath9k_common_ops = {
178 	.read = ath9k_ioread32,
179 	.write = ath9k_iowrite32,
180 };
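/*
 * These ops are consumed by the shared ath layer; the driver's
 * REG_READ()/REG_WRITE() accessors (see hw.h) dispatch through
 * ath9k_hw_common(ah)->ops, roughly:
 *
 *	REG_WRITE(ah, reg, val) -> common->ops->write(ah, val, reg)
 *	REG_READ(ah, reg)       -> common->ops->read(ah, reg)
 *
 * so every register access can be serialized above when
 * serialize_regmode == SER_REG_MODE_ON.
 */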
181 
182 /**************************/
183 /*     Initialization     */
184 /**************************/
185 
186 static void setup_ht_cap(struct ath_softc *sc,
187 			 struct ieee80211_sta_ht_cap *ht_info)
188 {
189 	struct ath_hw *ah = sc->sc_ah;
190 	struct ath_common *common = ath9k_hw_common(ah);
191 	u8 tx_streams, rx_streams;
192 	int i, max_streams;
193 
194 	ht_info->ht_supported = true;
195 	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
196 		       IEEE80211_HT_CAP_SM_PS |
197 		       IEEE80211_HT_CAP_SGI_40 |
198 		       IEEE80211_HT_CAP_DSSSCCK40;
199 
200 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
201 		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
202 
203 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
204 		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
205 
206 	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
207 	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
208 
209 	if (AR_SREV_9300_20_OR_LATER(ah))
210 		max_streams = 3;
211 	else
212 		max_streams = 2;
213 
214 	if (AR_SREV_9280_20_OR_LATER(ah)) {
215 		if (max_streams >= 2)
216 			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
217 		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
218 	}
219 
220 	/* set up supported mcs set */
221 	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
222 	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
223 	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
224 
225 	ath_print(common, ATH_DBG_CONFIG,
226 		  "TX streams: %d, RX streams: %d\n",
227 		  tx_streams, rx_streams);
228 
229 	if (tx_streams != rx_streams) {
230 		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
231 		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
232 				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
233 	}
234 
235 	for (i = 0; i < rx_streams; i++)
236 		ht_info->mcs.rx_mask[i] = 0xff;
237 
238 	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
239 }
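/*
 * For a typical 2x2 card (tx_chainmask == rx_chainmask == 0x3) this yields
 * tx_streams == rx_streams == 2, so rx_mask[0] and rx_mask[1] are set to
 * 0xff (MCS 0-15 supported) and no TX_RX_DIFF parameters are advertised.
 */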
240 
241 static int ath9k_reg_notifier(struct wiphy *wiphy,
242 			      struct regulatory_request *request)
243 {
244 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
245 	struct ath_wiphy *aphy = hw->priv;
246 	struct ath_softc *sc = aphy->sc;
247 	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
248 
249 	return ath_reg_notifier_apply(wiphy, request, reg);
250 }
251 
252 /*
253  *  This function allocates both the DMA descriptor structure and the
254  *  buffers it contains.  These hold the descriptors used by the
255  *  system.
256  */
257 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
258 		      struct list_head *head, const char *name,
259 		      int nbuf, int ndesc, bool is_tx)
260 {
261 #define	DS2PHYS(_dd, _ds)						\
262 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
263 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
264 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
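	/*
	 * DS2PHYS() converts a descriptor's CPU address into its DMA (bus)
	 * address by reusing its offset from the start of the coherent
	 * block.  ATH_DESC_4KB_BOUND_CHECK() flags addresses whose low 12
	 * bits exceed 0xF7F, i.e. where a 32 dword (0x80 byte) descriptor
	 * fetch starting at that address could cross a 4 KB page boundary.
	 */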
265 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
266 	u8 *ds;
267 	struct ath_buf *bf;
268 	int i, bsize, error, desc_len;
269 
270 	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
271 		  name, nbuf, ndesc);
272 
273 	INIT_LIST_HEAD(head);
274 
275 	if (is_tx)
276 		desc_len = sc->sc_ah->caps.tx_desc_len;
277 	else
278 		desc_len = sizeof(struct ath_desc);
279 
280 	/* the descriptor length must be a multiple of 4 bytes (DWORDs) */
281 	if ((desc_len % 4) != 0) {
282 		ath_print(common, ATH_DBG_FATAL,
283 			  "ath_desc not DWORD aligned\n");
284 		BUG_ON((desc_len % 4) != 0);
285 		error = -ENOMEM;
286 		goto fail;
287 	}
288 
289 	dd->dd_desc_len = desc_len * nbuf * ndesc;
290 
291 	/*
292 	 * Need additional DMA memory because we can't use
293 	 * descriptors that cross the 4K page boundary. Assume
294 	 * one skipped descriptor per 4K page.
295 	 */
296 	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
297 		u32 ndesc_skipped =
298 			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
299 		u32 dma_len;
300 
301 		while (ndesc_skipped) {
302 			dma_len = ndesc_skipped * desc_len;
303 			dd->dd_desc_len += dma_len;
304 
305 			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
306 		}
307 	}
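	/*
	 * Worked example with made-up numbers: desc_len = 128, nbuf = 8,
	 * ndesc = 4 gives dd_desc_len = 4096, so up to one descriptor per
	 * 4 KB page may have to be skipped; the loop above adds one extra
	 * descriptor's worth of DMA memory (128 bytes) and then stops,
	 * because 128 / 4096 rounds down to zero skipped descriptors.
	 */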
308 
309 	/* allocate descriptors */
310 	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
311 					 &dd->dd_desc_paddr, GFP_KERNEL);
312 	if (dd->dd_desc == NULL) {
313 		error = -ENOMEM;
314 		goto fail;
315 	}
316 	ds = (u8 *) dd->dd_desc;
317 	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
318 		  name, ds, (u32) dd->dd_desc_len,
319 		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
320 
321 	/* allocate buffers */
322 	bsize = sizeof(struct ath_buf) * nbuf;
323 	bf = kzalloc(bsize, GFP_KERNEL);
324 	if (bf == NULL) {
325 		error = -ENOMEM;
326 		goto fail2;
327 	}
328 	dd->dd_bufptr = bf;
329 
330 	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
331 		bf->bf_desc = ds;
332 		bf->bf_daddr = DS2PHYS(dd, ds);
333 
334 		if (!(sc->sc_ah->caps.hw_caps &
335 		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
336 			/*
337 			 * Skip descriptor addresses which can cause 4KB
338 			 * boundary crossing (addr + length) with a 32 dword
339 			 * descriptor fetch.
340 			 */
341 			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
342 				BUG_ON((caddr_t) bf->bf_desc >=
343 				       ((caddr_t) dd->dd_desc +
344 					dd->dd_desc_len));
345 
346 				ds += (desc_len * ndesc);
347 				bf->bf_desc = ds;
348 				bf->bf_daddr = DS2PHYS(dd, ds);
349 			}
350 		}
351 		list_add_tail(&bf->list, head);
352 	}
353 	return 0;
354 fail2:
355 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
356 			  dd->dd_desc_paddr);
357 fail:
358 	memset(dd, 0, sizeof(*dd));
359 	return error;
360 #undef ATH_DESC_4KB_BOUND_CHECK
361 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
362 #undef DS2PHYS
363 }
364 
365 static void ath9k_init_crypto(struct ath_softc *sc)
366 {
367 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
368 	int i = 0;
369 
370 	/* Get the hardware key cache size. */
371 	common->keymax = sc->sc_ah->caps.keycache_size;
372 	if (common->keymax > ATH_KEYMAX) {
373 		ath_print(common, ATH_DBG_ANY,
374 			  "Warning, using only %u entries in %u key cache\n",
375 			  ATH_KEYMAX, common->keymax);
376 		common->keymax = ATH_KEYMAX;
377 	}
378 
379 	/*
380 	 * Reset the key cache since some parts do not
381 	 * reset the contents on initial power up.
382 	 */
383 	for (i = 0; i < common->keymax; i++)
384 		ath_hw_keyreset(common, (u16) i);
385 
386 	/*
387 	 * Check whether the separate key cache entries
388 	 * are required to handle both tx+rx MIC keys.
389 	 * With split mic keys the number of stations is limited
390 	 * to 27 otherwise 59.
391 	 */
392 	if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
393 		common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
394 }
395 
396 static int ath9k_init_btcoex(struct ath_softc *sc)
397 {
398 	int r, qnum;
399 
400 	switch (sc->sc_ah->btcoex_hw.scheme) {
401 	case ATH_BTCOEX_CFG_NONE:
402 		break;
403 	case ATH_BTCOEX_CFG_2WIRE:
404 		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
405 		break;
406 	case ATH_BTCOEX_CFG_3WIRE:
407 		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
408 		r = ath_init_btcoex_timer(sc);
409 		if (r)
410 			return -1;
411 		qnum = sc->tx.hwq_map[WME_AC_BE];
412 		ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
413 		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
414 		break;
415 	default:
416 		WARN_ON(1);
417 		break;
418 	}
419 
420 	return 0;
421 }
422 
423 static int ath9k_init_queues(struct ath_softc *sc)
424 {
425 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
426 	int i = 0;
427 
428 	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
429 		sc->tx.hwq_map[i] = -1;
430 
431 	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
432 	if (sc->beacon.beaconq == -1) {
433 		ath_print(common, ATH_DBG_FATAL,
434 			  "Unable to setup a beacon xmit queue\n");
435 		goto err;
436 	}
437 
438 	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
439 	if (sc->beacon.cabq == NULL) {
440 		ath_print(common, ATH_DBG_FATAL,
441 			  "Unable to setup CAB xmit queue\n");
442 		goto err;
443 	}
444 
445 	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
446 	ath_cabq_update(sc);
447 
448 	if (!ath_tx_setup(sc, WME_AC_BK)) {
449 		ath_print(common, ATH_DBG_FATAL,
450 			  "Unable to setup xmit queue for BK traffic\n");
451 		goto err;
452 	}
453 
454 	if (!ath_tx_setup(sc, WME_AC_BE)) {
455 		ath_print(common, ATH_DBG_FATAL,
456 			  "Unable to setup xmit queue for BE traffic\n");
457 		goto err;
458 	}
459 	if (!ath_tx_setup(sc, WME_AC_VI)) {
460 		ath_print(common, ATH_DBG_FATAL,
461 			  "Unable to setup xmit queue for VI traffic\n");
462 		goto err;
463 	}
464 	if (!ath_tx_setup(sc, WME_AC_VO)) {
465 		ath_print(common, ATH_DBG_FATAL,
466 			  "Unable to setup xmit queue for VO traffic\n");
467 		goto err;
468 	}
469 
470 	return 0;
471 
472 err:
473 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
474 		if (ATH_TXQ_SETUP(sc, i))
475 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
476 
477 	return -EIO;
478 }
479 
480 static int ath9k_init_channels_rates(struct ath_softc *sc)
481 {
482 	void *channels;
483 
484 	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
485 		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
486 		     ATH9K_NUM_CHANNELS);
487 
488 	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
489 		channels = kmemdup(ath9k_2ghz_chantable,
490 			sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
491 		if (!channels)
492 		    return -ENOMEM;
493 
494 		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
495 		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
496 		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
497 			ARRAY_SIZE(ath9k_2ghz_chantable);
498 		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
499 		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
500 			ARRAY_SIZE(ath9k_legacy_rates);
501 	}
502 
503 	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
504 		channels = kmemdup(ath9k_5ghz_chantable,
505 			sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
506 		if (!channels) {
507 			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
508 				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
509 			return -ENOMEM;
510 		}
511 
512 		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
513 		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
514 		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
515 			ARRAY_SIZE(ath9k_5ghz_chantable);
516 		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
517 			ath9k_legacy_rates + 4;
518 		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
519 			ARRAY_SIZE(ath9k_legacy_rates) - 4;
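		/*
		 * The "+ 4" / "- 4" above skip the four CCK entries
		 * (1, 2, 5.5 and 11 Mbps) at the start of
		 * ath9k_legacy_rates, so the 5 GHz band only advertises
		 * the eight OFDM rates.
		 */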
520 	}
521 	return 0;
522 }
523 
524 static void ath9k_init_misc(struct ath_softc *sc)
525 {
526 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
527 	int i = 0;
528 
529 	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
530 
531 	sc->config.txpowlimit = ATH_TXPOWER_MAX;
532 
533 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
534 		sc->sc_flags |= SC_OP_TXAGGR;
535 		sc->sc_flags |= SC_OP_RXAGGR;
536 	}
537 
538 	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
539 	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
540 
541 	ath9k_hw_set_diversity(sc->sc_ah, true);
542 	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
543 
544 	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
545 
546 	sc->beacon.slottime = ATH9K_SLOT_TIME_9;
547 
548 	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
549 		sc->beacon.bslot[i] = NULL;
550 		sc->beacon.bslot_aphy[i] = NULL;
551 	}
552 
553 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
554 		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
555 }
556 
557 static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
558 			    const struct ath_bus_ops *bus_ops)
559 {
560 	struct ath_hw *ah = NULL;
561 	struct ath_common *common;
562 	int ret = 0, i;
563 	int csz = 0;
564 
565 	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
566 	if (!ah)
567 		return -ENOMEM;
568 
569 	ah->hw_version.devid = devid;
570 	ah->hw_version.subsysid = subsysid;
571 	sc->sc_ah = ah;
572 
573 	common = ath9k_hw_common(ah);
574 	common->ops = &ath9k_common_ops;
575 	common->bus_ops = bus_ops;
576 	common->ah = ah;
577 	common->hw = sc->hw;
578 	common->priv = sc;
579 	common->debug_mask = ath9k_debug;
580 
581 	spin_lock_init(&sc->wiphy_lock);
582 	spin_lock_init(&sc->sc_resetlock);
583 	spin_lock_init(&sc->sc_serial_rw);
584 	spin_lock_init(&sc->sc_pm_lock);
585 	mutex_init(&sc->mutex);
586 	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
587 	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
588 		     (unsigned long)sc);
589 
590 	/*
591 	 * Cache line size is used to size and align various
592 	 * structures used to communicate with the hardware.
593 	 */
594 	ath_read_cachesize(common, &csz);
595 	common->cachelsz = csz << 2; /* convert to bytes */
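	/*
	 * csz is reported in 32-bit words (e.g. the PCI cache line size
	 * register counts dwords), hence the << 2; a csz of 16 therefore
	 * means a 64-byte cache line.
	 */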
596 
597 	/* Initializes the hardware for all supported chipsets */
598 	ret = ath9k_hw_init(ah);
599 	if (ret)
600 		goto err_hw;
601 
602 	ret = ath9k_init_debug(ah);
603 	if (ret) {
604 		ath_print(common, ATH_DBG_FATAL,
605 			  "Unable to create debugfs files\n");
606 		goto err_debug;
607 	}
608 
609 	ret = ath9k_init_queues(sc);
610 	if (ret)
611 		goto err_queues;
612 
613 	ret = ath9k_init_btcoex(sc);
614 	if (ret)
615 		goto err_btcoex;
616 
617 	ret = ath9k_init_channels_rates(sc);
618 	if (ret)
619 		goto err_btcoex;
620 
621 	ath9k_init_crypto(sc);
622 	ath9k_init_misc(sc);
623 
624 	return 0;
625 
626 err_btcoex:
627 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
628 		if (ATH_TXQ_SETUP(sc, i))
629 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
630 err_queues:
631 	ath9k_exit_debug(ah);
632 err_debug:
633 	ath9k_hw_deinit(ah);
634 err_hw:
635 	tasklet_kill(&sc->intr_tq);
636 	tasklet_kill(&sc->bcon_tasklet);
637 
638 	kfree(ah);
639 	sc->sc_ah = NULL;
640 
641 	return ret;
642 }
643 
644 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
645 {
646 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
647 
648 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
649 		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
650 		IEEE80211_HW_SIGNAL_DBM |
651 		IEEE80211_HW_SUPPORTS_PS |
652 		IEEE80211_HW_PS_NULLFUNC_STACK |
653 		IEEE80211_HW_SPECTRUM_MGMT |
654 		IEEE80211_HW_REPORTS_TX_ACK_STATUS;
655 
656 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
657 		 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
658 
659 	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
660 		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
661 
662 	hw->wiphy->interface_modes =
663 		BIT(NL80211_IFTYPE_AP) |
664 		BIT(NL80211_IFTYPE_WDS) |
665 		BIT(NL80211_IFTYPE_STATION) |
666 		BIT(NL80211_IFTYPE_ADHOC) |
667 		BIT(NL80211_IFTYPE_MESH_POINT);
668 
669 	if (AR_SREV_5416(sc->sc_ah))
670 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
671 
672 	hw->queues = 4;
673 	hw->max_rates = 4;
674 	hw->channel_change_time = 5000;
675 	hw->max_listen_interval = 10;
676 	hw->max_rate_tries = 10;
677 	hw->sta_data_size = sizeof(struct ath_node);
678 	hw->vif_data_size = sizeof(struct ath_vif);
679 
680 #ifdef CONFIG_ATH9K_RATE_CONTROL
681 	hw->rate_control_algorithm = "ath9k_rate_control";
682 #endif
683 
684 	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
685 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
686 			&sc->sbands[IEEE80211_BAND_2GHZ];
687 	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
688 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
689 			&sc->sbands[IEEE80211_BAND_5GHZ];
690 
691 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
692 		if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
693 			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
694 		if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
695 			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
696 	}
697 
698 	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
699 }
700 
701 int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
702 		    const struct ath_bus_ops *bus_ops)
703 {
704 	struct ieee80211_hw *hw = sc->hw;
705 	struct ath_common *common;
706 	struct ath_hw *ah;
707 	int error = 0;
708 	struct ath_regulatory *reg;
709 
710 	/* Bring up device */
711 	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
712 	if (error != 0)
713 		goto error_init;
714 
715 	ah = sc->sc_ah;
716 	common = ath9k_hw_common(ah);
717 	ath9k_set_hw_capab(sc, hw);
718 
719 	/* Initialize regulatory */
720 	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
721 			      ath9k_reg_notifier);
722 	if (error)
723 		goto error_regd;
724 
725 	reg = &common->regulatory;
726 
727 	/* Setup TX DMA */
728 	error = ath_tx_init(sc, ATH_TXBUF);
729 	if (error != 0)
730 		goto error_tx;
731 
732 	/* Setup RX DMA */
733 	error = ath_rx_init(sc, ATH_RXBUF);
734 	if (error != 0)
735 		goto error_rx;
736 
737 	/* Register with mac80211 */
738 	error = ieee80211_register_hw(hw);
739 	if (error)
740 		goto error_register;
741 
742 	/* Handle world regulatory */
743 	if (!ath_is_world_regd(reg)) {
744 		error = regulatory_hint(hw->wiphy, reg->alpha2);
745 		if (error)
746 			goto error_world;
747 	}
748 
749 	INIT_WORK(&sc->hw_check_work, ath_hw_check);
750 	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
751 	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
752 	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
753 	sc->wiphy_scheduler_int = msecs_to_jiffies(500);
754 
755 	ath_init_leds(sc);
756 	ath_start_rfkill_poll(sc);
757 
758 	return 0;
759 
760 error_world:
761 	ieee80211_unregister_hw(hw);
762 error_register:
763 	ath_rx_cleanup(sc);
764 error_rx:
765 	ath_tx_cleanup(sc);
766 error_tx:
767 	/* Nothing */
768 error_regd:
769 	ath9k_deinit_softc(sc);
770 error_init:
771 	return error;
772 }
773 
774 /*****************************/
775 /*     De-Initialization     */
776 /*****************************/
777 
778 static void ath9k_deinit_softc(struct ath_softc *sc)
779 {
780 	int i = 0;
781 
782 	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
783 		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
784 
785 	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
786 		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
787 
788 	if ((sc->btcoex.no_stomp_timer) &&
789 	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
790 		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
791 
792 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
793 		if (ATH_TXQ_SETUP(sc, i))
794 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
795 
796 	ath9k_exit_debug(sc->sc_ah);
797 	ath9k_hw_deinit(sc->sc_ah);
798 
799 	tasklet_kill(&sc->intr_tq);
800 	tasklet_kill(&sc->bcon_tasklet);
801 
802 	kfree(sc->sc_ah);
803 	sc->sc_ah = NULL;
804 }
805 
806 void ath9k_deinit_device(struct ath_softc *sc)
807 {
808 	struct ieee80211_hw *hw = sc->hw;
809 	int i = 0;
810 
811 	ath9k_ps_wakeup(sc);
812 
813 	wiphy_rfkill_stop_polling(sc->hw->wiphy);
814 	ath_deinit_leds(sc);
815 
816 	for (i = 0; i < sc->num_sec_wiphy; i++) {
817 		struct ath_wiphy *aphy = sc->sec_wiphy[i];
818 		if (aphy == NULL)
819 			continue;
820 		sc->sec_wiphy[i] = NULL;
821 		ieee80211_unregister_hw(aphy->hw);
822 		ieee80211_free_hw(aphy->hw);
823 	}
824 
825 	ieee80211_unregister_hw(hw);
826 	ath_rx_cleanup(sc);
827 	ath_tx_cleanup(sc);
828 	ath9k_deinit_softc(sc);
829 	kfree(sc->sec_wiphy);
830 }
831 
832 void ath_descdma_cleanup(struct ath_softc *sc,
833 			 struct ath_descdma *dd,
834 			 struct list_head *head)
835 {
836 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
837 			  dd->dd_desc_paddr);
838 
839 	INIT_LIST_HEAD(head);
840 	kfree(dd->dd_bufptr);
841 	memset(dd, 0, sizeof(*dd));
842 }
843 
844 /************************/
845 /*     Module Hooks     */
846 /************************/
847 
848 static int __init ath9k_init(void)
849 {
850 	int error;
851 
852 	/* Register rate control algorithm */
853 	error = ath_rate_control_register();
854 	if (error != 0) {
855 		printk(KERN_ERR
856 			"ath9k: Unable to register rate control "
857 			"algorithm: %d\n",
858 			error);
859 		goto err_out;
860 	}
861 
862 	error = ath9k_debug_create_root();
863 	if (error) {
864 		printk(KERN_ERR
865 			"ath9k: Unable to create debugfs root: %d\n",
866 			error);
867 		goto err_rate_unregister;
868 	}
869 
870 	error = ath_pci_init();
871 	if (error < 0) {
872 		printk(KERN_ERR
873 			"ath9k: No PCI devices found, driver not installed.\n");
874 		error = -ENODEV;
875 		goto err_remove_root;
876 	}
877 
878 	error = ath_ahb_init();
879 	if (error < 0) {
880 		error = -ENODEV;
881 		goto err_pci_exit;
882 	}
883 
884 	return 0;
885 
886  err_pci_exit:
887 	ath_pci_exit();
888 
889  err_remove_root:
890 	ath9k_debug_remove_root();
891  err_rate_unregister:
892 	ath_rate_control_unregister();
893  err_out:
894 	return error;
895 }
896 module_init(ath9k_init);
897 
898 static void __exit ath9k_exit(void)
899 {
900 	ath_ahb_exit();
901 	ath_pci_exit();
902 	ath9k_debug_remove_root();
903 	ath_rate_control_unregister();
904 	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
905 }
906 module_exit(ath9k_exit);
907