xref: /openbmc/linux/drivers/net/wireless/ath/ath9k/init.c (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
1 /*
2  * Copyright (c) 2008-2009 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/slab.h>
18 
19 #include "ath9k.h"
20 
21 static char *dev_info = "ath9k";
22 
23 MODULE_AUTHOR("Atheros Communications");
24 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
25 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
26 MODULE_LICENSE("Dual BSD/GPL");
27 
28 static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
29 module_param_named(debug, ath9k_debug, uint, 0);
30 MODULE_PARM_DESC(debug, "Debugging mask");
31 
32 int ath9k_modparam_nohwcrypt;
33 module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
34 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
35 
36 int led_blink;
37 module_param_named(blink, led_blink, int, 0444);
38 MODULE_PARM_DESC(blink, "Enable LED blink on activity");
39 
40 static int ath9k_btcoex_enable;
41 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
42 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
43 
44 int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
45 module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
46 MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");
47 
48 bool is_ath9k_unloaded;
49 /* We use the hw_value as an index into our private channel structure */
50 
51 #define CHAN2G(_freq, _idx)  { \
52 	.band = IEEE80211_BAND_2GHZ, \
53 	.center_freq = (_freq), \
54 	.hw_value = (_idx), \
55 	.max_power = 20, \
56 }
57 
58 #define CHAN5G(_freq, _idx) { \
59 	.band = IEEE80211_BAND_5GHZ, \
60 	.center_freq = (_freq), \
61 	.hw_value = (_idx), \
62 	.max_power = 20, \
63 }
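/*
 * As an illustration, CHAN2G(2412, 0) expands to an ieee80211_channel
 * initializer for channel 1:
 *   { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
 *     .hw_value = 0, .max_power = 20 }
 * where hw_value is the index into the driver's private channel structure.
 */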
64 
65 /* Some 2 GHz radios are actually tunable from 2312 to 2732 MHz
66  * in 5 MHz steps. To keep this table static we only list the
67  * channels for which we know all cards have calibration data.
68  */
69 static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
70 	CHAN2G(2412, 0), /* Channel 1 */
71 	CHAN2G(2417, 1), /* Channel 2 */
72 	CHAN2G(2422, 2), /* Channel 3 */
73 	CHAN2G(2427, 3), /* Channel 4 */
74 	CHAN2G(2432, 4), /* Channel 5 */
75 	CHAN2G(2437, 5), /* Channel 6 */
76 	CHAN2G(2442, 6), /* Channel 7 */
77 	CHAN2G(2447, 7), /* Channel 8 */
78 	CHAN2G(2452, 8), /* Channel 9 */
79 	CHAN2G(2457, 9), /* Channel 10 */
80 	CHAN2G(2462, 10), /* Channel 11 */
81 	CHAN2G(2467, 11), /* Channel 12 */
82 	CHAN2G(2472, 12), /* Channel 13 */
83 	CHAN2G(2484, 13), /* Channel 14 */
84 };
85 
86 /* Some 5 GHz radios are actually tunable on XXXX-YYYY
87  * in 5 MHz steps. To keep this table static we only list the
88  * channels for which we know all cards have calibration data.
89  */
90 static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
91 	/* _We_ call this UNII 1 */
92 	CHAN5G(5180, 14), /* Channel 36 */
93 	CHAN5G(5200, 15), /* Channel 40 */
94 	CHAN5G(5220, 16), /* Channel 44 */
95 	CHAN5G(5240, 17), /* Channel 48 */
96 	/* _We_ call this UNII 2 */
97 	CHAN5G(5260, 18), /* Channel 52 */
98 	CHAN5G(5280, 19), /* Channel 56 */
99 	CHAN5G(5300, 20), /* Channel 60 */
100 	CHAN5G(5320, 21), /* Channel 64 */
101 	/* _We_ call this "Middle band" */
102 	CHAN5G(5500, 22), /* Channel 100 */
103 	CHAN5G(5520, 23), /* Channel 104 */
104 	CHAN5G(5540, 24), /* Channel 108 */
105 	CHAN5G(5560, 25), /* Channel 112 */
106 	CHAN5G(5580, 26), /* Channel 116 */
107 	CHAN5G(5600, 27), /* Channel 120 */
108 	CHAN5G(5620, 28), /* Channel 124 */
109 	CHAN5G(5640, 29), /* Channel 128 */
110 	CHAN5G(5660, 30), /* Channel 132 */
111 	CHAN5G(5680, 31), /* Channel 136 */
112 	CHAN5G(5700, 32), /* Channel 140 */
113 	/* _We_ call this UNII 3 */
114 	CHAN5G(5745, 33), /* Channel 149 */
115 	CHAN5G(5765, 34), /* Channel 153 */
116 	CHAN5G(5785, 35), /* Channel 157 */
117 	CHAN5G(5805, 36), /* Channel 161 */
118 	CHAN5G(5825, 37), /* Channel 165 */
119 };
120 
121 /* Atheros hardware rate code addition for short preamble */
122 #define SHPCHECK(__hw_rate, __flags) \
123 	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
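/*
 * SHPCHECK() ORs 0x04 into the hardware rate code to form the
 * short-preamble variant for rates flagged with
 * IEEE80211_RATE_SHORT_PREAMBLE, and yields 0 for rates that have no
 * short-preamble encoding; RATE() stores the result in hw_value_short.
 */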
124 
125 #define RATE(_bitrate, _hw_rate, _flags) {              \
126 	.bitrate        = (_bitrate),                   \
127 	.flags          = (_flags),                     \
128 	.hw_value       = (_hw_rate),                   \
129 	.hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
130 }
131 
132 static struct ieee80211_rate ath9k_legacy_rates[] = {
133 	RATE(10, 0x1b, 0),
134 	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
135 	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
136 	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
137 	RATE(60, 0x0b, 0),
138 	RATE(90, 0x0f, 0),
139 	RATE(120, 0x0a, 0),
140 	RATE(180, 0x0e, 0),
141 	RATE(240, 0x09, 0),
142 	RATE(360, 0x0d, 0),
143 	RATE(480, 0x08, 0),
144 	RATE(540, 0x0c, 0),
145 };
146 
147 static void ath9k_deinit_softc(struct ath_softc *sc);
148 
149 /*
150  * Reads and writes share the same lock. We do this to serialize
151  * register accesses on Atheros 802.11n PCI devices only, as the FIFO
152  * on these devices can only sanely accept two requests at a time.
153  */
154 
155 static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
156 {
157 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
158 	struct ath_common *common = ath9k_hw_common(ah);
159 	struct ath_softc *sc = (struct ath_softc *) common->priv;
160 
161 	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
162 		unsigned long flags;
163 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
164 		iowrite32(val, sc->mem + reg_offset);
165 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
166 	} else
167 		iowrite32(val, sc->mem + reg_offset);
168 }
169 
170 static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
171 {
172 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
173 	struct ath_common *common = ath9k_hw_common(ah);
174 	struct ath_softc *sc = (struct ath_softc *) common->priv;
175 	u32 val;
176 
177 	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
178 		unsigned long flags;
179 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
180 		val = ioread32(sc->mem + reg_offset);
181 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
182 	} else
183 		val = ioread32(sc->mem + reg_offset);
184 	return val;
185 }
186 
187 static const struct ath_ops ath9k_common_ops = {
188 	.read = ath9k_ioread32,
189 	.write = ath9k_iowrite32,
190 };
191 
192 /**************************/
193 /*     Initialization     */
194 /**************************/
195 
196 static void setup_ht_cap(struct ath_softc *sc,
197 			 struct ieee80211_sta_ht_cap *ht_info)
198 {
199 	struct ath_hw *ah = sc->sc_ah;
200 	struct ath_common *common = ath9k_hw_common(ah);
201 	u8 tx_streams, rx_streams;
202 	int i, max_streams;
203 
204 	ht_info->ht_supported = true;
205 	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
206 		       IEEE80211_HT_CAP_SM_PS |
207 		       IEEE80211_HT_CAP_SGI_40 |
208 		       IEEE80211_HT_CAP_DSSSCCK40;
209 
210 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
211 		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
212 
213 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
214 		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
215 
216 	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
217 	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
218 
219 	if (AR_SREV_9485(ah))
220 		max_streams = 1;
221 	else if (AR_SREV_9300_20_OR_LATER(ah))
222 		max_streams = 3;
223 	else
224 		max_streams = 2;
225 
226 	if (AR_SREV_9280_20_OR_LATER(ah)) {
227 		if (max_streams >= 2)
228 			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
229 		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
230 	}
231 
232 	/* set up supported mcs set */
233 	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
234 	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
235 	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
236 
237 	ath_dbg(common, ATH_DBG_CONFIG,
238 		"TX streams: %d, RX streams: %d\n",
239 		tx_streams, rx_streams);
240 
241 	if (tx_streams != rx_streams) {
242 		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
243 		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
244 				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
245 	}
246 
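	/* Each rx_mask byte covers eight MCS indices, so this enables
	 * MCS 0 through (8 * rx_streams - 1). */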
247 	for (i = 0; i < rx_streams; i++)
248 		ht_info->mcs.rx_mask[i] = 0xff;
249 
250 	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
251 }
252 
253 static int ath9k_reg_notifier(struct wiphy *wiphy,
254 			      struct regulatory_request *request)
255 {
256 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
257 	struct ath_wiphy *aphy = hw->priv;
258 	struct ath_softc *sc = aphy->sc;
259 	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
260 
261 	return ath_reg_notifier_apply(wiphy, request, reg);
262 }
263 
264 /*
265  *  This function allocates both the DMA descriptor structure and the
266  *  buffers it contains.  These hold the descriptors used by the
267  *  system.
268  */
269 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
270 		      struct list_head *head, const char *name,
271 		      int nbuf, int ndesc, bool is_tx)
272 {
273 #define	DS2PHYS(_dd, _ds)						\
274 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
275 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
276 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
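	/*
	 * DS2PHYS() converts a descriptor's CPU address into its DMA address
	 * by adding its offset within dd_desc to dd_desc_paddr.
	 * ATH_DESC_4KB_BOUND_CHECK() flags DMA addresses that fall in the
	 * last 128 bytes of a 4 KB page, where a full 32-dword descriptor
	 * fetch would cross the page boundary.
	 */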
277 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
278 	u8 *ds;
279 	struct ath_buf *bf;
280 	int i, bsize, error, desc_len;
281 
282 	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
283 		name, nbuf, ndesc);
284 
285 	INIT_LIST_HEAD(head);
286 
287 	if (is_tx)
288 		desc_len = sc->sc_ah->caps.tx_desc_len;
289 	else
290 		desc_len = sizeof(struct ath_desc);
291 
292 	/* ath_desc must be a multiple of DWORDs */
293 	if ((desc_len % 4) != 0) {
294 		ath_err(common, "ath_desc not DWORD aligned\n");
295 		BUG_ON((desc_len % 4) != 0);
296 		error = -ENOMEM;
297 		goto fail;
298 	}
299 
300 	dd->dd_desc_len = desc_len * nbuf * ndesc;
301 
302 	/*
303 	 * Need additional DMA memory because we can't use
304 	 * descriptors that cross the 4K page boundary. Assume
305 	 * one skipped descriptor per 4K page.
306 	 */
307 	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
308 		u32 ndesc_skipped =
309 			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
310 		u32 dma_len;
311 
312 		while (ndesc_skipped) {
313 			dma_len = ndesc_skipped * desc_len;
314 			dd->dd_desc_len += dma_len;
315 
316 			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
317 		}
318 	}
319 
320 	/* allocate descriptors */
321 	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
322 					 &dd->dd_desc_paddr, GFP_KERNEL);
323 	if (dd->dd_desc == NULL) {
324 		error = -ENOMEM;
325 		goto fail;
326 	}
327 	ds = (u8 *) dd->dd_desc;
328 	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
329 		name, ds, (u32) dd->dd_desc_len,
330 		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
331 
332 	/* allocate buffers */
333 	bsize = sizeof(struct ath_buf) * nbuf;
334 	bf = kzalloc(bsize, GFP_KERNEL);
335 	if (bf == NULL) {
336 		error = -ENOMEM;
337 		goto fail2;
338 	}
339 	dd->dd_bufptr = bf;
340 
341 	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
342 		bf->bf_desc = ds;
343 		bf->bf_daddr = DS2PHYS(dd, ds);
344 
345 		if (!(sc->sc_ah->caps.hw_caps &
346 		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
347 			/*
348 			 * Skip descriptor addresses which can cause 4KB
349 			 * boundary crossing (addr + length) with a 32 dword
350 			 * descriptor fetch.
351 			 */
352 			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
353 				BUG_ON((caddr_t) bf->bf_desc >=
354 				       ((caddr_t) dd->dd_desc +
355 					dd->dd_desc_len));
356 
357 				ds += (desc_len * ndesc);
358 				bf->bf_desc = ds;
359 				bf->bf_daddr = DS2PHYS(dd, ds);
360 			}
361 		}
362 		list_add_tail(&bf->list, head);
363 	}
364 	return 0;
365 fail2:
366 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
367 			  dd->dd_desc_paddr);
368 fail:
369 	memset(dd, 0, sizeof(*dd));
370 	return error;
371 #undef ATH_DESC_4KB_BOUND_CHECK
372 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
373 #undef DS2PHYS
374 }
375 
376 void ath9k_init_crypto(struct ath_softc *sc)
377 {
378 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
379 	int i = 0;
380 
381 	/* Get the hardware key cache size. */
382 	common->keymax = sc->sc_ah->caps.keycache_size;
383 	if (common->keymax > ATH_KEYMAX) {
384 		ath_dbg(common, ATH_DBG_ANY,
385 			"Warning, using only %u entries in %u key cache\n",
386 			ATH_KEYMAX, common->keymax);
387 		common->keymax = ATH_KEYMAX;
388 	}
389 
390 	/*
391 	 * Reset the key cache since some parts do not
392 	 * reset the contents on initial power up.
393 	 */
394 	for (i = 0; i < common->keymax; i++)
395 		ath_hw_keyreset(common, (u16) i);
396 
397 	/*
398 	 * Check whether separate key cache entries are required
399 	 * to handle both tx and rx MIC keys. With split MIC keys
400 	 * the number of stations is limited to 27; otherwise it
401 	 * is 59.
402 	 */
403 	if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
404 		common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
405 }
406 
407 static int ath9k_init_btcoex(struct ath_softc *sc)
408 {
409 	struct ath_txq *txq;
410 	int r;
411 
412 	switch (sc->sc_ah->btcoex_hw.scheme) {
413 	case ATH_BTCOEX_CFG_NONE:
414 		break;
415 	case ATH_BTCOEX_CFG_2WIRE:
416 		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
417 		break;
418 	case ATH_BTCOEX_CFG_3WIRE:
419 		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
420 		r = ath_init_btcoex_timer(sc);
421 		if (r)
422 			return -1;
423 		txq = sc->tx.txq_map[WME_AC_BE];
424 		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
425 		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
426 		break;
427 	default:
428 		WARN_ON(1);
429 		break;
430 	}
431 
432 	return 0;
433 }
434 
435 static int ath9k_init_queues(struct ath_softc *sc)
436 {
437 	int i = 0;
438 
439 	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
440 	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
441 
442 	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
443 	ath_cabq_update(sc);
444 
445 	for (i = 0; i < WME_NUM_AC; i++)
446 		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
447 
448 	return 0;
449 }
450 
451 static int ath9k_init_channels_rates(struct ath_softc *sc)
452 {
453 	void *channels;
454 
455 	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
456 		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
457 		     ATH9K_NUM_CHANNELS);
458 
459 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
460 		channels = kmemdup(ath9k_2ghz_chantable,
461 			sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
462 		if (!channels)
463 			return -ENOMEM;
464 
465 		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
466 		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
467 		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
468 			ARRAY_SIZE(ath9k_2ghz_chantable);
469 		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
470 		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
471 			ARRAY_SIZE(ath9k_legacy_rates);
472 	}
473 
474 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
475 		channels = kmemdup(ath9k_5ghz_chantable,
476 			sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
477 		if (!channels) {
478 			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
479 				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
480 			return -ENOMEM;
481 		}
482 
483 		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
484 		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
485 		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
486 			ARRAY_SIZE(ath9k_5ghz_chantable);
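		/* Skip the first four entries (the 11b/CCK rates); only the
		 * OFDM rates apply on 5 GHz. */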
487 		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
488 			ath9k_legacy_rates + 4;
489 		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
490 			ARRAY_SIZE(ath9k_legacy_rates) - 4;
491 	}
492 	return 0;
493 }
494 
495 static void ath9k_init_misc(struct ath_softc *sc)
496 {
497 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
498 	int i = 0;
499 
500 	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
501 
502 	sc->config.txpowlimit = ATH_TXPOWER_MAX;
503 
504 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
505 		sc->sc_flags |= SC_OP_TXAGGR;
506 		sc->sc_flags |= SC_OP_RXAGGR;
507 	}
508 
509 	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
510 	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
511 
512 	ath9k_hw_set_diversity(sc->sc_ah, true);
513 	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
514 
515 	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
516 
517 	sc->beacon.slottime = ATH9K_SLOT_TIME_9;
518 
519 	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
520 		sc->beacon.bslot[i] = NULL;
521 		sc->beacon.bslot_aphy[i] = NULL;
522 	}
523 
524 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
525 		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
526 }
527 
528 static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
529 			    const struct ath_bus_ops *bus_ops)
530 {
531 	struct ath_hw *ah = NULL;
532 	struct ath_common *common;
533 	int ret = 0, i;
534 	int csz = 0;
535 
536 	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
537 	if (!ah)
538 		return -ENOMEM;
539 
540 	ah->hw_version.devid = devid;
541 	ah->hw_version.subsysid = subsysid;
542 	sc->sc_ah = ah;
543 
544 	if (!sc->dev->platform_data)
545 		ah->ah_flags |= AH_USE_EEPROM;
546 
547 	common = ath9k_hw_common(ah);
548 	common->ops = &ath9k_common_ops;
549 	common->bus_ops = bus_ops;
550 	common->ah = ah;
551 	common->hw = sc->hw;
552 	common->priv = sc;
553 	common->debug_mask = ath9k_debug;
554 	common->btcoex_enabled = ath9k_btcoex_enable == 1;
555 	spin_lock_init(&common->cc_lock);
556 
557 	spin_lock_init(&sc->wiphy_lock);
558 	spin_lock_init(&sc->sc_serial_rw);
559 	spin_lock_init(&sc->sc_pm_lock);
560 	mutex_init(&sc->mutex);
561 	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
562 	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
563 		     (unsigned long)sc);
564 
565 	/*
566 	 * Cache line size is used to size and align various
567 	 * structures used to communicate with the hardware.
568 	 */
569 	ath_read_cachesize(common, &csz);
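	/* csz is reported in 32-bit words (the unit of the PCI cache line
	 * size register), hence the shift to bytes below. */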
570 	common->cachelsz = csz << 2; /* convert to bytes */
571 
572 	/* Initializes the hardware for all supported chipsets */
573 	ret = ath9k_hw_init(ah);
574 	if (ret)
575 		goto err_hw;
576 
577 	ret = ath9k_init_queues(sc);
578 	if (ret)
579 		goto err_queues;
580 
581 	ret = ath9k_init_btcoex(sc);
582 	if (ret)
583 		goto err_btcoex;
584 
585 	ret = ath9k_init_channels_rates(sc);
586 	if (ret)
587 		goto err_btcoex;
588 
589 	ath9k_init_crypto(sc);
590 	ath9k_init_misc(sc);
591 
592 	return 0;
593 
594 err_btcoex:
595 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
596 		if (ATH_TXQ_SETUP(sc, i))
597 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
598 err_queues:
599 	ath9k_hw_deinit(ah);
600 err_hw:
601 
602 	kfree(ah);
603 	sc->sc_ah = NULL;
604 
605 	return ret;
606 }
607 
608 static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
609 {
610 	struct ieee80211_supported_band *sband;
611 	struct ieee80211_channel *chan;
612 	struct ath_hw *ah = sc->sc_ah;
613 	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
614 	int i;
615 
616 	sband = &sc->sbands[band];
617 	for (i = 0; i < sband->n_channels; i++) {
618 		chan = &sband->channels[i];
619 		ah->curchan = &ah->channels[chan->hw_value];
620 		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
621 		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
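		/* max_power_level is kept in half-dBm steps; divide by two
		 * to get the dBm value mac80211 expects. */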
622 		chan->max_power = reg->max_power_level / 2;
623 	}
624 }
625 
626 static void ath9k_init_txpower_limits(struct ath_softc *sc)
627 {
628 	struct ath_hw *ah = sc->sc_ah;
629 	struct ath9k_channel *curchan = ah->curchan;
630 
631 	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
632 		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
633 	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
634 		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
635 
636 	ah->curchan = curchan;
637 }
638 
639 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
640 {
641 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
642 
643 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
644 		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
645 		IEEE80211_HW_SIGNAL_DBM |
646 		IEEE80211_HW_SUPPORTS_PS |
647 		IEEE80211_HW_PS_NULLFUNC_STACK |
648 		IEEE80211_HW_SPECTRUM_MGMT |
649 		IEEE80211_HW_REPORTS_TX_ACK_STATUS;
650 
651 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
652 		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
653 
654 	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
655 		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
656 
657 	hw->wiphy->interface_modes =
658 		BIT(NL80211_IFTYPE_P2P_GO) |
659 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
660 		BIT(NL80211_IFTYPE_AP) |
661 		BIT(NL80211_IFTYPE_WDS) |
662 		BIT(NL80211_IFTYPE_STATION) |
663 		BIT(NL80211_IFTYPE_ADHOC) |
664 		BIT(NL80211_IFTYPE_MESH_POINT);
665 
666 	if (AR_SREV_5416(sc->sc_ah))
667 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
668 
669 	hw->queues = 4;
670 	hw->max_rates = 4;
671 	hw->channel_change_time = 5000;
672 	hw->max_listen_interval = 10;
673 	hw->max_rate_tries = 10;
674 	hw->sta_data_size = sizeof(struct ath_node);
675 	hw->vif_data_size = sizeof(struct ath_vif);
676 
677 #ifdef CONFIG_ATH9K_RATE_CONTROL
678 	hw->rate_control_algorithm = "ath9k_rate_control";
679 #endif
680 
681 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
682 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
683 			&sc->sbands[IEEE80211_BAND_2GHZ];
684 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
685 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
686 			&sc->sbands[IEEE80211_BAND_5GHZ];
687 
688 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
689 		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
690 			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
691 		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
692 			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
693 	}
694 
695 	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
696 }
697 
698 int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
699 		    const struct ath_bus_ops *bus_ops)
700 {
701 	struct ieee80211_hw *hw = sc->hw;
702 	struct ath_wiphy *aphy = hw->priv;
703 	struct ath_common *common;
704 	struct ath_hw *ah;
705 	int error = 0;
706 	struct ath_regulatory *reg;
707 
708 	/* Bring up device */
709 	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
710 	if (error != 0)
711 		goto error_init;
712 
713 	ah = sc->sc_ah;
714 	common = ath9k_hw_common(ah);
715 	ath9k_set_hw_capab(sc, hw);
716 
717 	/* Initialize regulatory */
718 	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
719 			      ath9k_reg_notifier);
720 	if (error)
721 		goto error_regd;
722 
723 	reg = &common->regulatory;
724 
725 	/* Setup TX DMA */
726 	error = ath_tx_init(sc, ATH_TXBUF);
727 	if (error != 0)
728 		goto error_tx;
729 
730 	/* Setup RX DMA */
731 	error = ath_rx_init(sc, ATH_RXBUF);
732 	if (error != 0)
733 		goto error_rx;
734 
735 	ath9k_init_txpower_limits(sc);
736 
737 	/* Register with mac80211 */
738 	error = ieee80211_register_hw(hw);
739 	if (error)
740 		goto error_register;
741 
742 	error = ath9k_init_debug(ah);
743 	if (error) {
744 		ath_err(common, "Unable to create debugfs files\n");
745 		goto error_world;
746 	}
747 
748 	/* Handle world regulatory */
749 	if (!ath_is_world_regd(reg)) {
750 		error = regulatory_hint(hw->wiphy, reg->alpha2);
751 		if (error)
752 			goto error_world;
753 	}
754 
755 	INIT_WORK(&sc->hw_check_work, ath_hw_check);
756 	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
757 	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
758 	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
759 	sc->wiphy_scheduler_int = msecs_to_jiffies(500);
760 	aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
761 
762 	ath_init_leds(sc);
763 	ath_start_rfkill_poll(sc);
764 
765 	pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
766 			   PM_QOS_DEFAULT_VALUE);
767 
768 	return 0;
769 
770 error_world:
771 	ieee80211_unregister_hw(hw);
772 error_register:
773 	ath_rx_cleanup(sc);
774 error_rx:
775 	ath_tx_cleanup(sc);
776 error_tx:
777 	/* Nothing */
778 error_regd:
779 	ath9k_deinit_softc(sc);
780 error_init:
781 	return error;
782 }
783 
784 /*****************************/
785 /*     De-Initialization     */
786 /*****************************/
787 
788 static void ath9k_deinit_softc(struct ath_softc *sc)
789 {
790 	int i = 0;
791 
792 	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
793 		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
794 
795 	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
796 		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
797 
798 	if ((sc->btcoex.no_stomp_timer) &&
799 	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
800 		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
801 
802 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
803 		if (ATH_TXQ_SETUP(sc, i))
804 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
805 
806 	ath9k_hw_deinit(sc->sc_ah);
807 
808 	kfree(sc->sc_ah);
809 	sc->sc_ah = NULL;
810 }
811 
812 void ath9k_deinit_device(struct ath_softc *sc)
813 {
814 	struct ieee80211_hw *hw = sc->hw;
815 	int i = 0;
816 
817 	ath9k_ps_wakeup(sc);
818 
819 	wiphy_rfkill_stop_polling(sc->hw->wiphy);
820 	ath_deinit_leds(sc);
821 
822 	ath9k_ps_restore(sc);
823 
824 	for (i = 0; i < sc->num_sec_wiphy; i++) {
825 		struct ath_wiphy *aphy = sc->sec_wiphy[i];
826 		if (aphy == NULL)
827 			continue;
828 		sc->sec_wiphy[i] = NULL;
829 		ieee80211_unregister_hw(aphy->hw);
830 		ieee80211_free_hw(aphy->hw);
831 	}
832 
833 	ieee80211_unregister_hw(hw);
834 	pm_qos_remove_request(&sc->pm_qos_req);
835 	ath_rx_cleanup(sc);
836 	ath_tx_cleanup(sc);
837 	ath9k_deinit_softc(sc);
838 	kfree(sc->sec_wiphy);
839 }
840 
841 void ath_descdma_cleanup(struct ath_softc *sc,
842 			 struct ath_descdma *dd,
843 			 struct list_head *head)
844 {
845 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
846 			  dd->dd_desc_paddr);
847 
848 	INIT_LIST_HEAD(head);
849 	kfree(dd->dd_bufptr);
850 	memset(dd, 0, sizeof(*dd));
851 }
852 
853 /************************/
854 /*     Module Hooks     */
855 /************************/
856 
857 static int __init ath9k_init(void)
858 {
859 	int error;
860 
861 	/* Register rate control algorithm */
862 	error = ath_rate_control_register();
863 	if (error != 0) {
864 		printk(KERN_ERR
865 			"ath9k: Unable to register rate control "
866 			"algorithm: %d\n",
867 			error);
868 		goto err_out;
869 	}
870 
871 	error = ath_pci_init();
872 	if (error < 0) {
873 		printk(KERN_ERR
874 			"ath9k: No PCI devices found, driver not installed.\n");
875 		error = -ENODEV;
876 		goto err_rate_unregister;
877 	}
878 
879 	error = ath_ahb_init();
880 	if (error < 0) {
881 		error = -ENODEV;
882 		goto err_pci_exit;
883 	}
884 
885 	return 0;
886 
887  err_pci_exit:
888 	ath_pci_exit();
889 
890  err_rate_unregister:
891 	ath_rate_control_unregister();
892  err_out:
893 	return error;
894 }
895 module_init(ath9k_init);
896 
897 static void __exit ath9k_exit(void)
898 {
899 	is_ath9k_unloaded = true;
900 	ath_ahb_exit();
901 	ath_pci_exit();
902 	ath_rate_control_unregister();
903 	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
904 }
905 module_exit(ath9k_exit);
906