xref: /openbmc/linux/drivers/net/wireless/ath/ath9k/init.c (revision 4800cd83)
1 /*
2  * Copyright (c) 2008-2009 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/slab.h>
18 
19 #include "ath9k.h"
20 
21 static char *dev_info = "ath9k";
22 
23 MODULE_AUTHOR("Atheros Communications");
24 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
25 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
26 MODULE_LICENSE("Dual BSD/GPL");
27 
28 static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
29 module_param_named(debug, ath9k_debug, uint, 0);
30 MODULE_PARM_DESC(debug, "Debugging mask");
31 
32 int ath9k_modparam_nohwcrypt;
33 module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
34 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
35 
36 int led_blink;
37 module_param_named(blink, led_blink, int, 0444);
38 MODULE_PARM_DESC(blink, "Enable LED blink on activity");
39 
40 static int ath9k_btcoex_enable;
41 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
42 MODULE_PARM_DESC(btcoex_enable, "Enable Wi-Fi/Bluetooth coexistence");
43 
44 bool is_ath9k_unloaded;
45 /* We use the hw_value as an index into our private channel structure */
46 
47 #define CHAN2G(_freq, _idx)  { \
48 	.band = IEEE80211_BAND_2GHZ, \
49 	.center_freq = (_freq), \
50 	.hw_value = (_idx), \
51 	.max_power = 20, \
52 }
53 
54 #define CHAN5G(_freq, _idx) { \
55 	.band = IEEE80211_BAND_5GHZ, \
56 	.center_freq = (_freq), \
57 	.hw_value = (_idx), \
58 	.max_power = 20, \
59 }
60 
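/*
 * For reference, CHAN2G(2412, 0) expands to an ieee80211_channel
 * initializer equivalent to:
 *
 *	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
 *	  .hw_value = 0, .max_power = 20 }
 *
 * i.e. channel 1, with hw_value serving as the index into the
 * driver's private channel structure (see the tables below).
 */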
61 /* Some 2 GHz radios are actually tunable from 2312-2732 MHz
62  * in 5 MHz steps; we only list the channels for which we know
63  * we have calibration data on all cards, so that this table
64  * can stay static */
65 static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
66 	CHAN2G(2412, 0), /* Channel 1 */
67 	CHAN2G(2417, 1), /* Channel 2 */
68 	CHAN2G(2422, 2), /* Channel 3 */
69 	CHAN2G(2427, 3), /* Channel 4 */
70 	CHAN2G(2432, 4), /* Channel 5 */
71 	CHAN2G(2437, 5), /* Channel 6 */
72 	CHAN2G(2442, 6), /* Channel 7 */
73 	CHAN2G(2447, 7), /* Channel 8 */
74 	CHAN2G(2452, 8), /* Channel 9 */
75 	CHAN2G(2457, 9), /* Channel 10 */
76 	CHAN2G(2462, 10), /* Channel 11 */
77 	CHAN2G(2467, 11), /* Channel 12 */
78 	CHAN2G(2472, 12), /* Channel 13 */
79 	CHAN2G(2484, 13), /* Channel 14 */
80 };
81 
82 /* Some 5 GHz radios are actually tunable from XXXX-YYYY
83  * in 5 MHz steps; we only list the channels for which we know
84  * we have calibration data on all cards, so that this table
85  * can stay static */
86 static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
87 	/* _We_ call this UNII 1 */
88 	CHAN5G(5180, 14), /* Channel 36 */
89 	CHAN5G(5200, 15), /* Channel 40 */
90 	CHAN5G(5220, 16), /* Channel 44 */
91 	CHAN5G(5240, 17), /* Channel 48 */
92 	/* _We_ call this UNII 2 */
93 	CHAN5G(5260, 18), /* Channel 52 */
94 	CHAN5G(5280, 19), /* Channel 56 */
95 	CHAN5G(5300, 20), /* Channel 60 */
96 	CHAN5G(5320, 21), /* Channel 64 */
97 	/* _We_ call this "Middle band" */
98 	CHAN5G(5500, 22), /* Channel 100 */
99 	CHAN5G(5520, 23), /* Channel 104 */
100 	CHAN5G(5540, 24), /* Channel 108 */
101 	CHAN5G(5560, 25), /* Channel 112 */
102 	CHAN5G(5580, 26), /* Channel 116 */
103 	CHAN5G(5600, 27), /* Channel 120 */
104 	CHAN5G(5620, 28), /* Channel 124 */
105 	CHAN5G(5640, 29), /* Channel 128 */
106 	CHAN5G(5660, 30), /* Channel 132 */
107 	CHAN5G(5680, 31), /* Channel 136 */
108 	CHAN5G(5700, 32), /* Channel 140 */
109 	/* _We_ call this UNII 3 */
110 	CHAN5G(5745, 33), /* Channel 149 */
111 	CHAN5G(5765, 34), /* Channel 153 */
112 	CHAN5G(5785, 35), /* Channel 157 */
113 	CHAN5G(5805, 36), /* Channel 161 */
114 	CHAN5G(5825, 37), /* Channel 165 */
115 };
116 
117 /* Atheros hardware rate code addition for short preamble */
118 #define SHPCHECK(__hw_rate, __flags) \
119 	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
120 
121 #define RATE(_bitrate, _hw_rate, _flags) {              \
122 	.bitrate        = (_bitrate),                   \
123 	.flags          = (_flags),                     \
124 	.hw_value       = (_hw_rate),                   \
125 	.hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
126 }
127 
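/*
 * The four CCK rates (1, 2, 5.5 and 11 Mbit/s, listed in units of
 * 100 kbit/s) come first so that the 5 GHz band can reuse this table
 * from offset 4 onwards, i.e. OFDM rates only (see
 * ath9k_init_channels_rates()).
 */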
128 static struct ieee80211_rate ath9k_legacy_rates[] = {
129 	RATE(10, 0x1b, 0),
130 	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
131 	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
132 	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
133 	RATE(60, 0x0b, 0),
134 	RATE(90, 0x0f, 0),
135 	RATE(120, 0x0a, 0),
136 	RATE(180, 0x0e, 0),
137 	RATE(240, 0x09, 0),
138 	RATE(360, 0x0d, 0),
139 	RATE(480, 0x08, 0),
140 	RATE(540, 0x0c, 0),
141 };
142 
143 static void ath9k_deinit_softc(struct ath_softc *sc);
144 
145 /*
146  * Register reads and writes share the same lock. We do this to serialize
147  * reads and writes on Atheros 802.11n PCI devices only. This is required
148  * because the FIFO on these devices can sanely accept only two requests.
149  */
150 
151 static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
152 {
153 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
154 	struct ath_common *common = ath9k_hw_common(ah);
155 	struct ath_softc *sc = (struct ath_softc *) common->priv;
156 
157 	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
158 		unsigned long flags;
159 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
160 		iowrite32(val, sc->mem + reg_offset);
161 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
162 	} else
163 		iowrite32(val, sc->mem + reg_offset);
164 }
165 
166 static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
167 {
168 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
169 	struct ath_common *common = ath9k_hw_common(ah);
170 	struct ath_softc *sc = (struct ath_softc *) common->priv;
171 	u32 val;
172 
173 	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
174 		unsigned long flags;
175 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
176 		val = ioread32(sc->mem + reg_offset);
177 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
178 	} else
179 		val = ioread32(sc->mem + reg_offset);
180 	return val;
181 }
182 
183 static const struct ath_ops ath9k_common_ops = {
184 	.read = ath9k_ioread32,
185 	.write = ath9k_iowrite32,
186 };
187 
188 /**************************/
189 /*     Initialization     */
190 /**************************/
191 
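/*
 * Fill in the HT (802.11n) capabilities advertised to mac80211 for one
 * band: 20/40 MHz operation, short GI, STBC, A-MPDU parameters and the
 * MCS masks derived from the configured TX/RX chainmasks.
 */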
192 static void setup_ht_cap(struct ath_softc *sc,
193 			 struct ieee80211_sta_ht_cap *ht_info)
194 {
195 	struct ath_hw *ah = sc->sc_ah;
196 	struct ath_common *common = ath9k_hw_common(ah);
197 	u8 tx_streams, rx_streams;
198 	int i, max_streams;
199 
200 	ht_info->ht_supported = true;
201 	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
202 		       IEEE80211_HT_CAP_SM_PS |
203 		       IEEE80211_HT_CAP_SGI_40 |
204 		       IEEE80211_HT_CAP_DSSSCCK40;
205 
206 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
207 		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
208 
209 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
210 		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
211 
212 	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
213 	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
214 
215 	if (AR_SREV_9485(ah))
216 		max_streams = 1;
217 	else if (AR_SREV_9300_20_OR_LATER(ah))
218 		max_streams = 3;
219 	else
220 		max_streams = 2;
221 
222 	if (AR_SREV_9280_20_OR_LATER(ah)) {
223 		if (max_streams >= 2)
224 			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
225 		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
226 	}
227 
228 	/* set up supported mcs set */
229 	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
230 	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
231 	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
232 
233 	ath_dbg(common, ATH_DBG_CONFIG,
234 		"TX streams: %d, RX streams: %d\n",
235 		tx_streams, rx_streams);
236 
237 	if (tx_streams != rx_streams) {
238 		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
239 		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
240 				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
241 	}
242 
243 	for (i = 0; i < rx_streams; i++)
244 		ht_info->mcs.rx_mask[i] = 0xff;
245 
246 	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
247 }
248 
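/*
 * Regulatory notifier: called by the wireless core when the regulatory
 * domain changes; the request is simply passed on to the shared ath
 * regulatory helpers.
 */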
249 static int ath9k_reg_notifier(struct wiphy *wiphy,
250 			      struct regulatory_request *request)
251 {
252 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
253 	struct ath_wiphy *aphy = hw->priv;
254 	struct ath_softc *sc = aphy->sc;
255 	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
256 
257 	return ath_reg_notifier_apply(wiphy, request, reg);
258 }
259 
260 /*
261  * This function allocates both the DMA descriptor structure and the
262  * buffers it contains. These hold the hardware descriptors used by
263  * the system.
264  */
265 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
266 		      struct list_head *head, const char *name,
267 		      int nbuf, int ndesc, bool is_tx)
268 {
269 #define	DS2PHYS(_dd, _ds)						\
270 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
271 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
272 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
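	/*
	 * DS2PHYS() maps a descriptor's virtual address to its DMA address
	 * using its offset from the start of the coherent allocation; the
	 * two 4KB macros spot descriptors that would straddle a 4 KB
	 * boundary, which (per the checks below) some chips cannot fetch
	 * across.
	 */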
273 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
274 	u8 *ds;
275 	struct ath_buf *bf;
276 	int i, bsize, error, desc_len;
277 
278 	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
279 		name, nbuf, ndesc);
280 
281 	INIT_LIST_HEAD(head);
282 
283 	if (is_tx)
284 		desc_len = sc->sc_ah->caps.tx_desc_len;
285 	else
286 		desc_len = sizeof(struct ath_desc);
287 
288 	/* The descriptor length must be a multiple of 4 bytes (DWORD aligned) */
289 	if ((desc_len % 4) != 0) {
290 		ath_err(common, "ath_desc not DWORD aligned\n");
291 		BUG_ON((desc_len % 4) != 0);
292 		error = -ENOMEM;
293 		goto fail;
294 	}
295 
296 	dd->dd_desc_len = desc_len * nbuf * ndesc;
297 
298 	/*
299 	 * Need additional DMA memory because we can't use
300 	 * descriptors that cross the 4K page boundary. Assume
301 	 * one skipped descriptor per 4K page.
302 	 */
303 	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
304 		u32 ndesc_skipped =
305 			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
306 		u32 dma_len;
307 
308 		while (ndesc_skipped) {
309 			dma_len = ndesc_skipped * desc_len;
310 			dd->dd_desc_len += dma_len;
311 
312 			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
313 		}
314 	}
315 
316 	/* allocate descriptors */
317 	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
318 					 &dd->dd_desc_paddr, GFP_KERNEL);
319 	if (dd->dd_desc == NULL) {
320 		error = -ENOMEM;
321 		goto fail;
322 	}
323 	ds = (u8 *) dd->dd_desc;
324 	ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
325 		name, ds, (u32) dd->dd_desc_len,
326 		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
327 
328 	/* allocate buffers */
329 	bsize = sizeof(struct ath_buf) * nbuf;
330 	bf = kzalloc(bsize, GFP_KERNEL);
331 	if (bf == NULL) {
332 		error = -ENOMEM;
333 		goto fail2;
334 	}
335 	dd->dd_bufptr = bf;
336 
337 	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
338 		bf->bf_desc = ds;
339 		bf->bf_daddr = DS2PHYS(dd, ds);
340 
341 		if (!(sc->sc_ah->caps.hw_caps &
342 		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
343 			/*
344 			 * Skip descriptor addresses which can cause 4KB
345 			 * boundary crossing (addr + length) with a 32 dword
346 			 * descriptor fetch.
347 			 */
348 			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
349 				BUG_ON((caddr_t) bf->bf_desc >=
350 				       ((caddr_t) dd->dd_desc +
351 					dd->dd_desc_len));
352 
353 				ds += (desc_len * ndesc);
354 				bf->bf_desc = ds;
355 				bf->bf_daddr = DS2PHYS(dd, ds);
356 			}
357 		}
358 		list_add_tail(&bf->list, head);
359 	}
360 	return 0;
361 fail2:
362 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
363 			  dd->dd_desc_paddr);
364 fail:
365 	memset(dd, 0, sizeof(*dd));
366 	return error;
367 #undef ATH_DESC_4KB_BOUND_CHECK
368 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
369 #undef DS2PHYS
370 }
371 
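/*
 * Set up the hardware key cache: clamp the number of usable entries to
 * ATH_KEYMAX, clear every slot (some parts do not reset their contents
 * at power-up) and note whether combined TX/RX MIC keys are supported.
 */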
372 void ath9k_init_crypto(struct ath_softc *sc)
373 {
374 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
375 	int i = 0;
376 
377 	/* Get the hardware key cache size. */
378 	common->keymax = sc->sc_ah->caps.keycache_size;
379 	if (common->keymax > ATH_KEYMAX) {
380 		ath_dbg(common, ATH_DBG_ANY,
381 			"Warning, using only %u entries in %u key cache\n",
382 			ATH_KEYMAX, common->keymax);
383 		common->keymax = ATH_KEYMAX;
384 	}
385 
386 	/*
387 	 * Reset the key cache since some parts do not
388 	 * reset the contents on initial power up.
389 	 */
390 	for (i = 0; i < common->keymax; i++)
391 		ath_hw_keyreset(common, (u16) i);
392 
393 	/*
394 	 * Check whether separate key cache entries are required
395 	 * to handle both TX and RX MIC keys.
396 	 * With split MIC keys the number of stations is limited
397 	 * to 27; otherwise it is 59.
398 	 */
399 	if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
400 		common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
401 }
402 
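/*
 * Initialize Bluetooth coexistence according to the scheme reported by
 * the hardware: none, 2-wire, or 3-wire (the latter also needs a coex
 * timer and the BE TX queue for the stomp logic).
 */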
403 static int ath9k_init_btcoex(struct ath_softc *sc)
404 {
405 	struct ath_txq *txq;
406 	int r;
407 
408 	switch (sc->sc_ah->btcoex_hw.scheme) {
409 	case ATH_BTCOEX_CFG_NONE:
410 		break;
411 	case ATH_BTCOEX_CFG_2WIRE:
412 		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
413 		break;
414 	case ATH_BTCOEX_CFG_3WIRE:
415 		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
416 		r = ath_init_btcoex_timer(sc);
417 		if (r)
418 			return -1;
419 		txq = sc->tx.txq_map[WME_AC_BE];
420 		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
421 		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
422 		break;
423 	default:
424 		WARN_ON(1);
425 		break;
426 	}
427 
428 	return 0;
429 }
430 
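/*
 * Set up the beacon queue, the CAB (content-after-beacon) queue and one
 * data queue per WME access category.
 */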
431 static int ath9k_init_queues(struct ath_softc *sc)
432 {
433 	int i = 0;
434 
435 	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
436 	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
437 
438 	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
439 	ath_cabq_update(sc);
440 
441 	for (i = 0; i < WME_NUM_AC; i++)
442 		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
443 
444 	return 0;
445 }
446 
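/*
 * Allocate per-band copies of the static channel tables and attach the
 * legacy bitrate table to each band the hardware supports (OFDM rates
 * only for 5 GHz).
 */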
447 static int ath9k_init_channels_rates(struct ath_softc *sc)
448 {
449 	void *channels;
450 
451 	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
452 		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
453 		     ATH9K_NUM_CHANNELS);
454 
455 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
456 		channels = kmemdup(ath9k_2ghz_chantable,
457 			sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
458 		if (!channels)
459 			return -ENOMEM;
460 
461 		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
462 		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
463 		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
464 			ARRAY_SIZE(ath9k_2ghz_chantable);
465 		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
466 		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
467 			ARRAY_SIZE(ath9k_legacy_rates);
468 	}
469 
470 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
471 		channels = kmemdup(ath9k_5ghz_chantable,
472 			sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
473 		if (!channels) {
474 			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
475 				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
476 			return -ENOMEM;
477 		}
478 
479 		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
480 		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
481 		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
482 			ARRAY_SIZE(ath9k_5ghz_chantable);
483 		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
484 			ath9k_legacy_rates + 4;
485 		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
486 			ARRAY_SIZE(ath9k_legacy_rates) - 4;
487 	}
488 	return 0;
489 }
490 
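/*
 * Remaining one-time software state: ANI calibration timer, TX power
 * limit, aggregation flags, chainmasks, default antenna, BSSID mask and
 * beacon slots.
 */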
491 static void ath9k_init_misc(struct ath_softc *sc)
492 {
493 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
494 	int i = 0;
495 
496 	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
497 
498 	sc->config.txpowlimit = ATH_TXPOWER_MAX;
499 
500 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
501 		sc->sc_flags |= SC_OP_TXAGGR;
502 		sc->sc_flags |= SC_OP_RXAGGR;
503 	}
504 
505 	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
506 	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
507 
508 	ath9k_hw_set_diversity(sc->sc_ah, true);
509 	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
510 
511 	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
512 
513 	sc->beacon.slottime = ATH9K_SLOT_TIME_9;
514 
515 	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
516 		sc->beacon.bslot[i] = NULL;
517 		sc->beacon.bslot_aphy[i] = NULL;
518 	}
519 
520 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
521 		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
522 }
523 
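/*
 * Allocate the ath_hw instance and bring up all per-device software
 * state: locks, tasklets, hardware init, TX queues, BT coexistence,
 * channels/rates, crypto and miscellaneous settings.
 */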
524 static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
525 			    const struct ath_bus_ops *bus_ops)
526 {
527 	struct ath_hw *ah = NULL;
528 	struct ath_common *common;
529 	int ret = 0, i;
530 	int csz = 0;
531 
532 	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
533 	if (!ah)
534 		return -ENOMEM;
535 
536 	ah->hw_version.devid = devid;
537 	ah->hw_version.subsysid = subsysid;
538 	sc->sc_ah = ah;
539 
540 	if (!sc->dev->platform_data)
541 		ah->ah_flags |= AH_USE_EEPROM;
542 
543 	common = ath9k_hw_common(ah);
544 	common->ops = &ath9k_common_ops;
545 	common->bus_ops = bus_ops;
546 	common->ah = ah;
547 	common->hw = sc->hw;
548 	common->priv = sc;
549 	common->debug_mask = ath9k_debug;
550 	common->btcoex_enabled = ath9k_btcoex_enable == 1;
551 	spin_lock_init(&common->cc_lock);
552 
553 	spin_lock_init(&sc->wiphy_lock);
554 	spin_lock_init(&sc->sc_serial_rw);
555 	spin_lock_init(&sc->sc_pm_lock);
556 	mutex_init(&sc->mutex);
557 	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
558 	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
559 		     (unsigned long)sc);
560 
561 	/*
562 	 * Cache line size is used to size and align various
563 	 * structures used to communicate with the hardware.
564 	 */
565 	ath_read_cachesize(common, &csz);
566 	common->cachelsz = csz << 2; /* convert to bytes */
567 
568 	/* Initializes the hardware for all supported chipsets */
569 	ret = ath9k_hw_init(ah);
570 	if (ret)
571 		goto err_hw;
572 
573 	ret = ath9k_init_queues(sc);
574 	if (ret)
575 		goto err_queues;
576 
577 	ret = ath9k_init_btcoex(sc);
578 	if (ret)
579 		goto err_btcoex;
580 
581 	ret = ath9k_init_channels_rates(sc);
582 	if (ret)
583 		goto err_btcoex;
584 
585 	ath9k_init_crypto(sc);
586 	ath9k_init_misc(sc);
587 
588 	return 0;
589 
590 err_btcoex:
591 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
592 		if (ATH_TXQ_SETUP(sc, i))
593 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
594 err_queues:
595 	ath9k_hw_deinit(ah);
596 err_hw:
597 
598 	kfree(ah);
599 	sc->sc_ah = NULL;
600 
601 	return ret;
602 }
603 
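/*
 * Walk every channel of one band and query the hardware for the
 * regulatory power limit so that mac80211 sees a sensible max_power.
 * The division by two assumes max_power_level is kept in half-dBm
 * units.
 */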
604 static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
605 {
606 	struct ieee80211_supported_band *sband;
607 	struct ieee80211_channel *chan;
608 	struct ath_hw *ah = sc->sc_ah;
609 	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
610 	int i;
611 
612 	sband = &sc->sbands[band];
613 	for (i = 0; i < sband->n_channels; i++) {
614 		chan = &sband->channels[i];
615 		ah->curchan = &ah->channels[chan->hw_value];
616 		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
617 		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
618 		chan->max_power = reg->max_power_level / 2;
619 	}
620 }
621 
622 static void ath9k_init_txpower_limits(struct ath_softc *sc)
623 {
624 	struct ath_hw *ah = sc->sc_ah;
625 	struct ath9k_channel *curchan = ah->curchan;
626 
627 	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
628 		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
629 	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
630 		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
631 
632 	ah->curchan = curchan;
633 }
634 
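/*
 * Advertise the driver's capabilities, supported interface modes, bands
 * and HT parameters to mac80211 before the hardware is registered.
 */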
635 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
636 {
637 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
638 
639 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
640 		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
641 		IEEE80211_HW_SIGNAL_DBM |
642 		IEEE80211_HW_SUPPORTS_PS |
643 		IEEE80211_HW_PS_NULLFUNC_STACK |
644 		IEEE80211_HW_SPECTRUM_MGMT |
645 		IEEE80211_HW_REPORTS_TX_ACK_STATUS;
646 
647 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
648 		 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
649 
650 	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
651 		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
652 
653 	hw->wiphy->interface_modes =
654 		BIT(NL80211_IFTYPE_P2P_GO) |
655 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
656 		BIT(NL80211_IFTYPE_AP) |
657 		BIT(NL80211_IFTYPE_WDS) |
658 		BIT(NL80211_IFTYPE_STATION) |
659 		BIT(NL80211_IFTYPE_ADHOC) |
660 		BIT(NL80211_IFTYPE_MESH_POINT);
661 
662 	if (AR_SREV_5416(sc->sc_ah))
663 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
664 
665 	hw->queues = 4;
666 	hw->max_rates = 4;
667 	hw->channel_change_time = 5000;
668 	hw->max_listen_interval = 10;
669 	hw->max_rate_tries = 10;
670 	hw->sta_data_size = sizeof(struct ath_node);
671 	hw->vif_data_size = sizeof(struct ath_vif);
672 
673 #ifdef CONFIG_ATH9K_RATE_CONTROL
674 	hw->rate_control_algorithm = "ath9k_rate_control";
675 #endif
676 
677 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
678 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
679 			&sc->sbands[IEEE80211_BAND_2GHZ];
680 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
681 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
682 			&sc->sbands[IEEE80211_BAND_5GHZ];
683 
684 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
685 		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
686 			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
687 		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
688 			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
689 	}
690 
691 	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
692 }
693 
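/*
 * Top-level device bring-up called from the bus code (PCI/AHB):
 * initialize the softc, regulatory state, TX/RX DMA and TX power
 * limits, register with mac80211, then set up debugfs, LEDs and
 * rfkill polling.
 */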
694 int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
695 		    const struct ath_bus_ops *bus_ops)
696 {
697 	struct ieee80211_hw *hw = sc->hw;
698 	struct ath_wiphy *aphy = hw->priv;
699 	struct ath_common *common;
700 	struct ath_hw *ah;
701 	int error = 0;
702 	struct ath_regulatory *reg;
703 
704 	/* Bring up device */
705 	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
706 	if (error != 0)
707 		goto error_init;
708 
709 	ah = sc->sc_ah;
710 	common = ath9k_hw_common(ah);
711 	ath9k_set_hw_capab(sc, hw);
712 
713 	/* Initialize regulatory */
714 	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
715 			      ath9k_reg_notifier);
716 	if (error)
717 		goto error_regd;
718 
719 	reg = &common->regulatory;
720 
721 	/* Setup TX DMA */
722 	error = ath_tx_init(sc, ATH_TXBUF);
723 	if (error != 0)
724 		goto error_tx;
725 
726 	/* Setup RX DMA */
727 	error = ath_rx_init(sc, ATH_RXBUF);
728 	if (error != 0)
729 		goto error_rx;
730 
731 	ath9k_init_txpower_limits(sc);
732 
733 	/* Register with mac80211 */
734 	error = ieee80211_register_hw(hw);
735 	if (error)
736 		goto error_register;
737 
738 	error = ath9k_init_debug(ah);
739 	if (error) {
740 		ath_err(common, "Unable to create debugfs files\n");
741 		goto error_world;
742 	}
743 
744 	/* Handle world regulatory */
745 	if (!ath_is_world_regd(reg)) {
746 		error = regulatory_hint(hw->wiphy, reg->alpha2);
747 		if (error)
748 			goto error_world;
749 	}
750 
751 	INIT_WORK(&sc->hw_check_work, ath_hw_check);
752 	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
753 	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
754 	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
755 	sc->wiphy_scheduler_int = msecs_to_jiffies(500);
756 	aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
757 
758 	ath_init_leds(sc);
759 	ath_start_rfkill_poll(sc);
760 
761 	return 0;
762 
763 error_world:
764 	ieee80211_unregister_hw(hw);
765 error_register:
766 	ath_rx_cleanup(sc);
767 error_rx:
768 	ath_tx_cleanup(sc);
769 error_tx:
770 	/* Nothing */
771 error_regd:
772 	ath9k_deinit_softc(sc);
773 error_init:
774 	return error;
775 }
776 
777 /*****************************/
778 /*     De-Initialization     */
779 /*****************************/
780 
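/*
 * Release everything allocated by ath9k_init_softc(), in roughly the
 * reverse order of its setup.
 */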
781 static void ath9k_deinit_softc(struct ath_softc *sc)
782 {
783 	int i = 0;
784 
785 	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
786 		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
787 
788 	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
789 		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
790 
791         if ((sc->btcoex.no_stomp_timer) &&
792 	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
793 		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
794 
795 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
796 		if (ATH_TXQ_SETUP(sc, i))
797 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
798 
799 	ath9k_hw_deinit(sc->sc_ah);
800 
801 	kfree(sc->sc_ah);
802 	sc->sc_ah = NULL;
803 }
804 
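/*
 * Full device teardown: stop rfkill polling and LEDs, unregister and
 * free any secondary wiphys, then unregister the primary hw and release
 * DMA and software state.
 */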
805 void ath9k_deinit_device(struct ath_softc *sc)
806 {
807 	struct ieee80211_hw *hw = sc->hw;
808 	int i = 0;
809 
810 	ath9k_ps_wakeup(sc);
811 
812 	wiphy_rfkill_stop_polling(sc->hw->wiphy);
813 	ath_deinit_leds(sc);
814 
815 	ath9k_ps_restore(sc);
816 
817 	for (i = 0; i < sc->num_sec_wiphy; i++) {
818 		struct ath_wiphy *aphy = sc->sec_wiphy[i];
819 		if (aphy == NULL)
820 			continue;
821 		sc->sec_wiphy[i] = NULL;
822 		ieee80211_unregister_hw(aphy->hw);
823 		ieee80211_free_hw(aphy->hw);
824 	}
825 
826 	ieee80211_unregister_hw(hw);
827 	ath_rx_cleanup(sc);
828 	ath_tx_cleanup(sc);
829 	ath9k_deinit_softc(sc);
830 	kfree(sc->sec_wiphy);
831 }
832 
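/*
 * Counterpart of ath_descdma_setup(): free the coherent descriptor
 * memory and the ath_buf array, and clear the descdma bookkeeping.
 */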
833 void ath_descdma_cleanup(struct ath_softc *sc,
834 			 struct ath_descdma *dd,
835 			 struct list_head *head)
836 {
837 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
838 			  dd->dd_desc_paddr);
839 
840 	INIT_LIST_HEAD(head);
841 	kfree(dd->dd_bufptr);
842 	memset(dd, 0, sizeof(*dd));
843 }
844 
845 /************************/
846 /*     Module Hooks     */
847 /************************/
848 
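/*
 * Module entry point: register the rate control algorithm first, then
 * the PCI and AHB bus glue, unwinding in reverse order on failure.
 */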
849 static int __init ath9k_init(void)
850 {
851 	int error;
852 
853 	/* Register rate control algorithm */
854 	error = ath_rate_control_register();
855 	if (error != 0) {
856 		printk(KERN_ERR
857 			"ath9k: Unable to register rate control "
858 			"algorithm: %d\n",
859 			error);
860 		goto err_out;
861 	}
862 
863 	error = ath_pci_init();
864 	if (error < 0) {
865 		printk(KERN_ERR
866 			"ath9k: No PCI devices found, driver not installed.\n");
867 		error = -ENODEV;
868 		goto err_rate_unregister;
869 	}
870 
871 	error = ath_ahb_init();
872 	if (error < 0) {
873 		error = -ENODEV;
874 		goto err_pci_exit;
875 	}
876 
877 	return 0;
878 
879  err_pci_exit:
880 	ath_pci_exit();
881 
882  err_rate_unregister:
883 	ath_rate_control_unregister();
884  err_out:
885 	return error;
886 }
887 module_init(ath9k_init);
888 
889 static void __exit ath9k_exit(void)
890 {
891 	is_ath9k_unloaded = true;
892 	ath_ahb_exit();
893 	ath_pci_exit();
894 	ath_rate_control_unregister();
895 	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
896 }
897 module_exit(ath9k_exit);
898