xref: /openbmc/linux/drivers/net/wireless/ath/ath9k/init.c (revision 95e9fd10)
1 /*
2  * Copyright (c) 2008-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include <linux/dma-mapping.h>
20 #include <linux/slab.h>
21 #include <linux/ath9k_platform.h>
22 #include <linux/module.h>
23 
24 #include "ath9k.h"
25 
26 static char *dev_info = "ath9k";
27 
28 MODULE_AUTHOR("Atheros Communications");
29 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
30 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
31 MODULE_LICENSE("Dual BSD/GPL");
32 
33 static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
34 module_param_named(debug, ath9k_debug, uint, 0);
35 MODULE_PARM_DESC(debug, "Debugging mask");
36 
37 int ath9k_modparam_nohwcrypt;
38 module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
39 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
40 
41 int led_blink;
42 module_param_named(blink, led_blink, int, 0444);
43 MODULE_PARM_DESC(blink, "Enable LED blink on activity");
44 
45 static int ath9k_btcoex_enable;
46 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
47 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
48 
49 bool is_ath9k_unloaded;
50 /* We use the hw_value as an index into our private channel structure */
51 
52 #define CHAN2G(_freq, _idx)  { \
53 	.band = IEEE80211_BAND_2GHZ, \
54 	.center_freq = (_freq), \
55 	.hw_value = (_idx), \
56 	.max_power = 20, \
57 }
58 
59 #define CHAN5G(_freq, _idx) { \
60 	.band = IEEE80211_BAND_5GHZ, \
61 	.center_freq = (_freq), \
62 	.hw_value = (_idx), \
63 	.max_power = 20, \
64 }
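
/*
 * Illustrative expansion (added for clarity, derived from the macros
 * above): an entry such as CHAN2G(2412, 0) becomes
 *
 *	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
 *	  .hw_value = 0, .max_power = 20 },
 *
 * and CHAN5G() produces the same initializer with IEEE80211_BAND_5GHZ.
 */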
65 
66 /* Some 2 GHz radios are actually tunable from 2312-2732 MHz
67  * in 5 MHz steps; to keep this table static we only list the
68  * channels for which we know we have calibration data on all
69  * cards. */
70 static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
71 	CHAN2G(2412, 0), /* Channel 1 */
72 	CHAN2G(2417, 1), /* Channel 2 */
73 	CHAN2G(2422, 2), /* Channel 3 */
74 	CHAN2G(2427, 3), /* Channel 4 */
75 	CHAN2G(2432, 4), /* Channel 5 */
76 	CHAN2G(2437, 5), /* Channel 6 */
77 	CHAN2G(2442, 6), /* Channel 7 */
78 	CHAN2G(2447, 7), /* Channel 8 */
79 	CHAN2G(2452, 8), /* Channel 9 */
80 	CHAN2G(2457, 9), /* Channel 10 */
81 	CHAN2G(2462, 10), /* Channel 11 */
82 	CHAN2G(2467, 11), /* Channel 12 */
83 	CHAN2G(2472, 12), /* Channel 13 */
84 	CHAN2G(2484, 13), /* Channel 14 */
85 };
86 
87 /* Some 5 GHz radios are actually tunable from XXXX-YYYY MHz
88  * in 5 MHz steps; to keep this table static we only list the
89  * channels for which we know we have calibration data on all
90  * cards. */
91 static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
92 	/* _We_ call this UNII 1 */
93 	CHAN5G(5180, 14), /* Channel 36 */
94 	CHAN5G(5200, 15), /* Channel 40 */
95 	CHAN5G(5220, 16), /* Channel 44 */
96 	CHAN5G(5240, 17), /* Channel 48 */
97 	/* _We_ call this UNII 2 */
98 	CHAN5G(5260, 18), /* Channel 52 */
99 	CHAN5G(5280, 19), /* Channel 56 */
100 	CHAN5G(5300, 20), /* Channel 60 */
101 	CHAN5G(5320, 21), /* Channel 64 */
102 	/* _We_ call this "Middle band" */
103 	CHAN5G(5500, 22), /* Channel 100 */
104 	CHAN5G(5520, 23), /* Channel 104 */
105 	CHAN5G(5540, 24), /* Channel 108 */
106 	CHAN5G(5560, 25), /* Channel 112 */
107 	CHAN5G(5580, 26), /* Channel 116 */
108 	CHAN5G(5600, 27), /* Channel 120 */
109 	CHAN5G(5620, 28), /* Channel 124 */
110 	CHAN5G(5640, 29), /* Channel 128 */
111 	CHAN5G(5660, 30), /* Channel 132 */
112 	CHAN5G(5680, 31), /* Channel 136 */
113 	CHAN5G(5700, 32), /* Channel 140 */
114 	/* _We_ call this UNII 3 */
115 	CHAN5G(5745, 33), /* Channel 149 */
116 	CHAN5G(5765, 34), /* Channel 153 */
117 	CHAN5G(5785, 35), /* Channel 157 */
118 	CHAN5G(5805, 36), /* Channel 161 */
119 	CHAN5G(5825, 37), /* Channel 165 */
120 };
121 
122 /* Atheros hardware rate code addition for short preamble */
123 #define SHPCHECK(__hw_rate, __flags) \
124 	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
125 
126 #define RATE(_bitrate, _hw_rate, _flags) {              \
127 	.bitrate        = (_bitrate),                   \
128 	.flags          = (_flags),                     \
129 	.hw_value       = (_hw_rate),                   \
130 	.hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
131 }
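
/*
 * Illustrative expansion (added for clarity, derived from RATE() and
 * SHPCHECK() above): RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE)
 * in the table below becomes
 *
 *	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE,
 *	  .hw_value = 0x1a, .hw_value_short = 0x1e /* 0x1a | 0x04 */ },
 *
 * while a rate without IEEE80211_RATE_SHORT_PREAMBLE gets
 * .hw_value_short = 0 from SHPCHECK().
 */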
132 
133 static struct ieee80211_rate ath9k_legacy_rates[] = {
134 	RATE(10, 0x1b, 0),
135 	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
136 	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
137 	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
138 	RATE(60, 0x0b, 0),
139 	RATE(90, 0x0f, 0),
140 	RATE(120, 0x0a, 0),
141 	RATE(180, 0x0e, 0),
142 	RATE(240, 0x09, 0),
143 	RATE(360, 0x0d, 0),
144 	RATE(480, 0x08, 0),
145 	RATE(540, 0x0c, 0),
146 };
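
/*
 * Note (added for clarity): mac80211 expresses bitrate in units of
 * 100 kb/s, so the entries above cover the four CCK rates 1-11 Mb/s
 * followed by the eight OFDM rates 6-54 Mb/s.
 */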
147 
148 #ifdef CONFIG_MAC80211_LEDS
149 static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
150 	{ .throughput = 0 * 1024, .blink_time = 334 },
151 	{ .throughput = 1 * 1024, .blink_time = 260 },
152 	{ .throughput = 5 * 1024, .blink_time = 220 },
153 	{ .throughput = 10 * 1024, .blink_time = 190 },
154 	{ .throughput = 20 * 1024, .blink_time = 170 },
155 	{ .throughput = 50 * 1024, .blink_time = 150 },
156 	{ .throughput = 70 * 1024, .blink_time = 130 },
157 	{ .throughput = 100 * 1024, .blink_time = 110 },
158 	{ .throughput = 200 * 1024, .blink_time = 80 },
159 	{ .throughput = 300 * 1024, .blink_time = 50 },
160 };
161 #endif
162 
163 static void ath9k_deinit_softc(struct ath_softc *sc);
164 
165 /*
166  * Reads and writes share the same lock. We do this to serialize
167  * register accesses on Atheros 802.11n PCI devices only, as the
168  * FIFO on these devices can sanely accept only 2 requests.
169  */
170 
171 static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
172 {
173 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
174 	struct ath_common *common = ath9k_hw_common(ah);
175 	struct ath_softc *sc = (struct ath_softc *) common->priv;
176 
177 	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
178 		unsigned long flags;
179 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
180 		iowrite32(val, sc->mem + reg_offset);
181 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
182 	} else
183 		iowrite32(val, sc->mem + reg_offset);
184 }
185 
186 static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
187 {
188 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
189 	struct ath_common *common = ath9k_hw_common(ah);
190 	struct ath_softc *sc = (struct ath_softc *) common->priv;
191 	u32 val;
192 
193 	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
194 		unsigned long flags;
195 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
196 		val = ioread32(sc->mem + reg_offset);
197 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
198 	} else
199 		val = ioread32(sc->mem + reg_offset);
200 	return val;
201 }
202 
203 static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
204 				    u32 set, u32 clr)
205 {
206 	u32 val;
207 
208 	val = ioread32(sc->mem + reg_offset);
209 	val &= ~clr;
210 	val |= set;
211 	iowrite32(val, sc->mem + reg_offset);
212 
213 	return val;
214 }
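
/*
 * Worked example (added for clarity): with a current register value of
 * 0xF0, __ath9k_reg_rmw(sc, reg, 0x05, 0x30) writes back
 * (0xF0 & ~0x30) | 0x05 == 0xC5, i.e. the clr mask is cleared before
 * the set mask is applied.
 */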
215 
216 static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
217 {
218 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
219 	struct ath_common *common = ath9k_hw_common(ah);
220 	struct ath_softc *sc = (struct ath_softc *) common->priv;
221 	unsigned long uninitialized_var(flags);
222 	u32 val;
223 
224 	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
225 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
226 		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
227 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
228 	} else
229 		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
230 
231 	return val;
232 }
233 
234 /**************************/
235 /*     Initialization     */
236 /**************************/
237 
238 static void setup_ht_cap(struct ath_softc *sc,
239 			 struct ieee80211_sta_ht_cap *ht_info)
240 {
241 	struct ath_hw *ah = sc->sc_ah;
242 	struct ath_common *common = ath9k_hw_common(ah);
243 	u8 tx_streams, rx_streams;
244 	int i, max_streams;
245 
246 	ht_info->ht_supported = true;
247 	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
248 		       IEEE80211_HT_CAP_SM_PS |
249 		       IEEE80211_HT_CAP_SGI_40 |
250 		       IEEE80211_HT_CAP_DSSSCCK40;
251 
252 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
253 		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
254 
255 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
256 		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
257 
258 	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
259 	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
260 
261 	if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
262 		max_streams = 1;
263 	else if (AR_SREV_9462(ah))
264 		max_streams = 2;
265 	else if (AR_SREV_9300_20_OR_LATER(ah))
266 		max_streams = 3;
267 	else
268 		max_streams = 2;
269 
270 	if (AR_SREV_9280_20_OR_LATER(ah)) {
271 		if (max_streams >= 2)
272 			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
273 		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
274 	}
275 
276 	/* set up supported mcs set */
277 	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
278 	tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
279 	rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
280 
281 	ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
282 		tx_streams, rx_streams);
283 
284 	if (tx_streams != rx_streams) {
285 		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
286 		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
287 				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
288 	}
289 
290 	for (i = 0; i < rx_streams; i++)
291 		ht_info->mcs.rx_mask[i] = 0xff;
292 
293 	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
294 }
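
/*
 * Illustrative result (added for clarity): on a 2x2 card
 * (tx_streams == rx_streams == 2) the code above advertises MCS 0-15
 * via rx_mask[0] = rx_mask[1] = 0xff and leaves TX_RX_DIFF unset.
 */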
295 
296 static int ath9k_reg_notifier(struct wiphy *wiphy,
297 			      struct regulatory_request *request)
298 {
299 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
300 	struct ath_softc *sc = hw->priv;
301 	struct ath_hw *ah = sc->sc_ah;
302 	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
303 	int ret;
304 
305 	ret = ath_reg_notifier_apply(wiphy, request, reg);
306 
307 	/* Set tx power */
308 	if (ah->curchan) {
309 		sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
310 		ath9k_ps_wakeup(sc);
311 		ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
312 		sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
313 		ath9k_ps_restore(sc);
314 	}
315 
316 	return ret;
317 }
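
/*
 * Note (added for clarity): the factor of two in the tx power
 * calculation above converts the regulatory limit from dBm into the
 * half-dBm units ath9k uses for txpowlimit.
 */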
318 
319 /*
320  * This function allocates both the DMA descriptor structure and the
321  * buffers it contains. These hold the descriptors that are used by
322  * the system.
323  */
324 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
325 		      struct list_head *head, const char *name,
326 		      int nbuf, int ndesc, bool is_tx)
327 {
328 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
329 	u8 *ds;
330 	struct ath_buf *bf;
331 	int i, bsize, error, desc_len;
332 
333 	ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
334 		name, nbuf, ndesc);
335 
336 	INIT_LIST_HEAD(head);
337 
338 	if (is_tx)
339 		desc_len = sc->sc_ah->caps.tx_desc_len;
340 	else
341 		desc_len = sizeof(struct ath_desc);
342 
343 	/* ath_desc must be a multiple of a DWORD (4 bytes) */
344 	if ((desc_len % 4) != 0) {
345 		ath_err(common, "ath_desc not DWORD aligned\n");
346 		BUG_ON((desc_len % 4) != 0);
347 		error = -ENOMEM;
348 		goto fail;
349 	}
350 
351 	dd->dd_desc_len = desc_len * nbuf * ndesc;
352 
353 	/*
354 	 * Need additional DMA memory because we can't use
355 	 * descriptors that cross the 4K page boundary. Assume
356 	 * one skipped descriptor per 4K page.
357 	 */
358 	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
359 		u32 ndesc_skipped =
360 			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
361 		u32 dma_len;
362 
363 		while (ndesc_skipped) {
364 			dma_len = ndesc_skipped * desc_len;
365 			dd->dd_desc_len += dma_len;
366 
367 			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
368 		}
369 	}
370 
371 	/* allocate descriptors */
372 	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
373 					 &dd->dd_desc_paddr, GFP_KERNEL);
374 	if (dd->dd_desc == NULL) {
375 		error = -ENOMEM;
376 		goto fail;
377 	}
378 	ds = (u8 *) dd->dd_desc;
379 	ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
380 		name, ds, (u32) dd->dd_desc_len,
381 		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
382 
383 	/* allocate buffers */
384 	bsize = sizeof(struct ath_buf) * nbuf;
385 	bf = kzalloc(bsize, GFP_KERNEL);
386 	if (bf == NULL) {
387 		error = -ENOMEM;
388 		goto fail2;
389 	}
390 	dd->dd_bufptr = bf;
391 
392 	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
393 		bf->bf_desc = ds;
394 		bf->bf_daddr = DS2PHYS(dd, ds);
395 
396 		if (!(sc->sc_ah->caps.hw_caps &
397 		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
398 			/*
399 			 * Skip descriptor addresses which can cause 4KB
400 			 * boundary crossing (addr + length) with a 32 dword
401 			 * descriptor fetch.
402 			 */
403 			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
404 				BUG_ON((caddr_t) bf->bf_desc >=
405 				       ((caddr_t) dd->dd_desc +
406 					dd->dd_desc_len));
407 
408 				ds += (desc_len * ndesc);
409 				bf->bf_desc = ds;
410 				bf->bf_daddr = DS2PHYS(dd, ds);
411 			}
412 		}
413 		list_add_tail(&bf->list, head);
414 	}
415 	return 0;
416 fail2:
417 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
418 			  dd->dd_desc_paddr);
419 fail:
420 	memset(dd, 0, sizeof(*dd));
421 	return error;
422 }
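
/*
 * Illustrative use only (added for clarity; the field names here are
 * assumptions, the real callers live in recv.c and xmit.c): a legacy
 * RX-side caller would look roughly like
 *
 *	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
 *				  "rx", ATH_RXBUF, 1, false);
 *
 * i.e. ATH_RXBUF buffers with one descriptor each, using the non-EDMA
 * descriptor length.
 */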
423 
424 static int ath9k_init_queues(struct ath_softc *sc)
425 {
426 	int i = 0;
427 
428 	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
429 	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
430 
431 	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
432 	ath_cabq_update(sc);
433 
434 	for (i = 0; i < WME_NUM_AC; i++) {
435 		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
436 		sc->tx.txq_map[i]->mac80211_qnum = i;
437 		sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
438 	}
439 	return 0;
440 }
441 
442 static int ath9k_init_channels_rates(struct ath_softc *sc)
443 {
444 	void *channels;
445 
446 	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
447 		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
448 		     ATH9K_NUM_CHANNELS);
449 
450 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
451 		channels = kmemdup(ath9k_2ghz_chantable,
452 			sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
453 		if (!channels)
454 			return -ENOMEM;
455 
456 		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
457 		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
458 		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
459 			ARRAY_SIZE(ath9k_2ghz_chantable);
460 		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
461 		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
462 			ARRAY_SIZE(ath9k_legacy_rates);
463 	}
464 
465 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
466 		channels = kmemdup(ath9k_5ghz_chantable,
467 			sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
468 		if (!channels) {
469 			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
470 				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
471 			return -ENOMEM;
472 		}
473 
474 		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
475 		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
476 		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
477 			ARRAY_SIZE(ath9k_5ghz_chantable);
478 		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
479 			ath9k_legacy_rates + 4;
480 		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
481 			ARRAY_SIZE(ath9k_legacy_rates) - 4;
482 	}
483 	return 0;
484 }
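
/*
 * Note (added for clarity): "ath9k_legacy_rates + 4" skips the four
 * CCK entries (1, 2, 5.5 and 11 Mb/s), which are only valid on
 * 2.4 GHz, so the 5 GHz band advertises just the eight OFDM rates.
 */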
485 
486 static void ath9k_init_misc(struct ath_softc *sc)
487 {
488 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
489 	int i = 0;
490 
491 	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
492 
493 	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
494 	sc->config.txpowlimit = ATH_TXPOWER_MAX;
495 	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
496 	sc->beacon.slottime = ATH9K_SLOT_TIME_9;
497 
498 	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
499 		sc->beacon.bslot[i] = NULL;
500 
501 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
502 		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
503 }
504 
505 static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
506 			    const struct ath_bus_ops *bus_ops)
507 {
508 	struct ath9k_platform_data *pdata = sc->dev->platform_data;
509 	struct ath_hw *ah = NULL;
510 	struct ath_common *common;
511 	int ret = 0, i;
512 	int csz = 0;
513 
514 	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
515 	if (!ah)
516 		return -ENOMEM;
517 
518 	ah->hw = sc->hw;
519 	ah->hw_version.devid = devid;
520 	ah->reg_ops.read = ath9k_ioread32;
521 	ah->reg_ops.write = ath9k_iowrite32;
522 	ah->reg_ops.rmw = ath9k_reg_rmw;
523 	atomic_set(&ah->intr_ref_cnt, -1);
524 	sc->sc_ah = ah;
525 
526 	sc->dfs_detector = dfs_pattern_detector_init(NL80211_DFS_UNSET);
527 
528 	if (!pdata) {
529 		ah->ah_flags |= AH_USE_EEPROM;
530 		sc->sc_ah->led_pin = -1;
531 	} else {
532 		sc->sc_ah->gpio_mask = pdata->gpio_mask;
533 		sc->sc_ah->gpio_val = pdata->gpio_val;
534 		sc->sc_ah->led_pin = pdata->led_pin;
535 		ah->is_clk_25mhz = pdata->is_clk_25mhz;
536 		ah->get_mac_revision = pdata->get_mac_revision;
537 		ah->external_reset = pdata->external_reset;
538 	}
539 
540 	common = ath9k_hw_common(ah);
541 	common->ops = &ah->reg_ops;
542 	common->bus_ops = bus_ops;
543 	common->ah = ah;
544 	common->hw = sc->hw;
545 	common->priv = sc;
546 	common->debug_mask = ath9k_debug;
547 	common->btcoex_enabled = ath9k_btcoex_enable == 1;
548 	common->disable_ani = false;
549 	spin_lock_init(&common->cc_lock);
550 
551 	spin_lock_init(&sc->sc_serial_rw);
552 	spin_lock_init(&sc->sc_pm_lock);
553 	mutex_init(&sc->mutex);
554 #ifdef CONFIG_ATH9K_DEBUGFS
555 	spin_lock_init(&sc->nodes_lock);
556 	INIT_LIST_HEAD(&sc->nodes);
557 #endif
558 #ifdef CONFIG_ATH9K_MAC_DEBUG
559 	spin_lock_init(&sc->debug.samp_lock);
560 #endif
561 	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
562 	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
563 		     (unsigned long)sc);
564 
565 	INIT_WORK(&sc->hw_reset_work, ath_reset_work);
566 	INIT_WORK(&sc->hw_check_work, ath_hw_check);
567 	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
568 	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
569 	setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
570 
571 	/*
572 	 * Cache line size is used to size and align various
573 	 * structures used to communicate with the hardware.
574 	 */
575 	ath_read_cachesize(common, &csz);
576 	common->cachelsz = csz << 2; /* convert to bytes */
577 
578 	/* Initializes the hardware for all supported chipsets */
579 	ret = ath9k_hw_init(ah);
580 	if (ret)
581 		goto err_hw;
582 
583 	if (pdata && pdata->macaddr)
584 		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);
585 
586 	ret = ath9k_init_queues(sc);
587 	if (ret)
588 		goto err_queues;
589 
590 	ret = ath9k_init_btcoex(sc);
591 	if (ret)
592 		goto err_btcoex;
593 
594 	ret = ath9k_init_channels_rates(sc);
595 	if (ret)
596 		goto err_btcoex;
597 
598 	ath9k_cmn_init_crypto(sc->sc_ah);
599 	ath9k_init_misc(sc);
600 
601 	if (common->bus_ops->aspm_init)
602 		common->bus_ops->aspm_init(common);
603 
604 	return 0;
605 
606 err_btcoex:
607 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
608 		if (ATH_TXQ_SETUP(sc, i))
609 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
610 err_queues:
611 	ath9k_hw_deinit(ah);
612 err_hw:
613 
614 	kfree(ah);
615 	sc->sc_ah = NULL;
616 
617 	return ret;
618 }
619 
620 static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
621 {
622 	struct ieee80211_supported_band *sband;
623 	struct ieee80211_channel *chan;
624 	struct ath_hw *ah = sc->sc_ah;
625 	int i;
626 
627 	sband = &sc->sbands[band];
628 	for (i = 0; i < sband->n_channels; i++) {
629 		chan = &sband->channels[i];
630 		ah->curchan = &ah->channels[chan->hw_value];
631 		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
632 		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
633 	}
634 }
635 
636 static void ath9k_init_txpower_limits(struct ath_softc *sc)
637 {
638 	struct ath_hw *ah = sc->sc_ah;
639 	struct ath9k_channel *curchan = ah->curchan;
640 
641 	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
642 		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
643 	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
644 		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
645 
646 	ah->curchan = curchan;
647 }
648 
649 void ath9k_reload_chainmask_settings(struct ath_softc *sc)
650 {
651 	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
652 		return;
653 
654 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
655 		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
656 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
657 		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
658 }
659 
660 static const struct ieee80211_iface_limit if_limits[] = {
661 	{ .max = 2048,	.types = BIT(NL80211_IFTYPE_STATION) |
662 				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
663 				 BIT(NL80211_IFTYPE_WDS) },
664 	{ .max = 8,	.types =
665 #ifdef CONFIG_MAC80211_MESH
666 				 BIT(NL80211_IFTYPE_MESH_POINT) |
667 #endif
668 				 BIT(NL80211_IFTYPE_AP) |
669 				 BIT(NL80211_IFTYPE_P2P_GO) },
670 };
671 
672 static const struct ieee80211_iface_combination if_comb = {
673 	.limits = if_limits,
674 	.n_limits = ARRAY_SIZE(if_limits),
675 	.max_interfaces = 2048,
676 	.num_different_channels = 1,
677 };
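
/*
 * Note (added for clarity): this single combination allows up to 2048
 * interfaces in total, of which at most 8 may be AP, P2P GO or (if
 * enabled) mesh point interfaces, and all of them must share one
 * channel (num_different_channels = 1).
 */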
678 
679 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
680 {
681 	struct ath_hw *ah = sc->sc_ah;
682 	struct ath_common *common = ath9k_hw_common(ah);
683 
684 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
685 		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
686 		IEEE80211_HW_SIGNAL_DBM |
687 		IEEE80211_HW_SUPPORTS_PS |
688 		IEEE80211_HW_PS_NULLFUNC_STACK |
689 		IEEE80211_HW_SPECTRUM_MGMT |
690 		IEEE80211_HW_REPORTS_TX_ACK_STATUS;
691 
692 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
693 		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
694 
695 	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
696 		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
697 
698 	hw->wiphy->interface_modes =
699 		BIT(NL80211_IFTYPE_P2P_GO) |
700 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
701 		BIT(NL80211_IFTYPE_AP) |
702 		BIT(NL80211_IFTYPE_WDS) |
703 		BIT(NL80211_IFTYPE_STATION) |
704 		BIT(NL80211_IFTYPE_ADHOC) |
705 		BIT(NL80211_IFTYPE_MESH_POINT);
706 
707 	hw->wiphy->iface_combinations = &if_comb;
708 	hw->wiphy->n_iface_combinations = 1;
709 
710 	if (AR_SREV_5416(sc->sc_ah))
711 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
712 
713 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
714 	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
715 	hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
716 
717 #ifdef CONFIG_PM_SLEEP
718 
719 	if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
720 	    device_can_wakeup(sc->dev)) {
721 
722 		hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
723 					  WIPHY_WOWLAN_DISCONNECT;
724 		hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN;
725 		hw->wiphy->wowlan.pattern_min_len = 1;
726 		hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE;
727 
728 	}
729 
730 	atomic_set(&sc->wow_sleep_proc_intr, -1);
731 	atomic_set(&sc->wow_got_bmiss_intr, -1);
732 
733 #endif
734 
735 	hw->queues = 4;
736 	hw->max_rates = 4;
737 	hw->channel_change_time = 5000;
738 	hw->max_listen_interval = 1;
739 	hw->max_rate_tries = 10;
740 	hw->sta_data_size = sizeof(struct ath_node);
741 	hw->vif_data_size = sizeof(struct ath_vif);
742 
743 	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
744 	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
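
	/*
	 * Example (added for clarity): a 2-chain card
	 * (max_rxchains == max_txchains == 2) ends up with an antenna
	 * mask of BIT(2) - 1 == 0x3 for both RX and TX.
	 */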
745 
746 	/* single chain devices with rx diversity */
747 	if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
748 		hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);
749 
750 	sc->ant_rx = hw->wiphy->available_antennas_rx;
751 	sc->ant_tx = hw->wiphy->available_antennas_tx;
752 
753 #ifdef CONFIG_ATH9K_RATE_CONTROL
754 	hw->rate_control_algorithm = "ath9k_rate_control";
755 #endif
756 
757 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
758 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
759 			&sc->sbands[IEEE80211_BAND_2GHZ];
760 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
761 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
762 			&sc->sbands[IEEE80211_BAND_5GHZ];
763 
764 	ath9k_reload_chainmask_settings(sc);
765 
766 	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
767 }
768 
769 int ath9k_init_device(u16 devid, struct ath_softc *sc,
770 		    const struct ath_bus_ops *bus_ops)
771 {
772 	struct ieee80211_hw *hw = sc->hw;
773 	struct ath_common *common;
774 	struct ath_hw *ah;
775 	int error = 0;
776 	struct ath_regulatory *reg;
777 
778 	/* Bring up device */
779 	error = ath9k_init_softc(devid, sc, bus_ops);
780 	if (error != 0)
781 		goto error_init;
782 
783 	ah = sc->sc_ah;
784 	common = ath9k_hw_common(ah);
785 	ath9k_set_hw_capab(sc, hw);
786 
787 	/* Initialize regulatory */
788 	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
789 			      ath9k_reg_notifier);
790 	if (error)
791 		goto error_regd;
792 
793 	reg = &common->regulatory;
794 
795 	/* Setup TX DMA */
796 	error = ath_tx_init(sc, ATH_TXBUF);
797 	if (error != 0)
798 		goto error_tx;
799 
800 	/* Setup RX DMA */
801 	error = ath_rx_init(sc, ATH_RXBUF);
802 	if (error != 0)
803 		goto error_rx;
804 
805 	ath9k_init_txpower_limits(sc);
806 
807 #ifdef CONFIG_MAC80211_LEDS
808 	/* must be initialized before ieee80211_register_hw */
809 	sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
810 		IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
811 		ARRAY_SIZE(ath9k_tpt_blink));
812 #endif
813 
814 	/* Register with mac80211 */
815 	error = ieee80211_register_hw(hw);
816 	if (error)
817 		goto error_register;
818 
819 	error = ath9k_init_debug(ah);
820 	if (error) {
821 		ath_err(common, "Unable to create debugfs files\n");
822 		goto error_world;
823 	}
824 
825 	/* Handle world regulatory */
826 	if (!ath_is_world_regd(reg)) {
827 		error = regulatory_hint(hw->wiphy, reg->alpha2);
828 		if (error)
829 			goto error_world;
830 	}
831 
832 	ath_init_leds(sc);
833 	ath_start_rfkill_poll(sc);
834 
835 	return 0;
836 
837 error_world:
838 	ieee80211_unregister_hw(hw);
839 error_register:
840 	ath_rx_cleanup(sc);
841 error_rx:
842 	ath_tx_cleanup(sc);
843 error_tx:
844 	/* Nothing */
845 error_regd:
846 	ath9k_deinit_softc(sc);
847 error_init:
848 	return error;
849 }
850 
851 /*****************************/
852 /*     De-Initialization     */
853 /*****************************/
854 
855 static void ath9k_deinit_softc(struct ath_softc *sc)
856 {
857 	int i = 0;
858 
859 	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
860 		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
861 
862 	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
863 		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
864 
865 	ath9k_deinit_btcoex(sc);
866 
867 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
868 		if (ATH_TXQ_SETUP(sc, i))
869 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
870 
871 	ath9k_hw_deinit(sc->sc_ah);
872 	if (sc->dfs_detector != NULL)
873 		sc->dfs_detector->exit(sc->dfs_detector);
874 
875 	kfree(sc->sc_ah);
876 	sc->sc_ah = NULL;
877 }
878 
879 void ath9k_deinit_device(struct ath_softc *sc)
880 {
881 	struct ieee80211_hw *hw = sc->hw;
882 
883 	ath9k_ps_wakeup(sc);
884 
885 	wiphy_rfkill_stop_polling(sc->hw->wiphy);
886 	ath_deinit_leds(sc);
887 
888 	ath9k_ps_restore(sc);
889 
890 	ieee80211_unregister_hw(hw);
891 	ath_rx_cleanup(sc);
892 	ath_tx_cleanup(sc);
893 	ath9k_deinit_softc(sc);
894 }
895 
896 void ath_descdma_cleanup(struct ath_softc *sc,
897 			 struct ath_descdma *dd,
898 			 struct list_head *head)
899 {
900 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
901 			  dd->dd_desc_paddr);
902 
903 	INIT_LIST_HEAD(head);
904 	kfree(dd->dd_bufptr);
905 	memset(dd, 0, sizeof(*dd));
906 }
907 
908 /************************/
909 /*     Module Hooks     */
910 /************************/
911 
912 static int __init ath9k_init(void)
913 {
914 	int error;
915 
916 	/* Register rate control algorithm */
917 	error = ath_rate_control_register();
918 	if (error != 0) {
919 		pr_err("Unable to register rate control algorithm: %d\n",
920 		       error);
921 		goto err_out;
922 	}
923 
924 	error = ath_pci_init();
925 	if (error < 0) {
926 		pr_err("No PCI devices found, driver not installed\n");
927 		error = -ENODEV;
928 		goto err_rate_unregister;
929 	}
930 
931 	error = ath_ahb_init();
932 	if (error < 0) {
933 		error = -ENODEV;
934 		goto err_pci_exit;
935 	}
936 
937 	return 0;
938 
939  err_pci_exit:
940 	ath_pci_exit();
941 
942  err_rate_unregister:
943 	ath_rate_control_unregister();
944  err_out:
945 	return error;
946 }
947 module_init(ath9k_init);
948 
949 static void __exit ath9k_exit(void)
950 {
951 	is_ath9k_unloaded = true;
952 	ath_ahb_exit();
953 	ath_pci_exit();
954 	ath_rate_control_unregister();
955 	pr_info("%s: Driver unloaded\n", dev_info);
956 }
957 module_exit(ath9k_exit);
958