/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/nl80211.h>

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"
#include "ath5k.h"
#include "../regd.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");

static bool ath5k_modparam_no_hw_rfkill_switch;
module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
		   bool, S_IRUGO);
MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");


/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
		       bool skip_pcu);

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
"5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A }, 135 { "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B }, 136 { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 }, 137 { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A }, 138 { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B }, 139 { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 }, 140 { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 }, 141 { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 }, 142 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, 143 #ifdef CONFIG_ATHEROS_AR231X 144 { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 }, 145 { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 }, 146 #endif 147 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, 148 }; 149 150 static const struct ieee80211_rate ath5k_rates[] = { 151 { .bitrate = 10, 152 .hw_value = ATH5K_RATE_CODE_1M, }, 153 { .bitrate = 20, 154 .hw_value = ATH5K_RATE_CODE_2M, 155 .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE, 156 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 157 { .bitrate = 55, 158 .hw_value = ATH5K_RATE_CODE_5_5M, 159 .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE, 160 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 161 { .bitrate = 110, 162 .hw_value = ATH5K_RATE_CODE_11M, 163 .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE, 164 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 165 { .bitrate = 60, 166 .hw_value = ATH5K_RATE_CODE_6M, 167 .flags = 0 }, 168 { .bitrate = 90, 169 .hw_value = ATH5K_RATE_CODE_9M, 170 .flags = 0 }, 171 { .bitrate = 120, 172 .hw_value = ATH5K_RATE_CODE_12M, 173 .flags = 0 }, 174 { .bitrate = 180, 175 .hw_value = ATH5K_RATE_CODE_18M, 176 .flags = 0 }, 177 { .bitrate = 240, 178 .hw_value = ATH5K_RATE_CODE_24M, 179 .flags = 0 }, 180 { .bitrate = 360, 181 .hw_value = ATH5K_RATE_CODE_36M, 182 .flags = 0 }, 183 { .bitrate = 480, 184 .hw_value = ATH5K_RATE_CODE_48M, 185 .flags = 0 }, 186 { .bitrate = 540, 187 .hw_value = ATH5K_RATE_CODE_54M, 188 .flags = 0 }, 189 }; 190 191 static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) 192 { 193 u64 tsf = ath5k_hw_get_tsf64(ah); 194 195 if ((tsf & 0x7fff) < rstamp) 196 tsf -= 0x8000; 197 198 return (tsf & ~0x7fff) | rstamp; 199 } 200 201 const char * 202 ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) 203 { 204 const char *name = "xxxxx"; 205 unsigned int i; 206 207 for (i = 0; i < ARRAY_SIZE(srev_names); i++) { 208 if (srev_names[i].sr_type != type) 209 continue; 210 211 if ((val & 0xf0) == srev_names[i].sr_val) 212 name = srev_names[i].sr_name; 213 214 if ((val & 0xff) == srev_names[i].sr_val) { 215 name = srev_names[i].sr_name; 216 break; 217 } 218 } 219 220 return name; 221 } 222 static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset) 223 { 224 struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; 225 return ath5k_hw_reg_read(ah, reg_offset); 226 } 227 228 static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset) 229 { 230 struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; 231 ath5k_hw_reg_write(ah, val, reg_offset); 232 } 233 234 static const struct ath_ops ath5k_common_ops = { 235 .read = ath5k_ioread32, 236 .write = ath5k_iowrite32, 237 }; 238 239 /***********************\ 240 * Driver Initialization * 241 \***********************/ 242 243 static void ath5k_reg_notifier(struct wiphy *wiphy, 244 struct regulatory_request *request) 245 { 246 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 247 struct ath5k_hw *ah = hw->priv; 248 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah); 249 250 ath_reg_notifier_apply(wiphy, request, 
const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}

static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};

/***********************\
* Driver Initialization *
\***********************/

static void ath5k_reg_notifier(struct wiphy *wiphy,
			       struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);

	ath_reg_notifier_apply(wiphy, request, regulatory);
}

/********************\
* Channel/mode setup *
\********************/

/*
 * Returns true for the channel numbers used.
 */
#ifdef CONFIG_ATH5K_TEST_CHANNELS
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	return true;
}

#else
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
#endif

static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		     unsigned int mode, unsigned int max)
{
	unsigned int count, size, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Write channel info, needed for ath5k_channel_ok() */
		channels[count].center_freq = freq;
		channels[count].band = band;
		channels[count].hw_value = mode;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, &channels[count]))
			continue;

		if (!ath5k_is_standard_channel(ch, band))
			continue;

		count++;
	}

	return count;
}
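/*
 * Illustrative note: ieee80211_channel_to_frequency() maps e.g.
 * channel 1 on the 2GHz band to 2412 MHz and channel 36 on the 5GHz
 * band to 5180 MHz; channel numbers without a standard center
 * frequency yield 0 and are skipped by the loop above.
 */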
static void
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
{
	u8 i;

	for (i = 0; i < AR5K_MAX_RATES; i++)
		ah->rate_idx[b->band][i] = -1;

	for (i = 0; i < b->n_bitrates; i++) {
		ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
	}
}

static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(ah->channels);

	/* 2GHz band */
	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
							 AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* 5211 only supports B rates and uses 4-bit rate codes
		 * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
							 AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(ah, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];

		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		sband->channels = &ah->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
							 AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(ah, sband);

	ath5k_debug_dump_bands(ah);

	return 0;
}

/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart things, as in ath5k_init.
 *
 * Called with ah->lock.
 */
int
ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  ah->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(ah, chan, true);
}

void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (ether_addr_equal(iter_data->hw_macaddr, mac))
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}
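/*
 * Illustrative note on the mask computation above: a mask bit stays 1
 * only where all active interface addresses agree with the hardware
 * MAC address. E.g. if the hardware address ends in :00 and a second
 * interface uses :01, bit 0 of the last octet differs, the mask ends
 * in :fe and the hardware ignores that bit when matching the BSSID.
 */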
void
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(
		ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		ath5k_vif_iter, &iter_data);
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);

	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		ah->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}

static inline int
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
{
	int rix;

	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
		 "hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = ah->rate_idx[ah->curchan->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}
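/*
 * Illustrative note: rate_idx[] is the reverse mapping of the
 * ath5k_rates[] table built in ath5k_setup_rate_idx(). E.g. the CCK
 * 1M code 0x1B reported in rs_rate maps back to bitrate index 0 of
 * the current band; unknown codes stay at -1 and trigger the WARN
 * above.
 */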
/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
			  common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(ah->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}

static int
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors. For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end. As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally. This should be safe even
	 * if DMA is happening. When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	if (ah->rxlink != NULL)
		*ah->rxlink = bf->daddr;
	ah->rxlink = &ds->ds_link;
	return 0;
}

static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
	else
		htype = AR5K_PKT_TYPE_NORMAL;

	return htype;
}
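/*
 * Fill the tx descriptor for bf->skb (rate, protection, key and MRR
 * series), link it to the tail of @txq and (re)start DMA on that
 * queue. Returns 0 or a negative errno, with the skb unmapped again
 * on failure.
 */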
static int
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
{
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;

	/* XXX endianness */
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
				     DMA_TO_DEVICE);

	rate = ieee80211_get_tx_rate(ah->hw, info);
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;

	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;

	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(ah->ah_txpower.txp_requested * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	/* Set up MRR descriptor */
	if (ah->ah_capabilities.cap_has_mrr_support) {
		memset(mrr_rate, 0, sizeof(mrr_rate));
		memset(mrr_tries, 0, sizeof(mrr_tries));
		for (i = 0; i < 3; i++) {
			rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
			if (!rate)
				break;

			mrr_rate[i] = rate->hw_value;
			mrr_tries[i] = info->control.rates[i + 1].count;
		}

		ath5k_hw_setup_mrr_tx_desc(ah, ds,
			mrr_rate[0], mrr_tries[0],
			mrr_rate[1], mrr_tries[1],
			mrr_rate[2], mrr_tries[2]);
	}

	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;

	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*******************\
* Descriptors setup *
\*******************/

static int
ath5k_desc_alloc(struct ath5k_hw *ah)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	ah->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
				      &ah->desc_daddr, GFP_KERNEL);
	if (ah->desc == NULL) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = ah->desc;
	da = ah->desc_daddr;
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		  ds, ah->desc_len, (unsigned long long)ah->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
		     sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(ah, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	ah->bufptr = bf;

	INIT_LIST_HEAD(&ah->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->rxbuf);
	}

	INIT_LIST_HEAD(&ah->txbuf);
	ah->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&ah->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
err:
	ah->desc = NULL;
	return ret;
}

void
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
			 DMA_TO_DEVICE);
	ieee80211_free_txskb(ah->hw, bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
			 DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_desc_free(struct ath5k_hw *ah)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &ah->txbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->rxbuf, list)
		ath5k_rxbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->bcbuf, list)
		ath5k_txbuf_free_skb(ah, bf);

	/* Free memory associated with all descriptors */
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
	ah->desc = NULL;
	ah->desc_daddr = 0;

	kfree(ah->bufptr);
	ah->bufptr = NULL;
}


/**************\
* Queues setup *
\**************/
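/*
 * Get a hardware tx queue of the requested type/subtype, initialize
 * the corresponding ah->txqs[] slot on first use and return it; on
 * parts without a free queue of that kind an ERR_PTR is returned
 * instead.
 */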
static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_hw *ah,
		int qtype, int subtype)
{
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
			AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	txq = &ah->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &ah->txqs[qnum];
}

static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}

static int
ath5k_beaconq_config(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret)
		goto err;

	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}

	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
		  "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		  qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);

	ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
			  "hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
	if (ret)
		goto err;

	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	qi.tqi_ready_time = (ah->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}
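/*
 * Illustrative note: with the common beacon interval of 100 TU the
 * CAB queue above gets a ready time of 80 TU, i.e. buffered
 * multicast/broadcast traffic may only burst out during the first
 * 80% of each beacon period.
 */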
/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @ah: The &struct ath5k_hw
 *
 * Empty tx buffers from all queues in preparation
 * for a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(ah, bf);

				ath5k_txbuf_free_skb(ah, bf);

				spin_lock(&ah->txbuflock);
				list_move_tail(&bf->list, &ah->txbuf);
				ah->txbuf_len++;
				txq->txq_len--;
				spin_unlock(&ah->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}

static void
ath5k_txq_release(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq = ah->txqs;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(ah, txq->qnum);
			txq->setup = false;
		}
}


/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_hw *ah)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&ah->rxbuflock);
	ah->rxlink = NULL;
	list_for_each_entry(bf, &ah->rxbuf, list) {
		ret = ath5k_rxbuf_setup(ah, bf);
		if (ret != 0) {
			spin_unlock_bh(&ah->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&ah->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}

/*
 * Disable the receive logic on PCU (DRU)
 * In preparation for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_hw *ah)
{

	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	ath5k_debug_printrxbuffs(ah);
}

static unsigned int
ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt. In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}


static void
ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;

	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
	    ether_addr_equal(mgmt->bssid, common->curbssid)) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);

		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);

		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 bytes (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}

		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= ah->nexttbtt)
			ath5k_beacon_update_timers(ah, bc_tstamp);

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
			ath5k_beacon_update_timers(ah, bc_tstamp);
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
	}
}
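/*
 * Note on units: the hardware TSF counts microseconds while the
 * beacon timers work in time units (TU); 1 TU = 1024 us, so
 * TSF_TO_TU() above is essentially a divide by 1024.
 */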
static void
ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath_common *common = ath5k_hw_common(ah);

	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
	    !ether_addr_equal(mgmt->bssid, common->curbssid))
		return;

	ewma_add(&ah->ah_beacon_rssi_avg, rssi);

	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}

/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;

	if (ieee80211_has_a4(frame_control))
		padpos += ETH_ALEN;

	if (ieee80211_is_data_qos(frame_control))
		padpos += IEEE80211_QOS_CTL_LEN;

	return padpos;
}

/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len > padpos) {

		if (skb_headroom(skb) < padsize)
			return -1;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		return padsize;
	}

	return 0;
}

/*
 * The MAC header is padded to a 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit. We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len >= padpos + padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
	}

	return 0;
}
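/*
 * Worked example (illustrative): for a QoS data frame with a
 * three-address header the payload starts at 24 + 2 = 26 bytes, so
 * padpos = 26 and padsize = 26 & 3 = 2. Two pad bytes sit between
 * header and payload in the aligned buffer and are stripped again by
 * ath5k_remove_padding() before the frame goes up the stack.
 */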
static void
ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
{
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	/*
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 */
	rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
	rxs->flag |= RX_FLAG_MACTIME_END;

	rxs->freq = ah->curchan->center_freq;
	rxs->band = ah->curchan->band;

	rxs->signal = ah->ah_noise_floor + rs->rs_rssi;

	rxs->antenna = rs->rs_antenna;

	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		ah->stats.antenna_rx[rs->rs_antenna]++;
	else
		ah->stats.antenna_rx[0]++; /* invalid */

	rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);

	if (rxs->rate_idx >= 0 && rs->rs_rate ==
	    ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
		rxs->flag |= RX_FLAG_SHORTPRE;

	trace_ath5k_rx(ah, skb);

	ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi);

	/* check beacons in IBSS mode */
	if (ah->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(ah, skb, rxs);

	ieee80211_rx(ah->hw, skb);
}

/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
 */
static bool
ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
{
	ah->stats.rx_all_count++;
	ah->stats.rx_bytes_count += rs->rs_datalen;

	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
			ah->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			ah->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			ah->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error. If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it. This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			ah->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			ah->stats.rxerr_mic++;
			return true;
		}

		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}

	if (unlikely(rs->rs_more)) {
		ah->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}
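/*
 * Keep RX/TX interrupt sources masked while their tasklets are still
 * pending, so the interrupt handler does not keep rescheduling them;
 * the tasklets re-enable the sources through this function once they
 * have drained their work.
 */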
static void
ath5k_set_current_imask(struct ath5k_hw *ah)
{
	enum ath5k_int imask;
	unsigned long flags;

	spin_lock_irqsave(&ah->irqlock, flags);
	imask = ah->imask;
	if (ah->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;
	if (ah->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;
	ath5k_hw_set_imr(ah, imask);
	spin_unlock_irqrestore(&ah->irqlock, flags);
}

static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_hw *ah = (void *)data;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;

	spin_lock(&ah->rxbuflock);
	if (list_empty(&ah->rxbuf)) {
		ATH5K_WARN(ah, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;

		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(ah) == bf->daddr)
			break;

		ret = ah->ah_proc_rx_desc(ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(ah, "error in processing rx descriptor\n");
			ah->stats.rxerr_proc++;
			break;
		}

		if (ath5k_receive_frame_ok(ah, &rs)) {
			next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);

			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;

			dma_unmap_single(ah->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);

			skb_put(skb, rs.rs_datalen);

			ath5k_receive_frame(ah, skb, &rs);

			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &ah->rxbuf);
	} while (ath5k_rxbuf_setup(ah, bf) == 0);
unlock:
	spin_unlock(&ah->rxbuflock);
	ah->rx_pending = false;
	ath5k_set_current_imask(ah);
}


/*************\
* TX Handling *
\*************/

void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;

	trace_ath5k_tx(ah, skb, txq);

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}

	if (txq->txq_len >= txq->txq_max &&
	    txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&ah->txbuflock, flags);
	if (list_empty(&ah->txbuf)) {
		ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	ah->txbuf_len--;
	if (list_empty(&ah->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&ah->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
		bf->skb = NULL;
		spin_lock_irqsave(&ah->txbuflock, flags);
		list_add_tail(&bf->list, &ah->txbuf);
		ah->txbuf_len++;
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		goto drop_packet;
	}
	return;

drop_packet:
	ieee80211_free_txskb(hw, skb);
}

static void
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	u8 tries[3];
	int i;

	ah->stats.tx_all_count++;
	ah->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

	ieee80211_tx_info_clear_status(info);

	for (i = 0; i < ts->ts_final_idx; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		r->count = tries[i];
	}

	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
	info->status.rates[ts->ts_final_idx + 1].idx = -1;

	if (unlikely(ts->ts_status)) {
		ah->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			ah->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			ah->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			ah->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
	}

	/*
	 * Remove MAC header padding before giving the frame
	 * back to mac80211.
	 */
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		ah->stats.antenna_tx[ts->ts_antenna]++;
	else
		ah->stats.antenna_tx[0]++; /* invalid */

	trace_ath5k_tx_complete(ah, skb, txq, ts);
	ieee80211_tx_status(ah->hw, skb);
}
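/*
 * Illustrative note: if the hardware ended on MRR series 1 after
 * exhausting series 0, ts_final_idx is 1; rates[0].count keeps the
 * tries of series 0, rates[1].count is set to ts_final_retry (plus
 * one for the ACKed attempt on success) and rates[2].idx = -1
 * terminates the status chain for mac80211.
 */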
static void
ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = ah->ah_proc_tx_desc(ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(ah,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
					 DMA_TO_DEVICE);
			ath5k_tx_frame_completed(ah, skb, txq, &ts);
		}

		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
			spin_lock(&ah->txbuflock);
			list_move_tail(&bf->list, &ah->txbuf);
			ah->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&ah->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(ah->hw, txq->qnum);
}

static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_hw *ah = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
			ath5k_tx_processq(ah, &ah->txqs[i]);

	ah->tx_pending = false;
	ath5k_set_current_imask(ah);
}


/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath5k_desc *ds;
	int ret = 0;
	u8 antenna;
	u32 flags;
	const int padsize = 0;

	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
				     DMA_TO_DEVICE);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
		  "skbaddr %llx\n", skb, skb->data, skb->len,
		  (unsigned long long)bf->skbaddr);

	if (dma_mapping_error(ah->dev, bf->skbaddr)) {
		ATH5K_ERR(ah, "beacon DMA mapping failed\n");
		dev_kfree_skb_any(skb);
		bf->skb = NULL;
		return -EIO;
	}

	ds = bf->desc;
	antenna = ah->ah_tx_ant;

	flags = AR5K_TXDESC_NOACK;
	if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
	} else
		ds->ds_link = 0;

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
	 * automatically, based on ACKed packets.
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = ah->bsent & 4 ? 2 : 1;


	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	ds->ds_data = bf->skbaddr;
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
			ieee80211_get_hdrlen_from_skb(skb), padsize,
			AR5K_PKT_TYPE_BEACON,
			(ah->ah_txpower.txp_requested * 2),
			ieee80211_get_tx_rate(ah->hw, info)->hw_value,
			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_vif *avf;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	avf = (void *)vif->drv_priv;
	ath5k_txbuf_free_skb(ah, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(ah, avf->bbuf);
out:
	return ret;
}

/*
 * Transmit a beacon frame at SWBA. Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklets)
 * or user context from ath5k_beacon_config.
 */
static void
ath5k_beacon_send(struct ath5k_hw *ah)
{
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;
	int err;

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out. If
	 * not, don't try to post another: skip this
	 * period and wait for the next. Missed beacons
	 * indicate a problem and should not occur. If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
		ah->bmisscount++;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			  "missed %u consecutive beacons\n", ah->bmisscount);
		if (ah->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				  "stuck beacon time (%u missed)\n",
				  ah->bmisscount);
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}
		return;
	}
	if (unlikely(ah->bmisscount != 0)) {
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			  "resume beacon xmit after %u misses\n",
			  ah->bmisscount);
		ah->bmisscount = 0;
	}

	if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs +
	     ah->num_mesh_vifs > 1) ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
		int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
		vif = ah->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			  "tsf %llx tsftu %x intval %u slot %u vif %p\n",
			  (unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
	} else /* only one interface */
		vif = ah->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
		ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		err = ath5k_beacon_update(ah->hw, vif);
		if (err)
			return;
	}

	if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
		     ah->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
		return;
	}

	trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);

	ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, ah->bhalq);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		  ah->bhalq, (unsigned long long)bf->daddr, bf->desc);

	skb = ieee80211_get_buffered_bc(ah->hw, vif);
	while (skb) {
		ath5k_tx_queue(ah->hw, skb, ah->cabq);

		if (ah->cabq->txq_len >= ah->cabq->txq_max)
			break;

		skb = ieee80211_get_buffered_bc(ah->hw, vif);
	}

	ah->bsent++;
}
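/*
 * Illustrative note (assuming ATH_BCBUF is 4): with a beacon interval
 * of 100 TU the staggered slot calculation in ath5k_beacon_send()
 * yields slot 0 for tsftu % 100 in [0, 25), slot 1 for [25, 50) and
 * so on, so each beaconing interface gets an evenly spaced share of
 * the beacon period.
 */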
1946 */ 1947 void 1948 ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf) 1949 { 1950 u32 nexttbtt, intval, hw_tu, bc_tu; 1951 u64 hw_tsf; 1952 1953 intval = ah->bintval & AR5K_BEACON_PERIOD; 1954 if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs 1955 + ah->num_mesh_vifs > 1) { 1956 intval /= ATH_BCBUF; /* staggered multi-bss beacons */ 1957 if (intval < 15) 1958 ATH5K_WARN(ah, "intval %u is too low, min 15\n", 1959 intval); 1960 } 1961 if (WARN_ON(!intval)) 1962 return; 1963 1964 /* beacon TSF converted to TU */ 1965 bc_tu = TSF_TO_TU(bc_tsf); 1966 1967 /* current TSF converted to TU */ 1968 hw_tsf = ath5k_hw_get_tsf64(ah); 1969 hw_tu = TSF_TO_TU(hw_tsf); 1970 1971 #define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3) 1972 /* We use FUDGE to make sure the next TBTT is ahead of the current TU. 1973 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer 1974 * configuration, we need to make sure it is bigger than that. */ 1975 1976 if (bc_tsf == -1) { 1977 /* 1978 * no beacons received, called internally. 1979 * just need to refresh timers based on HW TSF. 1980 */ 1981 nexttbtt = roundup(hw_tu + FUDGE, intval); 1982 } else if (bc_tsf == 0) { 1983 /* 1984 * no beacon received, probably called by ath5k_reset_tsf(). 1985 * reset TSF to start with 0. 1986 */ 1987 nexttbtt = intval; 1988 intval |= AR5K_BEACON_RESET_TSF; 1989 } else if (bc_tsf > hw_tsf) { 1990 /* 1991 * beacon received, SW merge happened but HW TSF not yet updated. 1992 * not possible to reconfigure timers yet, but next time we 1993 * receive a beacon with the same BSSID, the hardware will 1994 * automatically update the TSF and then we need to reconfigure 1995 * the timers. 1996 */ 1997 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, 1998 "need to wait for HW TSF sync\n"); 1999 return; 2000 } else { 2001 /* 2002 * most important case for beacon synchronization between STAs. 2003 * 2004 * beacon received and HW TSF has been already updated by HW. 2005 * update next TBTT based on the TSF of the beacon, but make 2006 * sure it is ahead of our local TSF timer. 2007 */ 2008 nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval); 2009 } 2010 #undef FUDGE 2011 2012 ah->nexttbtt = nexttbtt; 2013 2014 intval |= AR5K_BEACON_ENA; 2015 ath5k_hw_init_beacon_timers(ah, nexttbtt, intval); 2016 2017 /* 2018 * debugging output last in order to preserve the time-critical aspect 2019 * of this function 2020 */ 2021 if (bc_tsf == -1) 2022 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, 2023 "reconfigured timers based on HW TSF\n"); 2024 else if (bc_tsf == 0) 2025 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, 2026 "reset HW TSF and timers\n"); 2027 else 2028 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, 2029 "updated timers based on beacon TSF\n"); 2030 2031 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, 2032 "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n", 2033 (unsigned long long) bc_tsf, 2034 (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt); 2035 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n", 2036 intval & AR5K_BEACON_PERIOD, 2037 intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "", 2038 intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : ""); 2039 } 2040 2041 /** 2042 * ath5k_beacon_config - Configure the beacon queues and interrupts 2043 * 2044 * @ah: struct ath5k_hw pointer we are operating on 2045 * 2046 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA 2047 * interrupts to detect TSF updates only.
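 *
 * Note that the interval written to the hardware is a composite value;
 * the period itself lives in the AR5K_BEACON_PERIOD bits and control
 * flags are OR'ed on top, roughly:
 *
 *	intval = (ah->bintval & AR5K_BEACON_PERIOD) | AR5K_BEACON_ENA;
 *
 * as done in ath5k_beacon_update_timers() above. With VEOL the beacon
 * descriptor is self-linked, so the hardware retransmits it on every
 * TBTT without further software involvement.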
2048 */ 2049 void 2050 ath5k_beacon_config(struct ath5k_hw *ah) 2051 { 2052 spin_lock_bh(&ah->block); 2053 ah->bmisscount = 0; 2054 ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); 2055 2056 if (ah->enable_beacon) { 2057 /* 2058 * In IBSS mode we use a self-linked tx descriptor and let the 2059 * hardware send the beacons automatically. We have to load it 2060 * only once here. 2061 * We use the SWBA interrupt only to keep track of the beacon 2062 * timers in order to detect automatic TSF updates. 2063 */ 2064 ath5k_beaconq_config(ah); 2065 2066 ah->imask |= AR5K_INT_SWBA; 2067 2068 if (ah->opmode == NL80211_IFTYPE_ADHOC) { 2069 if (ath5k_hw_hasveol(ah)) 2070 ath5k_beacon_send(ah); 2071 } else 2072 ath5k_beacon_update_timers(ah, -1); 2073 } else { 2074 ath5k_hw_stop_beacon_queue(ah, ah->bhalq); 2075 } 2076 2077 ath5k_hw_set_imr(ah, ah->imask); 2078 mmiowb(); 2079 spin_unlock_bh(&ah->block); 2080 } 2081 2082 static void ath5k_tasklet_beacon(unsigned long data) 2083 { 2084 struct ath5k_hw *ah = (struct ath5k_hw *) data; 2085 2086 /* 2087 * Software beacon alert--time to send a beacon. 2088 * 2089 * In IBSS mode we use this interrupt just to 2090 * keep track of the next TBTT (target beacon 2091 * transmission time) in order to detect whether 2092 * automatic TSF updates happened. 2093 */ 2094 if (ah->opmode == NL80211_IFTYPE_ADHOC) { 2095 /* XXX: only if VEOL supported */ 2096 u64 tsf = ath5k_hw_get_tsf64(ah); 2097 ah->nexttbtt += ah->bintval; 2098 ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, 2099 "SWBA nexttbtt: %x hw_tu: %x " 2100 "TSF: %llx\n", 2101 ah->nexttbtt, 2102 TSF_TO_TU(tsf), 2103 (unsigned long long) tsf); 2104 } else { 2105 spin_lock(&ah->block); 2106 ath5k_beacon_send(ah); 2107 spin_unlock(&ah->block); 2108 } 2109 } 2110 2111 2112 /********************\ 2113 * Interrupt handling * 2114 \********************/ 2115 2116 static void 2117 ath5k_intr_calibration_poll(struct ath5k_hw *ah) 2118 { 2119 if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) && 2120 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) && 2121 !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) { 2122 2123 /* Run ANI only when calibration is not active */ 2124 2125 ah->ah_cal_next_ani = jiffies + 2126 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI); 2127 tasklet_schedule(&ah->ani_tasklet); 2128 2129 } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) && 2130 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) && 2131 !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) { 2132 2133 /* Run calibration only when another calibration 2134 * is not running. 2135 * 2136 * Note: This is for both full/short calibration, 2137 * if it's time for a full one, ath5k_calibrate_work will deal 2138 * with it. 
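 *
 * Deadline handling follows the usual jiffies pattern, sketched here
 * for clarity:
 *
 *	deadline = jiffies + msecs_to_jiffies(interval_ms);
 *	...
 *	if (time_is_before_eq_jiffies(deadline))
 *		re-arm the deadline and schedule the work/tasklet
 *
 * where time_is_before_eq_jiffies(x) becomes true once deadline x has
 * passed.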
*/ 2139 2140 ah->ah_cal_next_short = jiffies + 2141 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT); 2142 ieee80211_queue_work(ah->hw, &ah->calib_work); 2143 } 2144 /* we could use SWI to generate enough interrupts to meet our 2145 * calibration interval requirements, if necessary: 2146 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */ 2147 } 2148 2149 static void 2150 ath5k_schedule_rx(struct ath5k_hw *ah) 2151 { 2152 ah->rx_pending = true; 2153 tasklet_schedule(&ah->rxtq); 2154 } 2155 2156 static void 2157 ath5k_schedule_tx(struct ath5k_hw *ah) 2158 { 2159 ah->tx_pending = true; 2160 tasklet_schedule(&ah->txtq); 2161 } 2162 2163 static irqreturn_t 2164 ath5k_intr(int irq, void *dev_id) 2165 { 2166 struct ath5k_hw *ah = dev_id; 2167 enum ath5k_int status; 2168 unsigned int counter = 1000; 2169 2170 2171 /* 2172 * If hw is not ready (or detached) and we get an 2173 * interrupt, or if we have no interrupts pending 2174 * (that means it's not for us) skip it. 2175 * 2176 * NOTE: Group 0/1 PCI interface registers are not 2177 * supported on WiSOCs, so we can't check for pending 2178 * interrupts (ISR belongs to another register group 2179 * so we are ok). 2180 */ 2181 if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) || 2182 ((ath5k_get_bus_type(ah) != ATH_AHB) && 2183 !ath5k_hw_is_intr_pending(ah)))) 2184 return IRQ_NONE; 2185 2186 /** Main loop **/ 2187 do { 2188 ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ 2189 2190 ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n", 2191 status, ah->imask); 2192 2193 /* 2194 * Fatal hw error -> Log and reset 2195 * 2196 * Fatal errors are unrecoverable, so we have to 2197 * reset the card. These errors include bus and 2198 * dma errors. 2199 */ 2200 if (unlikely(status & AR5K_INT_FATAL)) { 2201 2202 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2203 "fatal int, resetting\n"); 2204 ieee80211_queue_work(ah->hw, &ah->reset_work); 2205 2206 /* 2207 * RX Overrun -> Count and reset if needed 2208 * 2209 * Receive buffers are full. Either the bus is busy or 2210 * the CPU is not fast enough to process all received 2211 * frames. 2212 */ 2213 } else if (unlikely(status & AR5K_INT_RXORN)) { 2214 2215 /* 2216 * Older chipsets need a reset to come out of this 2217 * condition, but we treat it as RX for newer chips. 2218 * We don't know exactly which versions need a reset; 2219 * this guess is copied from the HAL. 2220 */ 2221 ah->stats.rxorn_intr++; 2222 2223 if (ah->ah_mac_srev < AR5K_SREV_AR5212) { 2224 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2225 "rx overrun, resetting\n"); 2226 ieee80211_queue_work(ah->hw, &ah->reset_work); 2227 } else 2228 ath5k_schedule_rx(ah); 2229 2230 } else { 2231 2232 /* Software Beacon Alert -> Schedule beacon tasklet */ 2233 if (status & AR5K_INT_SWBA) 2234 tasklet_hi_schedule(&ah->beacontq); 2235 2236 /* 2237 * No more RX descriptors -> Just count 2238 * 2239 * NB: the hardware should re-read the link when 2240 * RXE bit is written, but it doesn't work at 2241 * least on older hardware revs.
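 *
 * (The branches below are plain bit tests on the ISR snapshot and are
 * not mutually exclusive; several may fire in a single pass, e.g.:
 *
 *	if (status & AR5K_INT_RXEOL)	count it
 *	if (status & AR5K_INT_TXURN)	raise the tx trigger level
 *	if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))	schedule rx
 *
 * and so on for the tx, MIB and GPIO bits.)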
2242 */ 2243 if (status & AR5K_INT_RXEOL) 2244 ah->stats.rxeol_intr++; 2245 2246 2247 /* TX Underrun -> Bump tx trigger level */ 2248 if (status & AR5K_INT_TXURN) 2249 ath5k_hw_update_tx_triglevel(ah, true); 2250 2251 /* RX -> Schedule rx tasklet */ 2252 if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR)) 2253 ath5k_schedule_rx(ah); 2254 2255 /* TX -> Schedule tx tasklet */ 2256 if (status & (AR5K_INT_TXOK 2257 | AR5K_INT_TXDESC 2258 | AR5K_INT_TXERR 2259 | AR5K_INT_TXEOL)) 2260 ath5k_schedule_tx(ah); 2261 2262 /* Missed beacon -> TODO 2263 if (status & AR5K_INT_BMISS) 2264 */ 2265 2266 /* MIB event -> Update counters and notify ANI */ 2267 if (status & AR5K_INT_MIB) { 2268 ah->stats.mib_intr++; 2269 ath5k_hw_update_mib_counters(ah); 2270 ath5k_ani_mib_intr(ah); 2271 } 2272 2273 /* GPIO -> Notify RFKill layer */ 2274 if (status & AR5K_INT_GPIO) 2275 tasklet_schedule(&ah->rf_kill.toggleq); 2276 2277 } 2278 2279 if (ath5k_get_bus_type(ah) == ATH_AHB) 2280 break; 2281 2282 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0); 2283 2284 /* 2285 * Until we handle rx/tx interrupts, mask them on IMR 2286 * 2287 * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets 2288 * and unset after we've handled the interrupts. 2289 */ 2290 if (ah->rx_pending || ah->tx_pending) 2291 ath5k_set_current_imask(ah); 2292 2293 if (unlikely(!counter)) 2294 ATH5K_WARN(ah, "too many interrupts, giving up for now\n"); 2295 2296 /* Fire up calibration poll */ 2297 ath5k_intr_calibration_poll(ah); 2298 2299 return IRQ_HANDLED; 2300 } 2301 2302 /* 2303 * Periodically recalibrate the PHY to account 2304 * for temperature/environment changes. 2305 */ 2306 static void 2307 ath5k_calibrate_work(struct work_struct *work) 2308 { 2309 struct ath5k_hw *ah = container_of(work, struct ath5k_hw, 2310 calib_work); 2311 2312 /* Should we run a full calibration? */ 2313 if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) { 2314 2315 ah->ah_cal_next_full = jiffies + 2316 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); 2317 ah->ah_cal_mask |= AR5K_CALIBRATION_FULL; 2318 2319 ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, 2320 "running full calibration\n"); 2321 2322 if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) { 2323 /* 2324 * Rfgain is out of bounds, reset the chip 2325 * to load new gain values.
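 *
 * Condensed flow of this work item (sketch of the surrounding code):
 *
 *	if the full-calibration deadline has passed:
 *		flag AR5K_CALIBRATION_FULL, re-arm the deadline and run
 *		ath5k_hw_gainf_calibrate(), scheduling a reset if the
 *		RF gain is out of bounds
 *	else:
 *		flag AR5K_CALIBRATION_SHORT
 *	then run ath5k_hw_phy_calibrate() and clear the flag again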
2326 */ 2327 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2328 "got new rfgain, resetting\n"); 2329 ieee80211_queue_work(ah->hw, &ah->reset_work); 2330 } 2331 } else 2332 ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT; 2333 2334 2335 ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", 2336 ieee80211_frequency_to_channel(ah->curchan->center_freq), 2337 ah->curchan->hw_value); 2338 2339 if (ath5k_hw_phy_calibrate(ah, ah->curchan)) 2340 ATH5K_ERR(ah, "calibration of channel %u failed\n", 2341 ieee80211_frequency_to_channel( 2342 ah->curchan->center_freq)); 2343 2344 /* Clear calibration flags */ 2345 if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) 2346 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL; 2347 else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT) 2348 ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT; 2349 } 2350 2351 2352 static void 2353 ath5k_tasklet_ani(unsigned long data) 2354 { 2355 struct ath5k_hw *ah = (void *)data; 2356 2357 ah->ah_cal_mask |= AR5K_CALIBRATION_ANI; 2358 ath5k_ani_calibration(ah); 2359 ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI; 2360 } 2361 2362 2363 static void 2364 ath5k_tx_complete_poll_work(struct work_struct *work) 2365 { 2366 struct ath5k_hw *ah = container_of(work, struct ath5k_hw, 2367 tx_complete_work.work); 2368 struct ath5k_txq *txq; 2369 int i; 2370 bool needreset = false; 2371 2372 if (!test_bit(ATH_STAT_STARTED, ah->status)) 2373 return; 2374 2375 mutex_lock(&ah->lock); 2376 2377 for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { 2378 if (ah->txqs[i].setup) { 2379 txq = &ah->txqs[i]; 2380 spin_lock_bh(&txq->lock); 2381 if (txq->txq_len > 1) { 2382 if (txq->txq_poll_mark) { 2383 ATH5K_DBG(ah, ATH5K_DEBUG_XMIT, 2384 "TX queue stuck %d\n", 2385 txq->qnum); 2386 needreset = true; 2387 txq->txq_stuck++; 2388 spin_unlock_bh(&txq->lock); 2389 break; 2390 } else { 2391 txq->txq_poll_mark = true; 2392 } 2393 } 2394 spin_unlock_bh(&txq->lock); 2395 } 2396 } 2397 2398 if (needreset) { 2399 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2400 "TX queues stuck, resetting\n"); 2401 ath5k_reset(ah, NULL, true); 2402 } 2403 2404 mutex_unlock(&ah->lock); 2405 2406 ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work, 2407 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); 2408 } 2409 2410 2411 /*************************\ 2412 * Initialization routines * 2413 \*************************/ 2414 2415 static const struct ieee80211_iface_limit if_limits[] = { 2416 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) }, 2417 { .max = 4, .types = 2418 #ifdef CONFIG_MAC80211_MESH 2419 BIT(NL80211_IFTYPE_MESH_POINT) | 2420 #endif 2421 BIT(NL80211_IFTYPE_AP) }, 2422 }; 2423 2424 static const struct ieee80211_iface_combination if_comb = { 2425 .limits = if_limits, 2426 .n_limits = ARRAY_SIZE(if_limits), 2427 .max_interfaces = 2048, 2428 .num_different_channels = 1, 2429 }; 2430 2431 int 2432 ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops) 2433 { 2434 struct ieee80211_hw *hw = ah->hw; 2435 struct ath_common *common; 2436 int ret; 2437 int csz; 2438 2439 /* Initialize driver private data */ 2440 SET_IEEE80211_DEV(hw, ah->dev); 2441 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 2442 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2443 IEEE80211_HW_SIGNAL_DBM | 2444 IEEE80211_HW_MFP_CAPABLE | 2445 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 2446 2447 hw->wiphy->interface_modes = 2448 BIT(NL80211_IFTYPE_AP) | 2449 BIT(NL80211_IFTYPE_STATION) | 2450 BIT(NL80211_IFTYPE_ADHOC) | 2451 BIT(NL80211_IFTYPE_MESH_POINT); 2452 2453 hw->wiphy->iface_combinations = &if_comb; 2454 hw->wiphy->n_iface_combinations = 1; 2455 2456 /* SW 
support for IBSS_RSN is provided by mac80211 */ 2457 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 2458 2459 /* both antennas can be configured as RX or TX */ 2460 hw->wiphy->available_antennas_tx = 0x3; 2461 hw->wiphy->available_antennas_rx = 0x3; 2462 2463 hw->extra_tx_headroom = 2; 2464 hw->channel_change_time = 5000; 2465 2466 /* 2467 * Mark the device as detached to avoid processing 2468 * interrupts until setup is complete. 2469 */ 2470 __set_bit(ATH_STAT_INVALID, ah->status); 2471 2472 ah->opmode = NL80211_IFTYPE_STATION; 2473 ah->bintval = 1000; 2474 mutex_init(&ah->lock); 2475 spin_lock_init(&ah->rxbuflock); 2476 spin_lock_init(&ah->txbuflock); 2477 spin_lock_init(&ah->block); 2478 spin_lock_init(&ah->irqlock); 2479 2480 /* Setup interrupt handler */ 2481 ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah); 2482 if (ret) { 2483 ATH5K_ERR(ah, "request_irq failed\n"); 2484 goto err; 2485 } 2486 2487 common = ath5k_hw_common(ah); 2488 common->ops = &ath5k_common_ops; 2489 common->bus_ops = bus_ops; 2490 common->ah = ah; 2491 common->hw = hw; 2492 common->priv = ah; 2493 common->clockrate = 40; 2494 2495 /* 2496 * Cache line size is used to size and align various 2497 * structures used to communicate with the hardware. 2498 */ 2499 ath5k_read_cachesize(common, &csz); 2500 common->cachelsz = csz << 2; /* convert to bytes */ 2501 2502 spin_lock_init(&common->cc_lock); 2503 2504 /* Initialize device */ 2505 ret = ath5k_hw_init(ah); 2506 if (ret) 2507 goto err_irq; 2508 2509 /* Set up multi-rate retry capabilities */ 2510 if (ah->ah_capabilities.cap_has_mrr_support) { 2511 hw->max_rates = 4; 2512 hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT, 2513 AR5K_INIT_RETRY_LONG); 2514 } 2515 2516 hw->vif_data_size = sizeof(struct ath5k_vif); 2517 2518 /* Finish private driver data initialization */ 2519 ret = ath5k_init(hw); 2520 if (ret) 2521 goto err_ah; 2522 2523 ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n", 2524 ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev), 2525 ah->ah_mac_srev, 2526 ah->ah_phy_revision); 2527 2528 if (!ah->ah_single_chip) { 2529 /* Single chip radio (!RF5111) */ 2530 if (ah->ah_radio_5ghz_revision && 2531 !ah->ah_radio_2ghz_revision) { 2532 /* No 5GHz support -> report 2GHz radio */ 2533 if (!test_bit(AR5K_MODE_11A, 2534 ah->ah_capabilities.cap_mode)) { 2535 ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n", 2536 ath5k_chip_name(AR5K_VERSION_RAD, 2537 ah->ah_radio_5ghz_revision), 2538 ah->ah_radio_5ghz_revision); 2539 /* No 2GHz support (5110 and some 2540 * 5GHz only cards) -> report 5GHz radio */ 2541 } else if (!test_bit(AR5K_MODE_11B, 2542 ah->ah_capabilities.cap_mode)) { 2543 ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n", 2544 ath5k_chip_name(AR5K_VERSION_RAD, 2545 ah->ah_radio_5ghz_revision), 2546 ah->ah_radio_5ghz_revision); 2547 /* Multiband radio */ 2548 } else { 2549 ATH5K_INFO(ah, "RF%s multiband radio found" 2550 " (0x%x)\n", 2551 ath5k_chip_name(AR5K_VERSION_RAD, 2552 ah->ah_radio_5ghz_revision), 2553 ah->ah_radio_5ghz_revision); 2554 } 2555 } 2556 /* Multi chip radio (RF5111 - RF2111) -> 2557 * report both 2GHz/5GHz radios */ 2558 else if (ah->ah_radio_5ghz_revision && 2559 ah->ah_radio_2ghz_revision) { 2560 ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n", 2561 ath5k_chip_name(AR5K_VERSION_RAD, 2562 ah->ah_radio_5ghz_revision), 2563 ah->ah_radio_5ghz_revision); 2564 ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n", 2565 ath5k_chip_name(AR5K_VERSION_RAD, 2566 ah->ah_radio_2ghz_revision), 2567 ah->ah_radio_2ghz_revision); 2568 } 
2569 } 2570 2571 ath5k_debug_init_device(ah); 2572 2573 /* ready to process interrupts */ 2574 __clear_bit(ATH_STAT_INVALID, ah->status); 2575 2576 return 0; 2577 err_ah: 2578 ath5k_hw_deinit(ah); 2579 err_irq: 2580 free_irq(ah->irq, ah); 2581 err: 2582 return ret; 2583 } 2584 2585 static int 2586 ath5k_stop_locked(struct ath5k_hw *ah) 2587 { 2588 2589 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n", 2590 test_bit(ATH_STAT_INVALID, ah->status)); 2591 2592 /* 2593 * Shutdown the hardware and driver: 2594 * stop output from above 2595 * disable interrupts 2596 * turn off timers 2597 * turn off the radio 2598 * clear transmit machinery 2599 * clear receive machinery 2600 * drain and release tx queues 2601 * reclaim beacon resources 2602 * power down hardware 2603 * 2604 * Note that some of this work is not possible if the 2605 * hardware is gone (invalid). 2606 */ 2607 ieee80211_stop_queues(ah->hw); 2608 2609 if (!test_bit(ATH_STAT_INVALID, ah->status)) { 2610 ath5k_led_off(ah); 2611 ath5k_hw_set_imr(ah, 0); 2612 synchronize_irq(ah->irq); 2613 ath5k_rx_stop(ah); 2614 ath5k_hw_dma_stop(ah); 2615 ath5k_drain_tx_buffs(ah); 2616 ath5k_hw_phy_disable(ah); 2617 } 2618 2619 return 0; 2620 } 2621 2622 int ath5k_start(struct ieee80211_hw *hw) 2623 { 2624 struct ath5k_hw *ah = hw->priv; 2625 struct ath_common *common = ath5k_hw_common(ah); 2626 int ret, i; 2627 2628 mutex_lock(&ah->lock); 2629 2630 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode); 2631 2632 /* 2633 * Stop anything previously set up. This is safe 2634 * whether or not this is the first time through. 2635 */ 2636 ath5k_stop_locked(ah); 2637 2638 /* 2639 * The basic interface to setting the hardware in a good 2640 * state is ``reset''. On return the hardware is known to 2641 * be powered up and with interrupts disabled. This must 2642 * be followed by initialization of the appropriate bits 2643 * and then setup of the interrupt mask. 2644 */ 2645 ah->curchan = ah->hw->conf.chandef.chan; 2646 ah->imask = AR5K_INT_RXOK 2647 | AR5K_INT_RXERR 2648 | AR5K_INT_RXEOL 2649 | AR5K_INT_RXORN 2650 | AR5K_INT_TXDESC 2651 | AR5K_INT_TXEOL 2652 | AR5K_INT_FATAL 2653 | AR5K_INT_GLOBAL 2654 | AR5K_INT_MIB; 2655 2656 ret = ath5k_reset(ah, NULL, false); 2657 if (ret) 2658 goto done; 2659 2660 if (!ath5k_modparam_no_hw_rfkill_switch) 2661 ath5k_rfkill_hw_start(ah); 2662 2663 /* 2664 * Reset the key cache since some parts do not reset the 2665 * contents on initial power up or resume from suspend. 2666 */ 2667 for (i = 0; i < common->keymax; i++) 2668 ath_hw_keyreset(common, (u16) i); 2669 2670 /* Use higher rates for acks instead of base 2671 * rate */ 2672 ah->ah_ack_bitrate_high = true; 2673 2674 for (i = 0; i < ARRAY_SIZE(ah->bslot); i++) 2675 ah->bslot[i] = NULL; 2676 2677 ret = 0; 2678 done: 2679 mmiowb(); 2680 mutex_unlock(&ah->lock); 2681 2682 set_bit(ATH_STAT_STARTED, ah->status); 2683 ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work, 2684 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); 2685 2686 return ret; 2687 } 2688 2689 static void ath5k_stop_tasklets(struct ath5k_hw *ah) 2690 { 2691 ah->rx_pending = false; 2692 ah->tx_pending = false; 2693 tasklet_kill(&ah->rxtq); 2694 tasklet_kill(&ah->txtq); 2695 tasklet_kill(&ah->beacontq); 2696 tasklet_kill(&ah->ani_tasklet); 2697 } 2698 2699 /* 2700 * Stop the device, grabbing the top-level lock to protect 2701 * against concurrent entry through ath5k_init (which can happen 2702 * if another thread does a system call and the thread doing the 2703 * stop is preempted.
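 *
 * Sketch of the resulting lock discipline, as implemented below:
 *
 *	mutex_lock(&ah->lock);
 *	ath5k_stop_locked(ah);		quiesce the hw under the lock
 *	...
 *	mutex_unlock(&ah->lock);
 *	ath5k_stop_tasklets(ah);	kill tasklets outside the lock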
2704 */ 2705 void ath5k_stop(struct ieee80211_hw *hw) 2706 { 2707 struct ath5k_hw *ah = hw->priv; 2708 int ret; 2709 2710 mutex_lock(&ah->lock); 2711 ret = ath5k_stop_locked(ah); 2712 if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) { 2713 /* 2714 * Don't set the card in full sleep mode! 2715 * 2716 * a) When the device is in this state it must be carefully 2717 * woken up or references to registers in the PCI clock 2718 * domain may freeze the bus (and system). This varies 2719 * by chip and is mostly an issue with newer parts 2720 * (madwifi sources mentioned srev >= 0x78) that go to 2721 * sleep more quickly. 2722 * 2723 * b) On older chips full sleep results in weird behaviour 2724 * during wakeup. I tested various cards with srev < 0x78 2725 * and they don't wake up after module reload; a second 2726 * module reload is needed to bring the card up again. 2727 * 2728 * Until we figure out what's going on, don't enable 2729 * full chip reset on any chip (this is what Legacy HAL 2730 * and Sam's HAL do anyway). Instead perform a full reset 2731 * on the device (same as initial state after attach) and 2732 * leave it idle (keep MAC/BB on warm reset) */ 2733 ret = ath5k_hw_on_hold(ah); 2734 2735 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2736 "putting device to sleep\n"); 2737 } 2738 2739 mmiowb(); 2740 mutex_unlock(&ah->lock); 2741 2742 ath5k_stop_tasklets(ah); 2743 2744 clear_bit(ATH_STAT_STARTED, ah->status); 2745 cancel_delayed_work_sync(&ah->tx_complete_work); 2746 2747 if (!ath5k_modparam_no_hw_rfkill_switch) 2748 ath5k_rfkill_hw_stop(ah); 2749 } 2750 2751 /* 2752 * Reset the hardware. If chan is not NULL, then also pause rx/tx 2753 * and change to the given channel. 2754 * 2755 * This should be called with ah->lock held. 2756 */ 2757 static int 2758 ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan, 2759 bool skip_pcu) 2760 { 2761 struct ath_common *common = ath5k_hw_common(ah); 2762 int ret, ani_mode; 2763 bool fast; 2764 2765 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n"); 2766 2767 ath5k_hw_set_imr(ah, 0); 2768 synchronize_irq(ah->irq); 2769 ath5k_stop_tasklets(ah); 2770 2771 /* Save ani mode and disable ANI during 2772 * reset. If we don't, we might get false 2773 * PHY error interrupts. */ 2774 ani_mode = ah->ani_state.ani_mode; 2775 ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF); 2776 2777 /* We are going to empty hw queues 2778 * so we should also free any remaining 2779 * tx buffers */ 2780 ath5k_drain_tx_buffs(ah); 2781 if (chan) 2782 ah->curchan = chan; 2783 2784 fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0; 2785 2786 ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu); 2787 if (ret) { 2788 ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret); 2789 goto err; 2790 } 2791 2792 ret = ath5k_rx_start(ah); 2793 if (ret) { 2794 ATH5K_ERR(ah, "can't start recv logic\n"); 2795 goto err; 2796 } 2797 2798 ath5k_ani_init(ah, ani_mode); 2799 2800 /* 2801 * Set calibration intervals 2802 * 2803 * Note: We don't need to run calibration immediately 2804 * since some initial calibration is done on reset 2805 * even for fast channel switching. Also on scanning 2806 * this will get set again and again and it won't get 2807 * executed unless we connect somewhere and spend some 2808 * time on the channel (that's what calibration needs 2809 * anyway to be accurate.
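 *
 * The assignments below therefore only push the three deadlines
 * (full/ANI/short) one interval into the future; nothing is calibrated
 * here directly. The ewma_init() call that follows re-seeds the beacon
 * RSSI moving average (with the old lib/average.c semantics, weight 8
 * means each new sample contributes roughly 1/8 to the average).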
2810 */ 2811 ah->ah_cal_next_full = jiffies + 2812 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); 2813 ah->ah_cal_next_ani = jiffies + 2814 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI); 2815 ah->ah_cal_next_short = jiffies + 2816 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT); 2817 2818 ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8); 2819 2820 /* clear survey data and cycle counters */ 2821 memset(&ah->survey, 0, sizeof(ah->survey)); 2822 spin_lock_bh(&common->cc_lock); 2823 ath_hw_cycle_counters_update(common); 2824 memset(&common->cc_survey, 0, sizeof(common->cc_survey)); 2825 memset(&common->cc_ani, 0, sizeof(common->cc_ani)); 2826 spin_unlock_bh(&common->cc_lock); 2827 2828 /* 2829 * Change channels and update the h/w rate map if we're switching; 2830 * e.g. 11a to 11b/g. 2831 * 2832 * We may be doing a reset in response to an ioctl that changes the 2833 * channel so update any state that might change as a result. 2834 * 2835 * XXX needed? 2836 */ 2837 /* ath5k_chan_change(ah, c); */ 2838 2839 ath5k_beacon_config(ah); 2840 /* intrs are enabled by ath5k_beacon_config */ 2841 2842 ieee80211_wake_queues(ah->hw); 2843 2844 return 0; 2845 err: 2846 return ret; 2847 } 2848 2849 static void ath5k_reset_work(struct work_struct *work) 2850 { 2851 struct ath5k_hw *ah = container_of(work, struct ath5k_hw, 2852 reset_work); 2853 2854 mutex_lock(&ah->lock); 2855 ath5k_reset(ah, NULL, true); 2856 mutex_unlock(&ah->lock); 2857 } 2858 2859 static int 2860 ath5k_init(struct ieee80211_hw *hw) 2861 { 2862 2863 struct ath5k_hw *ah = hw->priv; 2864 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah); 2865 struct ath5k_txq *txq; 2866 u8 mac[ETH_ALEN] = {}; 2867 int ret; 2868 2869 2870 /* 2871 * Collect the channel list. The 802.11 layer 2872 * is responsible for filtering this list based 2873 * on settings like the phy mode and regulatory 2874 * domain restrictions. 2875 */ 2876 ret = ath5k_setup_bands(hw); 2877 if (ret) { 2878 ATH5K_ERR(ah, "can't get channels\n"); 2879 goto err; 2880 } 2881 2882 /* 2883 * Allocate tx+rx descriptors and populate the lists. 2884 */ 2885 ret = ath5k_desc_alloc(ah); 2886 if (ret) { 2887 ATH5K_ERR(ah, "can't allocate descriptors\n"); 2888 goto err; 2889 } 2890 2891 /* 2892 * Allocate hardware transmit queues: one queue for 2893 * beacon frames and one data queue for each QoS 2894 * priority. Note that hw functions handle resetting 2895 * these queues at the needed time. 
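 *
 * The data queues below are created in VO, VI, BE, BK order so that
 * the resulting indices line up with mac80211's queue numbering, i.e.:
 *
 *	mac80211 queue 0 -> AR5K_WME_AC_VO
 *	mac80211 queue 1 -> AR5K_WME_AC_VI
 *	mac80211 queue 2 -> AR5K_WME_AC_BE
 *	mac80211 queue 3 -> AR5K_WME_AC_BK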
2896 */ 2897 ret = ath5k_beaconq_setup(ah); 2898 if (ret < 0) { 2899 ATH5K_ERR(ah, "can't setup a beacon xmit queue\n"); 2900 goto err_desc; 2901 } 2902 ah->bhalq = ret; 2903 ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0); 2904 if (IS_ERR(ah->cabq)) { 2905 ATH5K_ERR(ah, "can't setup cab queue\n"); 2906 ret = PTR_ERR(ah->cabq); 2907 goto err_bhal; 2908 } 2909 2910 /* 5211 and 5212 usually support 10 queues but we better rely on the 2911 * capability information */ 2912 if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) { 2913 /* This order matches mac80211's queue priority, so we can 2914 * directly use the mac80211 queue number without any mapping */ 2915 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO); 2916 if (IS_ERR(txq)) { 2917 ATH5K_ERR(ah, "can't setup xmit queue\n"); 2918 ret = PTR_ERR(txq); 2919 goto err_queues; 2920 } 2921 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI); 2922 if (IS_ERR(txq)) { 2923 ATH5K_ERR(ah, "can't setup xmit queue\n"); 2924 ret = PTR_ERR(txq); 2925 goto err_queues; 2926 } 2927 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE); 2928 if (IS_ERR(txq)) { 2929 ATH5K_ERR(ah, "can't setup xmit queue\n"); 2930 ret = PTR_ERR(txq); 2931 goto err_queues; 2932 } 2933 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK); 2934 if (IS_ERR(txq)) { 2935 ATH5K_ERR(ah, "can't setup xmit queue\n"); 2936 ret = PTR_ERR(txq); 2937 goto err_queues; 2938 } 2939 hw->queues = 4; 2940 } else { 2941 /* older hardware (5210) can only support one data queue */ 2942 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE); 2943 if (IS_ERR(txq)) { 2944 ATH5K_ERR(ah, "can't setup xmit queue\n"); 2945 ret = PTR_ERR(txq); 2946 goto err_queues; 2947 } 2948 hw->queues = 1; 2949 } 2950 2951 tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah); 2952 tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah); 2953 tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah); 2954 tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah); 2955 2956 INIT_WORK(&ah->reset_work, ath5k_reset_work); 2957 INIT_WORK(&ah->calib_work, ath5k_calibrate_work); 2958 INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work); 2959 2960 ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac); 2961 if (ret) { 2962 ATH5K_ERR(ah, "unable to read address from EEPROM\n"); 2963 goto err_queues; 2964 } 2965 2966 SET_IEEE80211_PERM_ADDR(hw, mac); 2967 /* All MAC address bits matter for ACKs */ 2968 ath5k_update_bssid_mask_and_opmode(ah, NULL); 2969 2970 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain; 2971 ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier); 2972 if (ret) { 2973 ATH5K_ERR(ah, "can't initialize regulatory system\n"); 2974 goto err_queues; 2975 } 2976 2977 ret = ieee80211_register_hw(hw); 2978 if (ret) { 2979 ATH5K_ERR(ah, "can't register ieee80211 hw\n"); 2980 goto err_queues; 2981 } 2982 2983 if (!ath_is_world_regd(regulatory)) 2984 regulatory_hint(hw->wiphy, regulatory->alpha2); 2985 2986 ath5k_init_leds(ah); 2987 2988 ath5k_sysfs_register(ah); 2989 2990 return 0; 2991 err_queues: 2992 ath5k_txq_release(ah); 2993 err_bhal: 2994 ath5k_hw_release_tx_queue(ah, ah->bhalq); 2995 err_desc: 2996 ath5k_desc_free(ah); 2997 err: 2998 return ret; 2999 } 3000 3001 void 3002 ath5k_deinit_ah(struct ath5k_hw *ah) 3003 { 3004 struct ieee80211_hw *hw = ah->hw; 3005 3006 /* 3007 * NB: the order of these is important: 3008 * o call the 802.11 layer before detaching ath5k_hw to 3009 * ensure 
callbacks into the driver to delete global 3010 * key cache entries can be handled 3011 * o reclaim the tx queue data structures after calling 3012 * the 802.11 layer as we'll get called back to reclaim 3013 * node state and potentially want to use them 3014 * o to cleanup the tx queues the hal is called, so detach 3015 * it last 3016 * XXX: ??? detach ath5k_hw ??? 3017 * Other than that, it's straightforward... 3018 */ 3019 ieee80211_unregister_hw(hw); 3020 ath5k_desc_free(ah); 3021 ath5k_txq_release(ah); 3022 ath5k_hw_release_tx_queue(ah, ah->bhalq); 3023 ath5k_unregister_leds(ah); 3024 3025 ath5k_sysfs_unregister(ah); 3026 /* 3027 * NB: can't reclaim these until after ieee80211_ifdetach 3028 * returns because we'll get called back to reclaim node 3029 * state and potentially want to use them. 3030 */ 3031 ath5k_hw_deinit(ah); 3032 free_irq(ah->irq, ah); 3033 } 3034 3035 bool 3036 ath5k_any_vif_assoc(struct ath5k_hw *ah) 3037 { 3038 struct ath5k_vif_iter_data iter_data; 3039 iter_data.hw_macaddr = NULL; 3040 iter_data.any_assoc = false; 3041 iter_data.need_set_hw_addr = false; 3042 iter_data.found_active = true; 3043 3044 ieee80211_iterate_active_interfaces_atomic( 3045 ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL, 3046 ath5k_vif_iter, &iter_data); 3047 return iter_data.any_assoc; 3048 } 3049 3050 void 3051 ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable) 3052 { 3053 struct ath5k_hw *ah = hw->priv; 3054 u32 rfilt; 3055 rfilt = ath5k_hw_get_rx_filter(ah); 3056 if (enable) 3057 rfilt |= AR5K_RX_FILTER_BEACON; 3058 else 3059 rfilt &= ~AR5K_RX_FILTER_BEACON; 3060 ath5k_hw_set_rx_filter(ah, rfilt); 3061 ah->filter_flags = rfilt; 3062 } 3063 3064 void _ath5k_printk(const struct ath5k_hw *ah, const char *level, 3065 const char *fmt, ...) 3066 { 3067 struct va_format vaf; 3068 va_list args; 3069 3070 va_start(args, fmt); 3071 3072 vaf.fmt = fmt; 3073 vaf.va = &args; 3074 3075 if (ah && ah->hw) 3076 printk("%s" pr_fmt("%s: %pV"), 3077 level, wiphy_name(ah->hw->wiphy), &vaf); 3078 else 3079 printk("%s" pr_fmt("%pV"), level, &vaf); 3080 3081 va_end(args); 3082 } 3083
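/*
 * Usage note (illustrative; the wrapper macros live in the driver
 * headers, not in this file): _ath5k_printk() above is the backend for
 * the ATH5K_ERR/ATH5K_WARN/ATH5K_INFO style wrappers used throughout
 * this file, so a call such as
 *
 *	ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
 *
 * ends up here with level set to the matching KERN_* string and is
 * printed with the module-name prefix (via pr_fmt) and, when available,
 * the wiphy name.
 */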