1 /****************************************************************************** 2 3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. 4 5 802.11 status code portion of this file from ethereal-0.10.6: 6 Copyright 2000, Axis Communications AB 7 Ethereal - Network traffic analyzer 8 By Gerald Combs <gerald@ethereal.com> 9 Copyright 1998 Gerald Combs 10 11 This program is free software; you can redistribute it and/or modify it 12 under the terms of version 2 of the GNU General Public License as 13 published by the Free Software Foundation. 14 15 This program is distributed in the hope that it will be useful, but WITHOUT 16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 18 more details. 19 20 You should have received a copy of the GNU General Public License along with 21 this program; if not, write to the Free Software Foundation, Inc., 59 22 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 24 The full GNU General Public License is included in this distribution in the 25 file called LICENSE. 26 27 Contact Information: 28 Intel Linux Wireless <ilw@linux.intel.com> 29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 30 31 ******************************************************************************/ 32 33 #include <linux/sched.h> 34 #include <linux/slab.h> 35 #include <net/cfg80211-wext.h> 36 #include "ipw2200.h" 37 #include "ipw.h" 38 39 40 #ifndef KBUILD_EXTMOD 41 #define VK "k" 42 #else 43 #define VK 44 #endif 45 46 #ifdef CONFIG_IPW2200_DEBUG 47 #define VD "d" 48 #else 49 #define VD 50 #endif 51 52 #ifdef CONFIG_IPW2200_MONITOR 53 #define VM "m" 54 #else 55 #define VM 56 #endif 57 58 #ifdef CONFIG_IPW2200_PROMISCUOUS 59 #define VP "p" 60 #else 61 #define VP 62 #endif 63 64 #ifdef CONFIG_IPW2200_RADIOTAP 65 #define VR "r" 66 #else 67 #define VR 68 #endif 69 70 #ifdef CONFIG_IPW2200_QOS 71 #define VQ "q" 72 #else 73 #define VQ 74 #endif 75 76 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ 77 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" 78 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" 79 #define DRV_VERSION IPW2200_VERSION 80 81 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1) 82 83 MODULE_DESCRIPTION(DRV_DESCRIPTION); 84 MODULE_VERSION(DRV_VERSION); 85 MODULE_AUTHOR(DRV_COPYRIGHT); 86 MODULE_LICENSE("GPL"); 87 MODULE_FIRMWARE("ipw2200-ibss.fw"); 88 #ifdef CONFIG_IPW2200_MONITOR 89 MODULE_FIRMWARE("ipw2200-sniffer.fw"); 90 #endif 91 MODULE_FIRMWARE("ipw2200-bss.fw"); 92 93 static int cmdlog = 0; 94 static int debug = 0; 95 static int default_channel = 0; 96 static int network_mode = 0; 97 98 static u32 ipw_debug_level; 99 static int associate; 100 static int auto_create = 1; 101 static int led_support = 1; 102 static int disable = 0; 103 static int bt_coexist = 0; 104 static int hwcrypto = 0; 105 static int roaming = 1; 106 static const char ipw_modes[] = { 107 'a', 'b', 'g', '?' 
108 }; 109 static int antenna = CFG_SYS_ANTENNA_BOTH; 110 111 #ifdef CONFIG_IPW2200_PROMISCUOUS 112 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */ 113 #endif 114 115 static struct ieee80211_rate ipw2200_rates[] = { 116 { .bitrate = 10 }, 117 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 118 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 119 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 120 { .bitrate = 60 }, 121 { .bitrate = 90 }, 122 { .bitrate = 120 }, 123 { .bitrate = 180 }, 124 { .bitrate = 240 }, 125 { .bitrate = 360 }, 126 { .bitrate = 480 }, 127 { .bitrate = 540 } 128 }; 129 130 #define ipw2200_a_rates (ipw2200_rates + 4) 131 #define ipw2200_num_a_rates 8 132 #define ipw2200_bg_rates (ipw2200_rates + 0) 133 #define ipw2200_num_bg_rates 12 134 135 /* Ugly macro to convert literal channel numbers into their mhz equivalents 136 * There are certianly some conditions that will break this (like feeding it '30') 137 * but they shouldn't arise since nothing talks on channel 30. */ 138 #define ieee80211chan2mhz(x) \ 139 (((x) <= 14) ? \ 140 (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \ 141 ((x) + 1000) * 5) 142 143 #ifdef CONFIG_IPW2200_QOS 144 static int qos_enable = 0; 145 static int qos_burst_enable = 0; 146 static int qos_no_ack_mask = 0; 147 static int burst_duration_CCK = 0; 148 static int burst_duration_OFDM = 0; 149 150 static struct libipw_qos_parameters def_qos_parameters_OFDM = { 151 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM, 152 QOS_TX3_CW_MIN_OFDM}, 153 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM, 154 QOS_TX3_CW_MAX_OFDM}, 155 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS}, 156 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM}, 157 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM, 158 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM} 159 }; 160 161 static struct libipw_qos_parameters def_qos_parameters_CCK = { 162 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK, 163 QOS_TX3_CW_MIN_CCK}, 164 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK, 165 QOS_TX3_CW_MAX_CCK}, 166 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS}, 167 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM}, 168 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK, 169 QOS_TX3_TXOP_LIMIT_CCK} 170 }; 171 172 static struct libipw_qos_parameters def_parameters_OFDM = { 173 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM, 174 DEF_TX3_CW_MIN_OFDM}, 175 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM, 176 DEF_TX3_CW_MAX_OFDM}, 177 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS}, 178 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM}, 179 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM, 180 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM} 181 }; 182 183 static struct libipw_qos_parameters def_parameters_CCK = { 184 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK, 185 DEF_TX3_CW_MIN_CCK}, 186 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK, 187 DEF_TX3_CW_MAX_CCK}, 188 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS}, 189 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM}, 190 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK, 191 DEF_TX3_TXOP_LIMIT_CCK} 192 }; 193 194 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; 195 196 static int from_priority_to_tx_queue[] = { 197 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1, 198 IPW_TX_QUEUE_3, 
IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4 199 }; 200 201 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv); 202 203 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters 204 *qos_param); 205 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element 206 *qos_param); 207 #endif /* CONFIG_IPW2200_QOS */ 208 209 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev); 210 static void ipw_remove_current_network(struct ipw_priv *priv); 211 static void ipw_rx(struct ipw_priv *priv); 212 static int ipw_queue_tx_reclaim(struct ipw_priv *priv, 213 struct clx2_tx_queue *txq, int qindex); 214 static int ipw_queue_reset(struct ipw_priv *priv); 215 216 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, 217 int len, int sync); 218 219 static void ipw_tx_queue_free(struct ipw_priv *); 220 221 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *); 222 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); 223 static void ipw_rx_queue_replenish(void *); 224 static int ipw_up(struct ipw_priv *); 225 static void ipw_bg_up(struct work_struct *work); 226 static void ipw_down(struct ipw_priv *); 227 static void ipw_bg_down(struct work_struct *work); 228 static int ipw_config(struct ipw_priv *); 229 static int init_supported_rates(struct ipw_priv *priv, 230 struct ipw_supported_rates *prates); 231 static void ipw_set_hwcrypto_keys(struct ipw_priv *); 232 static void ipw_send_wep_keys(struct ipw_priv *, int); 233 234 static int snprint_line(char *buf, size_t count, 235 const u8 * data, u32 len, u32 ofs) 236 { 237 int out, i, j, l; 238 char c; 239 240 out = snprintf(buf, count, "%08X", ofs); 241 242 for (l = 0, i = 0; i < 2; i++) { 243 out += snprintf(buf + out, count - out, " "); 244 for (j = 0; j < 8 && l < len; j++, l++) 245 out += snprintf(buf + out, count - out, "%02X ", 246 data[(i * 8 + j)]); 247 for (; j < 8; j++) 248 out += snprintf(buf + out, count - out, " "); 249 } 250 251 out += snprintf(buf + out, count - out, " "); 252 for (l = 0, i = 0; i < 2; i++) { 253 out += snprintf(buf + out, count - out, " "); 254 for (j = 0; j < 8 && l < len; j++, l++) { 255 c = data[(i * 8 + j)]; 256 if (!isascii(c) || !isprint(c)) 257 c = '.'; 258 259 out += snprintf(buf + out, count - out, "%c", c); 260 } 261 262 for (; j < 8; j++) 263 out += snprintf(buf + out, count - out, " "); 264 } 265 266 return out; 267 } 268 269 static void printk_buf(int level, const u8 * data, u32 len) 270 { 271 char line[81]; 272 u32 ofs = 0; 273 if (!(ipw_debug_level & level)) 274 return; 275 276 while (len) { 277 snprint_line(line, sizeof(line), &data[ofs], 278 min(len, 16U), ofs); 279 printk(KERN_DEBUG "%s\n", line); 280 ofs += 16; 281 len -= min(len, 16U); 282 } 283 } 284 285 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len) 286 { 287 size_t out = size; 288 u32 ofs = 0; 289 int total = 0; 290 291 while (size && len) { 292 out = snprint_line(output, size, &data[ofs], 293 min_t(size_t, len, 16U), ofs); 294 295 ofs += 16; 296 output += out; 297 size -= out; 298 len -= min_t(size_t, len, 16U); 299 total += out; 300 } 301 return total; 302 } 303 304 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ 305 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg); 306 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b) 307 308 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ 309 static u8 
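/*
 * Illustrative example of the row format produced by snprint_line() above
 * (spacing approximate, since it depends on the padding writes): each
 * 16-byte row is an 8-digit hex offset, two groups of eight hex bytes, and
 * the same bytes as printable ASCII with '.' substituted for anything
 * non-printable.  For the bytes of the string "ipw2200!" at offset 0:
 *
 *   00000000  69 70 77 32 32 30 30 21                           ipw2200!
 *
 * printk_buf() and snprintk_buf() simply emit one such row per 16 bytes.
 */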
_ipw_read_reg8(struct ipw_priv *ipw, u32 reg); 310 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b) 311 312 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 313 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); 314 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) 315 { 316 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__, 317 __LINE__, (u32) (b), (u32) (c)); 318 _ipw_write_reg8(a, b, c); 319 } 320 321 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 322 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); 323 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) 324 { 325 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__, 326 __LINE__, (u32) (b), (u32) (c)); 327 _ipw_write_reg16(a, b, c); 328 } 329 330 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 331 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); 332 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) 333 { 334 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__, 335 __LINE__, (u32) (b), (u32) (c)); 336 _ipw_write_reg32(a, b, c); 337 } 338 339 /* 8-bit direct write (low 4K) */ 340 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs, 341 u8 val) 342 { 343 writeb(val, ipw->hw_base + ofs); 344 } 345 346 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 347 #define ipw_write8(ipw, ofs, val) do { \ 348 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \ 349 __LINE__, (u32)(ofs), (u32)(val)); \ 350 _ipw_write8(ipw, ofs, val); \ 351 } while (0) 352 353 /* 16-bit direct write (low 4K) */ 354 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs, 355 u16 val) 356 { 357 writew(val, ipw->hw_base + ofs); 358 } 359 360 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 361 #define ipw_write16(ipw, ofs, val) do { \ 362 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \ 363 __LINE__, (u32)(ofs), (u32)(val)); \ 364 _ipw_write16(ipw, ofs, val); \ 365 } while (0) 366 367 /* 32-bit direct write (low 4K) */ 368 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs, 369 u32 val) 370 { 371 writel(val, ipw->hw_base + ofs); 372 } 373 374 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 375 #define ipw_write32(ipw, ofs, val) do { \ 376 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \ 377 __LINE__, (u32)(ofs), (u32)(val)); \ 378 _ipw_write32(ipw, ofs, val); \ 379 } while (0) 380 381 /* 8-bit direct read (low 4K) */ 382 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs) 383 { 384 return readb(ipw->hw_base + ofs); 385 } 386 387 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 388 #define ipw_read8(ipw, ofs) ({ \ 389 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \ 390 (u32)(ofs)); \ 391 _ipw_read8(ipw, ofs); \ 392 }) 393 394 /* 16-bit direct read (low 4K) */ 395 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs) 396 { 397 return readw(ipw->hw_base + ofs); 398 } 399 400 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 401 #define ipw_read16(ipw, ofs) ({ \ 402 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \ 403 (u32)(ofs)); \ 404 _ipw_read16(ipw, ofs); \ 405 }) 406 407 /* 32-bit direct read (low 4K) */ 408 static inline u32 
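/*
 * Access-helper overview (informal): offsets inside the first 4K of the
 * mapped PCI window are read and written directly with readb/readw/readl
 * and writeb/writew/writel (the ipw_read8/16/32 and ipw_write8/16/32
 * helpers), while anything above 4K goes through the indirect window by
 * latching the target address in IPW_INDIRECT_ADDR and then accessing
 * IPW_INDIRECT_DATA (the ipw_read_reg8/32 and ipw_write_reg8/16/32
 * helpers).  As a sketch, reading a hypothetical register at offset
 * 0x5000 would use
 *
 *	val = ipw_read_reg32(priv, 0x5000);
 *
 * whereas ipw_read32() is only appropriate for offsets below 4K.
 */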
_ipw_read32(struct ipw_priv *ipw, unsigned long ofs) 409 { 410 return readl(ipw->hw_base + ofs); 411 } 412 413 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 414 #define ipw_read32(ipw, ofs) ({ \ 415 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \ 416 (u32)(ofs)); \ 417 _ipw_read32(ipw, ofs); \ 418 }) 419 420 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); 421 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ 422 #define ipw_read_indirect(a, b, c, d) ({ \ 423 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \ 424 __LINE__, (u32)(b), (u32)(d)); \ 425 _ipw_read_indirect(a, b, c, d); \ 426 }) 427 428 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ 429 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data, 430 int num); 431 #define ipw_write_indirect(a, b, c, d) do { \ 432 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \ 433 __LINE__, (u32)(b), (u32)(d)); \ 434 _ipw_write_indirect(a, b, c, d); \ 435 } while (0) 436 437 /* 32-bit indirect write (above 4K) */ 438 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) 439 { 440 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value); 441 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); 442 _ipw_write32(priv, IPW_INDIRECT_DATA, value); 443 } 444 445 /* 8-bit indirect write (above 4K) */ 446 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) 447 { 448 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ 449 u32 dif_len = reg - aligned_addr; 450 451 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 452 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 453 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value); 454 } 455 456 /* 16-bit indirect write (above 4K) */ 457 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value) 458 { 459 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ 460 u32 dif_len = (reg - aligned_addr) & (~0x1ul); 461 462 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 463 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 464 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value); 465 } 466 467 /* 8-bit indirect read (above 4K) */ 468 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) 469 { 470 u32 word; 471 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); 472 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg); 473 word = _ipw_read32(priv, IPW_INDIRECT_DATA); 474 return (word >> ((reg & 0x3) * 8)) & 0xff; 475 } 476 477 /* 32-bit indirect read (above 4K) */ 478 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) 479 { 480 u32 value; 481 482 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg); 483 484 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); 485 value = _ipw_read32(priv, IPW_INDIRECT_DATA); 486 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value); 487 return value; 488 } 489 490 /* General purpose, no alignment requirement, iterative (multi-byte) read, */ 491 /* for area above 1st 4K of SRAM/reg space */ 492 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, 493 int num) 494 { 495 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ 496 u32 dif_len = addr - aligned_addr; 497 u32 i; 498 499 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num); 500 501 if (num <= 0) { 502 return; 503 } 504 505 /* Read the first dword (or portion) byte by byte */ 506 if (unlikely(dif_len)) { 507 
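/* Worked example of this unaligned-head case (addresses assumed for
 * illustration): for addr = 0x00030006 the dword-aligned base is
 * aligned_addr = 0x00030004 and dif_len = 2, so the bytes at offsets 2
 * and 3 of that dword are fetched one at a time through
 * IPW_INDIRECT_DATA, after which the auto-increment loop below continues
 * dword-by-dword from aligned_addr + 4. */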
_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 508 /* Start reading at aligned_addr + dif_len */ 509 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--) 510 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i); 511 aligned_addr += 4; 512 } 513 514 /* Read all of the middle dwords as dwords, with auto-increment */ 515 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); 516 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) 517 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA); 518 519 /* Read the last dword (or portion) byte by byte */ 520 if (unlikely(num)) { 521 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 522 for (i = 0; num > 0; i++, num--) 523 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i); 524 } 525 } 526 527 /* General purpose, no alignment requirement, iterative (multi-byte) write, */ 528 /* for area above 1st 4K of SRAM/reg space */ 529 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, 530 int num) 531 { 532 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ 533 u32 dif_len = addr - aligned_addr; 534 u32 i; 535 536 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num); 537 538 if (num <= 0) { 539 return; 540 } 541 542 /* Write the first dword (or portion) byte by byte */ 543 if (unlikely(dif_len)) { 544 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 545 /* Start writing at aligned_addr + dif_len */ 546 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++) 547 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); 548 aligned_addr += 4; 549 } 550 551 /* Write all of the middle dwords as dwords, with auto-increment */ 552 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); 553 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) 554 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf); 555 556 /* Write the last dword (or portion) byte by byte */ 557 if (unlikely(num)) { 558 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 559 for (i = 0; num > 0; i++, num--, buf++) 560 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); 561 } 562 } 563 564 /* General purpose, no alignment requirement, iterative (multi-byte) write, */ 565 /* for 1st 4K of SRAM/regs space */ 566 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf, 567 int num) 568 { 569 memcpy_toio((priv->hw_base + addr), buf, num); 570 } 571 572 /* Set bit(s) in low 4K of SRAM/regs */ 573 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask) 574 { 575 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask); 576 } 577 578 /* Clear bit(s) in low 4K of SRAM/regs */ 579 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask) 580 { 581 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask); 582 } 583 584 static inline void __ipw_enable_interrupts(struct ipw_priv *priv) 585 { 586 if (priv->status & STATUS_INT_ENABLED) 587 return; 588 priv->status |= STATUS_INT_ENABLED; 589 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL); 590 } 591 592 static inline void __ipw_disable_interrupts(struct ipw_priv *priv) 593 { 594 if (!(priv->status & STATUS_INT_ENABLED)) 595 return; 596 priv->status &= ~STATUS_INT_ENABLED; 597 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 598 } 599 600 static inline void ipw_enable_interrupts(struct ipw_priv *priv) 601 { 602 unsigned long flags; 603 604 spin_lock_irqsave(&priv->irq_lock, flags); 605 __ipw_enable_interrupts(priv); 606 spin_unlock_irqrestore(&priv->irq_lock, flags); 607 } 608 609 static inline void ipw_disable_interrupts(struct ipw_priv *priv) 610 { 611 unsigned long 
flags; 612 613 spin_lock_irqsave(&priv->irq_lock, flags); 614 __ipw_disable_interrupts(priv); 615 spin_unlock_irqrestore(&priv->irq_lock, flags); 616 } 617 618 static char *ipw_error_desc(u32 val) 619 { 620 switch (val) { 621 case IPW_FW_ERROR_OK: 622 return "ERROR_OK"; 623 case IPW_FW_ERROR_FAIL: 624 return "ERROR_FAIL"; 625 case IPW_FW_ERROR_MEMORY_UNDERFLOW: 626 return "MEMORY_UNDERFLOW"; 627 case IPW_FW_ERROR_MEMORY_OVERFLOW: 628 return "MEMORY_OVERFLOW"; 629 case IPW_FW_ERROR_BAD_PARAM: 630 return "BAD_PARAM"; 631 case IPW_FW_ERROR_BAD_CHECKSUM: 632 return "BAD_CHECKSUM"; 633 case IPW_FW_ERROR_NMI_INTERRUPT: 634 return "NMI_INTERRUPT"; 635 case IPW_FW_ERROR_BAD_DATABASE: 636 return "BAD_DATABASE"; 637 case IPW_FW_ERROR_ALLOC_FAIL: 638 return "ALLOC_FAIL"; 639 case IPW_FW_ERROR_DMA_UNDERRUN: 640 return "DMA_UNDERRUN"; 641 case IPW_FW_ERROR_DMA_STATUS: 642 return "DMA_STATUS"; 643 case IPW_FW_ERROR_DINO_ERROR: 644 return "DINO_ERROR"; 645 case IPW_FW_ERROR_EEPROM_ERROR: 646 return "EEPROM_ERROR"; 647 case IPW_FW_ERROR_SYSASSERT: 648 return "SYSASSERT"; 649 case IPW_FW_ERROR_FATAL_ERROR: 650 return "FATAL_ERROR"; 651 default: 652 return "UNKNOWN_ERROR"; 653 } 654 } 655 656 static void ipw_dump_error_log(struct ipw_priv *priv, 657 struct ipw_fw_error *error) 658 { 659 u32 i; 660 661 if (!error) { 662 IPW_ERROR("Error allocating and capturing error log. " 663 "Nothing to dump.\n"); 664 return; 665 } 666 667 IPW_ERROR("Start IPW Error Log Dump:\n"); 668 IPW_ERROR("Status: 0x%08X, Config: %08X\n", 669 error->status, error->config); 670 671 for (i = 0; i < error->elem_len; i++) 672 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 673 ipw_error_desc(error->elem[i].desc), 674 error->elem[i].time, 675 error->elem[i].blink1, 676 error->elem[i].blink2, 677 error->elem[i].link1, 678 error->elem[i].link2, error->elem[i].data); 679 for (i = 0; i < error->log_len; i++) 680 IPW_ERROR("%i\t0x%08x\t%i\n", 681 error->log[i].time, 682 error->log[i].data, error->log[i].event); 683 } 684 685 static inline int ipw_is_init(struct ipw_priv *priv) 686 { 687 return (priv->status & STATUS_INIT) ? 
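/* Note on ipw_get_ordinal() below: the top bits of 'ord' select one of
 * three firmware tables.  Table 0 entries are plain u32 values read
 * directly at table0_addr + (ord << 2); table 1 entries are u32 values
 * fetched through the indirect register window (ipw_read_reg32); table 2
 * entries are variable-sized records whose second dword packs the
 * per-entry length in its first 16-bit word and the entry count in the
 * second.  As an arbitrary illustrative pair, a field_len of 8 with a
 * field_count of 3 gives total_len = 24 bytes. */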
1 : 0; 688 } 689 690 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len) 691 { 692 u32 addr, field_info, field_len, field_count, total_len; 693 694 IPW_DEBUG_ORD("ordinal = %i\n", ord); 695 696 if (!priv || !val || !len) { 697 IPW_DEBUG_ORD("Invalid argument\n"); 698 return -EINVAL; 699 } 700 701 /* verify device ordinal tables have been initialized */ 702 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) { 703 IPW_DEBUG_ORD("Access ordinals before initialization\n"); 704 return -EINVAL; 705 } 706 707 switch (IPW_ORD_TABLE_ID_MASK & ord) { 708 case IPW_ORD_TABLE_0_MASK: 709 /* 710 * TABLE 0: Direct access to a table of 32 bit values 711 * 712 * This is a very simple table with the data directly 713 * read from the table 714 */ 715 716 /* remove the table id from the ordinal */ 717 ord &= IPW_ORD_TABLE_VALUE_MASK; 718 719 /* boundary check */ 720 if (ord > priv->table0_len) { 721 IPW_DEBUG_ORD("ordinal value (%i) longer then " 722 "max (%i)\n", ord, priv->table0_len); 723 return -EINVAL; 724 } 725 726 /* verify we have enough room to store the value */ 727 if (*len < sizeof(u32)) { 728 IPW_DEBUG_ORD("ordinal buffer length too small, " 729 "need %zd\n", sizeof(u32)); 730 return -EINVAL; 731 } 732 733 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n", 734 ord, priv->table0_addr + (ord << 2)); 735 736 *len = sizeof(u32); 737 ord <<= 2; 738 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord); 739 break; 740 741 case IPW_ORD_TABLE_1_MASK: 742 /* 743 * TABLE 1: Indirect access to a table of 32 bit values 744 * 745 * This is a fairly large table of u32 values each 746 * representing starting addr for the data (which is 747 * also a u32) 748 */ 749 750 /* remove the table id from the ordinal */ 751 ord &= IPW_ORD_TABLE_VALUE_MASK; 752 753 /* boundary check */ 754 if (ord > priv->table1_len) { 755 IPW_DEBUG_ORD("ordinal value too long\n"); 756 return -EINVAL; 757 } 758 759 /* verify we have enough room to store the value */ 760 if (*len < sizeof(u32)) { 761 IPW_DEBUG_ORD("ordinal buffer length too small, " 762 "need %zd\n", sizeof(u32)); 763 return -EINVAL; 764 } 765 766 *((u32 *) val) = 767 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2))); 768 *len = sizeof(u32); 769 break; 770 771 case IPW_ORD_TABLE_2_MASK: 772 /* 773 * TABLE 2: Indirect access to a table of variable sized values 774 * 775 * This table consist of six values, each containing 776 * - dword containing the starting offset of the data 777 * - dword containing the lengh in the first 16bits 778 * and the count in the second 16bits 779 */ 780 781 /* remove the table id from the ordinal */ 782 ord &= IPW_ORD_TABLE_VALUE_MASK; 783 784 /* boundary check */ 785 if (ord > priv->table2_len) { 786 IPW_DEBUG_ORD("ordinal value too long\n"); 787 return -EINVAL; 788 } 789 790 /* get the address of statistic */ 791 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3)); 792 793 /* get the second DW of statistics ; 794 * two 16-bit words - first is length, second is count */ 795 field_info = 796 ipw_read_reg32(priv, 797 priv->table2_addr + (ord << 3) + 798 sizeof(u32)); 799 800 /* get each entry length */ 801 field_len = *((u16 *) & field_info); 802 803 /* get number of entries */ 804 field_count = *(((u16 *) & field_info) + 1); 805 806 /* abort if not enough memory */ 807 total_len = field_len * field_count; 808 if (total_len > *len) { 809 *len = total_len; 810 return -EINVAL; 811 } 812 813 *len = total_len; 814 if (!total_len) 815 return 0; 816 817 IPW_DEBUG_ORD("addr = 0x%08x, 
total_len = %i, " 818 "field_info = 0x%08x\n", 819 addr, total_len, field_info); 820 ipw_read_indirect(priv, addr, val, total_len); 821 break; 822 823 default: 824 IPW_DEBUG_ORD("Invalid ordinal!\n"); 825 return -EINVAL; 826 827 } 828 829 return 0; 830 } 831 832 static void ipw_init_ordinals(struct ipw_priv *priv) 833 { 834 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER; 835 priv->table0_len = ipw_read32(priv, priv->table0_addr); 836 837 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n", 838 priv->table0_addr, priv->table0_len); 839 840 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1); 841 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr); 842 843 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n", 844 priv->table1_addr, priv->table1_len); 845 846 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2); 847 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr); 848 priv->table2_len &= 0x0000ffff; /* use first two bytes */ 849 850 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n", 851 priv->table2_addr, priv->table2_len); 852 853 } 854 855 static u32 ipw_register_toggle(u32 reg) 856 { 857 reg &= ~IPW_START_STANDBY; 858 if (reg & IPW_GATE_ODMA) 859 reg &= ~IPW_GATE_ODMA; 860 if (reg & IPW_GATE_IDMA) 861 reg &= ~IPW_GATE_IDMA; 862 if (reg & IPW_GATE_ADMA) 863 reg &= ~IPW_GATE_ADMA; 864 return reg; 865 } 866 867 /* 868 * LED behavior: 869 * - On radio ON, turn on any LEDs that require to be on during start 870 * - On initialization, start unassociated blink 871 * - On association, disable unassociated blink 872 * - On disassociation, start unassociated blink 873 * - On radio OFF, turn off any LEDs started during radio on 874 * 875 */ 876 #define LD_TIME_LINK_ON msecs_to_jiffies(300) 877 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700) 878 #define LD_TIME_ACT_ON msecs_to_jiffies(250) 879 880 static void ipw_led_link_on(struct ipw_priv *priv) 881 { 882 unsigned long flags; 883 u32 led; 884 885 /* If configured to not use LEDs, or nic_type is 1, 886 * then we don't toggle a LINK led */ 887 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1) 888 return; 889 890 spin_lock_irqsave(&priv->lock, flags); 891 892 if (!(priv->status & STATUS_RF_KILL_MASK) && 893 !(priv->status & STATUS_LED_LINK_ON)) { 894 IPW_DEBUG_LED("Link LED On\n"); 895 led = ipw_read_reg32(priv, IPW_EVENT_REG); 896 led |= priv->led_association_on; 897 898 led = ipw_register_toggle(led); 899 900 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 901 ipw_write_reg32(priv, IPW_EVENT_REG, led); 902 903 priv->status |= STATUS_LED_LINK_ON; 904 905 /* If we aren't associated, schedule turning the LED off */ 906 if (!(priv->status & STATUS_ASSOCIATED)) 907 schedule_delayed_work(&priv->led_link_off, 908 LD_TIME_LINK_ON); 909 } 910 911 spin_unlock_irqrestore(&priv->lock, flags); 912 } 913 914 static void ipw_bg_led_link_on(struct work_struct *work) 915 { 916 struct ipw_priv *priv = 917 container_of(work, struct ipw_priv, led_link_on.work); 918 mutex_lock(&priv->mutex); 919 ipw_led_link_on(priv); 920 mutex_unlock(&priv->mutex); 921 } 922 923 static void ipw_led_link_off(struct ipw_priv *priv) 924 { 925 unsigned long flags; 926 u32 led; 927 928 /* If configured not to use LEDs, or nic type is 1, 929 * then we don't goggle the LINK led. 
*/ 930 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1) 931 return; 932 933 spin_lock_irqsave(&priv->lock, flags); 934 935 if (priv->status & STATUS_LED_LINK_ON) { 936 led = ipw_read_reg32(priv, IPW_EVENT_REG); 937 led &= priv->led_association_off; 938 led = ipw_register_toggle(led); 939 940 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 941 ipw_write_reg32(priv, IPW_EVENT_REG, led); 942 943 IPW_DEBUG_LED("Link LED Off\n"); 944 945 priv->status &= ~STATUS_LED_LINK_ON; 946 947 /* If we aren't associated and the radio is on, schedule 948 * turning the LED on (blink while unassociated) */ 949 if (!(priv->status & STATUS_RF_KILL_MASK) && 950 !(priv->status & STATUS_ASSOCIATED)) 951 schedule_delayed_work(&priv->led_link_on, 952 LD_TIME_LINK_OFF); 953 954 } 955 956 spin_unlock_irqrestore(&priv->lock, flags); 957 } 958 959 static void ipw_bg_led_link_off(struct work_struct *work) 960 { 961 struct ipw_priv *priv = 962 container_of(work, struct ipw_priv, led_link_off.work); 963 mutex_lock(&priv->mutex); 964 ipw_led_link_off(priv); 965 mutex_unlock(&priv->mutex); 966 } 967 968 static void __ipw_led_activity_on(struct ipw_priv *priv) 969 { 970 u32 led; 971 972 if (priv->config & CFG_NO_LED) 973 return; 974 975 if (priv->status & STATUS_RF_KILL_MASK) 976 return; 977 978 if (!(priv->status & STATUS_LED_ACT_ON)) { 979 led = ipw_read_reg32(priv, IPW_EVENT_REG); 980 led |= priv->led_activity_on; 981 982 led = ipw_register_toggle(led); 983 984 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 985 ipw_write_reg32(priv, IPW_EVENT_REG, led); 986 987 IPW_DEBUG_LED("Activity LED On\n"); 988 989 priv->status |= STATUS_LED_ACT_ON; 990 991 cancel_delayed_work(&priv->led_act_off); 992 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON); 993 } else { 994 /* Reschedule LED off for full time period */ 995 cancel_delayed_work(&priv->led_act_off); 996 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON); 997 } 998 } 999 1000 #if 0 1001 void ipw_led_activity_on(struct ipw_priv *priv) 1002 { 1003 unsigned long flags; 1004 spin_lock_irqsave(&priv->lock, flags); 1005 __ipw_led_activity_on(priv); 1006 spin_unlock_irqrestore(&priv->lock, flags); 1007 } 1008 #endif /* 0 */ 1009 1010 static void ipw_led_activity_off(struct ipw_priv *priv) 1011 { 1012 unsigned long flags; 1013 u32 led; 1014 1015 if (priv->config & CFG_NO_LED) 1016 return; 1017 1018 spin_lock_irqsave(&priv->lock, flags); 1019 1020 if (priv->status & STATUS_LED_ACT_ON) { 1021 led = ipw_read_reg32(priv, IPW_EVENT_REG); 1022 led &= priv->led_activity_off; 1023 1024 led = ipw_register_toggle(led); 1025 1026 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 1027 ipw_write_reg32(priv, IPW_EVENT_REG, led); 1028 1029 IPW_DEBUG_LED("Activity LED Off\n"); 1030 1031 priv->status &= ~STATUS_LED_ACT_ON; 1032 } 1033 1034 spin_unlock_irqrestore(&priv->lock, flags); 1035 } 1036 1037 static void ipw_bg_led_activity_off(struct work_struct *work) 1038 { 1039 struct ipw_priv *priv = 1040 container_of(work, struct ipw_priv, led_act_off.work); 1041 mutex_lock(&priv->mutex); 1042 ipw_led_activity_off(priv); 1043 mutex_unlock(&priv->mutex); 1044 } 1045 1046 static void ipw_led_band_on(struct ipw_priv *priv) 1047 { 1048 unsigned long flags; 1049 u32 led; 1050 1051 /* Only nic type 1 supports mode LEDs */ 1052 if (priv->config & CFG_NO_LED || 1053 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network) 1054 return; 1055 1056 spin_lock_irqsave(&priv->lock, flags); 1057 1058 led = ipw_read_reg32(priv, IPW_EVENT_REG); 1059 if (priv->assoc_network->mode == IEEE_A) { 1060 led |= 
priv->led_ofdm_on; 1061 led &= priv->led_association_off; 1062 IPW_DEBUG_LED("Mode LED On: 802.11a\n"); 1063 } else if (priv->assoc_network->mode == IEEE_G) { 1064 led |= priv->led_ofdm_on; 1065 led |= priv->led_association_on; 1066 IPW_DEBUG_LED("Mode LED On: 802.11g\n"); 1067 } else { 1068 led &= priv->led_ofdm_off; 1069 led |= priv->led_association_on; 1070 IPW_DEBUG_LED("Mode LED On: 802.11b\n"); 1071 } 1072 1073 led = ipw_register_toggle(led); 1074 1075 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 1076 ipw_write_reg32(priv, IPW_EVENT_REG, led); 1077 1078 spin_unlock_irqrestore(&priv->lock, flags); 1079 } 1080 1081 static void ipw_led_band_off(struct ipw_priv *priv) 1082 { 1083 unsigned long flags; 1084 u32 led; 1085 1086 /* Only nic type 1 supports mode LEDs */ 1087 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1) 1088 return; 1089 1090 spin_lock_irqsave(&priv->lock, flags); 1091 1092 led = ipw_read_reg32(priv, IPW_EVENT_REG); 1093 led &= priv->led_ofdm_off; 1094 led &= priv->led_association_off; 1095 1096 led = ipw_register_toggle(led); 1097 1098 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 1099 ipw_write_reg32(priv, IPW_EVENT_REG, led); 1100 1101 spin_unlock_irqrestore(&priv->lock, flags); 1102 } 1103 1104 static void ipw_led_radio_on(struct ipw_priv *priv) 1105 { 1106 ipw_led_link_on(priv); 1107 } 1108 1109 static void ipw_led_radio_off(struct ipw_priv *priv) 1110 { 1111 ipw_led_activity_off(priv); 1112 ipw_led_link_off(priv); 1113 } 1114 1115 static void ipw_led_link_up(struct ipw_priv *priv) 1116 { 1117 /* Set the Link Led on for all nic types */ 1118 ipw_led_link_on(priv); 1119 } 1120 1121 static void ipw_led_link_down(struct ipw_priv *priv) 1122 { 1123 ipw_led_activity_off(priv); 1124 ipw_led_link_off(priv); 1125 1126 if (priv->status & STATUS_RF_KILL_MASK) 1127 ipw_led_radio_off(priv); 1128 } 1129 1130 static void ipw_led_init(struct ipw_priv *priv) 1131 { 1132 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE]; 1133 1134 /* Set the default PINs for the link and activity leds */ 1135 priv->led_activity_on = IPW_ACTIVITY_LED; 1136 priv->led_activity_off = ~(IPW_ACTIVITY_LED); 1137 1138 priv->led_association_on = IPW_ASSOCIATED_LED; 1139 priv->led_association_off = ~(IPW_ASSOCIATED_LED); 1140 1141 /* Set the default PINs for the OFDM leds */ 1142 priv->led_ofdm_on = IPW_OFDM_LED; 1143 priv->led_ofdm_off = ~(IPW_OFDM_LED); 1144 1145 switch (priv->nic_type) { 1146 case EEPROM_NIC_TYPE_1: 1147 /* In this NIC type, the LEDs are reversed.... 
*/ 1148 priv->led_activity_on = IPW_ASSOCIATED_LED; 1149 priv->led_activity_off = ~(IPW_ASSOCIATED_LED); 1150 priv->led_association_on = IPW_ACTIVITY_LED; 1151 priv->led_association_off = ~(IPW_ACTIVITY_LED); 1152 1153 if (!(priv->config & CFG_NO_LED)) 1154 ipw_led_band_on(priv); 1155 1156 /* And we don't blink link LEDs for this nic, so 1157 * just return here */ 1158 return; 1159 1160 case EEPROM_NIC_TYPE_3: 1161 case EEPROM_NIC_TYPE_2: 1162 case EEPROM_NIC_TYPE_4: 1163 case EEPROM_NIC_TYPE_0: 1164 break; 1165 1166 default: 1167 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n", 1168 priv->nic_type); 1169 priv->nic_type = EEPROM_NIC_TYPE_0; 1170 break; 1171 } 1172 1173 if (!(priv->config & CFG_NO_LED)) { 1174 if (priv->status & STATUS_ASSOCIATED) 1175 ipw_led_link_on(priv); 1176 else 1177 ipw_led_link_off(priv); 1178 } 1179 } 1180 1181 static void ipw_led_shutdown(struct ipw_priv *priv) 1182 { 1183 ipw_led_activity_off(priv); 1184 ipw_led_link_off(priv); 1185 ipw_led_band_off(priv); 1186 cancel_delayed_work(&priv->led_link_on); 1187 cancel_delayed_work(&priv->led_link_off); 1188 cancel_delayed_work(&priv->led_act_off); 1189 } 1190 1191 /* 1192 * The following adds a new attribute to the sysfs representation 1193 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/) 1194 * used for controlling the debug level. 1195 * 1196 * See the level definitions in ipw for details. 1197 */ 1198 static ssize_t debug_level_show(struct device_driver *d, char *buf) 1199 { 1200 return sprintf(buf, "0x%08X\n", ipw_debug_level); 1201 } 1202 1203 static ssize_t debug_level_store(struct device_driver *d, const char *buf, 1204 size_t count) 1205 { 1206 char *p = (char *)buf; 1207 u32 val; 1208 1209 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { 1210 p++; 1211 if (p[0] == 'x' || p[0] == 'X') 1212 p++; 1213 val = simple_strtoul(p, &p, 16); 1214 } else 1215 val = simple_strtoul(p, &p, 10); 1216 if (p == buf) 1217 printk(KERN_INFO DRV_NAME 1218 ": %s is not in hex or decimal form.\n", buf); 1219 else 1220 ipw_debug_level = val; 1221 1222 return strnlen(buf, count); 1223 } 1224 static DRIVER_ATTR_RW(debug_level); 1225 1226 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv) 1227 { 1228 /* length = 1st dword in log */ 1229 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG)); 1230 } 1231 1232 static void ipw_capture_event_log(struct ipw_priv *priv, 1233 u32 log_len, struct ipw_event *log) 1234 { 1235 u32 base; 1236 1237 if (log_len) { 1238 base = ipw_read32(priv, IPW_EVENT_LOG); 1239 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32), 1240 (u8 *) log, sizeof(*log) * log_len); 1241 } 1242 } 1243 1244 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv) 1245 { 1246 struct ipw_fw_error *error; 1247 u32 log_len = ipw_get_event_log_len(priv); 1248 u32 base = ipw_read32(priv, IPW_ERROR_LOG); 1249 u32 elem_len = ipw_read_reg32(priv, base); 1250 1251 error = kmalloc(sizeof(*error) + 1252 sizeof(*error->elem) * elem_len + 1253 sizeof(*error->log) * log_len, GFP_ATOMIC); 1254 if (!error) { 1255 IPW_ERROR("Memory allocation for firmware error log " 1256 "failed.\n"); 1257 return NULL; 1258 } 1259 error->jiffies = jiffies; 1260 error->status = priv->status; 1261 error->config = priv->config; 1262 error->elem_len = elem_len; 1263 error->log_len = log_len; 1264 error->elem = (struct ipw_error_elem *)error->payload; 1265 error->log = (struct ipw_event *)(error->elem + elem_len); 1266 1267 ipw_capture_event_log(priv, log_len, error->log); 1268 1269 if 
(elem_len) 1270 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem, 1271 sizeof(*error->elem) * elem_len); 1272 1273 return error; 1274 } 1275 1276 static ssize_t show_event_log(struct device *d, 1277 struct device_attribute *attr, char *buf) 1278 { 1279 struct ipw_priv *priv = dev_get_drvdata(d); 1280 u32 log_len = ipw_get_event_log_len(priv); 1281 u32 log_size; 1282 struct ipw_event *log; 1283 u32 len = 0, i; 1284 1285 /* not using min() because of its strict type checking */ 1286 log_size = PAGE_SIZE / sizeof(*log) > log_len ? 1287 sizeof(*log) * log_len : PAGE_SIZE; 1288 log = kzalloc(log_size, GFP_KERNEL); 1289 if (!log) { 1290 IPW_ERROR("Unable to allocate memory for log\n"); 1291 return 0; 1292 } 1293 log_len = log_size / sizeof(*log); 1294 ipw_capture_event_log(priv, log_len, log); 1295 1296 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len); 1297 for (i = 0; i < log_len; i++) 1298 len += snprintf(buf + len, PAGE_SIZE - len, 1299 "\n%08X%08X%08X", 1300 log[i].time, log[i].event, log[i].data); 1301 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1302 kfree(log); 1303 return len; 1304 } 1305 1306 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL); 1307 1308 static ssize_t show_error(struct device *d, 1309 struct device_attribute *attr, char *buf) 1310 { 1311 struct ipw_priv *priv = dev_get_drvdata(d); 1312 u32 len = 0, i; 1313 if (!priv->error) 1314 return 0; 1315 len += snprintf(buf + len, PAGE_SIZE - len, 1316 "%08lX%08X%08X%08X", 1317 priv->error->jiffies, 1318 priv->error->status, 1319 priv->error->config, priv->error->elem_len); 1320 for (i = 0; i < priv->error->elem_len; i++) 1321 len += snprintf(buf + len, PAGE_SIZE - len, 1322 "\n%08X%08X%08X%08X%08X%08X%08X", 1323 priv->error->elem[i].time, 1324 priv->error->elem[i].desc, 1325 priv->error->elem[i].blink1, 1326 priv->error->elem[i].blink2, 1327 priv->error->elem[i].link1, 1328 priv->error->elem[i].link2, 1329 priv->error->elem[i].data); 1330 1331 len += snprintf(buf + len, PAGE_SIZE - len, 1332 "\n%08X", priv->error->log_len); 1333 for (i = 0; i < priv->error->log_len; i++) 1334 len += snprintf(buf + len, PAGE_SIZE - len, 1335 "\n%08X%08X%08X", 1336 priv->error->log[i].time, 1337 priv->error->log[i].event, 1338 priv->error->log[i].data); 1339 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1340 return len; 1341 } 1342 1343 static ssize_t clear_error(struct device *d, 1344 struct device_attribute *attr, 1345 const char *buf, size_t count) 1346 { 1347 struct ipw_priv *priv = dev_get_drvdata(d); 1348 1349 kfree(priv->error); 1350 priv->error = NULL; 1351 return count; 1352 } 1353 1354 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error); 1355 1356 static ssize_t show_cmd_log(struct device *d, 1357 struct device_attribute *attr, char *buf) 1358 { 1359 struct ipw_priv *priv = dev_get_drvdata(d); 1360 u32 len = 0, i; 1361 if (!priv->cmdlog) 1362 return 0; 1363 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len; 1364 (i != priv->cmdlog_pos) && (len < PAGE_SIZE); 1365 i = (i + 1) % priv->cmdlog_len) { 1366 len += 1367 snprintf(buf + len, PAGE_SIZE - len, 1368 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies, 1369 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd, 1370 priv->cmdlog[i].cmd.len); 1371 len += 1372 snprintk_buf(buf + len, PAGE_SIZE - len, 1373 (u8 *) priv->cmdlog[i].cmd.param, 1374 priv->cmdlog[i].cmd.len); 1375 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1376 } 1377 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1378 return len; 1379 } 1380 1381 static 
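/*
 * show_cmd_log() above treats priv->cmdlog as a ring buffer: it starts
 * at the slot following cmdlog_pos (the oldest entry) and walks forward
 * modulo cmdlog_len until it comes back around (bounded by PAGE_SIZE of
 * output), so entries are printed oldest first.  A minimal sketch of the
 * same traversal, with 'ring', 'len' and 'pos' standing in for the
 * driver's fields:
 *
 *	for (i = (pos + 1) % len; i != pos; i = (i + 1) % len)
 *		print_entry(&ring[i]);	/- print_entry() is a placeholder -/
 */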
DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL); 1382 1383 #ifdef CONFIG_IPW2200_PROMISCUOUS 1384 static void ipw_prom_free(struct ipw_priv *priv); 1385 static int ipw_prom_alloc(struct ipw_priv *priv); 1386 static ssize_t store_rtap_iface(struct device *d, 1387 struct device_attribute *attr, 1388 const char *buf, size_t count) 1389 { 1390 struct ipw_priv *priv = dev_get_drvdata(d); 1391 int rc = 0; 1392 1393 if (count < 1) 1394 return -EINVAL; 1395 1396 switch (buf[0]) { 1397 case '0': 1398 if (!rtap_iface) 1399 return count; 1400 1401 if (netif_running(priv->prom_net_dev)) { 1402 IPW_WARNING("Interface is up. Cannot unregister.\n"); 1403 return count; 1404 } 1405 1406 ipw_prom_free(priv); 1407 rtap_iface = 0; 1408 break; 1409 1410 case '1': 1411 if (rtap_iface) 1412 return count; 1413 1414 rc = ipw_prom_alloc(priv); 1415 if (!rc) 1416 rtap_iface = 1; 1417 break; 1418 1419 default: 1420 return -EINVAL; 1421 } 1422 1423 if (rc) { 1424 IPW_ERROR("Failed to register promiscuous network " 1425 "device (error %d).\n", rc); 1426 } 1427 1428 return count; 1429 } 1430 1431 static ssize_t show_rtap_iface(struct device *d, 1432 struct device_attribute *attr, 1433 char *buf) 1434 { 1435 struct ipw_priv *priv = dev_get_drvdata(d); 1436 if (rtap_iface) 1437 return sprintf(buf, "%s", priv->prom_net_dev->name); 1438 else { 1439 buf[0] = '-'; 1440 buf[1] = '1'; 1441 buf[2] = '\0'; 1442 return 3; 1443 } 1444 } 1445 1446 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface, 1447 store_rtap_iface); 1448 1449 static ssize_t store_rtap_filter(struct device *d, 1450 struct device_attribute *attr, 1451 const char *buf, size_t count) 1452 { 1453 struct ipw_priv *priv = dev_get_drvdata(d); 1454 1455 if (!priv->prom_priv) { 1456 IPW_ERROR("Attempting to set filter without " 1457 "rtap_iface enabled.\n"); 1458 return -EPERM; 1459 } 1460 1461 priv->prom_priv->filter = simple_strtol(buf, NULL, 0); 1462 1463 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n", 1464 BIT_ARG16(priv->prom_priv->filter)); 1465 1466 return count; 1467 } 1468 1469 static ssize_t show_rtap_filter(struct device *d, 1470 struct device_attribute *attr, 1471 char *buf) 1472 { 1473 struct ipw_priv *priv = dev_get_drvdata(d); 1474 return sprintf(buf, "0x%04X", 1475 priv->prom_priv ? priv->prom_priv->filter : 0); 1476 } 1477 1478 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter, 1479 store_rtap_filter); 1480 #endif 1481 1482 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, 1483 char *buf) 1484 { 1485 struct ipw_priv *priv = dev_get_drvdata(d); 1486 return sprintf(buf, "%d\n", priv->ieee->scan_age); 1487 } 1488 1489 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, 1490 const char *buf, size_t count) 1491 { 1492 struct ipw_priv *priv = dev_get_drvdata(d); 1493 struct net_device *dev = priv->net_dev; 1494 char buffer[] = "00000000"; 1495 unsigned long len = 1496 (sizeof(buffer) - 1) > count ? 
count : sizeof(buffer) - 1; 1497 unsigned long val; 1498 char *p = buffer; 1499 1500 IPW_DEBUG_INFO("enter\n"); 1501 1502 strncpy(buffer, buf, len); 1503 buffer[len] = 0; 1504 1505 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { 1506 p++; 1507 if (p[0] == 'x' || p[0] == 'X') 1508 p++; 1509 val = simple_strtoul(p, &p, 16); 1510 } else 1511 val = simple_strtoul(p, &p, 10); 1512 if (p == buffer) { 1513 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name); 1514 } else { 1515 priv->ieee->scan_age = val; 1516 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age); 1517 } 1518 1519 IPW_DEBUG_INFO("exit\n"); 1520 return len; 1521 } 1522 1523 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age); 1524 1525 static ssize_t show_led(struct device *d, struct device_attribute *attr, 1526 char *buf) 1527 { 1528 struct ipw_priv *priv = dev_get_drvdata(d); 1529 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1); 1530 } 1531 1532 static ssize_t store_led(struct device *d, struct device_attribute *attr, 1533 const char *buf, size_t count) 1534 { 1535 struct ipw_priv *priv = dev_get_drvdata(d); 1536 1537 IPW_DEBUG_INFO("enter\n"); 1538 1539 if (count == 0) 1540 return 0; 1541 1542 if (*buf == 0) { 1543 IPW_DEBUG_LED("Disabling LED control.\n"); 1544 priv->config |= CFG_NO_LED; 1545 ipw_led_shutdown(priv); 1546 } else { 1547 IPW_DEBUG_LED("Enabling LED control.\n"); 1548 priv->config &= ~CFG_NO_LED; 1549 ipw_led_init(priv); 1550 } 1551 1552 IPW_DEBUG_INFO("exit\n"); 1553 return count; 1554 } 1555 1556 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led); 1557 1558 static ssize_t show_status(struct device *d, 1559 struct device_attribute *attr, char *buf) 1560 { 1561 struct ipw_priv *p = dev_get_drvdata(d); 1562 return sprintf(buf, "0x%08x\n", (int)p->status); 1563 } 1564 1565 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); 1566 1567 static ssize_t show_cfg(struct device *d, struct device_attribute *attr, 1568 char *buf) 1569 { 1570 struct ipw_priv *p = dev_get_drvdata(d); 1571 return sprintf(buf, "0x%08x\n", (int)p->config); 1572 } 1573 1574 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL); 1575 1576 static ssize_t show_nic_type(struct device *d, 1577 struct device_attribute *attr, char *buf) 1578 { 1579 struct ipw_priv *priv = dev_get_drvdata(d); 1580 return sprintf(buf, "TYPE: %d\n", priv->nic_type); 1581 } 1582 1583 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL); 1584 1585 static ssize_t show_ucode_version(struct device *d, 1586 struct device_attribute *attr, char *buf) 1587 { 1588 u32 len = sizeof(u32), tmp = 0; 1589 struct ipw_priv *p = dev_get_drvdata(d); 1590 1591 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len)) 1592 return 0; 1593 1594 return sprintf(buf, "0x%08x\n", tmp); 1595 } 1596 1597 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL); 1598 1599 static ssize_t show_rtc(struct device *d, struct device_attribute *attr, 1600 char *buf) 1601 { 1602 u32 len = sizeof(u32), tmp = 0; 1603 struct ipw_priv *p = dev_get_drvdata(d); 1604 1605 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len)) 1606 return 0; 1607 1608 return sprintf(buf, "0x%08x\n", tmp); 1609 } 1610 1611 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL); 1612 1613 /* 1614 * Add a device attribute to view/control the delay between eeprom 1615 * operations. 
 */
static ssize_t show_eeprom_delay(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	struct ipw_priv *p = dev_get_drvdata(d);
	int n = p->eeprom_delay;
	return sprintf(buf, "%i\n", n);
}
static ssize_t store_eeprom_delay(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ipw_priv *p = dev_get_drvdata(d);
	sscanf(buf, "%i", &p->eeprom_delay);
	return strnlen(buf, count);
}

static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
		   show_eeprom_delay, store_eeprom_delay);

static ssize_t show_command_event_reg(struct device *d,
				      struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *p = dev_get_drvdata(d);

	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
	return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_command_event_reg(struct device *d,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	u32 reg;
	struct ipw_priv *p = dev_get_drvdata(d);

	sscanf(buf, "%x", &reg);
	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
	return strnlen(buf, count);
}

static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
		   show_command_event_reg, store_command_event_reg);

static ssize_t show_mem_gpio_reg(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *p = dev_get_drvdata(d);

	reg = ipw_read_reg32(p, 0x301100);
	return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_mem_gpio_reg(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	u32 reg;
	struct ipw_priv *p = dev_get_drvdata(d);

	sscanf(buf, "%x", &reg);
	ipw_write_reg32(p, 0x301100, reg);
	return strnlen(buf, count);
}

static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
		   show_mem_gpio_reg, store_mem_gpio_reg);

static ssize_t show_indirect_dword(struct device *d,
				   struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (priv->status & STATUS_INDIRECT_DWORD)
		reg = ipw_read_reg32(priv, priv->indirect_dword);
	else
		reg = 0;

	return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_indirect_dword(struct device *d,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->indirect_dword);
	priv->status |= STATUS_INDIRECT_DWORD;
	return strnlen(buf, count);
}

static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
		   show_indirect_dword, store_indirect_dword);

static ssize_t show_indirect_byte(struct device *d,
				  struct device_attribute *attr, char *buf)
{
	u8 reg = 0;
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (priv->status & STATUS_INDIRECT_BYTE)
		reg = ipw_read_reg8(priv, priv->indirect_byte);
	else
		reg = 0;

	return sprintf(buf, "0x%02x\n", reg);
}
static ssize_t store_indirect_byte(struct device *d,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->indirect_byte);
	priv->status |= STATUS_INDIRECT_BYTE;
	return strnlen(buf, count);
}

static
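/*
 * The indirect_dword and indirect_byte attributes above (and direct_dword
 * below) form a small debugging interface: writing a hex address latches
 * it in priv and sets the matching status flag, and a subsequent read
 * returns the current value at that address.  A hypothetical session
 * might look like this (the sysfs path depends on the PCI slot and is
 * only an example):
 *
 *	# echo 0x00300004 > /sys/bus/pci/devices/0000:02:00.0/indirect_dword
 *	# cat /sys/bus/pci/devices/0000:02:00.0/indirect_dword
 *	0x00000000		(whatever the hardware happens to return)
 */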
DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
	    show_indirect_byte, store_indirect_byte);

static ssize_t show_direct_dword(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (priv->status & STATUS_DIRECT_DWORD)
		reg = ipw_read32(priv, priv->direct_dword);
	else
		reg = 0;

	return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_direct_dword(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->direct_dword);
	priv->status |= STATUS_DIRECT_DWORD;
	return strnlen(buf, count);
}

static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
		   show_direct_dword, store_direct_dword);

static int rf_kill_active(struct ipw_priv *priv)
{
	if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
		priv->status |= STATUS_RF_KILL_HW;
		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
	} else {
		priv->status &= ~STATUS_RF_KILL_HW;
		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
	}

	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
}

static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	/* 0 - RF kill not enabled
	   1 - SW based RF kill active (sysfs)
	   2 - HW based RF kill active
	   3 - Both HW and SW based RF kill active */
	struct ipw_priv *priv = dev_get_drvdata(d);
	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
	    (rf_kill_active(priv) ? 0x2 : 0x0);
	return sprintf(buf, "%i\n", val);
}

static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
{
	if ((disable_radio ? 1 : 0) ==
	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
		return 0;

	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
			  disable_radio ?
"OFF" : "ON"); 1799 1800 if (disable_radio) { 1801 priv->status |= STATUS_RF_KILL_SW; 1802 1803 cancel_delayed_work(&priv->request_scan); 1804 cancel_delayed_work(&priv->request_direct_scan); 1805 cancel_delayed_work(&priv->request_passive_scan); 1806 cancel_delayed_work(&priv->scan_event); 1807 schedule_work(&priv->down); 1808 } else { 1809 priv->status &= ~STATUS_RF_KILL_SW; 1810 if (rf_kill_active(priv)) { 1811 IPW_DEBUG_RF_KILL("Can not turn radio back on - " 1812 "disabled by HW switch\n"); 1813 /* Make sure the RF_KILL check timer is running */ 1814 cancel_delayed_work(&priv->rf_kill); 1815 schedule_delayed_work(&priv->rf_kill, 1816 round_jiffies_relative(2 * HZ)); 1817 } else 1818 schedule_work(&priv->up); 1819 } 1820 1821 return 1; 1822 } 1823 1824 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, 1825 const char *buf, size_t count) 1826 { 1827 struct ipw_priv *priv = dev_get_drvdata(d); 1828 1829 ipw_radio_kill_sw(priv, buf[0] == '1'); 1830 1831 return count; 1832 } 1833 1834 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); 1835 1836 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr, 1837 char *buf) 1838 { 1839 struct ipw_priv *priv = dev_get_drvdata(d); 1840 int pos = 0, len = 0; 1841 if (priv->config & CFG_SPEED_SCAN) { 1842 while (priv->speed_scan[pos] != 0) 1843 len += sprintf(&buf[len], "%d ", 1844 priv->speed_scan[pos++]); 1845 return len + sprintf(&buf[len], "\n"); 1846 } 1847 1848 return sprintf(buf, "0\n"); 1849 } 1850 1851 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr, 1852 const char *buf, size_t count) 1853 { 1854 struct ipw_priv *priv = dev_get_drvdata(d); 1855 int channel, pos = 0; 1856 const char *p = buf; 1857 1858 /* list of space separated channels to scan, optionally ending with 0 */ 1859 while ((channel = simple_strtol(p, NULL, 0))) { 1860 if (pos == MAX_SPEED_SCAN - 1) { 1861 priv->speed_scan[pos] = 0; 1862 break; 1863 } 1864 1865 if (libipw_is_valid_channel(priv->ieee, channel)) 1866 priv->speed_scan[pos++] = channel; 1867 else 1868 IPW_WARNING("Skipping invalid channel request: %d\n", 1869 channel); 1870 p = strchr(p, ' '); 1871 if (!p) 1872 break; 1873 while (*p == ' ' || *p == '\t') 1874 p++; 1875 } 1876 1877 if (pos == 0) 1878 priv->config &= ~CFG_SPEED_SCAN; 1879 else { 1880 priv->speed_scan_pos = 0; 1881 priv->config |= CFG_SPEED_SCAN; 1882 } 1883 1884 return count; 1885 } 1886 1887 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan, 1888 store_speed_scan); 1889 1890 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr, 1891 char *buf) 1892 { 1893 struct ipw_priv *priv = dev_get_drvdata(d); 1894 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? 
'1' : '0'); 1895 } 1896 1897 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr, 1898 const char *buf, size_t count) 1899 { 1900 struct ipw_priv *priv = dev_get_drvdata(d); 1901 if (buf[0] == '1') 1902 priv->config |= CFG_NET_STATS; 1903 else 1904 priv->config &= ~CFG_NET_STATS; 1905 1906 return count; 1907 } 1908 1909 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO, 1910 show_net_stats, store_net_stats); 1911 1912 static ssize_t show_channels(struct device *d, 1913 struct device_attribute *attr, 1914 char *buf) 1915 { 1916 struct ipw_priv *priv = dev_get_drvdata(d); 1917 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 1918 int len = 0, i; 1919 1920 len = sprintf(&buf[len], 1921 "Displaying %d channels in 2.4Ghz band " 1922 "(802.11bg):\n", geo->bg_channels); 1923 1924 for (i = 0; i < geo->bg_channels; i++) { 1925 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n", 1926 geo->bg[i].channel, 1927 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ? 1928 " (radar spectrum)" : "", 1929 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) || 1930 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)) 1931 ? "" : ", IBSS", 1932 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ? 1933 "passive only" : "active/passive", 1934 geo->bg[i].flags & LIBIPW_CH_B_ONLY ? 1935 "B" : "B/G"); 1936 } 1937 1938 len += sprintf(&buf[len], 1939 "Displaying %d channels in 5.2Ghz band " 1940 "(802.11a):\n", geo->a_channels); 1941 for (i = 0; i < geo->a_channels; i++) { 1942 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n", 1943 geo->a[i].channel, 1944 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ? 1945 " (radar spectrum)" : "", 1946 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) || 1947 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)) 1948 ? "" : ", IBSS", 1949 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ? 
1950 "passive only" : "active/passive"); 1951 } 1952 1953 return len; 1954 } 1955 1956 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 1957 1958 static void notify_wx_assoc_event(struct ipw_priv *priv) 1959 { 1960 union iwreq_data wrqu; 1961 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1962 if (priv->status & STATUS_ASSOCIATED) 1963 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); 1964 else 1965 eth_zero_addr(wrqu.ap_addr.sa_data); 1966 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); 1967 } 1968 1969 static void ipw_irq_tasklet(struct ipw_priv *priv) 1970 { 1971 u32 inta, inta_mask, handled = 0; 1972 unsigned long flags; 1973 int rc = 0; 1974 1975 spin_lock_irqsave(&priv->irq_lock, flags); 1976 1977 inta = ipw_read32(priv, IPW_INTA_RW); 1978 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); 1979 1980 if (inta == 0xFFFFFFFF) { 1981 /* Hardware disappeared */ 1982 IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n"); 1983 /* Only handle the cached INTA values */ 1984 inta = 0; 1985 } 1986 inta &= (IPW_INTA_MASK_ALL & inta_mask); 1987 1988 /* Add any cached INTA values that need to be handled */ 1989 inta |= priv->isr_inta; 1990 1991 spin_unlock_irqrestore(&priv->irq_lock, flags); 1992 1993 spin_lock_irqsave(&priv->lock, flags); 1994 1995 /* handle all the justifications for the interrupt */ 1996 if (inta & IPW_INTA_BIT_RX_TRANSFER) { 1997 ipw_rx(priv); 1998 handled |= IPW_INTA_BIT_RX_TRANSFER; 1999 } 2000 2001 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) { 2002 IPW_DEBUG_HC("Command completed.\n"); 2003 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1); 2004 priv->status &= ~STATUS_HCMD_ACTIVE; 2005 wake_up_interruptible(&priv->wait_command_queue); 2006 handled |= IPW_INTA_BIT_TX_CMD_QUEUE; 2007 } 2008 2009 if (inta & IPW_INTA_BIT_TX_QUEUE_1) { 2010 IPW_DEBUG_TX("TX_QUEUE_1\n"); 2011 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0); 2012 handled |= IPW_INTA_BIT_TX_QUEUE_1; 2013 } 2014 2015 if (inta & IPW_INTA_BIT_TX_QUEUE_2) { 2016 IPW_DEBUG_TX("TX_QUEUE_2\n"); 2017 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1); 2018 handled |= IPW_INTA_BIT_TX_QUEUE_2; 2019 } 2020 2021 if (inta & IPW_INTA_BIT_TX_QUEUE_3) { 2022 IPW_DEBUG_TX("TX_QUEUE_3\n"); 2023 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2); 2024 handled |= IPW_INTA_BIT_TX_QUEUE_3; 2025 } 2026 2027 if (inta & IPW_INTA_BIT_TX_QUEUE_4) { 2028 IPW_DEBUG_TX("TX_QUEUE_4\n"); 2029 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3); 2030 handled |= IPW_INTA_BIT_TX_QUEUE_4; 2031 } 2032 2033 if (inta & IPW_INTA_BIT_STATUS_CHANGE) { 2034 IPW_WARNING("STATUS_CHANGE\n"); 2035 handled |= IPW_INTA_BIT_STATUS_CHANGE; 2036 } 2037 2038 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) { 2039 IPW_WARNING("TX_PERIOD_EXPIRED\n"); 2040 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED; 2041 } 2042 2043 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) { 2044 IPW_WARNING("HOST_CMD_DONE\n"); 2045 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE; 2046 } 2047 2048 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) { 2049 IPW_WARNING("FW_INITIALIZATION_DONE\n"); 2050 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE; 2051 } 2052 2053 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) { 2054 IPW_WARNING("PHY_OFF_DONE\n"); 2055 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE; 2056 } 2057 2058 if (inta & IPW_INTA_BIT_RF_KILL_DONE) { 2059 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n"); 2060 priv->status |= STATUS_RF_KILL_HW; 2061 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true); 2062 wake_up_interruptible(&priv->wait_command_queue); 2063 priv->status &= 
~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); 2064 cancel_delayed_work(&priv->request_scan); 2065 cancel_delayed_work(&priv->request_direct_scan); 2066 cancel_delayed_work(&priv->request_passive_scan); 2067 cancel_delayed_work(&priv->scan_event); 2068 schedule_work(&priv->link_down); 2069 schedule_delayed_work(&priv->rf_kill, 2 * HZ); 2070 handled |= IPW_INTA_BIT_RF_KILL_DONE; 2071 } 2072 2073 if (inta & IPW_INTA_BIT_FATAL_ERROR) { 2074 IPW_WARNING("Firmware error detected. Restarting.\n"); 2075 if (priv->error) { 2076 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n"); 2077 if (ipw_debug_level & IPW_DL_FW_ERRORS) { 2078 struct ipw_fw_error *error = 2079 ipw_alloc_error_log(priv); 2080 ipw_dump_error_log(priv, error); 2081 kfree(error); 2082 } 2083 } else { 2084 priv->error = ipw_alloc_error_log(priv); 2085 if (priv->error) 2086 IPW_DEBUG_FW("Sysfs 'error' log captured.\n"); 2087 else 2088 IPW_DEBUG_FW("Error allocating sysfs 'error' " 2089 "log.\n"); 2090 if (ipw_debug_level & IPW_DL_FW_ERRORS) 2091 ipw_dump_error_log(priv, priv->error); 2092 } 2093 2094 /* XXX: If hardware encryption is for WPA/WPA2, 2095 * we have to notify the supplicant. */ 2096 if (priv->ieee->sec.encrypt) { 2097 priv->status &= ~STATUS_ASSOCIATED; 2098 notify_wx_assoc_event(priv); 2099 } 2100 2101 /* Keep the restart process from trying to send host 2102 * commands by clearing the INIT status bit */ 2103 priv->status &= ~STATUS_INIT; 2104 2105 /* Cancel currently queued command. */ 2106 priv->status &= ~STATUS_HCMD_ACTIVE; 2107 wake_up_interruptible(&priv->wait_command_queue); 2108 2109 schedule_work(&priv->adapter_restart); 2110 handled |= IPW_INTA_BIT_FATAL_ERROR; 2111 } 2112 2113 if (inta & IPW_INTA_BIT_PARITY_ERROR) { 2114 IPW_ERROR("Parity error\n"); 2115 handled |= IPW_INTA_BIT_PARITY_ERROR; 2116 } 2117 2118 if (handled != inta) { 2119 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); 2120 } 2121 2122 spin_unlock_irqrestore(&priv->lock, flags); 2123 2124 /* enable all interrupts */ 2125 ipw_enable_interrupts(priv); 2126 } 2127 2128 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x 2129 static char *get_cmd_string(u8 cmd) 2130 { 2131 switch (cmd) { 2132 IPW_CMD(HOST_COMPLETE); 2133 IPW_CMD(POWER_DOWN); 2134 IPW_CMD(SYSTEM_CONFIG); 2135 IPW_CMD(MULTICAST_ADDRESS); 2136 IPW_CMD(SSID); 2137 IPW_CMD(ADAPTER_ADDRESS); 2138 IPW_CMD(PORT_TYPE); 2139 IPW_CMD(RTS_THRESHOLD); 2140 IPW_CMD(FRAG_THRESHOLD); 2141 IPW_CMD(POWER_MODE); 2142 IPW_CMD(WEP_KEY); 2143 IPW_CMD(TGI_TX_KEY); 2144 IPW_CMD(SCAN_REQUEST); 2145 IPW_CMD(SCAN_REQUEST_EXT); 2146 IPW_CMD(ASSOCIATE); 2147 IPW_CMD(SUPPORTED_RATES); 2148 IPW_CMD(SCAN_ABORT); 2149 IPW_CMD(TX_FLUSH); 2150 IPW_CMD(QOS_PARAMETERS); 2151 IPW_CMD(DINO_CONFIG); 2152 IPW_CMD(RSN_CAPABILITIES); 2153 IPW_CMD(RX_KEY); 2154 IPW_CMD(CARD_DISABLE); 2155 IPW_CMD(SEED_NUMBER); 2156 IPW_CMD(TX_POWER); 2157 IPW_CMD(COUNTRY_INFO); 2158 IPW_CMD(AIRONET_INFO); 2159 IPW_CMD(AP_TX_POWER); 2160 IPW_CMD(CCKM_INFO); 2161 IPW_CMD(CCX_VER_INFO); 2162 IPW_CMD(SET_CALIBRATION); 2163 IPW_CMD(SENSITIVITY_CALIB); 2164 IPW_CMD(RETRY_LIMIT); 2165 IPW_CMD(IPW_PRE_POWER_DOWN); 2166 IPW_CMD(VAP_BEACON_TEMPLATE); 2167 IPW_CMD(VAP_DTIM_PERIOD); 2168 IPW_CMD(EXT_SUPPORTED_RATES); 2169 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT); 2170 IPW_CMD(VAP_QUIET_INTERVALS); 2171 IPW_CMD(VAP_CHANNEL_SWITCH); 2172 IPW_CMD(VAP_MANDATORY_CHANNELS); 2173 IPW_CMD(VAP_CELL_PWR_LIMIT); 2174 IPW_CMD(VAP_CF_PARAM_SET); 2175 IPW_CMD(VAP_SET_BEACONING_STATE); 2176 IPW_CMD(MEASUREMENT); 2177 IPW_CMD(POWER_CAPABILITY); 2178 
IPW_CMD(SUPPORTED_CHANNELS); 2179 IPW_CMD(TPC_REPORT); 2180 IPW_CMD(WME_INFO); 2181 IPW_CMD(PRODUCTION_COMMAND); 2182 default: 2183 return "UNKNOWN"; 2184 } 2185 } 2186 2187 #define HOST_COMPLETE_TIMEOUT HZ 2188 2189 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) 2190 { 2191 int rc = 0; 2192 unsigned long flags; 2193 unsigned long now, end; 2194 2195 spin_lock_irqsave(&priv->lock, flags); 2196 if (priv->status & STATUS_HCMD_ACTIVE) { 2197 IPW_ERROR("Failed to send %s: Already sending a command.\n", 2198 get_cmd_string(cmd->cmd)); 2199 spin_unlock_irqrestore(&priv->lock, flags); 2200 return -EAGAIN; 2201 } 2202 2203 priv->status |= STATUS_HCMD_ACTIVE; 2204 2205 if (priv->cmdlog) { 2206 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies; 2207 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd; 2208 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len; 2209 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param, 2210 cmd->len); 2211 priv->cmdlog[priv->cmdlog_pos].retcode = -1; 2212 } 2213 2214 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n", 2215 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len, 2216 priv->status); 2217 2218 #ifndef DEBUG_CMD_WEP_KEY 2219 if (cmd->cmd == IPW_CMD_WEP_KEY) 2220 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n"); 2221 else 2222 #endif 2223 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len); 2224 2225 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0); 2226 if (rc) { 2227 priv->status &= ~STATUS_HCMD_ACTIVE; 2228 IPW_ERROR("Failed to send %s: Reason %d\n", 2229 get_cmd_string(cmd->cmd), rc); 2230 spin_unlock_irqrestore(&priv->lock, flags); 2231 goto exit; 2232 } 2233 spin_unlock_irqrestore(&priv->lock, flags); 2234 2235 now = jiffies; 2236 end = now + HOST_COMPLETE_TIMEOUT; 2237 again: 2238 rc = wait_event_interruptible_timeout(priv->wait_command_queue, 2239 !(priv-> 2240 status & STATUS_HCMD_ACTIVE), 2241 end - now); 2242 if (rc < 0) { 2243 now = jiffies; 2244 if (time_before(now, end)) 2245 goto again; 2246 rc = 0; 2247 } 2248 2249 if (rc == 0) { 2250 spin_lock_irqsave(&priv->lock, flags); 2251 if (priv->status & STATUS_HCMD_ACTIVE) { 2252 IPW_ERROR("Failed to send %s: Command timed out.\n", 2253 get_cmd_string(cmd->cmd)); 2254 priv->status &= ~STATUS_HCMD_ACTIVE; 2255 spin_unlock_irqrestore(&priv->lock, flags); 2256 rc = -EIO; 2257 goto exit; 2258 } 2259 spin_unlock_irqrestore(&priv->lock, flags); 2260 } else 2261 rc = 0; 2262 2263 if (priv->status & STATUS_RF_KILL_HW) { 2264 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n", 2265 get_cmd_string(cmd->cmd)); 2266 rc = -EIO; 2267 goto exit; 2268 } 2269 2270 exit: 2271 if (priv->cmdlog) { 2272 priv->cmdlog[priv->cmdlog_pos++].retcode = rc; 2273 priv->cmdlog_pos %= priv->cmdlog_len; 2274 } 2275 return rc; 2276 } 2277 2278 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command) 2279 { 2280 struct host_cmd cmd = { 2281 .cmd = command, 2282 }; 2283 2284 return __ipw_send_cmd(priv, &cmd); 2285 } 2286 2287 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len, 2288 void *data) 2289 { 2290 struct host_cmd cmd = { 2291 .cmd = command, 2292 .len = len, 2293 .param = data, 2294 }; 2295 2296 return __ipw_send_cmd(priv, &cmd); 2297 } 2298 2299 static int ipw_send_host_complete(struct ipw_priv *priv) 2300 { 2301 if (!priv) { 2302 IPW_ERROR("Invalid args\n"); 2303 return -1; 2304 } 2305 2306 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE); 2307 } 2308 2309 static int ipw_send_system_config(struct ipw_priv *priv) 2310 { 2311 
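/* Push the driver's cached configuration (priv->sys_config) to the
 * firmware as a single SYSTEM_CONFIG host command; callers fill in
 * priv->sys_config before invoking this. */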
return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, 2312 sizeof(priv->sys_config), 2313 &priv->sys_config); 2314 } 2315 2316 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) 2317 { 2318 if (!priv || !ssid) { 2319 IPW_ERROR("Invalid args\n"); 2320 return -1; 2321 } 2322 2323 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE), 2324 ssid); 2325 } 2326 2327 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) 2328 { 2329 if (!priv || !mac) { 2330 IPW_ERROR("Invalid args\n"); 2331 return -1; 2332 } 2333 2334 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n", 2335 priv->net_dev->name, mac); 2336 2337 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); 2338 } 2339 2340 static void ipw_adapter_restart(void *adapter) 2341 { 2342 struct ipw_priv *priv = adapter; 2343 2344 if (priv->status & STATUS_RF_KILL_MASK) 2345 return; 2346 2347 ipw_down(priv); 2348 2349 if (priv->assoc_network && 2350 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS)) 2351 ipw_remove_current_network(priv); 2352 2353 if (ipw_up(priv)) { 2354 IPW_ERROR("Failed to up device\n"); 2355 return; 2356 } 2357 } 2358 2359 static void ipw_bg_adapter_restart(struct work_struct *work) 2360 { 2361 struct ipw_priv *priv = 2362 container_of(work, struct ipw_priv, adapter_restart); 2363 mutex_lock(&priv->mutex); 2364 ipw_adapter_restart(priv); 2365 mutex_unlock(&priv->mutex); 2366 } 2367 2368 static void ipw_abort_scan(struct ipw_priv *priv); 2369 2370 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) 2371 2372 static void ipw_scan_check(void *data) 2373 { 2374 struct ipw_priv *priv = data; 2375 2376 if (priv->status & STATUS_SCAN_ABORTING) { 2377 IPW_DEBUG_SCAN("Scan completion watchdog resetting " 2378 "adapter after (%dms).\n", 2379 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2380 schedule_work(&priv->adapter_restart); 2381 } else if (priv->status & STATUS_SCANNING) { 2382 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan " 2383 "after (%dms).\n", 2384 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2385 ipw_abort_scan(priv); 2386 schedule_delayed_work(&priv->scan_check, HZ); 2387 } 2388 } 2389 2390 static void ipw_bg_scan_check(struct work_struct *work) 2391 { 2392 struct ipw_priv *priv = 2393 container_of(work, struct ipw_priv, scan_check.work); 2394 mutex_lock(&priv->mutex); 2395 ipw_scan_check(priv); 2396 mutex_unlock(&priv->mutex); 2397 } 2398 2399 static int ipw_send_scan_request_ext(struct ipw_priv *priv, 2400 struct ipw_scan_request_ext *request) 2401 { 2402 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT, 2403 sizeof(*request), request); 2404 } 2405 2406 static int ipw_send_scan_abort(struct ipw_priv *priv) 2407 { 2408 if (!priv) { 2409 IPW_ERROR("Invalid args\n"); 2410 return -1; 2411 } 2412 2413 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT); 2414 } 2415 2416 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) 2417 { 2418 struct ipw_sensitivity_calib calib = { 2419 .beacon_rssi_raw = cpu_to_le16(sens), 2420 }; 2421 2422 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib), 2423 &calib); 2424 } 2425 2426 static int ipw_send_associate(struct ipw_priv *priv, 2427 struct ipw_associate *associate) 2428 { 2429 if (!priv || !associate) { 2430 IPW_ERROR("Invalid args\n"); 2431 return -1; 2432 } 2433 2434 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate), 2435 associate); 2436 } 2437 2438 static int ipw_send_supported_rates(struct ipw_priv *priv, 2439 struct ipw_supported_rates *rates) 2440 { 2441 if (!priv || 
!rates) { 2442 IPW_ERROR("Invalid args\n"); 2443 return -1; 2444 } 2445 2446 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates), 2447 rates); 2448 } 2449 2450 static int ipw_set_random_seed(struct ipw_priv *priv) 2451 { 2452 u32 val; 2453 2454 if (!priv) { 2455 IPW_ERROR("Invalid args\n"); 2456 return -1; 2457 } 2458 2459 get_random_bytes(&val, sizeof(val)); 2460 2461 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val); 2462 } 2463 2464 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) 2465 { 2466 __le32 v = cpu_to_le32(phy_off); 2467 if (!priv) { 2468 IPW_ERROR("Invalid args\n"); 2469 return -1; 2470 } 2471 2472 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v); 2473 } 2474 2475 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power) 2476 { 2477 if (!priv || !power) { 2478 IPW_ERROR("Invalid args\n"); 2479 return -1; 2480 } 2481 2482 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power); 2483 } 2484 2485 static int ipw_set_tx_power(struct ipw_priv *priv) 2486 { 2487 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 2488 struct ipw_tx_power tx_power; 2489 s8 max_power; 2490 int i; 2491 2492 memset(&tx_power, 0, sizeof(tx_power)); 2493 2494 /* configure device for 'G' band */ 2495 tx_power.ieee_mode = IPW_G_MODE; 2496 tx_power.num_channels = geo->bg_channels; 2497 for (i = 0; i < geo->bg_channels; i++) { 2498 max_power = geo->bg[i].max_power; 2499 tx_power.channels_tx_power[i].channel_number = 2500 geo->bg[i].channel; 2501 tx_power.channels_tx_power[i].tx_power = max_power ? 2502 min(max_power, priv->tx_power) : priv->tx_power; 2503 } 2504 if (ipw_send_tx_power(priv, &tx_power)) 2505 return -EIO; 2506 2507 /* configure device to also handle 'B' band */ 2508 tx_power.ieee_mode = IPW_B_MODE; 2509 if (ipw_send_tx_power(priv, &tx_power)) 2510 return -EIO; 2511 2512 /* configure device to also handle 'A' band */ 2513 if (priv->ieee->abg_true) { 2514 tx_power.ieee_mode = IPW_A_MODE; 2515 tx_power.num_channels = geo->a_channels; 2516 for (i = 0; i < tx_power.num_channels; i++) { 2517 max_power = geo->a[i].max_power; 2518 tx_power.channels_tx_power[i].channel_number = 2519 geo->a[i].channel; 2520 tx_power.channels_tx_power[i].tx_power = max_power ? 
2521 min(max_power, priv->tx_power) : priv->tx_power; 2522 } 2523 if (ipw_send_tx_power(priv, &tx_power)) 2524 return -EIO; 2525 } 2526 return 0; 2527 } 2528 2529 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) 2530 { 2531 struct ipw_rts_threshold rts_threshold = { 2532 .rts_threshold = cpu_to_le16(rts), 2533 }; 2534 2535 if (!priv) { 2536 IPW_ERROR("Invalid args\n"); 2537 return -1; 2538 } 2539 2540 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD, 2541 sizeof(rts_threshold), &rts_threshold); 2542 } 2543 2544 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) 2545 { 2546 struct ipw_frag_threshold frag_threshold = { 2547 .frag_threshold = cpu_to_le16(frag), 2548 }; 2549 2550 if (!priv) { 2551 IPW_ERROR("Invalid args\n"); 2552 return -1; 2553 } 2554 2555 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD, 2556 sizeof(frag_threshold), &frag_threshold); 2557 } 2558 2559 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) 2560 { 2561 __le32 param; 2562 2563 if (!priv) { 2564 IPW_ERROR("Invalid args\n"); 2565 return -1; 2566 } 2567 2568 /* If on battery, set to 3, if AC set to CAM, else user 2569 * level */ 2570 switch (mode) { 2571 case IPW_POWER_BATTERY: 2572 param = cpu_to_le32(IPW_POWER_INDEX_3); 2573 break; 2574 case IPW_POWER_AC: 2575 param = cpu_to_le32(IPW_POWER_MODE_CAM); 2576 break; 2577 default: 2578 param = cpu_to_le32(mode); 2579 break; 2580 } 2581 2582 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), 2583 ¶m); 2584 } 2585 2586 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) 2587 { 2588 struct ipw_retry_limit retry_limit = { 2589 .short_retry_limit = slimit, 2590 .long_retry_limit = llimit 2591 }; 2592 2593 if (!priv) { 2594 IPW_ERROR("Invalid args\n"); 2595 return -1; 2596 } 2597 2598 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit), 2599 &retry_limit); 2600 } 2601 2602 /* 2603 * The IPW device contains a Microwire compatible EEPROM that stores 2604 * various data like the MAC address. Usually the firmware has exclusive 2605 * access to the eeprom, but during device initialization (before the 2606 * device driver has sent the HostComplete command to the firmware) the 2607 * device driver has read access to the EEPROM by way of indirect addressing 2608 * through a couple of memory mapped registers. 2609 * 2610 * The following is a simplified implementation for pulling data out of the 2611 * the eeprom, along with some helper functions to find information in 2612 * the per device private data's copy of the eeprom. 2613 * 2614 * NOTE: To better understand how these functions work (i.e what is a chip 2615 * select and why do have to keep driving the eeprom clock?), read 2616 * just about any data sheet for a Microwire compatible EEPROM. 
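 *
 * As a rough sketch (illustrative only, not additional driver code), a
 * single 16-bit read decomposes into the bit-banged helpers below:
 *
 *	eeprom_cs(priv);                              -- assert chip select
 *	eeprom_write_bit(priv, 1);                    -- start bit
 *	eeprom_write_bit(priv, EEPROM_CMD_READ & 2);  -- two opcode bits
 *	eeprom_write_bit(priv, EEPROM_CMD_READ & 1);
 *	-- then eight address bits, a dummy clock, and sixteen clock pulses,
 *	-- sampling EEPROM_BIT_DO after each one
 *	eeprom_disable_cs(priv);                      -- release chip select
 *
 * eeprom_op() and eeprom_read_u16() below implement exactly this sequence.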
2617 */ 2618 2619 /* write a 32 bit value into the indirect accessor register */ 2620 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data) 2621 { 2622 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data); 2623 2624 /* the eeprom requires some time to complete the operation */ 2625 udelay(p->eeprom_delay); 2626 } 2627 2628 /* perform a chip select operation */ 2629 static void eeprom_cs(struct ipw_priv *priv) 2630 { 2631 eeprom_write_reg(priv, 0); 2632 eeprom_write_reg(priv, EEPROM_BIT_CS); 2633 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK); 2634 eeprom_write_reg(priv, EEPROM_BIT_CS); 2635 } 2636 2637 /* release the chip select */ 2638 static void eeprom_disable_cs(struct ipw_priv *priv) 2639 { 2640 eeprom_write_reg(priv, EEPROM_BIT_CS); 2641 eeprom_write_reg(priv, 0); 2642 eeprom_write_reg(priv, EEPROM_BIT_SK); 2643 } 2644 2645 /* push a single bit down to the eeprom */ 2646 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit) 2647 { 2648 int d = (bit ? EEPROM_BIT_DI : 0); 2649 eeprom_write_reg(p, EEPROM_BIT_CS | d); 2650 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK); 2651 } 2652 2653 /* push an opcode followed by an address down to the eeprom */ 2654 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr) 2655 { 2656 int i; 2657 2658 eeprom_cs(priv); 2659 eeprom_write_bit(priv, 1); 2660 eeprom_write_bit(priv, op & 2); 2661 eeprom_write_bit(priv, op & 1); 2662 for (i = 7; i >= 0; i--) { 2663 eeprom_write_bit(priv, addr & (1 << i)); 2664 } 2665 } 2666 2667 /* pull 16 bits off the eeprom, one bit at a time */ 2668 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr) 2669 { 2670 int i; 2671 u16 r = 0; 2672 2673 /* Send READ Opcode */ 2674 eeprom_op(priv, EEPROM_CMD_READ, addr); 2675 2676 /* Send dummy bit */ 2677 eeprom_write_reg(priv, EEPROM_BIT_CS); 2678 2679 /* Read the 16-bit word off the eeprom one bit at a time */ 2680 for (i = 0; i < 16; i++) { 2681 u32 data = 0; 2682 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK); 2683 eeprom_write_reg(priv, EEPROM_BIT_CS); 2684 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS); 2685 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0); 2686 } 2687 2688 /* Send another dummy bit */ 2689 eeprom_write_reg(priv, 0); 2690 eeprom_disable_cs(priv); 2691 2692 return r; 2693 } 2694 2695 /* helper function for pulling the mac address out of the private */ 2696 /* data's copy of the eeprom data */ 2697 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac) 2698 { 2699 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN); 2700 } 2701 2702 static void ipw_read_eeprom(struct ipw_priv *priv) 2703 { 2704 int i; 2705 __le16 *eeprom = (__le16 *) priv->eeprom; 2706 2707 IPW_DEBUG_TRACE(">>\n"); 2708 2709 /* read entire contents of eeprom into private buffer */ 2710 for (i = 0; i < 128; i++) 2711 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i)); 2712 2713 IPW_DEBUG_TRACE("<<\n"); 2714 } 2715 2716 /* 2717 * Either the device driver (i.e. the host) or the firmware can 2718 * load eeprom data into the designated region in SRAM. If neither 2719 * happens then the FW will shut down with a fatal error. 2720 * 2721 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE 2722 * word in the shared SRAM needs to be non-zero. 2723 */ 2724 static void ipw_eeprom_init_sram(struct ipw_priv *priv) 2725 { 2726 int i; 2727 2728 IPW_DEBUG_TRACE(">>\n"); 2729 2730 /* 2731 If the data looks correct, then copy it to our private 2732 copy.
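(The cached image is treated as valid when the version byte at EEPROM_VERSION is non-zero.)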
Otherwise let the firmware know to perform the operation 2733 on its own. 2734 */ 2735 if (priv->eeprom[EEPROM_VERSION] != 0) { 2736 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n"); 2737 2738 /* write the eeprom data to sram */ 2739 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++) 2740 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]); 2741 2742 /* Do not load eeprom data on fatal error or suspend */ 2743 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); 2744 } else { 2745 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n"); 2746 2747 /* Load eeprom data on fatal error or suspend */ 2748 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1); 2749 } 2750 2751 IPW_DEBUG_TRACE("<<\n"); 2752 } 2753 2754 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count) 2755 { 2756 count >>= 2; 2757 if (!count) 2758 return; 2759 _ipw_write32(priv, IPW_AUTOINC_ADDR, start); 2760 while (count--) 2761 _ipw_write32(priv, IPW_AUTOINC_DATA, 0); 2762 } 2763 2764 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv) 2765 { 2766 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL, 2767 CB_NUMBER_OF_ELEMENTS_SMALL * 2768 sizeof(struct command_block)); 2769 } 2770 2771 static int ipw_fw_dma_enable(struct ipw_priv *priv) 2772 { /* start dma engine but no transfers yet */ 2773 2774 IPW_DEBUG_FW(">> :\n"); 2775 2776 /* Start the dma */ 2777 ipw_fw_dma_reset_command_blocks(priv); 2778 2779 /* Write CB base address */ 2780 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL); 2781 2782 IPW_DEBUG_FW("<< :\n"); 2783 return 0; 2784 } 2785 2786 static void ipw_fw_dma_abort(struct ipw_priv *priv) 2787 { 2788 u32 control = 0; 2789 2790 IPW_DEBUG_FW(">> :\n"); 2791 2792 /* set the Stop and Abort bit */ 2793 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT; 2794 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); 2795 priv->sram_desc.last_cb_index = 0; 2796 2797 IPW_DEBUG_FW("<<\n"); 2798 } 2799 2800 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, 2801 struct command_block *cb) 2802 { 2803 u32 address = 2804 IPW_SHARED_SRAM_DMA_CONTROL + 2805 (sizeof(struct command_block) * index); 2806 IPW_DEBUG_FW(">> :\n"); 2807 2808 ipw_write_indirect(priv, address, (u8 *) cb, 2809 (int)sizeof(struct command_block)); 2810 2811 IPW_DEBUG_FW("<< :\n"); 2812 return 0; 2813 2814 } 2815 2816 static int ipw_fw_dma_kick(struct ipw_priv *priv) 2817 { 2818 u32 control = 0; 2819 u32 index = 0; 2820 2821 IPW_DEBUG_FW(">> :\n"); 2822 2823 for (index = 0; index < priv->sram_desc.last_cb_index; index++) 2824 ipw_fw_dma_write_command_block(priv, index, 2825 &priv->sram_desc.cb_list[index]); 2826 2827 /* Enable the DMA in the CSR register */ 2828 ipw_clear_bit(priv, IPW_RESET_REG, 2829 IPW_RESET_REG_MASTER_DISABLED | 2830 IPW_RESET_REG_STOP_MASTER); 2831 2832 /* Set the Start bit. 
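Writing DMA_CB_START into IPW_DMA_I_DMA_CONTROL makes the engine walk
	   the command blocks written out by ipw_fw_dma_write_command_block() above.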
*/ 2833 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START; 2834 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); 2835 2836 IPW_DEBUG_FW("<< :\n"); 2837 return 0; 2838 } 2839 2840 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv) 2841 { 2842 u32 address; 2843 u32 register_value = 0; 2844 u32 cb_fields_address = 0; 2845 2846 IPW_DEBUG_FW(">> :\n"); 2847 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); 2848 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address); 2849 2850 /* Read the DMA Controlor register */ 2851 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL); 2852 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value); 2853 2854 /* Print the CB values */ 2855 cb_fields_address = address; 2856 register_value = ipw_read_reg32(priv, cb_fields_address); 2857 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value); 2858 2859 cb_fields_address += sizeof(u32); 2860 register_value = ipw_read_reg32(priv, cb_fields_address); 2861 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value); 2862 2863 cb_fields_address += sizeof(u32); 2864 register_value = ipw_read_reg32(priv, cb_fields_address); 2865 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n", 2866 register_value); 2867 2868 cb_fields_address += sizeof(u32); 2869 register_value = ipw_read_reg32(priv, cb_fields_address); 2870 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value); 2871 2872 IPW_DEBUG_FW(">> :\n"); 2873 } 2874 2875 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv) 2876 { 2877 u32 current_cb_address = 0; 2878 u32 current_cb_index = 0; 2879 2880 IPW_DEBUG_FW("<< :\n"); 2881 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); 2882 2883 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) / 2884 sizeof(struct command_block); 2885 2886 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n", 2887 current_cb_index, current_cb_address); 2888 2889 IPW_DEBUG_FW(">> :\n"); 2890 return current_cb_index; 2891 2892 } 2893 2894 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv, 2895 u32 src_address, 2896 u32 dest_address, 2897 u32 length, 2898 int interrupt_enabled, int is_last) 2899 { 2900 2901 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC | 2902 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG | 2903 CB_DEST_SIZE_LONG; 2904 struct command_block *cb; 2905 u32 last_cb_element = 0; 2906 2907 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n", 2908 src_address, dest_address, length); 2909 2910 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL) 2911 return -1; 2912 2913 last_cb_element = priv->sram_desc.last_cb_index; 2914 cb = &priv->sram_desc.cb_list[last_cb_element]; 2915 priv->sram_desc.last_cb_index++; 2916 2917 /* Calculate the new CB control word */ 2918 if (interrupt_enabled) 2919 control |= CB_INT_ENABLED; 2920 2921 if (is_last) 2922 control |= CB_LAST_VALID; 2923 2924 control |= length; 2925 2926 /* Calculate the CB Element's checksum value */ 2927 cb->status = control ^ src_address ^ dest_address; 2928 2929 /* Copy the Source and Destination addresses */ 2930 cb->dest_addr = dest_address; 2931 cb->source_addr = src_address; 2932 2933 /* Copy the Control Word last */ 2934 cb->control = control; 2935 2936 return 0; 2937 } 2938 2939 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address, 2940 int nr, u32 dest_address, u32 len) 2941 { 2942 int ret, i; 2943 u32 size; 2944 2945 
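/* Split the caller's buffer into command blocks of at most CB_MAX_LENGTH
	 * bytes each: chunk i of the nr DMA-mapped pages in src_address[] is
	 * targeted at dest_address + i * CB_MAX_LENGTH. */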
IPW_DEBUG_FW(">>\n"); 2946 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n", 2947 nr, dest_address, len); 2948 2949 for (i = 0; i < nr; i++) { 2950 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH); 2951 ret = ipw_fw_dma_add_command_block(priv, src_address[i], 2952 dest_address + 2953 i * CB_MAX_LENGTH, size, 2954 0, 0); 2955 if (ret) { 2956 IPW_DEBUG_FW_INFO(": Failed\n"); 2957 return -1; 2958 } else 2959 IPW_DEBUG_FW_INFO(": Added new cb\n"); 2960 } 2961 2962 IPW_DEBUG_FW("<<\n"); 2963 return 0; 2964 } 2965 2966 static int ipw_fw_dma_wait(struct ipw_priv *priv) 2967 { 2968 u32 current_index = 0, previous_index; 2969 u32 watchdog = 0; 2970 2971 IPW_DEBUG_FW(">> :\n"); 2972 2973 current_index = ipw_fw_dma_command_block_index(priv); 2974 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n", 2975 (int)priv->sram_desc.last_cb_index); 2976 2977 while (current_index < priv->sram_desc.last_cb_index) { 2978 udelay(50); 2979 previous_index = current_index; 2980 current_index = ipw_fw_dma_command_block_index(priv); 2981 2982 if (previous_index < current_index) { 2983 watchdog = 0; 2984 continue; 2985 } 2986 if (++watchdog > 400) { 2987 IPW_DEBUG_FW_INFO("Timeout\n"); 2988 ipw_fw_dma_dump_command_block(priv); 2989 ipw_fw_dma_abort(priv); 2990 return -1; 2991 } 2992 } 2993 2994 ipw_fw_dma_abort(priv); 2995 2996 /*Disable the DMA in the CSR register */ 2997 ipw_set_bit(priv, IPW_RESET_REG, 2998 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER); 2999 3000 IPW_DEBUG_FW("<< dmaWaitSync\n"); 3001 return 0; 3002 } 3003 3004 static void ipw_remove_current_network(struct ipw_priv *priv) 3005 { 3006 struct list_head *element, *safe; 3007 struct libipw_network *network = NULL; 3008 unsigned long flags; 3009 3010 spin_lock_irqsave(&priv->ieee->lock, flags); 3011 list_for_each_safe(element, safe, &priv->ieee->network_list) { 3012 network = list_entry(element, struct libipw_network, list); 3013 if (ether_addr_equal(network->bssid, priv->bssid)) { 3014 list_del(element); 3015 list_add_tail(&network->list, 3016 &priv->ieee->network_free_list); 3017 } 3018 } 3019 spin_unlock_irqrestore(&priv->ieee->lock, flags); 3020 } 3021 3022 /** 3023 * Check that card is still alive. 3024 * Reads debug register from domain0. 3025 * If card is present, pre-defined value should 3026 * be found there. 3027 * 3028 * @param priv 3029 * @return 1 if card is present, 0 otherwise 3030 */ 3031 static inline int ipw_alive(struct ipw_priv *priv) 3032 { 3033 return ipw_read32(priv, 0x90) == 0xd55555d5; 3034 } 3035 3036 /* timeout in msec, attempted in 10-msec quanta */ 3037 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, 3038 int timeout) 3039 { 3040 int i = 0; 3041 3042 do { 3043 if ((ipw_read32(priv, addr) & mask) == mask) 3044 return i; 3045 mdelay(10); 3046 i += 10; 3047 } while (i < timeout); 3048 3049 return -ETIME; 3050 } 3051 3052 /* These functions load the firmware and micro code for the operation of 3053 * the ipw hardware. It assumes the buffer has all the bits for the 3054 * image and the caller is handling the memory allocation and clean up. 3055 */ 3056 3057 static int ipw_stop_master(struct ipw_priv *priv) 3058 { 3059 int rc; 3060 3061 IPW_DEBUG_TRACE(">>\n"); 3062 /* stop master. 
typical delay - 0 */ 3063 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); 3064 3065 /* timeout is in msec, polled in 10-msec quanta */ 3066 rc = ipw_poll_bit(priv, IPW_RESET_REG, 3067 IPW_RESET_REG_MASTER_DISABLED, 100); 3068 if (rc < 0) { 3069 IPW_ERROR("wait for stop master failed after 100ms\n"); 3070 return -1; 3071 } 3072 3073 IPW_DEBUG_INFO("stop master %dms\n", rc); 3074 3075 return rc; 3076 } 3077 3078 static void ipw_arc_release(struct ipw_priv *priv) 3079 { 3080 IPW_DEBUG_TRACE(">>\n"); 3081 mdelay(5); 3082 3083 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); 3084 3085 /* no one knows timing, for safety add some delay */ 3086 mdelay(5); 3087 } 3088 3089 struct fw_chunk { 3090 __le32 address; 3091 __le32 length; 3092 }; 3093 3094 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) 3095 { 3096 int rc = 0, i, addr; 3097 u8 cr = 0; 3098 __le16 *image; 3099 3100 image = (__le16 *) data; 3101 3102 IPW_DEBUG_TRACE(">>\n"); 3103 3104 rc = ipw_stop_master(priv); 3105 3106 if (rc < 0) 3107 return rc; 3108 3109 for (addr = IPW_SHARED_LOWER_BOUND; 3110 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) { 3111 ipw_write32(priv, addr, 0); 3112 } 3113 3114 /* no ucode (yet) */ 3115 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive)); 3116 /* destroy DMA queues */ 3117 /* reset sequence */ 3118 3119 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON); 3120 ipw_arc_release(priv); 3121 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF); 3122 mdelay(1); 3123 3124 /* reset PHY */ 3125 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN); 3126 mdelay(1); 3127 3128 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0); 3129 mdelay(1); 3130 3131 /* enable ucode store */ 3132 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0); 3133 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS); 3134 mdelay(1); 3135 3136 /* write ucode */ 3137 /** 3138 * @bug 3139 * Do NOT set indirect address register once and then 3140 * store data to indirect data register in the loop. 3141 * It seems very reasonable, but in this case DINO do not 3142 * accept ucode. It is essential to set address each time. 3143 */ 3144 /* load new ipw uCode */ 3145 for (i = 0; i < len / 2; i++) 3146 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE, 3147 le16_to_cpu(image[i])); 3148 3149 /* enable DINO */ 3150 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); 3151 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM); 3152 3153 /* this is where the igx / win driver deveates from the VAP driver. */ 3154 3155 /* wait for alive response */ 3156 for (i = 0; i < 100; i++) { 3157 /* poll for incoming data */ 3158 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS); 3159 if (cr & DINO_RXFIFO_DATA) 3160 break; 3161 mdelay(1); 3162 } 3163 3164 if (cr & DINO_RXFIFO_DATA) { 3165 /* alive_command_responce size is NOT multiple of 4 */ 3166 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4]; 3167 3168 for (i = 0; i < ARRAY_SIZE(response_buffer); i++) 3169 response_buffer[i] = 3170 cpu_to_le32(ipw_read_reg32(priv, 3171 IPW_BASEBAND_RX_FIFO_READ)); 3172 memcpy(&priv->dino_alive, response_buffer, 3173 sizeof(priv->dino_alive)); 3174 if (priv->dino_alive.alive_command == 1 3175 && priv->dino_alive.ucode_valid == 1) { 3176 rc = 0; 3177 IPW_DEBUG_INFO 3178 ("Microcode OK, rev. %d (0x%x) dev. 
%d (0x%x) " 3179 "of %02d/%02d/%02d %02d:%02d\n", 3180 priv->dino_alive.software_revision, 3181 priv->dino_alive.software_revision, 3182 priv->dino_alive.device_identifier, 3183 priv->dino_alive.device_identifier, 3184 priv->dino_alive.time_stamp[0], 3185 priv->dino_alive.time_stamp[1], 3186 priv->dino_alive.time_stamp[2], 3187 priv->dino_alive.time_stamp[3], 3188 priv->dino_alive.time_stamp[4]); 3189 } else { 3190 IPW_DEBUG_INFO("Microcode is not alive\n"); 3191 rc = -EINVAL; 3192 } 3193 } else { 3194 IPW_DEBUG_INFO("No alive response from DINO\n"); 3195 rc = -ETIME; 3196 } 3197 3198 /* disable DINO, otherwise for some reason 3199 the firmware has problems getting the alive response. */ 3200 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); 3201 3202 return rc; 3203 } 3204 3205 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) 3206 { 3207 int ret = -1; 3208 int offset = 0; 3209 struct fw_chunk *chunk; 3210 int total_nr = 0; 3211 int i; 3212 struct dma_pool *pool; 3213 void **virts; 3214 dma_addr_t *phys; 3215 3216 IPW_DEBUG_TRACE("<< :\n"); 3217 3218 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL, 3219 GFP_KERNEL); 3220 if (!virts) 3221 return -ENOMEM; 3222 3223 phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL, 3224 GFP_KERNEL); 3225 if (!phys) { 3226 kfree(virts); 3227 return -ENOMEM; 3228 } 3229 pool = dma_pool_create("ipw2200", &priv->pci_dev->dev, CB_MAX_LENGTH, 0, 3230 0); 3231 if (!pool) { 3232 IPW_ERROR("dma_pool_create failed\n"); 3233 kfree(phys); 3234 kfree(virts); 3235 return -ENOMEM; 3236 } 3237 3238 /* Start the DMA */ 3239 ret = ipw_fw_dma_enable(priv); 3240 3241 /* if the DMA engine were already active here, it would be a bug */ 3242 BUG_ON(priv->sram_desc.last_cb_index > 0); 3243 3244 do { 3245 u32 chunk_len; 3246 u8 *start; 3247 int size; 3248 int nr = 0; 3249 3250 chunk = (struct fw_chunk *)(data + offset); 3251 offset += sizeof(struct fw_chunk); 3252 chunk_len = le32_to_cpu(chunk->length); 3253 start = data + offset; 3254 3255 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH; 3256 for (i = 0; i < nr; i++) { 3257 virts[total_nr] = dma_pool_alloc(pool, GFP_KERNEL, 3258 &phys[total_nr]); 3259 if (!virts[total_nr]) { 3260 ret = -ENOMEM; 3261 goto out; 3262 } 3263 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH, 3264 CB_MAX_LENGTH); 3265 memcpy(virts[total_nr], start, size); 3266 start += size; 3267 total_nr++; 3268 /* We don't support fw chunk larger than 64*8K */ 3269 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL); 3270 } 3271 3272 /* build DMA packet and queue up for sending */ 3273 /* dma to chunk->address, the chunk->length bytes from data + 3274 * offset */ 3275 /* Dma loading */ 3276 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr], 3277 nr, le32_to_cpu(chunk->address), 3278 chunk_len); 3279 if (ret) { 3280 IPW_DEBUG_INFO("dmaAddBuffer Failed\n"); 3281 goto out; 3282 } 3283 3284 offset += chunk_len; 3285 } while (offset < len); 3286 3287 /* Run the DMA and wait for the answer */ 3288 ret = ipw_fw_dma_kick(priv); 3289 if (ret) { 3290 IPW_ERROR("dmaKick Failed\n"); 3291 goto out; 3292 } 3293 3294 ret = ipw_fw_dma_wait(priv); 3295 if (ret) { 3296 IPW_ERROR("dmaWaitSync Failed\n"); 3297 goto out; 3298 } 3299 out: 3300 for (i = 0; i < total_nr; i++) 3301 dma_pool_free(pool, virts[i], phys[i]); 3302 3303 dma_pool_destroy(pool); 3304 kfree(phys); 3305 kfree(virts); 3306 3307 return ret; 3308 } 3309 3310 /* stop nic */ 3311 static int ipw_stop_nic(struct ipw_priv *priv) 3312 { 3313 int rc = 0; 3314 3315 /* stop */ 3316 ipw_write32(priv,
IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); 3317 3318 rc = ipw_poll_bit(priv, IPW_RESET_REG, 3319 IPW_RESET_REG_MASTER_DISABLED, 500); 3320 if (rc < 0) { 3321 IPW_ERROR("wait for reg master disabled failed after 500ms\n"); 3322 return rc; 3323 } 3324 3325 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); 3326 3327 return rc; 3328 } 3329 3330 static void ipw_start_nic(struct ipw_priv *priv) 3331 { 3332 IPW_DEBUG_TRACE(">>\n"); 3333 3334 /* prvHwStartNic release ARC */ 3335 ipw_clear_bit(priv, IPW_RESET_REG, 3336 IPW_RESET_REG_MASTER_DISABLED | 3337 IPW_RESET_REG_STOP_MASTER | 3338 CBD_RESET_REG_PRINCETON_RESET); 3339 3340 /* enable power management */ 3341 ipw_set_bit(priv, IPW_GP_CNTRL_RW, 3342 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY); 3343 3344 IPW_DEBUG_TRACE("<<\n"); 3345 } 3346 3347 static int ipw_init_nic(struct ipw_priv *priv) 3348 { 3349 int rc; 3350 3351 IPW_DEBUG_TRACE(">>\n"); 3352 /* reset */ 3353 /*prvHwInitNic */ 3354 /* set "initialization complete" bit to move adapter to D0 state */ 3355 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE); 3356 3357 /* low-level PLL activation */ 3358 ipw_write32(priv, IPW_READ_INT_REGISTER, 3359 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER); 3360 3361 /* wait for clock stabilization */ 3362 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW, 3363 IPW_GP_CNTRL_BIT_CLOCK_READY, 250); 3364 if (rc < 0) 3365 IPW_DEBUG_INFO("FAILED wait for clock stablization\n"); 3366 3367 /* assert SW reset */ 3368 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET); 3369 3370 udelay(10); 3371 3372 /* set "initialization complete" bit to move adapter to D0 state */ 3373 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE); 3374 3375 IPW_DEBUG_TRACE(">>\n"); 3376 return 0; 3377 } 3378 3379 /* Call this function from process context, it will sleep in request_firmware. 3380 * Probe is an ok place to call this from. 3381 */ 3382 static int ipw_reset_nic(struct ipw_priv *priv) 3383 { 3384 int rc = 0; 3385 unsigned long flags; 3386 3387 IPW_DEBUG_TRACE(">>\n"); 3388 3389 rc = ipw_init_nic(priv); 3390 3391 spin_lock_irqsave(&priv->lock, flags); 3392 /* Clear the 'host command active' bit... 
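 and wake anyone blocked in __ipw_send_cmd(), so a NIC reset never
	   leaves a caller stuck waiting on wait_command_queue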
*/ 3393 priv->status &= ~STATUS_HCMD_ACTIVE; 3394 wake_up_interruptible(&priv->wait_command_queue); 3395 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING); 3396 wake_up_interruptible(&priv->wait_state); 3397 spin_unlock_irqrestore(&priv->lock, flags); 3398 3399 IPW_DEBUG_TRACE("<<\n"); 3400 return rc; 3401 } 3402 3403 3404 struct ipw_fw { 3405 __le32 ver; 3406 __le32 boot_size; 3407 __le32 ucode_size; 3408 __le32 fw_size; 3409 u8 data[0]; 3410 }; 3411 3412 static int ipw_get_fw(struct ipw_priv *priv, 3413 const struct firmware **raw, const char *name) 3414 { 3415 struct ipw_fw *fw; 3416 int rc; 3417 3418 /* ask firmware_class module to get the boot firmware off disk */ 3419 rc = request_firmware(raw, name, &priv->pci_dev->dev); 3420 if (rc < 0) { 3421 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc); 3422 return rc; 3423 } 3424 3425 if ((*raw)->size < sizeof(*fw)) { 3426 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size); 3427 return -EINVAL; 3428 } 3429 3430 fw = (void *)(*raw)->data; 3431 3432 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) + 3433 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) { 3434 IPW_ERROR("%s is too small or corrupt (%zd)\n", 3435 name, (*raw)->size); 3436 return -EINVAL; 3437 } 3438 3439 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n", 3440 name, 3441 le32_to_cpu(fw->ver) >> 16, 3442 le32_to_cpu(fw->ver) & 0xff, 3443 (*raw)->size - sizeof(*fw)); 3444 return 0; 3445 } 3446 3447 #define IPW_RX_BUF_SIZE (3000) 3448 3449 static void ipw_rx_queue_reset(struct ipw_priv *priv, 3450 struct ipw_rx_queue *rxq) 3451 { 3452 unsigned long flags; 3453 int i; 3454 3455 spin_lock_irqsave(&rxq->lock, flags); 3456 3457 INIT_LIST_HEAD(&rxq->rx_free); 3458 INIT_LIST_HEAD(&rxq->rx_used); 3459 3460 /* Fill the rx_used queue with _all_ of the Rx buffers */ 3461 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 3462 /* In the reset function, these buffers may have been allocated 3463 * to an SKB, so we need to unmap and free potential storage */ 3464 if (rxq->pool[i].skb != NULL) { 3465 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 3466 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 3467 dev_kfree_skb(rxq->pool[i].skb); 3468 rxq->pool[i].skb = NULL; 3469 } 3470 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 3471 } 3472 3473 /* Set us so that we have processed and used all buffers, but have 3474 * not restocked the Rx queue with fresh buffers */ 3475 rxq->read = rxq->write = 0; 3476 rxq->free_count = 0; 3477 spin_unlock_irqrestore(&rxq->lock, flags); 3478 } 3479 3480 #ifdef CONFIG_PM 3481 static int fw_loaded = 0; 3482 static const struct firmware *raw = NULL; 3483 3484 static void free_firmware(void) 3485 { 3486 if (fw_loaded) { 3487 release_firmware(raw); 3488 raw = NULL; 3489 fw_loaded = 0; 3490 } 3491 } 3492 #else 3493 #define free_firmware() do {} while (0) 3494 #endif 3495 3496 static int ipw_load(struct ipw_priv *priv) 3497 { 3498 #ifndef CONFIG_PM 3499 const struct firmware *raw = NULL; 3500 #endif 3501 struct ipw_fw *fw; 3502 u8 *boot_img, *ucode_img, *fw_img; 3503 u8 *name = NULL; 3504 int rc = 0, retries = 3; 3505 3506 switch (priv->ieee->iw_mode) { 3507 case IW_MODE_ADHOC: 3508 name = "ipw2200-ibss.fw"; 3509 break; 3510 #ifdef CONFIG_IPW2200_MONITOR 3511 case IW_MODE_MONITOR: 3512 name = "ipw2200-sniffer.fw"; 3513 break; 3514 #endif 3515 case IW_MODE_INFRA: 3516 name = "ipw2200-bss.fw"; 3517 break; 3518 } 3519 3520 if (!name) { 3521 rc = -EINVAL; 3522 goto error; 3523 } 3524 3525 #ifdef CONFIG_PM 3526 if 
(!fw_loaded) { 3527 #endif 3528 rc = ipw_get_fw(priv, &raw, name); 3529 if (rc < 0) 3530 goto error; 3531 #ifdef CONFIG_PM 3532 } 3533 #endif 3534 3535 fw = (void *)raw->data; 3536 boot_img = &fw->data[0]; 3537 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)]; 3538 fw_img = &fw->data[le32_to_cpu(fw->boot_size) + 3539 le32_to_cpu(fw->ucode_size)]; 3540 3541 if (!priv->rxq) 3542 priv->rxq = ipw_rx_queue_alloc(priv); 3543 else 3544 ipw_rx_queue_reset(priv, priv->rxq); 3545 if (!priv->rxq) { 3546 IPW_ERROR("Unable to initialize Rx queue\n"); 3547 rc = -ENOMEM; 3548 goto error; 3549 } 3550 3551 retry: 3552 /* Ensure interrupts are disabled */ 3553 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 3554 priv->status &= ~STATUS_INT_ENABLED; 3555 3556 /* ack pending interrupts */ 3557 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3558 3559 ipw_stop_nic(priv); 3560 3561 rc = ipw_reset_nic(priv); 3562 if (rc < 0) { 3563 IPW_ERROR("Unable to reset NIC\n"); 3564 goto error; 3565 } 3566 3567 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND, 3568 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); 3569 3570 /* DMA the initial boot firmware into the device */ 3571 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size)); 3572 if (rc < 0) { 3573 IPW_ERROR("Unable to load boot firmware: %d\n", rc); 3574 goto error; 3575 } 3576 3577 /* kick start the device */ 3578 ipw_start_nic(priv); 3579 3580 /* wait for the device to finish its initial startup sequence */ 3581 rc = ipw_poll_bit(priv, IPW_INTA_RW, 3582 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); 3583 if (rc < 0) { 3584 IPW_ERROR("device failed to boot initial fw image\n"); 3585 goto error; 3586 } 3587 IPW_DEBUG_INFO("initial device response after %dms\n", rc); 3588 3589 /* ack fw init done interrupt */ 3590 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); 3591 3592 /* DMA the ucode into the device */ 3593 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size)); 3594 if (rc < 0) { 3595 IPW_ERROR("Unable to load ucode: %d\n", rc); 3596 goto error; 3597 } 3598 3599 /* stop nic */ 3600 ipw_stop_nic(priv); 3601 3602 /* DMA bss firmware into the device */ 3603 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size)); 3604 if (rc < 0) { 3605 IPW_ERROR("Unable to load firmware: %d\n", rc); 3606 goto error; 3607 } 3608 #ifdef CONFIG_PM 3609 fw_loaded = 1; 3610 #endif 3611 3612 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); 3613 3614 rc = ipw_queue_reset(priv); 3615 if (rc < 0) { 3616 IPW_ERROR("Unable to initialize queues\n"); 3617 goto error; 3618 } 3619 3620 /* Ensure interrupts are disabled */ 3621 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 3622 /* ack pending interrupts */ 3623 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3624 3625 /* kick start the device */ 3626 ipw_start_nic(priv); 3627 3628 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) { 3629 if (retries > 0) { 3630 IPW_WARNING("Parity error. 
Retrying init.\n"); 3631 retries--; 3632 goto retry; 3633 } 3634 3635 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n"); 3636 rc = -EIO; 3637 goto error; 3638 } 3639 3640 /* wait for the device */ 3641 rc = ipw_poll_bit(priv, IPW_INTA_RW, 3642 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); 3643 if (rc < 0) { 3644 IPW_ERROR("device failed to start within 500ms\n"); 3645 goto error; 3646 } 3647 IPW_DEBUG_INFO("device response after %dms\n", rc); 3648 3649 /* ack fw init done interrupt */ 3650 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); 3651 3652 /* read eeprom data */ 3653 priv->eeprom_delay = 1; 3654 ipw_read_eeprom(priv); 3655 /* initialize the eeprom region of sram */ 3656 ipw_eeprom_init_sram(priv); 3657 3658 /* enable interrupts */ 3659 ipw_enable_interrupts(priv); 3660 3661 /* Ensure our queue has valid packets */ 3662 ipw_rx_queue_replenish(priv); 3663 3664 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read); 3665 3666 /* ack pending interrupts */ 3667 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3668 3669 #ifndef CONFIG_PM 3670 release_firmware(raw); 3671 #endif 3672 return 0; 3673 3674 error: 3675 if (priv->rxq) { 3676 ipw_rx_queue_free(priv, priv->rxq); 3677 priv->rxq = NULL; 3678 } 3679 ipw_tx_queue_free(priv); 3680 release_firmware(raw); 3681 #ifdef CONFIG_PM 3682 fw_loaded = 0; 3683 raw = NULL; 3684 #endif 3685 3686 return rc; 3687 } 3688 3689 /** 3690 * DMA services 3691 * 3692 * Theory of operation 3693 * 3694 * A queue is a circular buffers with 'Read' and 'Write' pointers. 3695 * 2 empty entries always kept in the buffer to protect from overflow. 3696 * 3697 * For Tx queue, there are low mark and high mark limits. If, after queuing 3698 * the packet for Tx, free space become < low mark, Tx queue stopped. When 3699 * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 3700 * Tx queue resumed. 3701 * 3702 * The IPW operates with six queues, one receive queue in the device's 3703 * sram, one transmit queue for sending commands to the device firmware, 3704 * and four transmit queues for data. 3705 * 3706 * The four transmit queues allow for performing quality of service (qos) 3707 * transmissions as per the 802.11 protocol. Currently Linux does not 3708 * provide a mechanism to the user for utilizing prioritized queues, so 3709 * we only utilize the first data transmit queue (queue1). 3710 */ 3711 3712 /** 3713 * Driver allocates buffers of this size for Rx 3714 */ 3715 3716 /** 3717 * ipw_rx_queue_space - Return number of free slots available in queue. 3718 */ 3719 static int ipw_rx_queue_space(const struct ipw_rx_queue *q) 3720 { 3721 int s = q->read - q->write; 3722 if (s <= 0) 3723 s += RX_QUEUE_SIZE; 3724 /* keep some buffer to not confuse full and empty queue */ 3725 s -= 2; 3726 if (s < 0) 3727 s = 0; 3728 return s; 3729 } 3730 3731 static inline int ipw_tx_queue_space(const struct clx2_queue *q) 3732 { 3733 int s = q->last_used - q->first_empty; 3734 if (s <= 0) 3735 s += q->n_bd; 3736 s -= 2; /* keep some reserve to not confuse empty and full situations */ 3737 if (s < 0) 3738 s = 0; 3739 return s; 3740 } 3741 3742 static inline int ipw_queue_inc_wrap(int index, int n_bd) 3743 { 3744 return (++index == n_bd) ? 0 : index; 3745 } 3746 3747 /** 3748 * Initialize common DMA queue structure 3749 * 3750 * @param q queue to init 3751 * @param count Number of BD's to allocate. 
Should be power of 2 3752 * @param read_register Address for 'read' register 3753 * (not offset within BAR, full address) 3754 * @param write_register Address for 'write' register 3755 * (not offset within BAR, full address) 3756 * @param base_register Address for 'base' register 3757 * (not offset within BAR, full address) 3758 * @param size Address for 'size' register 3759 * (not offset within BAR, full address) 3760 */ 3761 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q, 3762 int count, u32 read, u32 write, u32 base, u32 size) 3763 { 3764 q->n_bd = count; 3765 3766 q->low_mark = q->n_bd / 4; 3767 if (q->low_mark < 4) 3768 q->low_mark = 4; 3769 3770 q->high_mark = q->n_bd / 8; 3771 if (q->high_mark < 2) 3772 q->high_mark = 2; 3773 3774 q->first_empty = q->last_used = 0; 3775 q->reg_r = read; 3776 q->reg_w = write; 3777 3778 ipw_write32(priv, base, q->dma_addr); 3779 ipw_write32(priv, size, count); 3780 ipw_write32(priv, read, 0); 3781 ipw_write32(priv, write, 0); 3782 3783 _ipw_read32(priv, 0x90); 3784 } 3785 3786 static int ipw_queue_tx_init(struct ipw_priv *priv, 3787 struct clx2_tx_queue *q, 3788 int count, u32 read, u32 write, u32 base, u32 size) 3789 { 3790 struct pci_dev *dev = priv->pci_dev; 3791 3792 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL); 3793 if (!q->txb) { 3794 IPW_ERROR("vmalloc for auxiliary BD structures failed\n"); 3795 return -ENOMEM; 3796 } 3797 3798 q->bd = 3799 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr); 3800 if (!q->bd) { 3801 IPW_ERROR("pci_alloc_consistent(%zd) failed\n", 3802 sizeof(q->bd[0]) * count); 3803 kfree(q->txb); 3804 q->txb = NULL; 3805 return -ENOMEM; 3806 } 3807 3808 ipw_queue_init(priv, &q->q, count, read, write, base, size); 3809 return 0; 3810 } 3811 3812 /** 3813 * Free one TFD, those at index [txq->q.last_used]. 3814 * Do NOT advance any indexes 3815 * 3816 * @param dev 3817 * @param txq 3818 */ 3819 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv, 3820 struct clx2_tx_queue *txq) 3821 { 3822 struct tfd_frame *bd = &txq->bd[txq->q.last_used]; 3823 struct pci_dev *dev = priv->pci_dev; 3824 int i; 3825 3826 /* classify bd */ 3827 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE) 3828 /* nothing to cleanup after for host commands */ 3829 return; 3830 3831 /* sanity check */ 3832 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) { 3833 IPW_ERROR("Too many chunks: %i\n", 3834 le32_to_cpu(bd->u.data.num_chunks)); 3835 /** @todo issue fatal error, it is quite serious situation */ 3836 return; 3837 } 3838 3839 /* unmap chunks if any */ 3840 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) { 3841 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]), 3842 le16_to_cpu(bd->u.data.chunk_len[i]), 3843 PCI_DMA_TODEVICE); 3844 if (txq->txb[txq->q.last_used]) { 3845 libipw_txb_free(txq->txb[txq->q.last_used]); 3846 txq->txb[txq->q.last_used] = NULL; 3847 } 3848 } 3849 } 3850 3851 /** 3852 * Deallocate DMA queue. 3853 * 3854 * Empty queue by removing and destroying all BD's. 3855 * Free all buffers. 
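 * Safe to call on a queue that was never initialised: n_bd == 0 is
 * treated as an already-empty queue and the function returns at once.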
3856 * 3857 * @param dev 3858 * @param q 3859 */ 3860 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq) 3861 { 3862 struct clx2_queue *q = &txq->q; 3863 struct pci_dev *dev = priv->pci_dev; 3864 3865 if (q->n_bd == 0) 3866 return; 3867 3868 /* first, empty all BD's */ 3869 for (; q->first_empty != q->last_used; 3870 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { 3871 ipw_queue_tx_free_tfd(priv, txq); 3872 } 3873 3874 /* free buffers belonging to queue itself */ 3875 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd, 3876 q->dma_addr); 3877 kfree(txq->txb); 3878 3879 /* 0 fill whole structure */ 3880 memset(txq, 0, sizeof(*txq)); 3881 } 3882 3883 /** 3884 * Destroy all DMA queues and structures 3885 * 3886 * @param priv 3887 */ 3888 static void ipw_tx_queue_free(struct ipw_priv *priv) 3889 { 3890 /* Tx CMD queue */ 3891 ipw_queue_tx_free(priv, &priv->txq_cmd); 3892 3893 /* Tx queues */ 3894 ipw_queue_tx_free(priv, &priv->txq[0]); 3895 ipw_queue_tx_free(priv, &priv->txq[1]); 3896 ipw_queue_tx_free(priv, &priv->txq[2]); 3897 ipw_queue_tx_free(priv, &priv->txq[3]); 3898 } 3899 3900 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid) 3901 { 3902 /* First 3 bytes are manufacturer */ 3903 bssid[0] = priv->mac_addr[0]; 3904 bssid[1] = priv->mac_addr[1]; 3905 bssid[2] = priv->mac_addr[2]; 3906 3907 /* Last bytes are random */ 3908 get_random_bytes(&bssid[3], ETH_ALEN - 3); 3909 3910 bssid[0] &= 0xfe; /* clear multicast bit */ 3911 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */ 3912 } 3913 3914 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) 3915 { 3916 struct ipw_station_entry entry; 3917 int i; 3918 3919 for (i = 0; i < priv->num_stations; i++) { 3920 if (ether_addr_equal(priv->stations[i], bssid)) { 3921 /* Another node is active in network */ 3922 priv->missed_adhoc_beacons = 0; 3923 if (!(priv->config & CFG_STATIC_CHANNEL)) 3924 /* when other nodes drop out, we drop out */ 3925 priv->config &= ~CFG_ADHOC_PERSIST; 3926 3927 return i; 3928 } 3929 } 3930 3931 if (i == MAX_STATIONS) 3932 return IPW_INVALID_STATION; 3933 3934 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid); 3935 3936 entry.reserved = 0; 3937 entry.support_mode = 0; 3938 memcpy(entry.mac_addr, bssid, ETH_ALEN); 3939 memcpy(priv->stations[i], bssid, ETH_ALEN); 3940 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry), 3941 &entry, sizeof(entry)); 3942 priv->num_stations++; 3943 3944 return i; 3945 } 3946 3947 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid) 3948 { 3949 int i; 3950 3951 for (i = 0; i < priv->num_stations; i++) 3952 if (ether_addr_equal(priv->stations[i], bssid)) 3953 return i; 3954 3955 return IPW_INVALID_STATION; 3956 } 3957 3958 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) 3959 { 3960 int err; 3961 3962 if (priv->status & STATUS_ASSOCIATING) { 3963 IPW_DEBUG_ASSOC("Disassociating while associating.\n"); 3964 schedule_work(&priv->disassociate); 3965 return; 3966 } 3967 3968 if (!(priv->status & STATUS_ASSOCIATED)) { 3969 IPW_DEBUG_ASSOC("Disassociating while not associated.\n"); 3970 return; 3971 } 3972 3973 IPW_DEBUG_ASSOC("Disassociation attempt from %pM " 3974 "on channel %d.\n", 3975 priv->assoc_request.bssid, 3976 priv->assoc_request.channel); 3977 3978 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED); 3979 priv->status |= STATUS_DISASSOCIATING; 3980 3981 if (quiet) 3982 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET; 3983 else 3984 
priv->assoc_request.assoc_type = HC_DISASSOCIATE; 3985 3986 err = ipw_send_associate(priv, &priv->assoc_request); 3987 if (err) { 3988 IPW_DEBUG_HC("Attempt to send [dis]associate command " 3989 "failed.\n"); 3990 return; 3991 } 3992 3993 } 3994 3995 static int ipw_disassociate(void *data) 3996 { 3997 struct ipw_priv *priv = data; 3998 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) 3999 return 0; 4000 ipw_send_disassociate(data, 0); 4001 netif_carrier_off(priv->net_dev); 4002 return 1; 4003 } 4004 4005 static void ipw_bg_disassociate(struct work_struct *work) 4006 { 4007 struct ipw_priv *priv = 4008 container_of(work, struct ipw_priv, disassociate); 4009 mutex_lock(&priv->mutex); 4010 ipw_disassociate(priv); 4011 mutex_unlock(&priv->mutex); 4012 } 4013 4014 static void ipw_system_config(struct work_struct *work) 4015 { 4016 struct ipw_priv *priv = 4017 container_of(work, struct ipw_priv, system_config); 4018 4019 #ifdef CONFIG_IPW2200_PROMISCUOUS 4020 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { 4021 priv->sys_config.accept_all_data_frames = 1; 4022 priv->sys_config.accept_non_directed_frames = 1; 4023 priv->sys_config.accept_all_mgmt_bcpr = 1; 4024 priv->sys_config.accept_all_mgmt_frames = 1; 4025 } 4026 #endif 4027 4028 ipw_send_system_config(priv); 4029 } 4030 4031 struct ipw_status_code { 4032 u16 status; 4033 const char *reason; 4034 }; 4035 4036 static const struct ipw_status_code ipw_status_codes[] = { 4037 {0x00, "Successful"}, 4038 {0x01, "Unspecified failure"}, 4039 {0x0A, "Cannot support all requested capabilities in the " 4040 "Capability information field"}, 4041 {0x0B, "Reassociation denied due to inability to confirm that " 4042 "association exists"}, 4043 {0x0C, "Association denied due to reason outside the scope of this " 4044 "standard"}, 4045 {0x0D, 4046 "Responding station does not support the specified authentication " 4047 "algorithm"}, 4048 {0x0E, 4049 "Received an Authentication frame with authentication sequence " 4050 "transaction sequence number out of expected sequence"}, 4051 {0x0F, "Authentication rejected because of challenge failure"}, 4052 {0x10, "Authentication rejected due to timeout waiting for next " 4053 "frame in sequence"}, 4054 {0x11, "Association denied because AP is unable to handle additional " 4055 "associated stations"}, 4056 {0x12, 4057 "Association denied due to requesting station not supporting all " 4058 "of the datarates in the BSSBasicServiceSet Parameter"}, 4059 {0x13, 4060 "Association denied due to requesting station not supporting " 4061 "short preamble operation"}, 4062 {0x14, 4063 "Association denied due to requesting station not supporting " 4064 "PBCC encoding"}, 4065 {0x15, 4066 "Association denied due to requesting station not supporting " 4067 "channel agility"}, 4068 {0x19, 4069 "Association denied due to requesting station not supporting " 4070 "short slot operation"}, 4071 {0x1A, 4072 "Association denied due to requesting station not supporting " 4073 "DSSS-OFDM operation"}, 4074 {0x28, "Invalid Information Element"}, 4075 {0x29, "Group Cipher is not valid"}, 4076 {0x2A, "Pairwise Cipher is not valid"}, 4077 {0x2B, "AKMP is not valid"}, 4078 {0x2C, "Unsupported RSN IE version"}, 4079 {0x2D, "Invalid RSN IE Capabilities"}, 4080 {0x2E, "Cipher suite is rejected per security policy"}, 4081 }; 4082 4083 static const char *ipw_get_status_code(u16 status) 4084 { 4085 int i; 4086 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++) 4087 if (ipw_status_codes[i].status == (status & 0xff)) 4088 return 
ipw_status_codes[i].reason; 4089 return "Unknown status value."; 4090 } 4091 4092 static inline void average_init(struct average *avg) 4093 { 4094 memset(avg, 0, sizeof(*avg)); 4095 } 4096 4097 #define DEPTH_RSSI 8 4098 #define DEPTH_NOISE 16 4099 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth) 4100 { 4101 return ((depth-1)*prev_avg + val)/depth; 4102 } 4103 4104 static void average_add(struct average *avg, s16 val) 4105 { 4106 avg->sum -= avg->entries[avg->pos]; 4107 avg->sum += val; 4108 avg->entries[avg->pos++] = val; 4109 if (unlikely(avg->pos == AVG_ENTRIES)) { 4110 avg->init = 1; 4111 avg->pos = 0; 4112 } 4113 } 4114 4115 static s16 average_value(struct average *avg) 4116 { 4117 if (!unlikely(avg->init)) { 4118 if (avg->pos) 4119 return avg->sum / avg->pos; 4120 return 0; 4121 } 4122 4123 return avg->sum / AVG_ENTRIES; 4124 } 4125 4126 static void ipw_reset_stats(struct ipw_priv *priv) 4127 { 4128 u32 len = sizeof(u32); 4129 4130 priv->quality = 0; 4131 4132 average_init(&priv->average_missed_beacons); 4133 priv->exp_avg_rssi = -60; 4134 priv->exp_avg_noise = -85 + 0x100; 4135 4136 priv->last_rate = 0; 4137 priv->last_missed_beacons = 0; 4138 priv->last_rx_packets = 0; 4139 priv->last_tx_packets = 0; 4140 priv->last_tx_failures = 0; 4141 4142 /* Firmware managed, reset only when NIC is restarted, so we have to 4143 * normalize on the current value */ 4144 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, 4145 &priv->last_rx_err, &len); 4146 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, 4147 &priv->last_tx_failures, &len); 4148 4149 /* Driver managed, reset with each association */ 4150 priv->missed_adhoc_beacons = 0; 4151 priv->missed_beacons = 0; 4152 priv->tx_packets = 0; 4153 priv->rx_packets = 0; 4154 4155 } 4156 4157 static u32 ipw_get_max_rate(struct ipw_priv *priv) 4158 { 4159 u32 i = 0x80000000; 4160 u32 mask = priv->rates_mask; 4161 /* If currently associated in B mode, restrict the maximum 4162 * rate match to B rates */ 4163 if (priv->assoc_request.ieee_mode == IPW_B_MODE) 4164 mask &= LIBIPW_CCK_RATES_MASK; 4165 4166 /* TODO: Verify that the rate is supported by the current rates 4167 * list. 
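 * (the loop below simply picks the highest bit set in rates_mask and
 *  maps it to a bit rate in bps; when associated in B mode the mask is
 *  first restricted to the CCK rates)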
*/ 4168 4169 while (i && !(mask & i)) 4170 i >>= 1; 4171 switch (i) { 4172 case LIBIPW_CCK_RATE_1MB_MASK: 4173 return 1000000; 4174 case LIBIPW_CCK_RATE_2MB_MASK: 4175 return 2000000; 4176 case LIBIPW_CCK_RATE_5MB_MASK: 4177 return 5500000; 4178 case LIBIPW_OFDM_RATE_6MB_MASK: 4179 return 6000000; 4180 case LIBIPW_OFDM_RATE_9MB_MASK: 4181 return 9000000; 4182 case LIBIPW_CCK_RATE_11MB_MASK: 4183 return 11000000; 4184 case LIBIPW_OFDM_RATE_12MB_MASK: 4185 return 12000000; 4186 case LIBIPW_OFDM_RATE_18MB_MASK: 4187 return 18000000; 4188 case LIBIPW_OFDM_RATE_24MB_MASK: 4189 return 24000000; 4190 case LIBIPW_OFDM_RATE_36MB_MASK: 4191 return 36000000; 4192 case LIBIPW_OFDM_RATE_48MB_MASK: 4193 return 48000000; 4194 case LIBIPW_OFDM_RATE_54MB_MASK: 4195 return 54000000; 4196 } 4197 4198 if (priv->ieee->mode == IEEE_B) 4199 return 11000000; 4200 else 4201 return 54000000; 4202 } 4203 4204 static u32 ipw_get_current_rate(struct ipw_priv *priv) 4205 { 4206 u32 rate, len = sizeof(rate); 4207 int err; 4208 4209 if (!(priv->status & STATUS_ASSOCIATED)) 4210 return 0; 4211 4212 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) { 4213 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate, 4214 &len); 4215 if (err) { 4216 IPW_DEBUG_INFO("failed querying ordinals.\n"); 4217 return 0; 4218 } 4219 } else 4220 return ipw_get_max_rate(priv); 4221 4222 switch (rate) { 4223 case IPW_TX_RATE_1MB: 4224 return 1000000; 4225 case IPW_TX_RATE_2MB: 4226 return 2000000; 4227 case IPW_TX_RATE_5MB: 4228 return 5500000; 4229 case IPW_TX_RATE_6MB: 4230 return 6000000; 4231 case IPW_TX_RATE_9MB: 4232 return 9000000; 4233 case IPW_TX_RATE_11MB: 4234 return 11000000; 4235 case IPW_TX_RATE_12MB: 4236 return 12000000; 4237 case IPW_TX_RATE_18MB: 4238 return 18000000; 4239 case IPW_TX_RATE_24MB: 4240 return 24000000; 4241 case IPW_TX_RATE_36MB: 4242 return 36000000; 4243 case IPW_TX_RATE_48MB: 4244 return 48000000; 4245 case IPW_TX_RATE_54MB: 4246 return 54000000; 4247 } 4248 4249 return 0; 4250 } 4251 4252 #define IPW_STATS_INTERVAL (2 * HZ) 4253 static void ipw_gather_stats(struct ipw_priv *priv) 4254 { 4255 u32 rx_err, rx_err_delta, rx_packets_delta; 4256 u32 tx_failures, tx_failures_delta, tx_packets_delta; 4257 u32 missed_beacons_percent, missed_beacons_delta; 4258 u32 quality = 0; 4259 u32 len = sizeof(u32); 4260 s16 rssi; 4261 u32 beacon_quality, signal_quality, tx_quality, rx_quality, 4262 rate_quality; 4263 u32 max_rate; 4264 4265 if (!(priv->status & STATUS_ASSOCIATED)) { 4266 priv->quality = 0; 4267 return; 4268 } 4269 4270 /* Update the statistics */ 4271 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS, 4272 &priv->missed_beacons, &len); 4273 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons; 4274 priv->last_missed_beacons = priv->missed_beacons; 4275 if (priv->assoc_request.beacon_interval) { 4276 missed_beacons_percent = missed_beacons_delta * 4277 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) / 4278 (IPW_STATS_INTERVAL * 10); 4279 } else { 4280 missed_beacons_percent = 0; 4281 } 4282 average_add(&priv->average_missed_beacons, missed_beacons_percent); 4283 4284 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len); 4285 rx_err_delta = rx_err - priv->last_rx_err; 4286 priv->last_rx_err = rx_err; 4287 4288 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len); 4289 tx_failures_delta = tx_failures - priv->last_tx_failures; 4290 priv->last_tx_failures = tx_failures; 4291 4292 rx_packets_delta = priv->rx_packets - priv->last_rx_packets; 4293 
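	/* rx/tx packet counts are driver maintained (see ipw_reset_stats),
	 * so these deltas cover roughly one IPW_STATS_INTERVAL */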
priv->last_rx_packets = priv->rx_packets; 4294 4295 tx_packets_delta = priv->tx_packets - priv->last_tx_packets; 4296 priv->last_tx_packets = priv->tx_packets; 4297 4298 /* Calculate quality based on the following: 4299 * 4300 * Missed beacon: 100% = 0, 0% = 70% missed 4301 * Rate: 60% = 1Mbs, 100% = Max 4302 * Rx and Tx errors represent a straight % of total Rx/Tx 4303 * RSSI: 100% = > -50, 0% = < -80 4304 * Rx errors: 100% = 0, 0% = 50% missed 4305 * 4306 * The lowest computed quality is used. 4307 * 4308 */ 4309 #define BEACON_THRESHOLD 5 4310 beacon_quality = 100 - missed_beacons_percent; 4311 if (beacon_quality < BEACON_THRESHOLD) 4312 beacon_quality = 0; 4313 else 4314 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 / 4315 (100 - BEACON_THRESHOLD); 4316 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n", 4317 beacon_quality, missed_beacons_percent); 4318 4319 priv->last_rate = ipw_get_current_rate(priv); 4320 max_rate = ipw_get_max_rate(priv); 4321 rate_quality = priv->last_rate * 40 / max_rate + 60; 4322 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n", 4323 rate_quality, priv->last_rate / 1000000); 4324 4325 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta) 4326 rx_quality = 100 - (rx_err_delta * 100) / 4327 (rx_packets_delta + rx_err_delta); 4328 else 4329 rx_quality = 100; 4330 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n", 4331 rx_quality, rx_err_delta, rx_packets_delta); 4332 4333 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta) 4334 tx_quality = 100 - (tx_failures_delta * 100) / 4335 (tx_packets_delta + tx_failures_delta); 4336 else 4337 tx_quality = 100; 4338 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n", 4339 tx_quality, tx_failures_delta, tx_packets_delta); 4340 4341 rssi = priv->exp_avg_rssi; 4342 signal_quality = 4343 (100 * 4344 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * 4345 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) - 4346 (priv->ieee->perfect_rssi - rssi) * 4347 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) + 4348 62 * (priv->ieee->perfect_rssi - rssi))) / 4349 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * 4350 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi)); 4351 if (signal_quality > 100) 4352 signal_quality = 100; 4353 else if (signal_quality < 1) 4354 signal_quality = 0; 4355 4356 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n", 4357 signal_quality, rssi); 4358 4359 quality = min(rx_quality, signal_quality); 4360 quality = min(tx_quality, quality); 4361 quality = min(rate_quality, quality); 4362 quality = min(beacon_quality, quality); 4363 if (quality == beacon_quality) 4364 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n", 4365 quality); 4366 if (quality == rate_quality) 4367 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n", 4368 quality); 4369 if (quality == tx_quality) 4370 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n", 4371 quality); 4372 if (quality == rx_quality) 4373 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n", 4374 quality); 4375 if (quality == signal_quality) 4376 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n", 4377 quality); 4378 4379 priv->quality = quality; 4380 4381 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL); 4382 } 4383 4384 static void ipw_bg_gather_stats(struct work_struct *work) 4385 { 4386 struct ipw_priv *priv = 4387 container_of(work, struct ipw_priv, gather_stats.work); 4388 mutex_lock(&priv->mutex); 4389 ipw_gather_stats(priv); 4390 
mutex_unlock(&priv->mutex); 4391 } 4392 4393 /* Missed beacon behavior: 4394 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam. 4395 * roaming_threshold -> disassociate_threshold, scan and roam for better signal. 4396 * Above disassociate threshold, give up and stop scanning. 4397 * Roaming is disabled if disassociate_threshold <= roaming_threshold */ 4398 static void ipw_handle_missed_beacon(struct ipw_priv *priv, 4399 int missed_count) 4400 { 4401 priv->notif_missed_beacons = missed_count; 4402 4403 if (missed_count > priv->disassociate_threshold && 4404 priv->status & STATUS_ASSOCIATED) { 4405 /* If associated and we've hit the missed 4406 * beacon threshold, disassociate, turn 4407 * off roaming, and abort any active scans */ 4408 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 4409 IPW_DL_STATE | IPW_DL_ASSOC, 4410 "Missed beacon: %d - disassociate\n", missed_count); 4411 priv->status &= ~STATUS_ROAMING; 4412 if (priv->status & STATUS_SCANNING) { 4413 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 4414 IPW_DL_STATE, 4415 "Aborting scan with missed beacon.\n"); 4416 schedule_work(&priv->abort_scan); 4417 } 4418 4419 schedule_work(&priv->disassociate); 4420 return; 4421 } 4422 4423 if (priv->status & STATUS_ROAMING) { 4424 /* If we are currently roaming, then just 4425 * print a debug statement... */ 4426 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4427 "Missed beacon: %d - roam in progress\n", 4428 missed_count); 4429 return; 4430 } 4431 4432 if (roaming && 4433 (missed_count > priv->roaming_threshold && 4434 missed_count <= priv->disassociate_threshold)) { 4435 /* If we are not already roaming, set the ROAM 4436 * bit in the status and kick off a scan. 4437 * This can happen several times before we reach 4438 * disassociate_threshold. */ 4439 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4440 "Missed beacon: %d - initiate " 4441 "roaming\n", missed_count); 4442 if (!(priv->status & STATUS_ROAMING)) { 4443 priv->status |= STATUS_ROAMING; 4444 if (!(priv->status & STATUS_SCANNING)) 4445 schedule_delayed_work(&priv->request_scan, 0); 4446 } 4447 return; 4448 } 4449 4450 if (priv->status & STATUS_SCANNING && 4451 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) { 4452 /* Stop scan to keep fw from getting 4453 * stuck (only if we aren't roaming -- 4454 * otherwise we'll never scan more than 2 or 3 4455 * channels..) */ 4456 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE, 4457 "Aborting scan with missed beacon.\n"); 4458 schedule_work(&priv->abort_scan); 4459 } 4460 4461 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); 4462 } 4463 4464 static void ipw_scan_event(struct work_struct *work) 4465 { 4466 union iwreq_data wrqu; 4467 4468 struct ipw_priv *priv = 4469 container_of(work, struct ipw_priv, scan_event.work); 4470 4471 wrqu.data.length = 0; 4472 wrqu.data.flags = 0; 4473 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); 4474 } 4475 4476 static void handle_scan_event(struct ipw_priv *priv) 4477 { 4478 /* Only userspace-requested scan completion events go out immediately */ 4479 if (!priv->user_requested_scan) { 4480 schedule_delayed_work(&priv->scan_event, 4481 round_jiffies_relative(msecs_to_jiffies(4000))); 4482 } else { 4483 priv->user_requested_scan = 0; 4484 mod_delayed_work(system_wq, &priv->scan_event, 0); 4485 } 4486 } 4487 4488 /** 4489 * Handle host notification packet. 
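 * Dispatches on notif->subtype; each handler checks the payload size
 * against the expected structure before using it.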
4490 * Called from interrupt routine 4491 */ 4492 static void ipw_rx_notification(struct ipw_priv *priv, 4493 struct ipw_rx_notification *notif) 4494 { 4495 u16 size = le16_to_cpu(notif->size); 4496 4497 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size); 4498 4499 switch (notif->subtype) { 4500 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{ 4501 struct notif_association *assoc = ¬if->u.assoc; 4502 4503 switch (assoc->state) { 4504 case CMAS_ASSOCIATED:{ 4505 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4506 IPW_DL_ASSOC, 4507 "associated: '%*pE' %pM\n", 4508 priv->essid_len, priv->essid, 4509 priv->bssid); 4510 4511 switch (priv->ieee->iw_mode) { 4512 case IW_MODE_INFRA: 4513 memcpy(priv->ieee->bssid, 4514 priv->bssid, ETH_ALEN); 4515 break; 4516 4517 case IW_MODE_ADHOC: 4518 memcpy(priv->ieee->bssid, 4519 priv->bssid, ETH_ALEN); 4520 4521 /* clear out the station table */ 4522 priv->num_stations = 0; 4523 4524 IPW_DEBUG_ASSOC 4525 ("queueing adhoc check\n"); 4526 schedule_delayed_work( 4527 &priv->adhoc_check, 4528 le16_to_cpu(priv-> 4529 assoc_request. 4530 beacon_interval)); 4531 break; 4532 } 4533 4534 priv->status &= ~STATUS_ASSOCIATING; 4535 priv->status |= STATUS_ASSOCIATED; 4536 schedule_work(&priv->system_config); 4537 4538 #ifdef CONFIG_IPW2200_QOS 4539 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ 4540 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control)) 4541 if ((priv->status & STATUS_AUTH) && 4542 (IPW_GET_PACKET_STYPE(¬if->u.raw) 4543 == IEEE80211_STYPE_ASSOC_RESP)) { 4544 if ((sizeof 4545 (struct 4546 libipw_assoc_response) 4547 <= size) 4548 && (size <= 2314)) { 4549 struct 4550 libipw_rx_stats 4551 stats = { 4552 .len = size - 1, 4553 }; 4554 4555 IPW_DEBUG_QOS 4556 ("QoS Associate " 4557 "size %d\n", size); 4558 libipw_rx_mgt(priv-> 4559 ieee, 4560 (struct 4561 libipw_hdr_4addr 4562 *) 4563 ¬if->u.raw, &stats); 4564 } 4565 } 4566 #endif 4567 4568 schedule_work(&priv->link_up); 4569 4570 break; 4571 } 4572 4573 case CMAS_AUTHENTICATED:{ 4574 if (priv-> 4575 status & (STATUS_ASSOCIATED | 4576 STATUS_AUTH)) { 4577 struct notif_authenticate *auth 4578 = ¬if->u.auth; 4579 IPW_DEBUG(IPW_DL_NOTIF | 4580 IPW_DL_STATE | 4581 IPW_DL_ASSOC, 4582 "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n", 4583 priv->essid_len, 4584 priv->essid, 4585 priv->bssid, 4586 le16_to_cpu(auth->status), 4587 ipw_get_status_code 4588 (le16_to_cpu 4589 (auth->status))); 4590 4591 priv->status &= 4592 ~(STATUS_ASSOCIATING | 4593 STATUS_AUTH | 4594 STATUS_ASSOCIATED); 4595 4596 schedule_work(&priv->link_down); 4597 break; 4598 } 4599 4600 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4601 IPW_DL_ASSOC, 4602 "authenticated: '%*pE' %pM\n", 4603 priv->essid_len, priv->essid, 4604 priv->bssid); 4605 break; 4606 } 4607 4608 case CMAS_INIT:{ 4609 if (priv->status & STATUS_AUTH) { 4610 struct 4611 libipw_assoc_response 4612 *resp; 4613 resp = 4614 (struct 4615 libipw_assoc_response 4616 *)¬if->u.raw; 4617 IPW_DEBUG(IPW_DL_NOTIF | 4618 IPW_DL_STATE | 4619 IPW_DL_ASSOC, 4620 "association failed (0x%04X): %s\n", 4621 le16_to_cpu(resp->status), 4622 ipw_get_status_code 4623 (le16_to_cpu 4624 (resp->status))); 4625 } 4626 4627 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4628 IPW_DL_ASSOC, 4629 "disassociated: '%*pE' %pM\n", 4630 priv->essid_len, priv->essid, 4631 priv->bssid); 4632 4633 priv->status &= 4634 ~(STATUS_DISASSOCIATING | 4635 STATUS_ASSOCIATING | 4636 STATUS_ASSOCIATED | STATUS_AUTH); 4637 if (priv->assoc_network 4638 && (priv->assoc_network-> 4639 capability & 4640 WLAN_CAPABILITY_IBSS)) 4641 
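				/* IBSS only: drop our cached copy of the
				 * ad-hoc network we were part of */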
ipw_remove_current_network 4642 (priv); 4643 4644 schedule_work(&priv->link_down); 4645 4646 break; 4647 } 4648 4649 case CMAS_RX_ASSOC_RESP: 4650 break; 4651 4652 default: 4653 IPW_ERROR("assoc: unknown (%d)\n", 4654 assoc->state); 4655 break; 4656 } 4657 4658 break; 4659 } 4660 4661 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{ 4662 struct notif_authenticate *auth = ¬if->u.auth; 4663 switch (auth->state) { 4664 case CMAS_AUTHENTICATED: 4665 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4666 "authenticated: '%*pE' %pM\n", 4667 priv->essid_len, priv->essid, 4668 priv->bssid); 4669 priv->status |= STATUS_AUTH; 4670 break; 4671 4672 case CMAS_INIT: 4673 if (priv->status & STATUS_AUTH) { 4674 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4675 IPW_DL_ASSOC, 4676 "authentication failed (0x%04X): %s\n", 4677 le16_to_cpu(auth->status), 4678 ipw_get_status_code(le16_to_cpu 4679 (auth-> 4680 status))); 4681 } 4682 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4683 IPW_DL_ASSOC, 4684 "deauthenticated: '%*pE' %pM\n", 4685 priv->essid_len, priv->essid, 4686 priv->bssid); 4687 4688 priv->status &= ~(STATUS_ASSOCIATING | 4689 STATUS_AUTH | 4690 STATUS_ASSOCIATED); 4691 4692 schedule_work(&priv->link_down); 4693 break; 4694 4695 case CMAS_TX_AUTH_SEQ_1: 4696 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4697 IPW_DL_ASSOC, "AUTH_SEQ_1\n"); 4698 break; 4699 case CMAS_RX_AUTH_SEQ_2: 4700 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4701 IPW_DL_ASSOC, "AUTH_SEQ_2\n"); 4702 break; 4703 case CMAS_AUTH_SEQ_1_PASS: 4704 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4705 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n"); 4706 break; 4707 case CMAS_AUTH_SEQ_1_FAIL: 4708 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4709 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n"); 4710 break; 4711 case CMAS_TX_AUTH_SEQ_3: 4712 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4713 IPW_DL_ASSOC, "AUTH_SEQ_3\n"); 4714 break; 4715 case CMAS_RX_AUTH_SEQ_4: 4716 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4717 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n"); 4718 break; 4719 case CMAS_AUTH_SEQ_2_PASS: 4720 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4721 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n"); 4722 break; 4723 case CMAS_AUTH_SEQ_2_FAIL: 4724 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4725 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n"); 4726 break; 4727 case CMAS_TX_ASSOC: 4728 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4729 IPW_DL_ASSOC, "TX_ASSOC\n"); 4730 break; 4731 case CMAS_RX_ASSOC_RESP: 4732 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4733 IPW_DL_ASSOC, "RX_ASSOC_RESP\n"); 4734 4735 break; 4736 case CMAS_ASSOCIATED: 4737 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4738 IPW_DL_ASSOC, "ASSOCIATED\n"); 4739 break; 4740 default: 4741 IPW_DEBUG_NOTIF("auth: failure - %d\n", 4742 auth->state); 4743 break; 4744 } 4745 break; 4746 } 4747 4748 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{ 4749 struct notif_channel_result *x = 4750 ¬if->u.channel_result; 4751 4752 if (size == sizeof(*x)) { 4753 IPW_DEBUG_SCAN("Scan result for channel %d\n", 4754 x->channel_num); 4755 } else { 4756 IPW_DEBUG_SCAN("Scan result of wrong size %d " 4757 "(should be %zd)\n", 4758 size, sizeof(*x)); 4759 } 4760 break; 4761 } 4762 4763 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{ 4764 struct notif_scan_complete *x = ¬if->u.scan_complete; 4765 if (size == sizeof(*x)) { 4766 IPW_DEBUG_SCAN 4767 ("Scan completed: type %d, %d channels, " 4768 "%d status\n", x->scan_type, 4769 x->num_channels, x->status); 4770 } else { 4771 IPW_ERROR("Scan completed of wrong size %d " 4772 "(should be %zd)\n", 4773 size, sizeof(*x)); 4774 } 4775 4776 priv->status &= 4777 ~(STATUS_SCANNING | 
STATUS_SCAN_ABORTING); 4778 4779 wake_up_interruptible(&priv->wait_state); 4780 cancel_delayed_work(&priv->scan_check); 4781 4782 if (priv->status & STATUS_EXIT_PENDING) 4783 break; 4784 4785 priv->ieee->scans++; 4786 4787 #ifdef CONFIG_IPW2200_MONITOR 4788 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 4789 priv->status |= STATUS_SCAN_FORCED; 4790 schedule_delayed_work(&priv->request_scan, 0); 4791 break; 4792 } 4793 priv->status &= ~STATUS_SCAN_FORCED; 4794 #endif /* CONFIG_IPW2200_MONITOR */ 4795 4796 /* Do queued direct scans first */ 4797 if (priv->status & STATUS_DIRECT_SCAN_PENDING) 4798 schedule_delayed_work(&priv->request_direct_scan, 0); 4799 4800 if (!(priv->status & (STATUS_ASSOCIATED | 4801 STATUS_ASSOCIATING | 4802 STATUS_ROAMING | 4803 STATUS_DISASSOCIATING))) 4804 schedule_work(&priv->associate); 4805 else if (priv->status & STATUS_ROAMING) { 4806 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) 4807 /* If a scan completed and we are in roam mode, then 4808 * the scan that completed was the one requested as a 4809 * result of entering roam... so, schedule the 4810 * roam work */ 4811 schedule_work(&priv->roam); 4812 else 4813 /* Don't schedule if we aborted the scan */ 4814 priv->status &= ~STATUS_ROAMING; 4815 } else if (priv->status & STATUS_SCAN_PENDING) 4816 schedule_delayed_work(&priv->request_scan, 0); 4817 else if (priv->config & CFG_BACKGROUND_SCAN 4818 && priv->status & STATUS_ASSOCIATED) 4819 schedule_delayed_work(&priv->request_scan, 4820 round_jiffies_relative(HZ)); 4821 4822 /* Send an empty event to user space. 4823 * We don't send the received data on the event because 4824 * it would require us to do complex transcoding, and 4825 * we want to minimise the work done in the irq handler 4826 * Use a request to extract the data. 4827 * Also, we generate this even for any scan, regardless 4828 * on how the scan was initiated. User space can just 4829 * sync on periodic scan to get fresh data... 
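 * (handle_scan_event() sends user-requested scan results right away and
 *  delays driver-initiated ones by roughly four seconds)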
4830 * Jean II */ 4831 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) 4832 handle_scan_event(priv); 4833 break; 4834 } 4835 4836 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{ 4837 struct notif_frag_length *x = ¬if->u.frag_len; 4838 4839 if (size == sizeof(*x)) 4840 IPW_ERROR("Frag length: %d\n", 4841 le16_to_cpu(x->frag_length)); 4842 else 4843 IPW_ERROR("Frag length of wrong size %d " 4844 "(should be %zd)\n", 4845 size, sizeof(*x)); 4846 break; 4847 } 4848 4849 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{ 4850 struct notif_link_deterioration *x = 4851 ¬if->u.link_deterioration; 4852 4853 if (size == sizeof(*x)) { 4854 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4855 "link deterioration: type %d, cnt %d\n", 4856 x->silence_notification_type, 4857 x->silence_count); 4858 memcpy(&priv->last_link_deterioration, x, 4859 sizeof(*x)); 4860 } else { 4861 IPW_ERROR("Link Deterioration of wrong size %d " 4862 "(should be %zd)\n", 4863 size, sizeof(*x)); 4864 } 4865 break; 4866 } 4867 4868 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{ 4869 IPW_ERROR("Dino config\n"); 4870 if (priv->hcmd 4871 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG) 4872 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n"); 4873 4874 break; 4875 } 4876 4877 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{ 4878 struct notif_beacon_state *x = ¬if->u.beacon_state; 4879 if (size != sizeof(*x)) { 4880 IPW_ERROR 4881 ("Beacon state of wrong size %d (should " 4882 "be %zd)\n", size, sizeof(*x)); 4883 break; 4884 } 4885 4886 if (le32_to_cpu(x->state) == 4887 HOST_NOTIFICATION_STATUS_BEACON_MISSING) 4888 ipw_handle_missed_beacon(priv, 4889 le32_to_cpu(x-> 4890 number)); 4891 4892 break; 4893 } 4894 4895 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{ 4896 struct notif_tgi_tx_key *x = ¬if->u.tgi_tx_key; 4897 if (size == sizeof(*x)) { 4898 IPW_ERROR("TGi Tx Key: state 0x%02x sec type " 4899 "0x%02x station %d\n", 4900 x->key_state, x->security_type, 4901 x->station_index); 4902 break; 4903 } 4904 4905 IPW_ERROR 4906 ("TGi Tx Key of wrong size %d (should be %zd)\n", 4907 size, sizeof(*x)); 4908 break; 4909 } 4910 4911 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{ 4912 struct notif_calibration *x = ¬if->u.calibration; 4913 4914 if (size == sizeof(*x)) { 4915 memcpy(&priv->calib, x, sizeof(*x)); 4916 IPW_DEBUG_INFO("TODO: Calibration\n"); 4917 break; 4918 } 4919 4920 IPW_ERROR 4921 ("Calibration of wrong size %d (should be %zd)\n", 4922 size, sizeof(*x)); 4923 break; 4924 } 4925 4926 case HOST_NOTIFICATION_NOISE_STATS:{ 4927 if (size == sizeof(u32)) { 4928 priv->exp_avg_noise = 4929 exponential_average(priv->exp_avg_noise, 4930 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff), 4931 DEPTH_NOISE); 4932 break; 4933 } 4934 4935 IPW_ERROR 4936 ("Noise stat is wrong size %d (should be %zd)\n", 4937 size, sizeof(u32)); 4938 break; 4939 } 4940 4941 default: 4942 IPW_DEBUG_NOTIF("Unknown notification: " 4943 "subtype=%d,flags=0x%2x,size=%d\n", 4944 notif->subtype, notif->flags, size); 4945 } 4946 } 4947 4948 /** 4949 * Destroys all DMA structures and initialise them again 4950 * 4951 * @param priv 4952 * @return error code 4953 */ 4954 static int ipw_queue_reset(struct ipw_priv *priv) 4955 { 4956 int rc = 0; 4957 /** @todo customize queue sizes */ 4958 int nTx = 64, nTxCmd = 8; 4959 ipw_tx_queue_free(priv); 4960 /* Tx CMD queue */ 4961 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd, 4962 IPW_TX_CMD_QUEUE_READ_INDEX, 4963 IPW_TX_CMD_QUEUE_WRITE_INDEX, 4964 IPW_TX_CMD_QUEUE_BD_BASE, 4965 IPW_TX_CMD_QUEUE_BD_SIZE); 4966 if (rc) { 4967 IPW_ERROR("Tx Cmd queue init 
failed\n"); 4968 goto error; 4969 } 4970 /* Tx queue(s) */ 4971 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx, 4972 IPW_TX_QUEUE_0_READ_INDEX, 4973 IPW_TX_QUEUE_0_WRITE_INDEX, 4974 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE); 4975 if (rc) { 4976 IPW_ERROR("Tx 0 queue init failed\n"); 4977 goto error; 4978 } 4979 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx, 4980 IPW_TX_QUEUE_1_READ_INDEX, 4981 IPW_TX_QUEUE_1_WRITE_INDEX, 4982 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE); 4983 if (rc) { 4984 IPW_ERROR("Tx 1 queue init failed\n"); 4985 goto error; 4986 } 4987 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx, 4988 IPW_TX_QUEUE_2_READ_INDEX, 4989 IPW_TX_QUEUE_2_WRITE_INDEX, 4990 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE); 4991 if (rc) { 4992 IPW_ERROR("Tx 2 queue init failed\n"); 4993 goto error; 4994 } 4995 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx, 4996 IPW_TX_QUEUE_3_READ_INDEX, 4997 IPW_TX_QUEUE_3_WRITE_INDEX, 4998 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE); 4999 if (rc) { 5000 IPW_ERROR("Tx 3 queue init failed\n"); 5001 goto error; 5002 } 5003 /* statistics */ 5004 priv->rx_bufs_min = 0; 5005 priv->rx_pend_max = 0; 5006 return rc; 5007 5008 error: 5009 ipw_tx_queue_free(priv); 5010 return rc; 5011 } 5012 5013 /** 5014 * Reclaim Tx queue entries no more used by NIC. 5015 * 5016 * When FW advances 'R' index, all entries between old and 5017 * new 'R' index need to be reclaimed. As result, some free space 5018 * forms. If there is enough free space (> low mark), wake Tx queue. 5019 * 5020 * @note Need to protect against garbage in 'R' index 5021 * @param priv 5022 * @param txq 5023 * @param qindex 5024 * @return Number of used entries remains in the queue 5025 */ 5026 static int ipw_queue_tx_reclaim(struct ipw_priv *priv, 5027 struct clx2_tx_queue *txq, int qindex) 5028 { 5029 u32 hw_tail; 5030 int used; 5031 struct clx2_queue *q = &txq->q; 5032 5033 hw_tail = ipw_read32(priv, q->reg_r); 5034 if (hw_tail >= q->n_bd) { 5035 IPW_ERROR 5036 ("Read index for DMA queue (%d) is out of range [0-%d)\n", 5037 hw_tail, q->n_bd); 5038 goto done; 5039 } 5040 for (; q->last_used != hw_tail; 5041 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { 5042 ipw_queue_tx_free_tfd(priv, txq); 5043 priv->tx_packets++; 5044 } 5045 done: 5046 if ((ipw_tx_queue_space(q) > q->low_mark) && 5047 (qindex >= 0)) 5048 netif_wake_queue(priv->net_dev); 5049 used = q->first_empty - q->last_used; 5050 if (used < 0) 5051 used += q->n_bd; 5052 5053 return used; 5054 } 5055 5056 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, 5057 int len, int sync) 5058 { 5059 struct clx2_tx_queue *txq = &priv->txq_cmd; 5060 struct clx2_queue *q = &txq->q; 5061 struct tfd_frame *tfd; 5062 5063 if (ipw_tx_queue_space(q) < (sync ? 
1 : 2)) { 5064 IPW_ERROR("No space for Tx\n"); 5065 return -EBUSY; 5066 } 5067 5068 tfd = &txq->bd[q->first_empty]; 5069 txq->txb[q->first_empty] = NULL; 5070 5071 memset(tfd, 0, sizeof(*tfd)); 5072 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE; 5073 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; 5074 priv->hcmd_seq++; 5075 tfd->u.cmd.index = hcmd; 5076 tfd->u.cmd.length = len; 5077 memcpy(tfd->u.cmd.payload, buf, len); 5078 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); 5079 ipw_write32(priv, q->reg_w, q->first_empty); 5080 _ipw_read32(priv, 0x90); 5081 5082 return 0; 5083 } 5084 5085 /* 5086 * Rx theory of operation 5087 * 5088 * The host allocates 32 DMA target addresses and passes the host address 5089 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is 5090 * 0 to 31 5091 * 5092 * Rx Queue Indexes 5093 * The host/firmware share two index registers for managing the Rx buffers. 5094 * 5095 * The READ index maps to the first position that the firmware may be writing 5096 * to -- the driver can read up to (but not including) this position and get 5097 * good data. 5098 * The READ index is managed by the firmware once the card is enabled. 5099 * 5100 * The WRITE index maps to the last position the driver has read from -- the 5101 * position preceding WRITE is the last slot the firmware can place a packet. 5102 * 5103 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 5104 * WRITE = READ. 5105 * 5106 * During initialization the host sets up the READ queue position to the first 5107 * INDEX position, and WRITE to the last (READ - 1 wrapped) 5108 * 5109 * When the firmware places a packet in a buffer it will advance the READ index 5110 * and fire the RX interrupt. The driver can then query the READ index and 5111 * process as many packets as possible, moving the WRITE index forward as it 5112 * resets the Rx queue buffers with new memory. 5113 * 5114 * The management in the driver is as follows: 5115 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When 5116 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 5117 * to replensish the ipw->rxq->rx_free. 5118 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the 5119 * ipw->rxq is replenished and the READ INDEX is updated (updating the 5120 * 'processed' and 'read' driver indexes as well) 5121 * + A received packet is processed and handed to the kernel network stack, 5122 * detached from the ipw->rxq. The driver 'processed' index is updated. 5123 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free 5124 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ 5125 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there 5126 * were enough free buffers and RX_STALLED is set it is cleared. 5127 * 5128 * 5129 * Driver sequence: 5130 * 5131 * ipw_rx_queue_alloc() Allocates rx_free 5132 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls 5133 * ipw_rx_queue_restock 5134 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx 5135 * queue, updates firmware pointers, and updates 5136 * the WRITE index. If insufficient rx_free buffers 5137 * are available, schedules ipw_rx_queue_replenish 5138 * 5139 * -- enable interrupts -- 5140 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the 5141 * READ INDEX, detaching the SKB from the pool. 5142 * Moves the packet buffer from queue to rx_used. 
5143 * Calls ipw_rx_queue_restock to refill any empty 5144 * slots. 5145 * ... 5146 * 5147 */ 5148 5149 /* 5150 * If there are slots in the RX queue that need to be restocked, 5151 * and we have free pre-allocated buffers, fill the ranks as much 5152 * as we can pulling from rx_free. 5153 * 5154 * This moves the 'write' index forward to catch up with 'processed', and 5155 * also updates the memory address in the firmware to reference the new 5156 * target buffer. 5157 */ 5158 static void ipw_rx_queue_restock(struct ipw_priv *priv) 5159 { 5160 struct ipw_rx_queue *rxq = priv->rxq; 5161 struct list_head *element; 5162 struct ipw_rx_mem_buffer *rxb; 5163 unsigned long flags; 5164 int write; 5165 5166 spin_lock_irqsave(&rxq->lock, flags); 5167 write = rxq->write; 5168 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 5169 element = rxq->rx_free.next; 5170 rxb = list_entry(element, struct ipw_rx_mem_buffer, list); 5171 list_del(element); 5172 5173 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE, 5174 rxb->dma_addr); 5175 rxq->queue[rxq->write] = rxb; 5176 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE; 5177 rxq->free_count--; 5178 } 5179 spin_unlock_irqrestore(&rxq->lock, flags); 5180 5181 /* If the pre-allocated buffer pool is dropping low, schedule to 5182 * refill it */ 5183 if (rxq->free_count <= RX_LOW_WATERMARK) 5184 schedule_work(&priv->rx_replenish); 5185 5186 /* If we've added more space for the firmware to place data, tell it */ 5187 if (write != rxq->write) 5188 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write); 5189 } 5190 5191 /* 5192 * Move all used packet from rx_used to rx_free, allocating a new SKB for each. 5193 * Also restock the Rx queue via ipw_rx_queue_restock. 5194 * 5195 * This is called as a scheduled work item (except for during initialization) 5196 */ 5197 static void ipw_rx_queue_replenish(void *data) 5198 { 5199 struct ipw_priv *priv = data; 5200 struct ipw_rx_queue *rxq = priv->rxq; 5201 struct list_head *element; 5202 struct ipw_rx_mem_buffer *rxb; 5203 unsigned long flags; 5204 5205 spin_lock_irqsave(&rxq->lock, flags); 5206 while (!list_empty(&rxq->rx_used)) { 5207 element = rxq->rx_used.next; 5208 rxb = list_entry(element, struct ipw_rx_mem_buffer, list); 5209 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC); 5210 if (!rxb->skb) { 5211 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n", 5212 priv->net_dev->name); 5213 /* We don't reschedule replenish work here -- we will 5214 * call the restock method and if it still needs 5215 * more buffers it will schedule replenish */ 5216 break; 5217 } 5218 list_del(element); 5219 5220 rxb->dma_addr = 5221 pci_map_single(priv->pci_dev, rxb->skb->data, 5222 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 5223 5224 list_add_tail(&rxb->list, &rxq->rx_free); 5225 rxq->free_count++; 5226 } 5227 spin_unlock_irqrestore(&rxq->lock, flags); 5228 5229 ipw_rx_queue_restock(priv); 5230 } 5231 5232 static void ipw_bg_rx_queue_replenish(struct work_struct *work) 5233 { 5234 struct ipw_priv *priv = 5235 container_of(work, struct ipw_priv, rx_replenish); 5236 mutex_lock(&priv->mutex); 5237 ipw_rx_queue_replenish(priv); 5238 mutex_unlock(&priv->mutex); 5239 } 5240 5241 /* Assumes that the skb field of the buffers in 'pool' is kept accurate. 
5242 * If an SKB has been detached, the POOL needs to have its SKB set to NULL 5243 * This free routine walks the list of POOL entries and if SKB is set to 5244 * non NULL it is unmapped and freed 5245 */ 5246 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq) 5247 { 5248 int i; 5249 5250 if (!rxq) 5251 return; 5252 5253 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 5254 if (rxq->pool[i].skb != NULL) { 5255 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 5256 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 5257 dev_kfree_skb(rxq->pool[i].skb); 5258 } 5259 } 5260 5261 kfree(rxq); 5262 } 5263 5264 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv) 5265 { 5266 struct ipw_rx_queue *rxq; 5267 int i; 5268 5269 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL); 5270 if (unlikely(!rxq)) { 5271 IPW_ERROR("memory allocation failed\n"); 5272 return NULL; 5273 } 5274 spin_lock_init(&rxq->lock); 5275 INIT_LIST_HEAD(&rxq->rx_free); 5276 INIT_LIST_HEAD(&rxq->rx_used); 5277 5278 /* Fill the rx_used queue with _all_ of the Rx buffers */ 5279 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 5280 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 5281 5282 /* Set us so that we have processed and used all buffers, but have 5283 * not restocked the Rx queue with fresh buffers */ 5284 rxq->read = rxq->write = 0; 5285 rxq->free_count = 0; 5286 5287 return rxq; 5288 } 5289 5290 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate) 5291 { 5292 rate &= ~LIBIPW_BASIC_RATE_MASK; 5293 if (ieee_mode == IEEE_A) { 5294 switch (rate) { 5295 case LIBIPW_OFDM_RATE_6MB: 5296 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 5297 1 : 0; 5298 case LIBIPW_OFDM_RATE_9MB: 5299 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 5300 1 : 0; 5301 case LIBIPW_OFDM_RATE_12MB: 5302 return priv-> 5303 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0; 5304 case LIBIPW_OFDM_RATE_18MB: 5305 return priv-> 5306 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0; 5307 case LIBIPW_OFDM_RATE_24MB: 5308 return priv-> 5309 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0; 5310 case LIBIPW_OFDM_RATE_36MB: 5311 return priv-> 5312 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0; 5313 case LIBIPW_OFDM_RATE_48MB: 5314 return priv-> 5315 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0; 5316 case LIBIPW_OFDM_RATE_54MB: 5317 return priv-> 5318 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0; 5319 default: 5320 return 0; 5321 } 5322 } 5323 5324 /* B and G mixed */ 5325 switch (rate) { 5326 case LIBIPW_CCK_RATE_1MB: 5327 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0; 5328 case LIBIPW_CCK_RATE_2MB: 5329 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0; 5330 case LIBIPW_CCK_RATE_5MB: 5331 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0; 5332 case LIBIPW_CCK_RATE_11MB: 5333 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0; 5334 } 5335 5336 /* If we are limited to B modulations, bail at this point */ 5337 if (ieee_mode == IEEE_B) 5338 return 0; 5339 5340 /* G */ 5341 switch (rate) { 5342 case LIBIPW_OFDM_RATE_6MB: 5343 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0; 5344 case LIBIPW_OFDM_RATE_9MB: 5345 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0; 5346 case LIBIPW_OFDM_RATE_12MB: 5347 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0; 5348 case LIBIPW_OFDM_RATE_18MB: 5349 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 
1 : 0; 5350 case LIBIPW_OFDM_RATE_24MB: 5351 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0; 5352 case LIBIPW_OFDM_RATE_36MB: 5353 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0; 5354 case LIBIPW_OFDM_RATE_48MB: 5355 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0; 5356 case LIBIPW_OFDM_RATE_54MB: 5357 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0; 5358 } 5359 5360 return 0; 5361 } 5362 5363 static int ipw_compatible_rates(struct ipw_priv *priv, 5364 const struct libipw_network *network, 5365 struct ipw_supported_rates *rates) 5366 { 5367 int num_rates, i; 5368 5369 memset(rates, 0, sizeof(*rates)); 5370 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES); 5371 rates->num_rates = 0; 5372 for (i = 0; i < num_rates; i++) { 5373 if (!ipw_is_rate_in_mask(priv, network->mode, 5374 network->rates[i])) { 5375 5376 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) { 5377 IPW_DEBUG_SCAN("Adding masked mandatory " 5378 "rate %02X\n", 5379 network->rates[i]); 5380 rates->supported_rates[rates->num_rates++] = 5381 network->rates[i]; 5382 continue; 5383 } 5384 5385 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", 5386 network->rates[i], priv->rates_mask); 5387 continue; 5388 } 5389 5390 rates->supported_rates[rates->num_rates++] = network->rates[i]; 5391 } 5392 5393 num_rates = min(network->rates_ex_len, 5394 (u8) (IPW_MAX_RATES - num_rates)); 5395 for (i = 0; i < num_rates; i++) { 5396 if (!ipw_is_rate_in_mask(priv, network->mode, 5397 network->rates_ex[i])) { 5398 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) { 5399 IPW_DEBUG_SCAN("Adding masked mandatory " 5400 "rate %02X\n", 5401 network->rates_ex[i]); 5402 rates->supported_rates[rates->num_rates++] = 5403 network->rates[i]; 5404 continue; 5405 } 5406 5407 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", 5408 network->rates_ex[i], priv->rates_mask); 5409 continue; 5410 } 5411 5412 rates->supported_rates[rates->num_rates++] = 5413 network->rates_ex[i]; 5414 } 5415 5416 return 1; 5417 } 5418 5419 static void ipw_copy_rates(struct ipw_supported_rates *dest, 5420 const struct ipw_supported_rates *src) 5421 { 5422 u8 i; 5423 for (i = 0; i < src->num_rates; i++) 5424 dest->supported_rates[i] = src->supported_rates[i]; 5425 dest->num_rates = src->num_rates; 5426 } 5427 5428 /* TODO: Look at sniffed packets in the air to determine if the basic rate 5429 * mask should ever be used -- right now all callers to add the scan rates are 5430 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */ 5431 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates, 5432 u8 modulation, u32 rate_mask) 5433 { 5434 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ? 5435 LIBIPW_BASIC_RATE_MASK : 0; 5436 5437 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK) 5438 rates->supported_rates[rates->num_rates++] = 5439 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB; 5440 5441 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK) 5442 rates->supported_rates[rates->num_rates++] = 5443 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB; 5444 5445 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK) 5446 rates->supported_rates[rates->num_rates++] = basic_mask | 5447 LIBIPW_CCK_RATE_5MB; 5448 5449 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK) 5450 rates->supported_rates[rates->num_rates++] = basic_mask | 5451 LIBIPW_CCK_RATE_11MB; 5452 } 5453 5454 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates, 5455 u8 modulation, u32 rate_mask) 5456 { 5457 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ? 
5458 LIBIPW_BASIC_RATE_MASK : 0; 5459 5460 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK) 5461 rates->supported_rates[rates->num_rates++] = basic_mask | 5462 LIBIPW_OFDM_RATE_6MB; 5463 5464 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK) 5465 rates->supported_rates[rates->num_rates++] = 5466 LIBIPW_OFDM_RATE_9MB; 5467 5468 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK) 5469 rates->supported_rates[rates->num_rates++] = basic_mask | 5470 LIBIPW_OFDM_RATE_12MB; 5471 5472 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK) 5473 rates->supported_rates[rates->num_rates++] = 5474 LIBIPW_OFDM_RATE_18MB; 5475 5476 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK) 5477 rates->supported_rates[rates->num_rates++] = basic_mask | 5478 LIBIPW_OFDM_RATE_24MB; 5479 5480 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK) 5481 rates->supported_rates[rates->num_rates++] = 5482 LIBIPW_OFDM_RATE_36MB; 5483 5484 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK) 5485 rates->supported_rates[rates->num_rates++] = 5486 LIBIPW_OFDM_RATE_48MB; 5487 5488 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK) 5489 rates->supported_rates[rates->num_rates++] = 5490 LIBIPW_OFDM_RATE_54MB; 5491 } 5492 5493 struct ipw_network_match { 5494 struct libipw_network *network; 5495 struct ipw_supported_rates rates; 5496 }; 5497 5498 static int ipw_find_adhoc_network(struct ipw_priv *priv, 5499 struct ipw_network_match *match, 5500 struct libipw_network *network, 5501 int roaming) 5502 { 5503 struct ipw_supported_rates rates; 5504 5505 /* Verify that this network's capability is compatible with the 5506 * current mode (AdHoc or Infrastructure) */ 5507 if ((priv->ieee->iw_mode == IW_MODE_ADHOC && 5508 !(network->capability & WLAN_CAPABILITY_IBSS))) { 5509 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n", 5510 network->ssid_len, network->ssid, 5511 network->bssid); 5512 return 0; 5513 } 5514 5515 if (unlikely(roaming)) { 5516 /* If we are roaming, then ensure check if this is a valid 5517 * network to try and roam to */ 5518 if ((network->ssid_len != match->network->ssid_len) || 5519 memcmp(network->ssid, match->network->ssid, 5520 network->ssid_len)) { 5521 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n", 5522 network->ssid_len, network->ssid, 5523 network->bssid); 5524 return 0; 5525 } 5526 } else { 5527 /* If an ESSID has been configured then compare the broadcast 5528 * ESSID to ours */ 5529 if ((priv->config & CFG_STATIC_ESSID) && 5530 ((network->ssid_len != priv->essid_len) || 5531 memcmp(network->ssid, priv->essid, 5532 min(network->ssid_len, priv->essid_len)))) { 5533 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n", 5534 network->ssid_len, network->ssid, 5535 network->bssid, priv->essid_len, 5536 priv->essid); 5537 return 0; 5538 } 5539 } 5540 5541 /* If the old network rate is better than this one, don't bother 5542 * testing everything else. */ 5543 5544 if (network->time_stamp[0] < match->network->time_stamp[0]) { 5545 IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n", 5546 match->network->ssid_len, match->network->ssid); 5547 return 0; 5548 } else if (network->time_stamp[1] < match->network->time_stamp[1]) { 5549 IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n", 5550 match->network->ssid_len, match->network->ssid); 5551 return 0; 5552 } 5553 5554 /* Now go through and see if the requested network is valid... 
*/ 5555 if (priv->ieee->scan_age != 0 && 5556 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5557 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n", 5558 network->ssid_len, network->ssid, 5559 network->bssid, 5560 jiffies_to_msecs(jiffies - 5561 network->last_scanned)); 5562 return 0; 5563 } 5564 5565 if ((priv->config & CFG_STATIC_CHANNEL) && 5566 (network->channel != priv->channel)) { 5567 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n", 5568 network->ssid_len, network->ssid, 5569 network->bssid, 5570 network->channel, priv->channel); 5571 return 0; 5572 } 5573 5574 /* Verify privacy compatibility */ 5575 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != 5576 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { 5577 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n", 5578 network->ssid_len, network->ssid, 5579 network->bssid, 5580 priv-> 5581 capability & CAP_PRIVACY_ON ? "on" : "off", 5582 network-> 5583 capability & WLAN_CAPABILITY_PRIVACY ? "on" : 5584 "off"); 5585 return 0; 5586 } 5587 5588 if (ether_addr_equal(network->bssid, priv->bssid)) { 5589 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n", 5590 network->ssid_len, network->ssid, 5591 network->bssid, priv->bssid); 5592 return 0; 5593 } 5594 5595 /* Filter out any incompatible freq / mode combinations */ 5596 if (!libipw_is_valid_mode(priv->ieee, network->mode)) { 5597 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n", 5598 network->ssid_len, network->ssid, 5599 network->bssid); 5600 return 0; 5601 } 5602 5603 /* Ensure that the rates supported by the driver are compatible with 5604 * this AP, including verification of basic rates (mandatory) */ 5605 if (!ipw_compatible_rates(priv, network, &rates)) { 5606 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n", 5607 network->ssid_len, network->ssid, 5608 network->bssid); 5609 return 0; 5610 } 5611 5612 if (rates.num_rates == 0) { 5613 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n", 5614 network->ssid_len, network->ssid, 5615 network->bssid); 5616 return 0; 5617 } 5618 5619 /* TODO: Perform any further minimal comparititive tests. We do not 5620 * want to put too much policy logic here; intelligent scan selection 5621 * should occur within a generic IEEE 802.11 user space tool. 
*/ 5622 5623 /* Set up 'new' AP to this network */ 5624 ipw_copy_rates(&match->rates, &rates); 5625 match->network = network; 5626 IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n", 5627 network->ssid_len, network->ssid, network->bssid); 5628 5629 return 1; 5630 } 5631 5632 static void ipw_merge_adhoc_network(struct work_struct *work) 5633 { 5634 struct ipw_priv *priv = 5635 container_of(work, struct ipw_priv, merge_networks); 5636 struct libipw_network *network = NULL; 5637 struct ipw_network_match match = { 5638 .network = priv->assoc_network 5639 }; 5640 5641 if ((priv->status & STATUS_ASSOCIATED) && 5642 (priv->ieee->iw_mode == IW_MODE_ADHOC)) { 5643 /* First pass through ROAM process -- look for a better 5644 * network */ 5645 unsigned long flags; 5646 5647 spin_lock_irqsave(&priv->ieee->lock, flags); 5648 list_for_each_entry(network, &priv->ieee->network_list, list) { 5649 if (network != priv->assoc_network) 5650 ipw_find_adhoc_network(priv, &match, network, 5651 1); 5652 } 5653 spin_unlock_irqrestore(&priv->ieee->lock, flags); 5654 5655 if (match.network == priv->assoc_network) { 5656 IPW_DEBUG_MERGE("No better ADHOC in this network to " 5657 "merge to.\n"); 5658 return; 5659 } 5660 5661 mutex_lock(&priv->mutex); 5662 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) { 5663 IPW_DEBUG_MERGE("remove network %*pE\n", 5664 priv->essid_len, priv->essid); 5665 ipw_remove_current_network(priv); 5666 } 5667 5668 ipw_disassociate(priv); 5669 priv->assoc_network = match.network; 5670 mutex_unlock(&priv->mutex); 5671 return; 5672 } 5673 } 5674 5675 static int ipw_best_network(struct ipw_priv *priv, 5676 struct ipw_network_match *match, 5677 struct libipw_network *network, int roaming) 5678 { 5679 struct ipw_supported_rates rates; 5680 5681 /* Verify that this network's capability is compatible with the 5682 * current mode (AdHoc or Infrastructure) */ 5683 if ((priv->ieee->iw_mode == IW_MODE_INFRA && 5684 !(network->capability & WLAN_CAPABILITY_ESS)) || 5685 (priv->ieee->iw_mode == IW_MODE_ADHOC && 5686 !(network->capability & WLAN_CAPABILITY_IBSS))) { 5687 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n", 5688 network->ssid_len, network->ssid, 5689 network->bssid); 5690 return 0; 5691 } 5692 5693 if (unlikely(roaming)) { 5694 /* If we are roaming, then ensure check if this is a valid 5695 * network to try and roam to */ 5696 if ((network->ssid_len != match->network->ssid_len) || 5697 memcmp(network->ssid, match->network->ssid, 5698 network->ssid_len)) { 5699 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n", 5700 network->ssid_len, network->ssid, 5701 network->bssid); 5702 return 0; 5703 } 5704 } else { 5705 /* If an ESSID has been configured then compare the broadcast 5706 * ESSID to ours */ 5707 if ((priv->config & CFG_STATIC_ESSID) && 5708 ((network->ssid_len != priv->essid_len) || 5709 memcmp(network->ssid, priv->essid, 5710 min(network->ssid_len, priv->essid_len)))) { 5711 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n", 5712 network->ssid_len, network->ssid, 5713 network->bssid, priv->essid_len, 5714 priv->essid); 5715 return 0; 5716 } 5717 } 5718 5719 /* If the old network rate is better than this one, don't bother 5720 * testing everything else. 
*/ 5721 if (match->network && match->network->stats.rssi > network->stats.rssi) { 5722 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n", 5723 network->ssid_len, network->ssid, 5724 network->bssid, match->network->ssid_len, 5725 match->network->ssid, match->network->bssid); 5726 return 0; 5727 } 5728 5729 /* If this network has already had an association attempt within the 5730 * last 3 seconds, do not try and associate again... */ 5731 if (network->last_associate && 5732 time_after(network->last_associate + (HZ * 3UL), jiffies)) { 5733 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n", 5734 network->ssid_len, network->ssid, 5735 network->bssid, 5736 jiffies_to_msecs(jiffies - 5737 network->last_associate)); 5738 return 0; 5739 } 5740 5741 /* Now go through and see if the requested network is valid... */ 5742 if (priv->ieee->scan_age != 0 && 5743 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5744 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n", 5745 network->ssid_len, network->ssid, 5746 network->bssid, 5747 jiffies_to_msecs(jiffies - 5748 network->last_scanned)); 5749 return 0; 5750 } 5751 5752 if ((priv->config & CFG_STATIC_CHANNEL) && 5753 (network->channel != priv->channel)) { 5754 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n", 5755 network->ssid_len, network->ssid, 5756 network->bssid, 5757 network->channel, priv->channel); 5758 return 0; 5759 } 5760 5761 /* Verify privacy compatibility */ 5762 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != 5763 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { 5764 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n", 5765 network->ssid_len, network->ssid, 5766 network->bssid, 5767 priv->capability & CAP_PRIVACY_ON ? "on" : 5768 "off", 5769 network->capability & 5770 WLAN_CAPABILITY_PRIVACY ? 
"on" : "off"); 5771 return 0; 5772 } 5773 5774 if ((priv->config & CFG_STATIC_BSSID) && 5775 !ether_addr_equal(network->bssid, priv->bssid)) { 5776 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n", 5777 network->ssid_len, network->ssid, 5778 network->bssid, priv->bssid); 5779 return 0; 5780 } 5781 5782 /* Filter out any incompatible freq / mode combinations */ 5783 if (!libipw_is_valid_mode(priv->ieee, network->mode)) { 5784 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n", 5785 network->ssid_len, network->ssid, 5786 network->bssid); 5787 return 0; 5788 } 5789 5790 /* Filter out invalid channel in current GEO */ 5791 if (!libipw_is_valid_channel(priv->ieee, network->channel)) { 5792 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n", 5793 network->ssid_len, network->ssid, 5794 network->bssid); 5795 return 0; 5796 } 5797 5798 /* Ensure that the rates supported by the driver are compatible with 5799 * this AP, including verification of basic rates (mandatory) */ 5800 if (!ipw_compatible_rates(priv, network, &rates)) { 5801 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n", 5802 network->ssid_len, network->ssid, 5803 network->bssid); 5804 return 0; 5805 } 5806 5807 if (rates.num_rates == 0) { 5808 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n", 5809 network->ssid_len, network->ssid, 5810 network->bssid); 5811 return 0; 5812 } 5813 5814 /* TODO: Perform any further minimal comparititive tests. We do not 5815 * want to put too much policy logic here; intelligent scan selection 5816 * should occur within a generic IEEE 802.11 user space tool. */ 5817 5818 /* Set up 'new' AP to this network */ 5819 ipw_copy_rates(&match->rates, &rates); 5820 match->network = network; 5821 5822 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n", 5823 network->ssid_len, network->ssid, network->bssid); 5824 5825 return 1; 5826 } 5827 5828 static void ipw_adhoc_create(struct ipw_priv *priv, 5829 struct libipw_network *network) 5830 { 5831 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 5832 int i; 5833 5834 /* 5835 * For the purposes of scanning, we can set our wireless mode 5836 * to trigger scans across combinations of bands, but when it 5837 * comes to creating a new ad-hoc network, we have tell the FW 5838 * exactly which band to use. 5839 * 5840 * We also have the possibility of an invalid channel for the 5841 * chossen band. Attempting to create a new ad-hoc network 5842 * with an invalid channel for wireless mode will trigger a 5843 * FW fatal error. 
5844 * 5845 */ 5846 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 5847 case LIBIPW_52GHZ_BAND: 5848 network->mode = IEEE_A; 5849 i = libipw_channel_to_index(priv->ieee, priv->channel); 5850 BUG_ON(i == -1); 5851 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) { 5852 IPW_WARNING("Overriding invalid channel\n"); 5853 priv->channel = geo->a[0].channel; 5854 } 5855 break; 5856 5857 case LIBIPW_24GHZ_BAND: 5858 if (priv->ieee->mode & IEEE_G) 5859 network->mode = IEEE_G; 5860 else 5861 network->mode = IEEE_B; 5862 i = libipw_channel_to_index(priv->ieee, priv->channel); 5863 BUG_ON(i == -1); 5864 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) { 5865 IPW_WARNING("Overriding invalid channel\n"); 5866 priv->channel = geo->bg[0].channel; 5867 } 5868 break; 5869 5870 default: 5871 IPW_WARNING("Overriding invalid channel\n"); 5872 if (priv->ieee->mode & IEEE_A) { 5873 network->mode = IEEE_A; 5874 priv->channel = geo->a[0].channel; 5875 } else if (priv->ieee->mode & IEEE_G) { 5876 network->mode = IEEE_G; 5877 priv->channel = geo->bg[0].channel; 5878 } else { 5879 network->mode = IEEE_B; 5880 priv->channel = geo->bg[0].channel; 5881 } 5882 break; 5883 } 5884 5885 network->channel = priv->channel; 5886 priv->config |= CFG_ADHOC_PERSIST; 5887 ipw_create_bssid(priv, network->bssid); 5888 network->ssid_len = priv->essid_len; 5889 memcpy(network->ssid, priv->essid, priv->essid_len); 5890 memset(&network->stats, 0, sizeof(network->stats)); 5891 network->capability = WLAN_CAPABILITY_IBSS; 5892 if (!(priv->config & CFG_PREAMBLE_LONG)) 5893 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE; 5894 if (priv->capability & CAP_PRIVACY_ON) 5895 network->capability |= WLAN_CAPABILITY_PRIVACY; 5896 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH); 5897 memcpy(network->rates, priv->rates.supported_rates, network->rates_len); 5898 network->rates_ex_len = priv->rates.num_rates - network->rates_len; 5899 memcpy(network->rates_ex, 5900 &priv->rates.supported_rates[network->rates_len], 5901 network->rates_ex_len); 5902 network->last_scanned = 0; 5903 network->flags = 0; 5904 network->last_associate = 0; 5905 network->time_stamp[0] = 0; 5906 network->time_stamp[1] = 0; 5907 network->beacon_interval = 100; /* Default */ 5908 network->listen_interval = 10; /* Default */ 5909 network->atim_window = 0; /* Default */ 5910 network->wpa_ie_len = 0; 5911 network->rsn_ie_len = 0; 5912 } 5913 5914 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index) 5915 { 5916 struct ipw_tgi_tx_key key; 5917 5918 if (!(priv->ieee->sec.flags & (1 << index))) 5919 return; 5920 5921 key.key_id = index; 5922 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH); 5923 key.security_type = type; 5924 key.station_index = 0; /* always 0 for BSS */ 5925 key.flags = 0; 5926 /* 0 for new key; previous value of counter (after fatal error) */ 5927 key.tx_counter[0] = cpu_to_le32(0); 5928 key.tx_counter[1] = cpu_to_le32(0); 5929 5930 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key); 5931 } 5932 5933 static void ipw_send_wep_keys(struct ipw_priv *priv, int type) 5934 { 5935 struct ipw_wep_key key; 5936 int i; 5937 5938 key.cmd_id = DINO_CMD_WEP_KEY; 5939 key.seq_num = 0; 5940 5941 /* Note: AES keys cannot be set for multiple times. 5942 * Only set it at the first time. 
*/ 5943 for (i = 0; i < 4; i++) { 5944 key.key_index = i | type; 5945 if (!(priv->ieee->sec.flags & (1 << i))) { 5946 key.key_size = 0; 5947 continue; 5948 } 5949 5950 key.key_size = priv->ieee->sec.key_sizes[i]; 5951 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size); 5952 5953 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key); 5954 } 5955 } 5956 5957 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level) 5958 { 5959 if (priv->ieee->host_encrypt) 5960 return; 5961 5962 switch (level) { 5963 case SEC_LEVEL_3: 5964 priv->sys_config.disable_unicast_decryption = 0; 5965 priv->ieee->host_decrypt = 0; 5966 break; 5967 case SEC_LEVEL_2: 5968 priv->sys_config.disable_unicast_decryption = 1; 5969 priv->ieee->host_decrypt = 1; 5970 break; 5971 case SEC_LEVEL_1: 5972 priv->sys_config.disable_unicast_decryption = 0; 5973 priv->ieee->host_decrypt = 0; 5974 break; 5975 case SEC_LEVEL_0: 5976 priv->sys_config.disable_unicast_decryption = 1; 5977 break; 5978 default: 5979 break; 5980 } 5981 } 5982 5983 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level) 5984 { 5985 if (priv->ieee->host_encrypt) 5986 return; 5987 5988 switch (level) { 5989 case SEC_LEVEL_3: 5990 priv->sys_config.disable_multicast_decryption = 0; 5991 break; 5992 case SEC_LEVEL_2: 5993 priv->sys_config.disable_multicast_decryption = 1; 5994 break; 5995 case SEC_LEVEL_1: 5996 priv->sys_config.disable_multicast_decryption = 0; 5997 break; 5998 case SEC_LEVEL_0: 5999 priv->sys_config.disable_multicast_decryption = 1; 6000 break; 6001 default: 6002 break; 6003 } 6004 } 6005 6006 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv) 6007 { 6008 switch (priv->ieee->sec.level) { 6009 case SEC_LEVEL_3: 6010 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY) 6011 ipw_send_tgi_tx_key(priv, 6012 DCT_FLAG_EXT_SECURITY_CCM, 6013 priv->ieee->sec.active_key); 6014 6015 if (!priv->ieee->host_mc_decrypt) 6016 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM); 6017 break; 6018 case SEC_LEVEL_2: 6019 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY) 6020 ipw_send_tgi_tx_key(priv, 6021 DCT_FLAG_EXT_SECURITY_TKIP, 6022 priv->ieee->sec.active_key); 6023 break; 6024 case SEC_LEVEL_1: 6025 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); 6026 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level); 6027 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level); 6028 break; 6029 case SEC_LEVEL_0: 6030 default: 6031 break; 6032 } 6033 } 6034 6035 static void ipw_adhoc_check(void *data) 6036 { 6037 struct ipw_priv *priv = data; 6038 6039 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold && 6040 !(priv->config & CFG_ADHOC_PERSIST)) { 6041 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 6042 IPW_DL_STATE | IPW_DL_ASSOC, 6043 "Missed beacon: %d - disassociate\n", 6044 priv->missed_adhoc_beacons); 6045 ipw_remove_current_network(priv); 6046 ipw_disassociate(priv); 6047 return; 6048 } 6049 6050 schedule_delayed_work(&priv->adhoc_check, 6051 le16_to_cpu(priv->assoc_request.beacon_interval)); 6052 } 6053 6054 static void ipw_bg_adhoc_check(struct work_struct *work) 6055 { 6056 struct ipw_priv *priv = 6057 container_of(work, struct ipw_priv, adhoc_check.work); 6058 mutex_lock(&priv->mutex); 6059 ipw_adhoc_check(priv); 6060 mutex_unlock(&priv->mutex); 6061 } 6062 6063 static void ipw_debug_config(struct ipw_priv *priv) 6064 { 6065 IPW_DEBUG_INFO("Scan completed, no valid APs matched " 6066 "[CFG 0x%08X]\n", priv->config); 6067 if (priv->config & CFG_STATIC_CHANNEL) 6068 IPW_DEBUG_INFO("Channel locked to %d\n", 
priv->channel); 6069 else 6070 IPW_DEBUG_INFO("Channel unlocked.\n"); 6071 if (priv->config & CFG_STATIC_ESSID) 6072 IPW_DEBUG_INFO("ESSID locked to '%*pE'\n", 6073 priv->essid_len, priv->essid); 6074 else 6075 IPW_DEBUG_INFO("ESSID unlocked.\n"); 6076 if (priv->config & CFG_STATIC_BSSID) 6077 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid); 6078 else 6079 IPW_DEBUG_INFO("BSSID unlocked.\n"); 6080 if (priv->capability & CAP_PRIVACY_ON) 6081 IPW_DEBUG_INFO("PRIVACY on\n"); 6082 else 6083 IPW_DEBUG_INFO("PRIVACY off\n"); 6084 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask); 6085 } 6086 6087 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode) 6088 { 6089 /* TODO: Verify that this works... */ 6090 struct ipw_fixed_rate fr; 6091 u32 reg; 6092 u16 mask = 0; 6093 u16 new_tx_rates = priv->rates_mask; 6094 6095 /* Identify 'current FW band' and match it with the fixed 6096 * Tx rates */ 6097 6098 switch (priv->ieee->freq_band) { 6099 case LIBIPW_52GHZ_BAND: /* A only */ 6100 /* IEEE_A */ 6101 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) { 6102 /* Invalid fixed rate mask */ 6103 IPW_DEBUG_WX 6104 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6105 new_tx_rates = 0; 6106 break; 6107 } 6108 6109 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A; 6110 break; 6111 6112 default: /* 2.4Ghz or Mixed */ 6113 /* IEEE_B */ 6114 if (mode == IEEE_B) { 6115 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) { 6116 /* Invalid fixed rate mask */ 6117 IPW_DEBUG_WX 6118 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6119 new_tx_rates = 0; 6120 } 6121 break; 6122 } 6123 6124 /* IEEE_G */ 6125 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK | 6126 LIBIPW_OFDM_RATES_MASK)) { 6127 /* Invalid fixed rate mask */ 6128 IPW_DEBUG_WX 6129 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6130 new_tx_rates = 0; 6131 break; 6132 } 6133 6134 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) { 6135 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1); 6136 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK; 6137 } 6138 6139 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) { 6140 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1); 6141 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK; 6142 } 6143 6144 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) { 6145 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1); 6146 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK; 6147 } 6148 6149 new_tx_rates |= mask; 6150 break; 6151 } 6152 6153 fr.tx_rates = cpu_to_le16(new_tx_rates); 6154 6155 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE); 6156 ipw_write_reg32(priv, reg, *(u32 *) & fr); 6157 } 6158 6159 static void ipw_abort_scan(struct ipw_priv *priv) 6160 { 6161 int err; 6162 6163 if (priv->status & STATUS_SCAN_ABORTING) { 6164 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n"); 6165 return; 6166 } 6167 priv->status |= STATUS_SCAN_ABORTING; 6168 6169 err = ipw_send_scan_abort(priv); 6170 if (err) 6171 IPW_DEBUG_HC("Request to abort scan failed.\n"); 6172 } 6173 6174 static void ipw_add_scan_channels(struct ipw_priv *priv, 6175 struct ipw_scan_request_ext *scan, 6176 int scan_type) 6177 { 6178 int channel_index = 0; 6179 const struct libipw_geo *geo; 6180 int i; 6181 6182 geo = libipw_get_geo(priv->ieee); 6183 6184 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) { 6185 int start = channel_index; 6186 for (i = 0; i < geo->a_channels; i++) { 6187 if ((priv->status & STATUS_ASSOCIATED) && 6188 geo->a[i].channel == priv->channel) 6189 continue; 6190 channel_index++; 6191 scan->channels_list[channel_index] = geo->a[i].channel; 6192 ipw_set_scan_type(scan, channel_index, 6193 
geo->a[i]. 6194 flags & LIBIPW_CH_PASSIVE_ONLY ? 6195 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN : 6196 scan_type); 6197 } 6198 6199 if (start != channel_index) { 6200 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) | 6201 (channel_index - start); 6202 channel_index++; 6203 } 6204 } 6205 6206 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) { 6207 int start = channel_index; 6208 if (priv->config & CFG_SPEED_SCAN) { 6209 int index; 6210 u8 channels[LIBIPW_24GHZ_CHANNELS] = { 6211 /* nop out the list */ 6212 [0] = 0 6213 }; 6214 6215 u8 channel; 6216 while (channel_index < IPW_SCAN_CHANNELS - 1) { 6217 channel = 6218 priv->speed_scan[priv->speed_scan_pos]; 6219 if (channel == 0) { 6220 priv->speed_scan_pos = 0; 6221 channel = priv->speed_scan[0]; 6222 } 6223 if ((priv->status & STATUS_ASSOCIATED) && 6224 channel == priv->channel) { 6225 priv->speed_scan_pos++; 6226 continue; 6227 } 6228 6229 /* If this channel has already been 6230 * added in scan, break from loop 6231 * and this will be the first channel 6232 * in the next scan. 6233 */ 6234 if (channels[channel - 1] != 0) 6235 break; 6236 6237 channels[channel - 1] = 1; 6238 priv->speed_scan_pos++; 6239 channel_index++; 6240 scan->channels_list[channel_index] = channel; 6241 index = 6242 libipw_channel_to_index(priv->ieee, channel); 6243 ipw_set_scan_type(scan, channel_index, 6244 geo->bg[index]. 6245 flags & 6246 LIBIPW_CH_PASSIVE_ONLY ? 6247 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN 6248 : scan_type); 6249 } 6250 } else { 6251 for (i = 0; i < geo->bg_channels; i++) { 6252 if ((priv->status & STATUS_ASSOCIATED) && 6253 geo->bg[i].channel == priv->channel) 6254 continue; 6255 channel_index++; 6256 scan->channels_list[channel_index] = 6257 geo->bg[i].channel; 6258 ipw_set_scan_type(scan, channel_index, 6259 geo->bg[i]. 6260 flags & 6261 LIBIPW_CH_PASSIVE_ONLY ? 6262 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN 6263 : scan_type); 6264 } 6265 } 6266 6267 if (start != channel_index) { 6268 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) | 6269 (channel_index - start); 6270 } 6271 } 6272 } 6273 6274 static int ipw_passive_dwell_time(struct ipw_priv *priv) 6275 { 6276 /* staying on passive channels longer than the DTIM interval during a 6277 * scan, while associated, causes the firmware to cancel the scan 6278 * without notification. Hence, don't stay on passive channels longer 6279 * than the beacon interval. 6280 */ 6281 if (priv->status & STATUS_ASSOCIATED 6282 && priv->assoc_network->beacon_interval > 10) 6283 return priv->assoc_network->beacon_interval - 10; 6284 else 6285 return 120; 6286 } 6287 6288 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct) 6289 { 6290 struct ipw_scan_request_ext scan; 6291 int err = 0, scan_type; 6292 6293 if (!(priv->status & STATUS_INIT) || 6294 (priv->status & STATUS_EXIT_PENDING)) 6295 return 0; 6296 6297 mutex_lock(&priv->mutex); 6298 6299 if (direct && (priv->direct_scan_ssid_len == 0)) { 6300 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n"); 6301 priv->status &= ~STATUS_DIRECT_SCAN_PENDING; 6302 goto done; 6303 } 6304 6305 if (priv->status & STATUS_SCANNING) { 6306 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n"); 6307 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : 6308 STATUS_SCAN_PENDING; 6309 goto done; 6310 } 6311 6312 if (!(priv->status & STATUS_SCAN_FORCED) && 6313 priv->status & STATUS_SCAN_ABORTING) { 6314 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n"); 6315 priv->status |= direct ? 
STATUS_DIRECT_SCAN_PENDING : 6316 STATUS_SCAN_PENDING; 6317 goto done; 6318 } 6319 6320 if (priv->status & STATUS_RF_KILL_MASK) { 6321 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n"); 6322 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : 6323 STATUS_SCAN_PENDING; 6324 goto done; 6325 } 6326 6327 memset(&scan, 0, sizeof(scan)); 6328 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee)); 6329 6330 if (type == IW_SCAN_TYPE_PASSIVE) { 6331 IPW_DEBUG_WX("use passive scanning\n"); 6332 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN; 6333 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6334 cpu_to_le16(ipw_passive_dwell_time(priv)); 6335 ipw_add_scan_channels(priv, &scan, scan_type); 6336 goto send_request; 6337 } 6338 6339 /* Use active scan by default. */ 6340 if (priv->config & CFG_SPEED_SCAN) 6341 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 6342 cpu_to_le16(30); 6343 else 6344 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 6345 cpu_to_le16(20); 6346 6347 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 6348 cpu_to_le16(20); 6349 6350 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6351 cpu_to_le16(ipw_passive_dwell_time(priv)); 6352 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); 6353 6354 #ifdef CONFIG_IPW2200_MONITOR 6355 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 6356 u8 channel; 6357 u8 band = 0; 6358 6359 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 6360 case LIBIPW_52GHZ_BAND: 6361 band = (u8) (IPW_A_MODE << 6) | 1; 6362 channel = priv->channel; 6363 break; 6364 6365 case LIBIPW_24GHZ_BAND: 6366 band = (u8) (IPW_B_MODE << 6) | 1; 6367 channel = priv->channel; 6368 break; 6369 6370 default: 6371 band = (u8) (IPW_B_MODE << 6) | 1; 6372 channel = 9; 6373 break; 6374 } 6375 6376 scan.channels_list[0] = band; 6377 scan.channels_list[1] = channel; 6378 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN); 6379 6380 /* NOTE: The card will sit on this channel for this time 6381 * period. Scan aborts are timing sensitive and frequently 6382 * result in firmware restarts. As such, it is best to 6383 * set a small dwell_time here and just keep re-issuing 6384 * scans. Otherwise fast channel hopping will not actually 6385 * hop channels. 6386 * 6387 * TODO: Move SPEED SCAN support to all modes and bands */ 6388 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6389 cpu_to_le16(2000); 6390 } else { 6391 #endif /* CONFIG_IPW2200_MONITOR */ 6392 /* Honor direct scans first, otherwise if we are roaming make 6393 * this a direct scan for the current network. 
Finally, 6394 * ensure that every other scan is a fast channel hop scan */ 6395 if (direct) { 6396 err = ipw_send_ssid(priv, priv->direct_scan_ssid, 6397 priv->direct_scan_ssid_len); 6398 if (err) { 6399 IPW_DEBUG_HC("Attempt to send SSID command " 6400 "failed\n"); 6401 goto done; 6402 } 6403 6404 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; 6405 } else if ((priv->status & STATUS_ROAMING) 6406 || (!(priv->status & STATUS_ASSOCIATED) 6407 && (priv->config & CFG_STATIC_ESSID) 6408 && (le32_to_cpu(scan.full_scan_index) % 2))) { 6409 err = ipw_send_ssid(priv, priv->essid, priv->essid_len); 6410 if (err) { 6411 IPW_DEBUG_HC("Attempt to send SSID command " 6412 "failed.\n"); 6413 goto done; 6414 } 6415 6416 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; 6417 } else 6418 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN; 6419 6420 ipw_add_scan_channels(priv, &scan, scan_type); 6421 #ifdef CONFIG_IPW2200_MONITOR 6422 } 6423 #endif 6424 6425 send_request: 6426 err = ipw_send_scan_request_ext(priv, &scan); 6427 if (err) { 6428 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err); 6429 goto done; 6430 } 6431 6432 priv->status |= STATUS_SCANNING; 6433 if (direct) { 6434 priv->status &= ~STATUS_DIRECT_SCAN_PENDING; 6435 priv->direct_scan_ssid_len = 0; 6436 } else 6437 priv->status &= ~STATUS_SCAN_PENDING; 6438 6439 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG); 6440 done: 6441 mutex_unlock(&priv->mutex); 6442 return err; 6443 } 6444 6445 static void ipw_request_passive_scan(struct work_struct *work) 6446 { 6447 struct ipw_priv *priv = 6448 container_of(work, struct ipw_priv, request_passive_scan.work); 6449 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0); 6450 } 6451 6452 static void ipw_request_scan(struct work_struct *work) 6453 { 6454 struct ipw_priv *priv = 6455 container_of(work, struct ipw_priv, request_scan.work); 6456 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0); 6457 } 6458 6459 static void ipw_request_direct_scan(struct work_struct *work) 6460 { 6461 struct ipw_priv *priv = 6462 container_of(work, struct ipw_priv, request_direct_scan.work); 6463 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1); 6464 } 6465 6466 static void ipw_bg_abort_scan(struct work_struct *work) 6467 { 6468 struct ipw_priv *priv = 6469 container_of(work, struct ipw_priv, abort_scan); 6470 mutex_lock(&priv->mutex); 6471 ipw_abort_scan(priv); 6472 mutex_unlock(&priv->mutex); 6473 } 6474 6475 static int ipw_wpa_enable(struct ipw_priv *priv, int value) 6476 { 6477 /* This is called when wpa_supplicant loads and closes the driver 6478 * interface. 
*/ 6479 priv->ieee->wpa_enabled = value; 6480 return 0; 6481 } 6482 6483 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value) 6484 { 6485 struct libipw_device *ieee = priv->ieee; 6486 struct libipw_security sec = { 6487 .flags = SEC_AUTH_MODE, 6488 }; 6489 int ret = 0; 6490 6491 if (value & IW_AUTH_ALG_SHARED_KEY) { 6492 sec.auth_mode = WLAN_AUTH_SHARED_KEY; 6493 ieee->open_wep = 0; 6494 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { 6495 sec.auth_mode = WLAN_AUTH_OPEN; 6496 ieee->open_wep = 1; 6497 } else if (value & IW_AUTH_ALG_LEAP) { 6498 sec.auth_mode = WLAN_AUTH_LEAP; 6499 ieee->open_wep = 1; 6500 } else 6501 return -EINVAL; 6502 6503 if (ieee->set_security) 6504 ieee->set_security(ieee->dev, &sec); 6505 else 6506 ret = -EOPNOTSUPP; 6507 6508 return ret; 6509 } 6510 6511 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, 6512 int wpa_ie_len) 6513 { 6514 /* make sure WPA is enabled */ 6515 ipw_wpa_enable(priv, 1); 6516 } 6517 6518 static int ipw_set_rsn_capa(struct ipw_priv *priv, 6519 char *capabilities, int length) 6520 { 6521 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n"); 6522 6523 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length, 6524 capabilities); 6525 } 6526 6527 /* 6528 * WE-18 support 6529 */ 6530 6531 /* SIOCSIWGENIE */ 6532 static int ipw_wx_set_genie(struct net_device *dev, 6533 struct iw_request_info *info, 6534 union iwreq_data *wrqu, char *extra) 6535 { 6536 struct ipw_priv *priv = libipw_priv(dev); 6537 struct libipw_device *ieee = priv->ieee; 6538 u8 *buf; 6539 int err = 0; 6540 6541 if (wrqu->data.length > MAX_WPA_IE_LEN || 6542 (wrqu->data.length && extra == NULL)) 6543 return -EINVAL; 6544 6545 if (wrqu->data.length) { 6546 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL); 6547 if (buf == NULL) { 6548 err = -ENOMEM; 6549 goto out; 6550 } 6551 6552 kfree(ieee->wpa_ie); 6553 ieee->wpa_ie = buf; 6554 ieee->wpa_ie_len = wrqu->data.length; 6555 } else { 6556 kfree(ieee->wpa_ie); 6557 ieee->wpa_ie = NULL; 6558 ieee->wpa_ie_len = 0; 6559 } 6560 6561 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); 6562 out: 6563 return err; 6564 } 6565 6566 /* SIOCGIWGENIE */ 6567 static int ipw_wx_get_genie(struct net_device *dev, 6568 struct iw_request_info *info, 6569 union iwreq_data *wrqu, char *extra) 6570 { 6571 struct ipw_priv *priv = libipw_priv(dev); 6572 struct libipw_device *ieee = priv->ieee; 6573 int err = 0; 6574 6575 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) { 6576 wrqu->data.length = 0; 6577 goto out; 6578 } 6579 6580 if (wrqu->data.length < ieee->wpa_ie_len) { 6581 err = -E2BIG; 6582 goto out; 6583 } 6584 6585 wrqu->data.length = ieee->wpa_ie_len; 6586 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); 6587 6588 out: 6589 return err; 6590 } 6591 6592 static int wext_cipher2level(int cipher) 6593 { 6594 switch (cipher) { 6595 case IW_AUTH_CIPHER_NONE: 6596 return SEC_LEVEL_0; 6597 case IW_AUTH_CIPHER_WEP40: 6598 case IW_AUTH_CIPHER_WEP104: 6599 return SEC_LEVEL_1; 6600 case IW_AUTH_CIPHER_TKIP: 6601 return SEC_LEVEL_2; 6602 case IW_AUTH_CIPHER_CCMP: 6603 return SEC_LEVEL_3; 6604 default: 6605 return -1; 6606 } 6607 } 6608 6609 /* SIOCSIWAUTH */ 6610 static int ipw_wx_set_auth(struct net_device *dev, 6611 struct iw_request_info *info, 6612 union iwreq_data *wrqu, char *extra) 6613 { 6614 struct ipw_priv *priv = libipw_priv(dev); 6615 struct libipw_device *ieee = priv->ieee; 6616 struct iw_param *param = &wrqu->param; 6617 struct lib80211_crypt_data *crypt; 6618 unsigned long flags; 6619 int ret = 0; 6620 6621 
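/* Dispatch on which WE-18 auth parameter userspace is setting; parameters the driver does not handle fall through to -EOPNOTSUPP in the default case. */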
switch (param->flags & IW_AUTH_INDEX) { 6622 case IW_AUTH_WPA_VERSION: 6623 break; 6624 case IW_AUTH_CIPHER_PAIRWISE: 6625 ipw_set_hw_decrypt_unicast(priv, 6626 wext_cipher2level(param->value)); 6627 break; 6628 case IW_AUTH_CIPHER_GROUP: 6629 ipw_set_hw_decrypt_multicast(priv, 6630 wext_cipher2level(param->value)); 6631 break; 6632 case IW_AUTH_KEY_MGMT: 6633 /* 6634 * ipw2200 does not use these parameters 6635 */ 6636 break; 6637 6638 case IW_AUTH_TKIP_COUNTERMEASURES: 6639 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; 6640 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags) 6641 break; 6642 6643 flags = crypt->ops->get_flags(crypt->priv); 6644 6645 if (param->value) 6646 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; 6647 else 6648 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; 6649 6650 crypt->ops->set_flags(flags, crypt->priv); 6651 6652 break; 6653 6654 case IW_AUTH_DROP_UNENCRYPTED:{ 6655 /* HACK: 6656 * 6657 * wpa_supplicant calls set_wpa_enabled when the driver 6658 * is loaded and unloaded, regardless of if WPA is being 6659 * used. No other calls are made which can be used to 6660 * determine if encryption will be used or not prior to 6661 * association being expected. If encryption is not being 6662 * used, drop_unencrypted is set to false, else true -- we 6663 * can use this to determine if the CAP_PRIVACY_ON bit should 6664 * be set. 6665 */ 6666 struct libipw_security sec = { 6667 .flags = SEC_ENABLED, 6668 .enabled = param->value, 6669 }; 6670 priv->ieee->drop_unencrypted = param->value; 6671 /* We only change SEC_LEVEL for open mode. Others 6672 * are set by ipw_wpa_set_encryption. 6673 */ 6674 if (!param->value) { 6675 sec.flags |= SEC_LEVEL; 6676 sec.level = SEC_LEVEL_0; 6677 } else { 6678 sec.flags |= SEC_LEVEL; 6679 sec.level = SEC_LEVEL_1; 6680 } 6681 if (priv->ieee->set_security) 6682 priv->ieee->set_security(priv->ieee->dev, &sec); 6683 break; 6684 } 6685 6686 case IW_AUTH_80211_AUTH_ALG: 6687 ret = ipw_wpa_set_auth_algs(priv, param->value); 6688 break; 6689 6690 case IW_AUTH_WPA_ENABLED: 6691 ret = ipw_wpa_enable(priv, param->value); 6692 ipw_disassociate(priv); 6693 break; 6694 6695 case IW_AUTH_RX_UNENCRYPTED_EAPOL: 6696 ieee->ieee802_1x = param->value; 6697 break; 6698 6699 case IW_AUTH_PRIVACY_INVOKED: 6700 ieee->privacy_invoked = param->value; 6701 break; 6702 6703 default: 6704 return -EOPNOTSUPP; 6705 } 6706 return ret; 6707 } 6708 6709 /* SIOCGIWAUTH */ 6710 static int ipw_wx_get_auth(struct net_device *dev, 6711 struct iw_request_info *info, 6712 union iwreq_data *wrqu, char *extra) 6713 { 6714 struct ipw_priv *priv = libipw_priv(dev); 6715 struct libipw_device *ieee = priv->ieee; 6716 struct lib80211_crypt_data *crypt; 6717 struct iw_param *param = &wrqu->param; 6718 6719 switch (param->flags & IW_AUTH_INDEX) { 6720 case IW_AUTH_WPA_VERSION: 6721 case IW_AUTH_CIPHER_PAIRWISE: 6722 case IW_AUTH_CIPHER_GROUP: 6723 case IW_AUTH_KEY_MGMT: 6724 /* 6725 * wpa_supplicant will control these internally 6726 */ 6727 return -EOPNOTSUPP; 6728 6729 case IW_AUTH_TKIP_COUNTERMEASURES: 6730 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; 6731 if (!crypt || !crypt->ops->get_flags) 6732 break; 6733 6734 param->value = (crypt->ops->get_flags(crypt->priv) & 6735 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 
1 : 0; 6736 6737 break; 6738 6739 case IW_AUTH_DROP_UNENCRYPTED: 6740 param->value = ieee->drop_unencrypted; 6741 break; 6742 6743 case IW_AUTH_80211_AUTH_ALG: 6744 param->value = ieee->sec.auth_mode; 6745 break; 6746 6747 case IW_AUTH_WPA_ENABLED: 6748 param->value = ieee->wpa_enabled; 6749 break; 6750 6751 case IW_AUTH_RX_UNENCRYPTED_EAPOL: 6752 param->value = ieee->ieee802_1x; 6753 break; 6754 6755 case IW_AUTH_ROAMING_CONTROL: 6756 case IW_AUTH_PRIVACY_INVOKED: 6757 param->value = ieee->privacy_invoked; 6758 break; 6759 6760 default: 6761 return -EOPNOTSUPP; 6762 } 6763 return 0; 6764 } 6765 6766 /* SIOCSIWENCODEEXT */ 6767 static int ipw_wx_set_encodeext(struct net_device *dev, 6768 struct iw_request_info *info, 6769 union iwreq_data *wrqu, char *extra) 6770 { 6771 struct ipw_priv *priv = libipw_priv(dev); 6772 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 6773 6774 if (hwcrypto) { 6775 if (ext->alg == IW_ENCODE_ALG_TKIP) { 6776 /* IPW HW can't build TKIP MIC, 6777 host decryption still needed */ 6778 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) 6779 priv->ieee->host_mc_decrypt = 1; 6780 else { 6781 priv->ieee->host_encrypt = 0; 6782 priv->ieee->host_encrypt_msdu = 1; 6783 priv->ieee->host_decrypt = 1; 6784 } 6785 } else { 6786 priv->ieee->host_encrypt = 0; 6787 priv->ieee->host_encrypt_msdu = 0; 6788 priv->ieee->host_decrypt = 0; 6789 priv->ieee->host_mc_decrypt = 0; 6790 } 6791 } 6792 6793 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra); 6794 } 6795 6796 /* SIOCGIWENCODEEXT */ 6797 static int ipw_wx_get_encodeext(struct net_device *dev, 6798 struct iw_request_info *info, 6799 union iwreq_data *wrqu, char *extra) 6800 { 6801 struct ipw_priv *priv = libipw_priv(dev); 6802 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra); 6803 } 6804 6805 /* SIOCSIWMLME */ 6806 static int ipw_wx_set_mlme(struct net_device *dev, 6807 struct iw_request_info *info, 6808 union iwreq_data *wrqu, char *extra) 6809 { 6810 struct ipw_priv *priv = libipw_priv(dev); 6811 struct iw_mlme *mlme = (struct iw_mlme *)extra; 6812 __le16 reason; 6813 6814 reason = cpu_to_le16(mlme->reason_code); 6815 6816 switch (mlme->cmd) { 6817 case IW_MLME_DEAUTH: 6818 /* silently ignore */ 6819 break; 6820 6821 case IW_MLME_DISASSOC: 6822 ipw_disassociate(priv); 6823 break; 6824 6825 default: 6826 return -EOPNOTSUPP; 6827 } 6828 return 0; 6829 } 6830 6831 #ifdef CONFIG_IPW2200_QOS 6832 6833 /* QoS */ 6834 /* 6835 * get the modulation type of the current network or 6836 * the card current mode 6837 */ 6838 static u8 ipw_qos_current_mode(struct ipw_priv * priv) 6839 { 6840 u8 mode = 0; 6841 6842 if (priv->status & STATUS_ASSOCIATED) { 6843 unsigned long flags; 6844 6845 spin_lock_irqsave(&priv->ieee->lock, flags); 6846 mode = priv->assoc_network->mode; 6847 spin_unlock_irqrestore(&priv->ieee->lock, flags); 6848 } else { 6849 mode = priv->ieee->mode; 6850 } 6851 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode); 6852 return mode; 6853 } 6854 6855 /* 6856 * Handle management frame beacon and probe response 6857 */ 6858 static int ipw_qos_handle_probe_response(struct ipw_priv *priv, 6859 int active_network, 6860 struct libipw_network *network) 6861 { 6862 u32 size = sizeof(struct libipw_qos_parameters); 6863 6864 if (network->capability & WLAN_CAPABILITY_IBSS) 6865 network->qos_data.active = network->qos_data.supported; 6866 6867 if (network->flags & NETWORK_HAS_QOS_MASK) { 6868 if (active_network && 6869 (network->flags & NETWORK_HAS_QOS_PARAMETERS)) 6870 network->qos_data.active = 
network->qos_data.supported; 6871 6872 if ((network->qos_data.active == 1) && (active_network == 1) && 6873 (network->flags & NETWORK_HAS_QOS_PARAMETERS) && 6874 (network->qos_data.old_param_count != 6875 network->qos_data.param_count)) { 6876 network->qos_data.old_param_count = 6877 network->qos_data.param_count; 6878 schedule_work(&priv->qos_activate); 6879 IPW_DEBUG_QOS("QoS parameters change call " 6880 "qos_activate\n"); 6881 } 6882 } else { 6883 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B)) 6884 memcpy(&network->qos_data.parameters, 6885 &def_parameters_CCK, size); 6886 else 6887 memcpy(&network->qos_data.parameters, 6888 &def_parameters_OFDM, size); 6889 6890 if ((network->qos_data.active == 1) && (active_network == 1)) { 6891 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n"); 6892 schedule_work(&priv->qos_activate); 6893 } 6894 6895 network->qos_data.active = 0; 6896 network->qos_data.supported = 0; 6897 } 6898 if ((priv->status & STATUS_ASSOCIATED) && 6899 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) { 6900 if (!ether_addr_equal(network->bssid, priv->bssid)) 6901 if (network->capability & WLAN_CAPABILITY_IBSS) 6902 if ((network->ssid_len == 6903 priv->assoc_network->ssid_len) && 6904 !memcmp(network->ssid, 6905 priv->assoc_network->ssid, 6906 network->ssid_len)) { 6907 schedule_work(&priv->merge_networks); 6908 } 6909 } 6910 6911 return 0; 6912 } 6913 6914 /* 6915 * This function set up the firmware to support QoS. It sends 6916 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO 6917 */ 6918 static int ipw_qos_activate(struct ipw_priv *priv, 6919 struct libipw_qos_data *qos_network_data) 6920 { 6921 int err; 6922 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS]; 6923 struct libipw_qos_parameters *active_one = NULL; 6924 u32 size = sizeof(struct libipw_qos_parameters); 6925 u32 burst_duration; 6926 int i; 6927 u8 type; 6928 6929 type = ipw_qos_current_mode(priv); 6930 6931 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]); 6932 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size); 6933 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]); 6934 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size); 6935 6936 if (qos_network_data == NULL) { 6937 if (type == IEEE_B) { 6938 IPW_DEBUG_QOS("QoS activate network mode %d\n", type); 6939 active_one = &def_parameters_CCK; 6940 } else 6941 active_one = &def_parameters_OFDM; 6942 6943 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 6944 burst_duration = ipw_qos_get_burst_duration(priv); 6945 for (i = 0; i < QOS_QUEUE_NUM; i++) 6946 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] = 6947 cpu_to_le16(burst_duration); 6948 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 6949 if (type == IEEE_B) { 6950 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n", 6951 type); 6952 if (priv->qos_data.qos_enable == 0) 6953 active_one = &def_parameters_CCK; 6954 else 6955 active_one = priv->qos_data.def_qos_parm_CCK; 6956 } else { 6957 if (priv->qos_data.qos_enable == 0) 6958 active_one = &def_parameters_OFDM; 6959 else 6960 active_one = priv->qos_data.def_qos_parm_OFDM; 6961 } 6962 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 6963 } else { 6964 unsigned long flags; 6965 int active; 6966 6967 spin_lock_irqsave(&priv->ieee->lock, flags); 6968 active_one = &(qos_network_data->parameters); 6969 qos_network_data->old_param_count = 6970 qos_network_data->param_count; 6971 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 6972 active = 
qos_network_data->supported; 6973 spin_unlock_irqrestore(&priv->ieee->lock, flags); 6974 6975 if (active == 0) { 6976 burst_duration = ipw_qos_get_burst_duration(priv); 6977 for (i = 0; i < QOS_QUEUE_NUM; i++) 6978 qos_parameters[QOS_PARAM_SET_ACTIVE]. 6979 tx_op_limit[i] = cpu_to_le16(burst_duration); 6980 } 6981 } 6982 6983 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n"); 6984 err = ipw_send_qos_params_command(priv, &qos_parameters[0]); 6985 if (err) 6986 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n"); 6987 6988 return err; 6989 } 6990 6991 /* 6992 * send IPW_CMD_WME_INFO to the firmware 6993 */ 6994 static int ipw_qos_set_info_element(struct ipw_priv *priv) 6995 { 6996 int ret = 0; 6997 struct libipw_qos_information_element qos_info; 6998 6999 if (priv == NULL) 7000 return -1; 7001 7002 qos_info.elementID = QOS_ELEMENT_ID; 7003 qos_info.length = sizeof(struct libipw_qos_information_element) - 2; 7004 7005 qos_info.version = QOS_VERSION_1; 7006 qos_info.ac_info = 0; 7007 7008 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN); 7009 qos_info.qui_type = QOS_OUI_TYPE; 7010 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE; 7011 7012 ret = ipw_send_qos_info_command(priv, &qos_info); 7013 if (ret != 0) { 7014 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n"); 7015 } 7016 return ret; 7017 } 7018 7019 /* 7020 * Set the QoS parameter with the association request structure 7021 */ 7022 static int ipw_qos_association(struct ipw_priv *priv, 7023 struct libipw_network *network) 7024 { 7025 int err = 0; 7026 struct libipw_qos_data *qos_data = NULL; 7027 struct libipw_qos_data ibss_data = { 7028 .supported = 1, 7029 .active = 1, 7030 }; 7031 7032 switch (priv->ieee->iw_mode) { 7033 case IW_MODE_ADHOC: 7034 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS)); 7035 7036 qos_data = &ibss_data; 7037 break; 7038 7039 case IW_MODE_INFRA: 7040 qos_data = &network->qos_data; 7041 break; 7042 7043 default: 7044 BUG(); 7045 break; 7046 } 7047 7048 err = ipw_qos_activate(priv, qos_data); 7049 if (err) { 7050 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC; 7051 return err; 7052 } 7053 7054 if (priv->qos_data.qos_enable && qos_data->supported) { 7055 IPW_DEBUG_QOS("QoS will be enabled for this association\n"); 7056 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC; 7057 return ipw_qos_set_info_element(priv); 7058 } 7059 7060 return 0; 7061 } 7062 7063 /* 7064 * handling the beaconing responses. 
if we get different QoS setting 7065 * off the network from the associated setting, adjust the QoS 7066 * setting 7067 */ 7068 static int ipw_qos_association_resp(struct ipw_priv *priv, 7069 struct libipw_network *network) 7070 { 7071 int ret = 0; 7072 unsigned long flags; 7073 u32 size = sizeof(struct libipw_qos_parameters); 7074 int set_qos_param = 0; 7075 7076 if ((priv == NULL) || (network == NULL) || 7077 (priv->assoc_network == NULL)) 7078 return ret; 7079 7080 if (!(priv->status & STATUS_ASSOCIATED)) 7081 return ret; 7082 7083 if ((priv->ieee->iw_mode != IW_MODE_INFRA)) 7084 return ret; 7085 7086 spin_lock_irqsave(&priv->ieee->lock, flags); 7087 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) { 7088 memcpy(&priv->assoc_network->qos_data, &network->qos_data, 7089 sizeof(struct libipw_qos_data)); 7090 priv->assoc_network->qos_data.active = 1; 7091 if ((network->qos_data.old_param_count != 7092 network->qos_data.param_count)) { 7093 set_qos_param = 1; 7094 network->qos_data.old_param_count = 7095 network->qos_data.param_count; 7096 } 7097 7098 } else { 7099 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B)) 7100 memcpy(&priv->assoc_network->qos_data.parameters, 7101 &def_parameters_CCK, size); 7102 else 7103 memcpy(&priv->assoc_network->qos_data.parameters, 7104 &def_parameters_OFDM, size); 7105 priv->assoc_network->qos_data.active = 0; 7106 priv->assoc_network->qos_data.supported = 0; 7107 set_qos_param = 1; 7108 } 7109 7110 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7111 7112 if (set_qos_param == 1) 7113 schedule_work(&priv->qos_activate); 7114 7115 return ret; 7116 } 7117 7118 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv) 7119 { 7120 u32 ret = 0; 7121 7122 if ((priv == NULL)) 7123 return 0; 7124 7125 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION)) 7126 ret = priv->qos_data.burst_duration_CCK; 7127 else 7128 ret = priv->qos_data.burst_duration_OFDM; 7129 7130 return ret; 7131 } 7132 7133 /* 7134 * Initialize the setting of QoS global 7135 */ 7136 static void ipw_qos_init(struct ipw_priv *priv, int enable, 7137 int burst_enable, u32 burst_duration_CCK, 7138 u32 burst_duration_OFDM) 7139 { 7140 priv->qos_data.qos_enable = enable; 7141 7142 if (priv->qos_data.qos_enable) { 7143 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK; 7144 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM; 7145 IPW_DEBUG_QOS("QoS is enabled\n"); 7146 } else { 7147 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK; 7148 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM; 7149 IPW_DEBUG_QOS("QoS is not enabled\n"); 7150 } 7151 7152 priv->qos_data.burst_enable = burst_enable; 7153 7154 if (burst_enable) { 7155 priv->qos_data.burst_duration_CCK = burst_duration_CCK; 7156 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM; 7157 } else { 7158 priv->qos_data.burst_duration_CCK = 0; 7159 priv->qos_data.burst_duration_OFDM = 0; 7160 } 7161 } 7162 7163 /* 7164 * map the packet priority to the right TX Queue 7165 */ 7166 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority) 7167 { 7168 if (priority > 7 || !priv->qos_data.qos_enable) 7169 priority = 0; 7170 7171 return from_priority_to_tx_queue[priority] - 1; 7172 } 7173 7174 static int ipw_is_qos_active(struct net_device *dev, 7175 struct sk_buff *skb) 7176 { 7177 struct ipw_priv *priv = libipw_priv(dev); 7178 struct libipw_qos_data *qos_data = NULL; 7179 int active, supported; 7180 u8 *daddr = skb->data + ETH_ALEN; 7181 int unicast = !is_multicast_ether_addr(daddr); 7182 7183 
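/* QoS is only considered active while associated; in ad-hoc mode it is additionally gated on unicast traffic and on the peer's advertised QoS support. */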
if (!(priv->status & STATUS_ASSOCIATED)) 7184 return 0; 7185 7186 qos_data = &priv->assoc_network->qos_data; 7187 7188 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7189 if (unicast == 0) 7190 qos_data->active = 0; 7191 else 7192 qos_data->active = qos_data->supported; 7193 } 7194 active = qos_data->active; 7195 supported = qos_data->supported; 7196 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d " 7197 "unicast %d\n", 7198 priv->qos_data.qos_enable, active, supported, unicast); 7199 if (active && priv->qos_data.qos_enable) 7200 return 1; 7201 7202 return 0; 7203 7204 } 7205 /* 7206 * add QoS parameter to the TX command 7207 */ 7208 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv, 7209 u16 priority, 7210 struct tfd_data *tfd) 7211 { 7212 int tx_queue_id = 0; 7213 7214 7215 tx_queue_id = from_priority_to_tx_queue[priority] - 1; 7216 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED; 7217 7218 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) { 7219 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD; 7220 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK); 7221 } 7222 return 0; 7223 } 7224 7225 /* 7226 * background support to run QoS activate functionality 7227 */ 7228 static void ipw_bg_qos_activate(struct work_struct *work) 7229 { 7230 struct ipw_priv *priv = 7231 container_of(work, struct ipw_priv, qos_activate); 7232 7233 mutex_lock(&priv->mutex); 7234 7235 if (priv->status & STATUS_ASSOCIATED) 7236 ipw_qos_activate(priv, &(priv->assoc_network->qos_data)); 7237 7238 mutex_unlock(&priv->mutex); 7239 } 7240 7241 static int ipw_handle_probe_response(struct net_device *dev, 7242 struct libipw_probe_response *resp, 7243 struct libipw_network *network) 7244 { 7245 struct ipw_priv *priv = libipw_priv(dev); 7246 int active_network = ((priv->status & STATUS_ASSOCIATED) && 7247 (network == priv->assoc_network)); 7248 7249 ipw_qos_handle_probe_response(priv, active_network, network); 7250 7251 return 0; 7252 } 7253 7254 static int ipw_handle_beacon(struct net_device *dev, 7255 struct libipw_beacon *resp, 7256 struct libipw_network *network) 7257 { 7258 struct ipw_priv *priv = libipw_priv(dev); 7259 int active_network = ((priv->status & STATUS_ASSOCIATED) && 7260 (network == priv->assoc_network)); 7261 7262 ipw_qos_handle_probe_response(priv, active_network, network); 7263 7264 return 0; 7265 } 7266 7267 static int ipw_handle_assoc_response(struct net_device *dev, 7268 struct libipw_assoc_response *resp, 7269 struct libipw_network *network) 7270 { 7271 struct ipw_priv *priv = libipw_priv(dev); 7272 ipw_qos_association_resp(priv, network); 7273 return 0; 7274 } 7275 7276 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters 7277 *qos_param) 7278 { 7279 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS, 7280 sizeof(*qos_param) * 3, qos_param); 7281 } 7282 7283 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element 7284 *qos_param) 7285 { 7286 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param), 7287 qos_param); 7288 } 7289 7290 #endif /* CONFIG_IPW2200_QOS */ 7291 7292 static int ipw_associate_network(struct ipw_priv *priv, 7293 struct libipw_network *network, 7294 struct ipw_supported_rates *rates, int roaming) 7295 { 7296 int err; 7297 7298 if (priv->config & CFG_FIXED_RATE) 7299 ipw_set_fixed_rate(priv, network->mode); 7300 7301 if (!(priv->config & CFG_STATIC_ESSID)) { 7302 priv->essid_len = min(network->ssid_len, 7303 (u8) IW_ESSID_MAX_SIZE); 7304 memcpy(priv->essid, 
network->ssid, priv->essid_len); 7305 } 7306 7307 network->last_associate = jiffies; 7308 7309 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request)); 7310 priv->assoc_request.channel = network->channel; 7311 priv->assoc_request.auth_key = 0; 7312 7313 if ((priv->capability & CAP_PRIVACY_ON) && 7314 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) { 7315 priv->assoc_request.auth_type = AUTH_SHARED_KEY; 7316 priv->assoc_request.auth_key = priv->ieee->sec.active_key; 7317 7318 if (priv->ieee->sec.level == SEC_LEVEL_1) 7319 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); 7320 7321 } else if ((priv->capability & CAP_PRIVACY_ON) && 7322 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP)) 7323 priv->assoc_request.auth_type = AUTH_LEAP; 7324 else 7325 priv->assoc_request.auth_type = AUTH_OPEN; 7326 7327 if (priv->ieee->wpa_ie_len) { 7328 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */ 7329 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie, 7330 priv->ieee->wpa_ie_len); 7331 } 7332 7333 /* 7334 * It is valid for our ieee device to support multiple modes, but 7335 * when it comes to associating to a given network we have to choose 7336 * just one mode. 7337 */ 7338 if (network->mode & priv->ieee->mode & IEEE_A) 7339 priv->assoc_request.ieee_mode = IPW_A_MODE; 7340 else if (network->mode & priv->ieee->mode & IEEE_G) 7341 priv->assoc_request.ieee_mode = IPW_G_MODE; 7342 else if (network->mode & priv->ieee->mode & IEEE_B) 7343 priv->assoc_request.ieee_mode = IPW_B_MODE; 7344 7345 priv->assoc_request.capability = cpu_to_le16(network->capability); 7346 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 7347 && !(priv->config & CFG_PREAMBLE_LONG)) { 7348 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE; 7349 } else { 7350 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE; 7351 7352 /* Clear the short preamble if we won't be supporting it */ 7353 priv->assoc_request.capability &= 7354 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE); 7355 } 7356 7357 /* Clear capability bits that aren't used in Ad Hoc */ 7358 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 7359 priv->assoc_request.capability &= 7360 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME); 7361 7362 IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n", 7363 roaming ? "Rea" : "A", 7364 priv->essid_len, priv->essid, 7365 network->channel, 7366 ipw_modes[priv->assoc_request.ieee_mode], 7367 rates->num_rates, 7368 (priv->assoc_request.preamble_length == 7369 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short", 7370 network->capability & 7371 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long", 7372 priv->capability & CAP_PRIVACY_ON ? "on " : "off", 7373 priv->capability & CAP_PRIVACY_ON ? 7374 (priv->capability & CAP_SHARED_KEY ? "(shared)" : 7375 "(open)") : "", 7376 priv->capability & CAP_PRIVACY_ON ? " key=" : "", 7377 priv->capability & CAP_PRIVACY_ON ? 7378 '1' + priv->ieee->sec.active_key : '.', 7379 priv->capability & CAP_PRIVACY_ON ? '.' 
: ' '); 7380 7381 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval); 7382 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 7383 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) { 7384 priv->assoc_request.assoc_type = HC_IBSS_START; 7385 priv->assoc_request.assoc_tsf_msw = 0; 7386 priv->assoc_request.assoc_tsf_lsw = 0; 7387 } else { 7388 if (unlikely(roaming)) 7389 priv->assoc_request.assoc_type = HC_REASSOCIATE; 7390 else 7391 priv->assoc_request.assoc_type = HC_ASSOCIATE; 7392 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]); 7393 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]); 7394 } 7395 7396 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN); 7397 7398 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7399 eth_broadcast_addr(priv->assoc_request.dest); 7400 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window); 7401 } else { 7402 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN); 7403 priv->assoc_request.atim_window = 0; 7404 } 7405 7406 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval); 7407 7408 err = ipw_send_ssid(priv, priv->essid, priv->essid_len); 7409 if (err) { 7410 IPW_DEBUG_HC("Attempt to send SSID command failed.\n"); 7411 return err; 7412 } 7413 7414 rates->ieee_mode = priv->assoc_request.ieee_mode; 7415 rates->purpose = IPW_RATE_CONNECT; 7416 ipw_send_supported_rates(priv, rates); 7417 7418 if (priv->assoc_request.ieee_mode == IPW_G_MODE) 7419 priv->sys_config.dot11g_auto_detection = 1; 7420 else 7421 priv->sys_config.dot11g_auto_detection = 0; 7422 7423 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 7424 priv->sys_config.answer_broadcast_ssid_probe = 1; 7425 else 7426 priv->sys_config.answer_broadcast_ssid_probe = 0; 7427 7428 err = ipw_send_system_config(priv); 7429 if (err) { 7430 IPW_DEBUG_HC("Attempt to send sys config command failed.\n"); 7431 return err; 7432 } 7433 7434 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi); 7435 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM); 7436 if (err) { 7437 IPW_DEBUG_HC("Attempt to send sensitivity command failed.\n"); 7438 return err; 7439 } 7440 7441 /* 7442 * If preemption is enabled, it is possible for the association 7443 * to complete before we return from ipw_send_associate. Therefore 7444 * we have to be sure to update our private data first. 7445 */ 7446 priv->channel = network->channel; 7447 memcpy(priv->bssid, network->bssid, ETH_ALEN); 7448 priv->status |= STATUS_ASSOCIATING; 7449 priv->status &= ~STATUS_SECURITY_UPDATED; 7450 7451 priv->assoc_network = network; 7452 7453 #ifdef CONFIG_IPW2200_QOS 7454 ipw_qos_association(priv, network); 7455 #endif 7456 7457 err = ipw_send_associate(priv, &priv->assoc_request); 7458 if (err) { 7459 IPW_DEBUG_HC("Attempt to send associate command failed.\n"); 7460 return err; 7461 } 7462 7463 IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n", 7464 priv->essid_len, priv->essid, priv->bssid); 7465 7466 return 0; 7467 } 7468 7469 static void ipw_roam(void *data) 7470 { 7471 struct ipw_priv *priv = data; 7472 struct libipw_network *network = NULL; 7473 struct ipw_network_match match = { 7474 .network = priv->assoc_network 7475 }; 7476 7477 /* The roaming process is as follows: 7478 * 7479 * 1. Missed beacon threshold triggers the roaming process by 7480 * setting the status ROAM bit and requesting a scan. 7481 * 2. When the scan completes, it schedules the ROAM work 7482 * 3.
The ROAM work looks at all of the known networks for one that 7483 * is a better network than the one currently associated. If none 7484 * found, the ROAM process is over (ROAM bit cleared) 7485 * 4. If a better network is found, a disassociation request is 7486 * sent. 7487 * 5. When the disassociation completes, the roam work is again 7488 * scheduled. The second time through, the driver is no longer 7489 * associated, and the newly selected network is sent an 7490 * association request. 7491 * 6. At this point, the roaming process is complete and the ROAM 7492 * status bit is cleared. 7493 */ 7494 7495 /* If we are no longer associated, and the roaming bit is no longer 7496 * set, then we are not actively roaming, so just return */ 7497 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING))) 7498 return; 7499 7500 if (priv->status & STATUS_ASSOCIATED) { 7501 /* First pass through ROAM process -- look for a better 7502 * network */ 7503 unsigned long flags; 7504 u8 rssi = priv->assoc_network->stats.rssi; 7505 priv->assoc_network->stats.rssi = -128; 7506 spin_lock_irqsave(&priv->ieee->lock, flags); 7507 list_for_each_entry(network, &priv->ieee->network_list, list) { 7508 if (network != priv->assoc_network) 7509 ipw_best_network(priv, &match, network, 1); 7510 } 7511 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7512 priv->assoc_network->stats.rssi = rssi; 7513 7514 if (match.network == priv->assoc_network) { 7515 IPW_DEBUG_ASSOC("No better APs in this network to " 7516 "roam to.\n"); 7517 priv->status &= ~STATUS_ROAMING; 7518 ipw_debug_config(priv); 7519 return; 7520 } 7521 7522 ipw_send_disassociate(priv, 1); 7523 priv->assoc_network = match.network; 7524 7525 return; 7526 } 7527 7528 /* Second pass through ROAM process -- request association */ 7529 ipw_compatible_rates(priv, priv->assoc_network, &match.rates); 7530 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1); 7531 priv->status &= ~STATUS_ROAMING; 7532 } 7533 7534 static void ipw_bg_roam(struct work_struct *work) 7535 { 7536 struct ipw_priv *priv = 7537 container_of(work, struct ipw_priv, roam); 7538 mutex_lock(&priv->mutex); 7539 ipw_roam(priv); 7540 mutex_unlock(&priv->mutex); 7541 } 7542 7543 static int ipw_associate(void *data) 7544 { 7545 struct ipw_priv *priv = data; 7546 7547 struct libipw_network *network = NULL; 7548 struct ipw_network_match match = { 7549 .network = NULL 7550 }; 7551 struct ipw_supported_rates *rates; 7552 struct list_head *element; 7553 unsigned long flags; 7554 7555 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 7556 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n"); 7557 return 0; 7558 } 7559 7560 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 7561 IPW_DEBUG_ASSOC("Not attempting association (already in " 7562 "progress)\n"); 7563 return 0; 7564 } 7565 7566 if (priv->status & STATUS_DISASSOCIATING) { 7567 IPW_DEBUG_ASSOC("Not attempting association (in " 7568 "disassociating)\n "); 7569 schedule_work(&priv->associate); 7570 return 0; 7571 } 7572 7573 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) { 7574 IPW_DEBUG_ASSOC("Not attempting association (scanning or not " 7575 "initialized)\n"); 7576 return 0; 7577 } 7578 7579 if (!(priv->config & CFG_ASSOCIATE) && 7580 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) { 7581 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n"); 7582 return 0; 7583 } 7584 7585 /* Protect our use of the network_list */ 7586 spin_lock_irqsave(&priv->ieee->lock, flags); 7587
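/* Walk every network we have seen and let ipw_best_network() record the best compatible candidate in 'match'. */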
list_for_each_entry(network, &priv->ieee->network_list, list) 7588 ipw_best_network(priv, &match, network, 0); 7589 7590 network = match.network; 7591 rates = &match.rates; 7592 7593 if (network == NULL && 7594 priv->ieee->iw_mode == IW_MODE_ADHOC && 7595 priv->config & CFG_ADHOC_CREATE && 7596 priv->config & CFG_STATIC_ESSID && 7597 priv->config & CFG_STATIC_CHANNEL) { 7598 /* Use oldest network if the free list is empty */ 7599 if (list_empty(&priv->ieee->network_free_list)) { 7600 struct libipw_network *oldest = NULL; 7601 struct libipw_network *target; 7602 7603 list_for_each_entry(target, &priv->ieee->network_list, list) { 7604 if ((oldest == NULL) || 7605 (target->last_scanned < oldest->last_scanned)) 7606 oldest = target; 7607 } 7608 7609 /* If there are no more slots, expire the oldest */ 7610 list_del(&oldest->list); 7611 target = oldest; 7612 IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n", 7613 target->ssid_len, target->ssid, 7614 target->bssid); 7615 list_add_tail(&target->list, 7616 &priv->ieee->network_free_list); 7617 } 7618 7619 element = priv->ieee->network_free_list.next; 7620 network = list_entry(element, struct libipw_network, list); 7621 ipw_adhoc_create(priv, network); 7622 rates = &priv->rates; 7623 list_del(element); 7624 list_add_tail(&network->list, &priv->ieee->network_list); 7625 } 7626 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7627 7628 /* If we reached the end of the list, then we don't have any valid 7629 * matching APs */ 7630 if (!network) { 7631 ipw_debug_config(priv); 7632 7633 if (!(priv->status & STATUS_SCANNING)) { 7634 if (!(priv->config & CFG_SPEED_SCAN)) 7635 schedule_delayed_work(&priv->request_scan, 7636 SCAN_INTERVAL); 7637 else 7638 schedule_delayed_work(&priv->request_scan, 0); 7639 } 7640 7641 return 0; 7642 } 7643 7644 ipw_associate_network(priv, network, rates, 0); 7645 7646 return 1; 7647 } 7648 7649 static void ipw_bg_associate(struct work_struct *work) 7650 { 7651 struct ipw_priv *priv = 7652 container_of(work, struct ipw_priv, associate); 7653 mutex_lock(&priv->mutex); 7654 ipw_associate(priv); 7655 mutex_unlock(&priv->mutex); 7656 } 7657 7658 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv, 7659 struct sk_buff *skb) 7660 { 7661 struct ieee80211_hdr *hdr; 7662 u16 fc; 7663 7664 hdr = (struct ieee80211_hdr *)skb->data; 7665 fc = le16_to_cpu(hdr->frame_control); 7666 if (!(fc & IEEE80211_FCTL_PROTECTED)) 7667 return; 7668 7669 fc &= ~IEEE80211_FCTL_PROTECTED; 7670 hdr->frame_control = cpu_to_le16(fc); 7671 switch (priv->ieee->sec.level) { 7672 case SEC_LEVEL_3: 7673 /* Remove CCMP HDR */ 7674 memmove(skb->data + LIBIPW_3ADDR_LEN, 7675 skb->data + LIBIPW_3ADDR_LEN + 8, 7676 skb->len - LIBIPW_3ADDR_LEN - 8); 7677 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */ 7678 break; 7679 case SEC_LEVEL_2: 7680 break; 7681 case SEC_LEVEL_1: 7682 /* Remove IV */ 7683 memmove(skb->data + LIBIPW_3ADDR_LEN, 7684 skb->data + LIBIPW_3ADDR_LEN + 4, 7685 skb->len - LIBIPW_3ADDR_LEN - 4); 7686 skb_trim(skb, skb->len - 8); /* IV + ICV */ 7687 break; 7688 case SEC_LEVEL_0: 7689 break; 7690 default: 7691 printk(KERN_ERR "Unknown security level %d\n", 7692 priv->ieee->sec.level); 7693 break; 7694 } 7695 } 7696 7697 static void ipw_handle_data_packet(struct ipw_priv *priv, 7698 struct ipw_rx_mem_buffer *rxb, 7699 struct libipw_rx_stats *stats) 7700 { 7701 struct net_device *dev = priv->net_dev; 7702 struct libipw_hdr_4addr *hdr; 7703 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 7704 7705 /* We 
received data from the HW, so stop the watchdog */ 7706 netif_trans_update(dev); 7707 7708 /* We only process data packets if the 7709 * interface is open */ 7710 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) > 7711 skb_tailroom(rxb->skb))) { 7712 dev->stats.rx_errors++; 7713 priv->wstats.discard.misc++; 7714 IPW_DEBUG_DROP("Corruption detected! Oh no!\n"); 7715 return; 7716 } else if (unlikely(!netif_running(priv->net_dev))) { 7717 dev->stats.rx_dropped++; 7718 priv->wstats.discard.misc++; 7719 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 7720 return; 7721 } 7722 7723 /* Advance skb->data to the start of the actual payload */ 7724 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data)); 7725 7726 /* Set the size of the skb to the size of the frame */ 7727 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length)); 7728 7729 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); 7730 7731 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */ 7732 hdr = (struct libipw_hdr_4addr *)rxb->skb->data; 7733 if (priv->ieee->iw_mode != IW_MODE_MONITOR && 7734 (is_multicast_ether_addr(hdr->addr1) ? 7735 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt)) 7736 ipw_rebuild_decrypted_skb(priv, rxb->skb); 7737 7738 if (!libipw_rx(priv->ieee, rxb->skb, stats)) 7739 dev->stats.rx_errors++; 7740 else { /* libipw_rx succeeded, so it now owns the SKB */ 7741 rxb->skb = NULL; 7742 __ipw_led_activity_on(priv); 7743 } 7744 } 7745 7746 #ifdef CONFIG_IPW2200_RADIOTAP 7747 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, 7748 struct ipw_rx_mem_buffer *rxb, 7749 struct libipw_rx_stats *stats) 7750 { 7751 struct net_device *dev = priv->net_dev; 7752 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 7753 struct ipw_rx_frame *frame = &pkt->u.frame; 7754 7755 /* initial pull of some data */ 7756 u16 received_channel = frame->received_channel; 7757 u8 antennaAndPhy = frame->antennaAndPhy; 7758 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */ 7759 u16 pktrate = frame->rate; 7760 7761 /* Magic struct that slots into the radiotap header -- no reason 7762 * to build this manually element by element, we can write it much 7763 * more efficiently than we can parse it. ORDER MATTERS HERE */ 7764 struct ipw_rt_hdr *ipw_rt; 7765 7766 unsigned short len = le16_to_cpu(pkt->u.frame.length); 7767 7768 /* We received data from the HW, so stop the watchdog */ 7769 netif_trans_update(dev); 7770 7771 /* We only process data packets if the 7772 * interface is open */ 7773 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) > 7774 skb_tailroom(rxb->skb))) { 7775 dev->stats.rx_errors++; 7776 priv->wstats.discard.misc++; 7777 IPW_DEBUG_DROP("Corruption detected! 
Oh no!\n"); 7778 return; 7779 } else if (unlikely(!netif_running(priv->net_dev))) { 7780 dev->stats.rx_dropped++; 7781 priv->wstats.discard.misc++; 7782 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 7783 return; 7784 } 7785 7786 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use 7787 * that now */ 7788 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) { 7789 /* FIXME: Should alloc bigger skb instead */ 7790 dev->stats.rx_dropped++; 7791 priv->wstats.discard.misc++; 7792 IPW_DEBUG_DROP("Dropping too large packet in monitor\n"); 7793 return; 7794 } 7795 7796 /* copy the frame itself */ 7797 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr), 7798 rxb->skb->data + IPW_RX_FRAME_SIZE, len); 7799 7800 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data; 7801 7802 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; 7803 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ 7804 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */ 7805 7806 /* Big bitfield of all the fields we provide in radiotap */ 7807 ipw_rt->rt_hdr.it_present = cpu_to_le32( 7808 (1 << IEEE80211_RADIOTAP_TSFT) | 7809 (1 << IEEE80211_RADIOTAP_FLAGS) | 7810 (1 << IEEE80211_RADIOTAP_RATE) | 7811 (1 << IEEE80211_RADIOTAP_CHANNEL) | 7812 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | 7813 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | 7814 (1 << IEEE80211_RADIOTAP_ANTENNA)); 7815 7816 /* Zero the flags, we'll add to them as we go */ 7817 ipw_rt->rt_flags = 0; 7818 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 | 7819 frame->parent_tsf[2] << 16 | 7820 frame->parent_tsf[1] << 8 | 7821 frame->parent_tsf[0]); 7822 7823 /* Convert signal to DBM */ 7824 ipw_rt->rt_dbmsignal = antsignal; 7825 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise); 7826 7827 /* Convert the channel data and set the flags */ 7828 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel)); 7829 if (received_channel > 14) { /* 802.11a */ 7830 ipw_rt->rt_chbitmask = 7831 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); 7832 } else if (antennaAndPhy & 32) { /* 802.11b */ 7833 ipw_rt->rt_chbitmask = 7834 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); 7835 } else { /* 802.11g */ 7836 ipw_rt->rt_chbitmask = 7837 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); 7838 } 7839 7840 /* set the rate in multiples of 500k/s */ 7841 switch (pktrate) { 7842 case IPW_TX_RATE_1MB: 7843 ipw_rt->rt_rate = 2; 7844 break; 7845 case IPW_TX_RATE_2MB: 7846 ipw_rt->rt_rate = 4; 7847 break; 7848 case IPW_TX_RATE_5MB: 7849 ipw_rt->rt_rate = 10; 7850 break; 7851 case IPW_TX_RATE_6MB: 7852 ipw_rt->rt_rate = 12; 7853 break; 7854 case IPW_TX_RATE_9MB: 7855 ipw_rt->rt_rate = 18; 7856 break; 7857 case IPW_TX_RATE_11MB: 7858 ipw_rt->rt_rate = 22; 7859 break; 7860 case IPW_TX_RATE_12MB: 7861 ipw_rt->rt_rate = 24; 7862 break; 7863 case IPW_TX_RATE_18MB: 7864 ipw_rt->rt_rate = 36; 7865 break; 7866 case IPW_TX_RATE_24MB: 7867 ipw_rt->rt_rate = 48; 7868 break; 7869 case IPW_TX_RATE_36MB: 7870 ipw_rt->rt_rate = 72; 7871 break; 7872 case IPW_TX_RATE_48MB: 7873 ipw_rt->rt_rate = 96; 7874 break; 7875 case IPW_TX_RATE_54MB: 7876 ipw_rt->rt_rate = 108; 7877 break; 7878 default: 7879 ipw_rt->rt_rate = 0; 7880 break; 7881 } 7882 7883 /* antenna number */ 7884 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? 
*/ 7885 7886 /* set the preamble flag if we have it */ 7887 if ((antennaAndPhy & 64)) 7888 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 7889 7890 /* Set the size of the skb to the size of the frame */ 7891 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr)); 7892 7893 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); 7894 7895 if (!libipw_rx(priv->ieee, rxb->skb, stats)) 7896 dev->stats.rx_errors++; 7897 else { /* libipw_rx succeeded, so it now owns the SKB */ 7898 rxb->skb = NULL; 7899 /* no LED during capture */ 7900 } 7901 } 7902 #endif 7903 7904 #ifdef CONFIG_IPW2200_PROMISCUOUS 7905 #define libipw_is_probe_response(fc) \ 7906 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \ 7907 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP ) 7908 7909 #define libipw_is_management(fc) \ 7910 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) 7911 7912 #define libipw_is_control(fc) \ 7913 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) 7914 7915 #define libipw_is_data(fc) \ 7916 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) 7917 7918 #define libipw_is_assoc_request(fc) \ 7919 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ) 7920 7921 #define libipw_is_reassoc_request(fc) \ 7922 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) 7923 7924 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, 7925 struct ipw_rx_mem_buffer *rxb, 7926 struct libipw_rx_stats *stats) 7927 { 7928 struct net_device *dev = priv->prom_net_dev; 7929 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 7930 struct ipw_rx_frame *frame = &pkt->u.frame; 7931 struct ipw_rt_hdr *ipw_rt; 7932 7933 /* First cache any information we need before we overwrite 7934 * the information provided in the skb from the hardware */ 7935 struct ieee80211_hdr *hdr; 7936 u16 channel = frame->received_channel; 7937 u8 phy_flags = frame->antennaAndPhy; 7938 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM; 7939 s8 noise = (s8) le16_to_cpu(frame->noise); 7940 u8 rate = frame->rate; 7941 unsigned short len = le16_to_cpu(pkt->u.frame.length); 7942 struct sk_buff *skb; 7943 int hdr_only = 0; 7944 u16 filter = priv->prom_priv->filter; 7945 7946 /* If the filter is set to not include Rx frames then return */ 7947 if (filter & IPW_PROM_NO_RX) 7948 return; 7949 7950 /* We received data from the HW, so stop the watchdog */ 7951 netif_trans_update(dev); 7952 7953 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { 7954 dev->stats.rx_errors++; 7955 IPW_DEBUG_DROP("Corruption detected! 
Oh no!\n"); 7956 return; 7957 } 7958 7959 /* We only process data packets if the interface is open */ 7960 if (unlikely(!netif_running(dev))) { 7961 dev->stats.rx_dropped++; 7962 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 7963 return; 7964 } 7965 7966 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use 7967 * that now */ 7968 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) { 7969 /* FIXME: Should alloc bigger skb instead */ 7970 dev->stats.rx_dropped++; 7971 IPW_DEBUG_DROP("Dropping too large packet in monitor\n"); 7972 return; 7973 } 7974 7975 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE; 7976 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) { 7977 if (filter & IPW_PROM_NO_MGMT) 7978 return; 7979 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 7980 hdr_only = 1; 7981 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) { 7982 if (filter & IPW_PROM_NO_CTL) 7983 return; 7984 if (filter & IPW_PROM_CTL_HEADER_ONLY) 7985 hdr_only = 1; 7986 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) { 7987 if (filter & IPW_PROM_NO_DATA) 7988 return; 7989 if (filter & IPW_PROM_DATA_HEADER_ONLY) 7990 hdr_only = 1; 7991 } 7992 7993 /* Copy the SKB since this is for the promiscuous side */ 7994 skb = skb_copy(rxb->skb, GFP_ATOMIC); 7995 if (skb == NULL) { 7996 IPW_ERROR("skb_clone failed for promiscuous copy.\n"); 7997 return; 7998 } 7999 8000 /* copy the frame data to write after where the radiotap header goes */ 8001 ipw_rt = (void *)skb->data; 8002 8003 if (hdr_only) 8004 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control)); 8005 8006 memcpy(ipw_rt->payload, hdr, len); 8007 8008 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; 8009 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ 8010 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */ 8011 8012 /* Set the size of the skb to the size of the frame */ 8013 skb_put(skb, sizeof(*ipw_rt) + len); 8014 8015 /* Big bitfield of all the fields we provide in radiotap */ 8016 ipw_rt->rt_hdr.it_present = cpu_to_le32( 8017 (1 << IEEE80211_RADIOTAP_TSFT) | 8018 (1 << IEEE80211_RADIOTAP_FLAGS) | 8019 (1 << IEEE80211_RADIOTAP_RATE) | 8020 (1 << IEEE80211_RADIOTAP_CHANNEL) | 8021 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | 8022 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | 8023 (1 << IEEE80211_RADIOTAP_ANTENNA)); 8024 8025 /* Zero the flags, we'll add to them as we go */ 8026 ipw_rt->rt_flags = 0; 8027 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 | 8028 frame->parent_tsf[2] << 16 | 8029 frame->parent_tsf[1] << 8 | 8030 frame->parent_tsf[0]); 8031 8032 /* Convert to DBM */ 8033 ipw_rt->rt_dbmsignal = signal; 8034 ipw_rt->rt_dbmnoise = noise; 8035 8036 /* Convert the channel data and set the flags */ 8037 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel)); 8038 if (channel > 14) { /* 802.11a */ 8039 ipw_rt->rt_chbitmask = 8040 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); 8041 } else if (phy_flags & (1 << 5)) { /* 802.11b */ 8042 ipw_rt->rt_chbitmask = 8043 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); 8044 } else { /* 802.11g */ 8045 ipw_rt->rt_chbitmask = 8046 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); 8047 } 8048 8049 /* set the rate in multiples of 500k/s */ 8050 switch (rate) { 8051 case IPW_TX_RATE_1MB: 8052 ipw_rt->rt_rate = 2; 8053 break; 8054 case IPW_TX_RATE_2MB: 8055 ipw_rt->rt_rate = 4; 8056 break; 8057 case IPW_TX_RATE_5MB: 8058 ipw_rt->rt_rate = 10; 8059 break; 8060 case IPW_TX_RATE_6MB: 8061 ipw_rt->rt_rate = 
12; 8062 break; 8063 case IPW_TX_RATE_9MB: 8064 ipw_rt->rt_rate = 18; 8065 break; 8066 case IPW_TX_RATE_11MB: 8067 ipw_rt->rt_rate = 22; 8068 break; 8069 case IPW_TX_RATE_12MB: 8070 ipw_rt->rt_rate = 24; 8071 break; 8072 case IPW_TX_RATE_18MB: 8073 ipw_rt->rt_rate = 36; 8074 break; 8075 case IPW_TX_RATE_24MB: 8076 ipw_rt->rt_rate = 48; 8077 break; 8078 case IPW_TX_RATE_36MB: 8079 ipw_rt->rt_rate = 72; 8080 break; 8081 case IPW_TX_RATE_48MB: 8082 ipw_rt->rt_rate = 96; 8083 break; 8084 case IPW_TX_RATE_54MB: 8085 ipw_rt->rt_rate = 108; 8086 break; 8087 default: 8088 ipw_rt->rt_rate = 0; 8089 break; 8090 } 8091 8092 /* antenna number */ 8093 ipw_rt->rt_antenna = (phy_flags & 3); 8094 8095 /* set the preamble flag if we have it */ 8096 if (phy_flags & (1 << 6)) 8097 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 8098 8099 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len); 8100 8101 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) { 8102 dev->stats.rx_errors++; 8103 dev_kfree_skb_any(skb); 8104 } 8105 } 8106 #endif 8107 8108 static int is_network_packet(struct ipw_priv *priv, 8109 struct libipw_hdr_4addr *header) 8110 { 8111 /* Filter incoming packets to determine if they are targeted toward 8112 * this network, discarding packets coming from ourselves */ 8113 switch (priv->ieee->iw_mode) { 8114 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */ 8115 /* packets from our adapter are dropped (echo) */ 8116 if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr)) 8117 return 0; 8118 8119 /* {broad,multi}cast packets to our BSSID go through */ 8120 if (is_multicast_ether_addr(header->addr1)) 8121 return ether_addr_equal(header->addr3, priv->bssid); 8122 8123 /* packets to our adapter go through */ 8124 return ether_addr_equal(header->addr1, 8125 priv->net_dev->dev_addr); 8126 8127 case IW_MODE_INFRA: /* Header: Dest. 
| BSSID | Source */ 8128 /* packets from our adapter are dropped (echo) */ 8129 if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr)) 8130 return 0; 8131 8132 /* {broad,multi}cast packets to our BSS go through */ 8133 if (is_multicast_ether_addr(header->addr1)) 8134 return ether_addr_equal(header->addr2, priv->bssid); 8135 8136 /* packets to our adapter go through */ 8137 return ether_addr_equal(header->addr1, 8138 priv->net_dev->dev_addr); 8139 } 8140 8141 return 1; 8142 } 8143 8144 #define IPW_PACKET_RETRY_TIME HZ 8145 8146 static int is_duplicate_packet(struct ipw_priv *priv, 8147 struct libipw_hdr_4addr *header) 8148 { 8149 u16 sc = le16_to_cpu(header->seq_ctl); 8150 u16 seq = WLAN_GET_SEQ_SEQ(sc); 8151 u16 frag = WLAN_GET_SEQ_FRAG(sc); 8152 u16 *last_seq, *last_frag; 8153 unsigned long *last_time; 8154 8155 switch (priv->ieee->iw_mode) { 8156 case IW_MODE_ADHOC: 8157 { 8158 struct list_head *p; 8159 struct ipw_ibss_seq *entry = NULL; 8160 u8 *mac = header->addr2; 8161 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE; 8162 8163 list_for_each(p, &priv->ibss_mac_hash[index]) { 8164 entry = 8165 list_entry(p, struct ipw_ibss_seq, list); 8166 if (ether_addr_equal(entry->mac, mac)) 8167 break; 8168 } 8169 if (p == &priv->ibss_mac_hash[index]) { 8170 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 8171 if (!entry) { 8172 IPW_ERROR 8173 ("Cannot malloc new mac entry\n"); 8174 return 0; 8175 } 8176 memcpy(entry->mac, mac, ETH_ALEN); 8177 entry->seq_num = seq; 8178 entry->frag_num = frag; 8179 entry->packet_time = jiffies; 8180 list_add(&entry->list, 8181 &priv->ibss_mac_hash[index]); 8182 return 0; 8183 } 8184 last_seq = &entry->seq_num; 8185 last_frag = &entry->frag_num; 8186 last_time = &entry->packet_time; 8187 break; 8188 } 8189 case IW_MODE_INFRA: 8190 last_seq = &priv->last_seq_num; 8191 last_frag = &priv->last_frag_num; 8192 last_time = &priv->last_packet_time; 8193 break; 8194 default: 8195 return 0; 8196 } 8197 if ((*last_seq == seq) && 8198 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) { 8199 if (*last_frag == frag) 8200 goto drop; 8201 if (*last_frag + 1 != frag) 8202 /* out-of-order fragment */ 8203 goto drop; 8204 } else 8205 *last_seq = seq; 8206 8207 *last_frag = frag; 8208 *last_time = jiffies; 8209 return 0; 8210 8211 drop: 8212 /* Comment this line now since we observed the card receives 8213 * duplicate packets but the FCTL_RETRY bit is not set in the 8214 * IBSS mode with fragmentation enabled. 
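* The frame is still reported as a duplicate (we return 1 below) and is dropped by the caller; we simply no longer assert that the retry bit was set.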
8215 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */ 8216 return 1; 8217 } 8218 8219 static void ipw_handle_mgmt_packet(struct ipw_priv *priv, 8220 struct ipw_rx_mem_buffer *rxb, 8221 struct libipw_rx_stats *stats) 8222 { 8223 struct sk_buff *skb = rxb->skb; 8224 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data; 8225 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *) 8226 (skb->data + IPW_RX_FRAME_SIZE); 8227 8228 libipw_rx_mgt(priv->ieee, header, stats); 8229 8230 if (priv->ieee->iw_mode == IW_MODE_ADHOC && 8231 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) == 8232 IEEE80211_STYPE_PROBE_RESP) || 8233 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) == 8234 IEEE80211_STYPE_BEACON))) { 8235 if (ether_addr_equal(header->addr3, priv->bssid)) 8236 ipw_add_station(priv, header->addr2); 8237 } 8238 8239 if (priv->config & CFG_NET_STATS) { 8240 IPW_DEBUG_HC("sending stat packet\n"); 8241 8242 /* Set the size of the skb to the size of the full 8243 * ipw header and 802.11 frame */ 8244 skb_put(skb, le16_to_cpu(pkt->u.frame.length) + 8245 IPW_RX_FRAME_SIZE); 8246 8247 /* Advance past the ipw packet header to the 802.11 frame */ 8248 skb_pull(skb, IPW_RX_FRAME_SIZE); 8249 8250 /* Push the libipw_rx_stats before the 802.11 frame */ 8251 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats)); 8252 8253 skb->dev = priv->ieee->dev; 8254 8255 /* Point raw at the libipw_stats */ 8256 skb_reset_mac_header(skb); 8257 8258 skb->pkt_type = PACKET_OTHERHOST; 8259 skb->protocol = cpu_to_be16(ETH_P_80211_STATS); 8260 memset(skb->cb, 0, sizeof(rxb->skb->cb)); 8261 netif_rx(skb); 8262 rxb->skb = NULL; 8263 } 8264 } 8265 8266 /* 8267 * Main entry function for receiving a packet with 80211 headers. This 8268 * should be called when ever the FW has notified us that there is a new 8269 * skb in the receive queue. 8270 */ 8271 static void ipw_rx(struct ipw_priv *priv) 8272 { 8273 struct ipw_rx_mem_buffer *rxb; 8274 struct ipw_rx_packet *pkt; 8275 struct libipw_hdr_4addr *header; 8276 u32 r, w, i; 8277 u8 network_packet; 8278 u8 fill_rx = 0; 8279 8280 r = ipw_read32(priv, IPW_RX_READ_INDEX); 8281 w = ipw_read32(priv, IPW_RX_WRITE_INDEX); 8282 i = priv->rxq->read; 8283 8284 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2)) 8285 fill_rx = 1; 8286 8287 while (i != r) { 8288 rxb = priv->rxq->queue[i]; 8289 if (unlikely(rxb == NULL)) { 8290 printk(KERN_CRIT "Queue not allocated!\n"); 8291 break; 8292 } 8293 priv->rxq->queue[i] = NULL; 8294 8295 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 8296 IPW_RX_BUF_SIZE, 8297 PCI_DMA_FROMDEVICE); 8298 8299 pkt = (struct ipw_rx_packet *)rxb->skb->data; 8300 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n", 8301 pkt->header.message_type, 8302 pkt->header.rx_seq_num, pkt->header.control_bits); 8303 8304 switch (pkt->header.message_type) { 8305 case RX_FRAME_TYPE: /* 802.11 frame */ { 8306 struct libipw_rx_stats stats = { 8307 .rssi = pkt->u.frame.rssi_dbm - 8308 IPW_RSSI_TO_DBM, 8309 .signal = 8310 pkt->u.frame.rssi_dbm - 8311 IPW_RSSI_TO_DBM + 0x100, 8312 .noise = 8313 le16_to_cpu(pkt->u.frame.noise), 8314 .rate = pkt->u.frame.rate, 8315 .mac_time = jiffies, 8316 .received_channel = 8317 pkt->u.frame.received_channel, 8318 .freq = 8319 (pkt->u.frame. 8320 control & (1 << 0)) ? 
8321 LIBIPW_24GHZ_BAND : 8322 LIBIPW_52GHZ_BAND, 8323 .len = le16_to_cpu(pkt->u.frame.length), 8324 }; 8325 8326 if (stats.rssi != 0) 8327 stats.mask |= LIBIPW_STATMASK_RSSI; 8328 if (stats.signal != 0) 8329 stats.mask |= LIBIPW_STATMASK_SIGNAL; 8330 if (stats.noise != 0) 8331 stats.mask |= LIBIPW_STATMASK_NOISE; 8332 if (stats.rate != 0) 8333 stats.mask |= LIBIPW_STATMASK_RATE; 8334 8335 priv->rx_packets++; 8336 8337 #ifdef CONFIG_IPW2200_PROMISCUOUS 8338 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) 8339 ipw_handle_promiscuous_rx(priv, rxb, &stats); 8340 #endif 8341 8342 #ifdef CONFIG_IPW2200_MONITOR 8343 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 8344 #ifdef CONFIG_IPW2200_RADIOTAP 8345 8346 ipw_handle_data_packet_monitor(priv, 8347 rxb, 8348 &stats); 8349 #else 8350 ipw_handle_data_packet(priv, rxb, 8351 &stats); 8352 #endif 8353 break; 8354 } 8355 #endif 8356 8357 header = 8358 (struct libipw_hdr_4addr *)(rxb->skb-> 8359 data + 8360 IPW_RX_FRAME_SIZE); 8361 /* TODO: Check Ad-Hoc dest/source and make sure 8362 * that we are actually parsing these packets 8363 * correctly -- we should probably use the 8364 * frame control of the packet and disregard 8365 * the current iw_mode */ 8366 8367 network_packet = 8368 is_network_packet(priv, header); 8369 if (network_packet && priv->assoc_network) { 8370 priv->assoc_network->stats.rssi = 8371 stats.rssi; 8372 priv->exp_avg_rssi = 8373 exponential_average(priv->exp_avg_rssi, 8374 stats.rssi, DEPTH_RSSI); 8375 } 8376 8377 IPW_DEBUG_RX("Frame: len=%u\n", 8378 le16_to_cpu(pkt->u.frame.length)); 8379 8380 if (le16_to_cpu(pkt->u.frame.length) < 8381 libipw_get_hdrlen(le16_to_cpu( 8382 header->frame_ctl))) { 8383 IPW_DEBUG_DROP 8384 ("Received packet is too small. " 8385 "Dropping.\n"); 8386 priv->net_dev->stats.rx_errors++; 8387 priv->wstats.discard.misc++; 8388 break; 8389 } 8390 8391 switch (WLAN_FC_GET_TYPE 8392 (le16_to_cpu(header->frame_ctl))) { 8393 8394 case IEEE80211_FTYPE_MGMT: 8395 ipw_handle_mgmt_packet(priv, rxb, 8396 &stats); 8397 break; 8398 8399 case IEEE80211_FTYPE_CTL: 8400 break; 8401 8402 case IEEE80211_FTYPE_DATA: 8403 if (unlikely(!network_packet || 8404 is_duplicate_packet(priv, 8405 header))) 8406 { 8407 IPW_DEBUG_DROP("Dropping: " 8408 "%pM, " 8409 "%pM, " 8410 "%pM\n", 8411 header->addr1, 8412 header->addr2, 8413 header->addr3); 8414 break; 8415 } 8416 8417 ipw_handle_data_packet(priv, rxb, 8418 &stats); 8419 8420 break; 8421 } 8422 break; 8423 } 8424 8425 case RX_HOST_NOTIFICATION_TYPE:{ 8426 IPW_DEBUG_RX 8427 ("Notification: subtype=%02X flags=%02X size=%d\n", 8428 pkt->u.notification.subtype, 8429 pkt->u.notification.flags, 8430 le16_to_cpu(pkt->u.notification.size)); 8431 ipw_rx_notification(priv, &pkt->u.notification); 8432 break; 8433 } 8434 8435 default: 8436 IPW_DEBUG_RX("Bad Rx packet of type %d\n", 8437 pkt->header.message_type); 8438 break; 8439 } 8440 8441 /* For now we just don't re-use anything. 
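Each consumed buffer is unmapped and queued back on the rx_used list so the queue can be replenished.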
We can tweak this 8442 * later to try and re-use notification packets and SKBs that 8443 * fail to Rx correctly */ 8444 if (rxb->skb != NULL) { 8445 dev_kfree_skb_any(rxb->skb); 8446 rxb->skb = NULL; 8447 } 8448 8449 pci_unmap_single(priv->pci_dev, rxb->dma_addr, 8450 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 8451 list_add_tail(&rxb->list, &priv->rxq->rx_used); 8452 8453 i = (i + 1) % RX_QUEUE_SIZE; 8454 8455 /* If there are a lot of unused frames, restock the Rx queue 8456 * so the ucode won't assert */ 8457 if (fill_rx) { 8458 priv->rxq->read = i; 8459 ipw_rx_queue_replenish(priv); 8460 } 8461 } 8462 8463 /* Backtrack one entry */ 8464 priv->rxq->read = i; 8465 ipw_rx_queue_restock(priv); 8466 } 8467 8468 #define DEFAULT_RTS_THRESHOLD 2304U 8469 #define MIN_RTS_THRESHOLD 1U 8470 #define MAX_RTS_THRESHOLD 2304U 8471 #define DEFAULT_BEACON_INTERVAL 100U 8472 #define DEFAULT_SHORT_RETRY_LIMIT 7U 8473 #define DEFAULT_LONG_RETRY_LIMIT 4U 8474 8475 /** 8476 * ipw_sw_reset 8477 * @option: options to control different reset behaviour 8478 * 0 = reset everything except the 'disable' module_param 8479 * 1 = reset everything and print out driver info (for probe only) 8480 * 2 = reset everything 8481 */ 8482 static int ipw_sw_reset(struct ipw_priv *priv, int option) 8483 { 8484 int band, modulation; 8485 int old_mode = priv->ieee->iw_mode; 8486 8487 /* Initialize module parameter values here */ 8488 priv->config = 0; 8489 8490 /* We default to disabling the LED code as right now it causes 8491 * too many systems to lock up... */ 8492 if (!led_support) 8493 priv->config |= CFG_NO_LED; 8494 8495 if (associate) 8496 priv->config |= CFG_ASSOCIATE; 8497 else 8498 IPW_DEBUG_INFO("Auto associate disabled.\n"); 8499 8500 if (auto_create) 8501 priv->config |= CFG_ADHOC_CREATE; 8502 else 8503 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n"); 8504 8505 priv->config &= ~CFG_STATIC_ESSID; 8506 priv->essid_len = 0; 8507 memset(priv->essid, 0, IW_ESSID_MAX_SIZE); 8508 8509 if (disable && option) { 8510 priv->status |= STATUS_RF_KILL_SW; 8511 IPW_DEBUG_INFO("Radio disabled.\n"); 8512 } 8513 8514 if (default_channel != 0) { 8515 priv->config |= CFG_STATIC_CHANNEL; 8516 priv->channel = default_channel; 8517 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel); 8518 /* TODO: Validate that provided channel is in range */ 8519 } 8520 #ifdef CONFIG_IPW2200_QOS 8521 ipw_qos_init(priv, qos_enable, qos_burst_enable, 8522 burst_duration_CCK, burst_duration_OFDM); 8523 #endif /* CONFIG_IPW2200_QOS */ 8524 8525 switch (network_mode) { 8526 case 1: 8527 priv->ieee->iw_mode = IW_MODE_ADHOC; 8528 priv->net_dev->type = ARPHRD_ETHER; 8529 8530 break; 8531 #ifdef CONFIG_IPW2200_MONITOR 8532 case 2: 8533 priv->ieee->iw_mode = IW_MODE_MONITOR; 8534 #ifdef CONFIG_IPW2200_RADIOTAP 8535 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 8536 #else 8537 priv->net_dev->type = ARPHRD_IEEE80211; 8538 #endif 8539 break; 8540 #endif 8541 default: 8542 case 0: 8543 priv->net_dev->type = ARPHRD_ETHER; 8544 priv->ieee->iw_mode = IW_MODE_INFRA; 8545 break; 8546 } 8547 8548 if (hwcrypto) { 8549 priv->ieee->host_encrypt = 0; 8550 priv->ieee->host_encrypt_msdu = 0; 8551 priv->ieee->host_decrypt = 0; 8552 priv->ieee->host_mc_decrypt = 0; 8553 } 8554 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off"); 8555 8556 /* IPW2200/2915 is able to do hardware fragmentation. 
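* Host-side fragmentation is therefore left disabled and the firmware handles it; host_open_frag is cleared just below.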
*/ 8557 priv->ieee->host_open_frag = 0; 8558 8559 if ((priv->pci_dev->device == 0x4223) || 8560 (priv->pci_dev->device == 0x4224)) { 8561 if (option == 1) 8562 printk(KERN_INFO DRV_NAME 8563 ": Detected Intel PRO/Wireless 2915ABG Network " 8564 "Connection\n"); 8565 priv->ieee->abg_true = 1; 8566 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND; 8567 modulation = LIBIPW_OFDM_MODULATION | 8568 LIBIPW_CCK_MODULATION; 8569 priv->adapter = IPW_2915ABG; 8570 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B; 8571 } else { 8572 if (option == 1) 8573 printk(KERN_INFO DRV_NAME 8574 ": Detected Intel PRO/Wireless 2200BG Network " 8575 "Connection\n"); 8576 8577 priv->ieee->abg_true = 0; 8578 band = LIBIPW_24GHZ_BAND; 8579 modulation = LIBIPW_OFDM_MODULATION | 8580 LIBIPW_CCK_MODULATION; 8581 priv->adapter = IPW_2200BG; 8582 priv->ieee->mode = IEEE_G | IEEE_B; 8583 } 8584 8585 priv->ieee->freq_band = band; 8586 priv->ieee->modulation = modulation; 8587 8588 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK; 8589 8590 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; 8591 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; 8592 8593 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 8594 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT; 8595 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT; 8596 8597 /* If power management is turned on, default to AC mode */ 8598 priv->power_mode = IPW_POWER_AC; 8599 priv->tx_power = IPW_TX_POWER_DEFAULT; 8600 8601 return old_mode == priv->ieee->iw_mode; 8602 } 8603 8604 /* 8605 * This file defines the Wireless Extension handlers. It does not 8606 * define any methods of hardware manipulation and relies on the 8607 * functions defined in ipw_main to provide the HW interaction. 8608 * 8609 * The exception to this is the use of the ipw_get_ordinal() 8610 * function used to poll the hardware vs. making unnecessary calls. 
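* Most of the handlers below take priv->mutex around the driver state they touch, which keeps them serialized with the driver's background work items.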
8611 * 8612 */ 8613 8614 static int ipw_set_channel(struct ipw_priv *priv, u8 channel) 8615 { 8616 if (channel == 0) { 8617 IPW_DEBUG_INFO("Setting channel to ANY (0)\n"); 8618 priv->config &= ~CFG_STATIC_CHANNEL; 8619 IPW_DEBUG_ASSOC("Attempting to associate with new " 8620 "parameters.\n"); 8621 ipw_associate(priv); 8622 return 0; 8623 } 8624 8625 priv->config |= CFG_STATIC_CHANNEL; 8626 8627 if (priv->channel == channel) { 8628 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n", 8629 channel); 8630 return 0; 8631 } 8632 8633 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel); 8634 priv->channel = channel; 8635 8636 #ifdef CONFIG_IPW2200_MONITOR 8637 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 8638 int i; 8639 if (priv->status & STATUS_SCANNING) { 8640 IPW_DEBUG_SCAN("Scan abort triggered due to " 8641 "channel change.\n"); 8642 ipw_abort_scan(priv); 8643 } 8644 8645 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--) 8646 udelay(10); 8647 8648 if (priv->status & STATUS_SCANNING) 8649 IPW_DEBUG_SCAN("Still scanning...\n"); 8650 else 8651 IPW_DEBUG_SCAN("Took %dms to abort current scan\n", 8652 1000 - i); 8653 8654 return 0; 8655 } 8656 #endif /* CONFIG_IPW2200_MONITOR */ 8657 8658 /* Network configuration changed -- force [re]association */ 8659 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n"); 8660 if (!ipw_disassociate(priv)) 8661 ipw_associate(priv); 8662 8663 return 0; 8664 } 8665 8666 static int ipw_wx_set_freq(struct net_device *dev, 8667 struct iw_request_info *info, 8668 union iwreq_data *wrqu, char *extra) 8669 { 8670 struct ipw_priv *priv = libipw_priv(dev); 8671 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 8672 struct iw_freq *fwrq = &wrqu->freq; 8673 int ret = 0, i; 8674 u8 channel, flags; 8675 int band; 8676 8677 if (fwrq->m == 0) { 8678 IPW_DEBUG_WX("SET Freq/Channel -> any\n"); 8679 mutex_lock(&priv->mutex); 8680 ret = ipw_set_channel(priv, 0); 8681 mutex_unlock(&priv->mutex); 8682 return ret; 8683 } 8684 /* if setting by freq convert to channel */ 8685 if (fwrq->e == 1) { 8686 channel = libipw_freq_to_channel(priv->ieee, fwrq->m); 8687 if (channel == 0) 8688 return -EINVAL; 8689 } else 8690 channel = fwrq->m; 8691 8692 if (!(band = libipw_is_valid_channel(priv->ieee, channel))) 8693 return -EINVAL; 8694 8695 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 8696 i = libipw_channel_to_index(priv->ieee, channel); 8697 if (i == -1) 8698 return -EINVAL; 8699 8700 flags = (band == LIBIPW_24GHZ_BAND) ? 
8701 geo->bg[i].flags : geo->a[i].flags; 8702 if (flags & LIBIPW_CH_PASSIVE_ONLY) { 8703 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n"); 8704 return -EINVAL; 8705 } 8706 } 8707 8708 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m); 8709 mutex_lock(&priv->mutex); 8710 ret = ipw_set_channel(priv, channel); 8711 mutex_unlock(&priv->mutex); 8712 return ret; 8713 } 8714 8715 static int ipw_wx_get_freq(struct net_device *dev, 8716 struct iw_request_info *info, 8717 union iwreq_data *wrqu, char *extra) 8718 { 8719 struct ipw_priv *priv = libipw_priv(dev); 8720 8721 wrqu->freq.e = 0; 8722 8723 /* If we are associated, trying to associate, or have a statically 8724 * configured CHANNEL then return that; otherwise return ANY */ 8725 mutex_lock(&priv->mutex); 8726 if (priv->config & CFG_STATIC_CHANNEL || 8727 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) { 8728 int i; 8729 8730 i = libipw_channel_to_index(priv->ieee, priv->channel); 8731 BUG_ON(i == -1); 8732 wrqu->freq.e = 1; 8733 8734 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 8735 case LIBIPW_52GHZ_BAND: 8736 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000; 8737 break; 8738 8739 case LIBIPW_24GHZ_BAND: 8740 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000; 8741 break; 8742 8743 default: 8744 BUG(); 8745 } 8746 } else 8747 wrqu->freq.m = 0; 8748 8749 mutex_unlock(&priv->mutex); 8750 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel); 8751 return 0; 8752 } 8753 8754 static int ipw_wx_set_mode(struct net_device *dev, 8755 struct iw_request_info *info, 8756 union iwreq_data *wrqu, char *extra) 8757 { 8758 struct ipw_priv *priv = libipw_priv(dev); 8759 int err = 0; 8760 8761 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode); 8762 8763 switch (wrqu->mode) { 8764 #ifdef CONFIG_IPW2200_MONITOR 8765 case IW_MODE_MONITOR: 8766 #endif 8767 case IW_MODE_ADHOC: 8768 case IW_MODE_INFRA: 8769 break; 8770 case IW_MODE_AUTO: 8771 wrqu->mode = IW_MODE_INFRA; 8772 break; 8773 default: 8774 return -EINVAL; 8775 } 8776 if (wrqu->mode == priv->ieee->iw_mode) 8777 return 0; 8778 8779 mutex_lock(&priv->mutex); 8780 8781 ipw_sw_reset(priv, 0); 8782 8783 #ifdef CONFIG_IPW2200_MONITOR 8784 if (priv->ieee->iw_mode == IW_MODE_MONITOR) 8785 priv->net_dev->type = ARPHRD_ETHER; 8786 8787 if (wrqu->mode == IW_MODE_MONITOR) 8788 #ifdef CONFIG_IPW2200_RADIOTAP 8789 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 8790 #else 8791 priv->net_dev->type = ARPHRD_IEEE80211; 8792 #endif 8793 #endif /* CONFIG_IPW2200_MONITOR */ 8794 8795 /* Free the existing firmware and reset the fw_loaded 8796 * flag so ipw_load() will bring in the new firmware */ 8797 free_firmware(); 8798 8799 priv->ieee->iw_mode = wrqu->mode; 8800 8801 schedule_work(&priv->adapter_restart); 8802 mutex_unlock(&priv->mutex); 8803 return err; 8804 } 8805 8806 static int ipw_wx_get_mode(struct net_device *dev, 8807 struct iw_request_info *info, 8808 union iwreq_data *wrqu, char *extra) 8809 { 8810 struct ipw_priv *priv = libipw_priv(dev); 8811 mutex_lock(&priv->mutex); 8812 wrqu->mode = priv->ieee->iw_mode; 8813 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode); 8814 mutex_unlock(&priv->mutex); 8815 return 0; 8816 } 8817 8818 /* Values are in microsecond */ 8819 static const s32 timeout_duration[] = { 8820 350000, 8821 250000, 8822 75000, 8823 37000, 8824 25000, 8825 }; 8826 8827 static const s32 period_duration[] = { 8828 400000, 8829 700000, 8830 1000000, 8831 1000000, 8832 1000000 8833 }; 8834 8835 static int ipw_wx_get_range(struct net_device *dev, 8836 struct iw_request_info *info, 
8837 union iwreq_data *wrqu, char *extra) 8838 { 8839 struct ipw_priv *priv = libipw_priv(dev); 8840 struct iw_range *range = (struct iw_range *)extra; 8841 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 8842 int i = 0, j; 8843 8844 wrqu->data.length = sizeof(*range); 8845 memset(range, 0, sizeof(*range)); 8846 8847 /* 54Mbs == ~27 Mb/s real (802.11g) */ 8848 range->throughput = 27 * 1000 * 1000; 8849 8850 range->max_qual.qual = 100; 8851 /* TODO: Find real max RSSI and stick here */ 8852 range->max_qual.level = 0; 8853 range->max_qual.noise = 0; 8854 range->max_qual.updated = 7; /* Updated all three */ 8855 8856 range->avg_qual.qual = 70; 8857 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ 8858 range->avg_qual.level = 0; /* FIXME to real average level */ 8859 range->avg_qual.noise = 0; 8860 range->avg_qual.updated = 7; /* Updated all three */ 8861 mutex_lock(&priv->mutex); 8862 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES); 8863 8864 for (i = 0; i < range->num_bitrates; i++) 8865 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) * 8866 500000; 8867 8868 range->max_rts = DEFAULT_RTS_THRESHOLD; 8869 range->min_frag = MIN_FRAG_THRESHOLD; 8870 range->max_frag = MAX_FRAG_THRESHOLD; 8871 8872 range->encoding_size[0] = 5; 8873 range->encoding_size[1] = 13; 8874 range->num_encoding_sizes = 2; 8875 range->max_encoding_tokens = WEP_KEYS; 8876 8877 /* Set the Wireless Extension versions */ 8878 range->we_version_compiled = WIRELESS_EXT; 8879 range->we_version_source = 18; 8880 8881 i = 0; 8882 if (priv->ieee->mode & (IEEE_B | IEEE_G)) { 8883 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) { 8884 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 8885 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY)) 8886 continue; 8887 8888 range->freq[i].i = geo->bg[j].channel; 8889 range->freq[i].m = geo->bg[j].freq * 100000; 8890 range->freq[i].e = 1; 8891 i++; 8892 } 8893 } 8894 8895 if (priv->ieee->mode & IEEE_A) { 8896 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) { 8897 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 8898 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY)) 8899 continue; 8900 8901 range->freq[i].i = geo->a[j].channel; 8902 range->freq[i].m = geo->a[j].freq * 100000; 8903 range->freq[i].e = 1; 8904 i++; 8905 } 8906 } 8907 8908 range->num_channels = i; 8909 range->num_frequency = i; 8910 8911 mutex_unlock(&priv->mutex); 8912 8913 /* Event capability (kernel + driver) */ 8914 range->event_capa[0] = (IW_EVENT_CAPA_K_0 | 8915 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | 8916 IW_EVENT_CAPA_MASK(SIOCGIWAP) | 8917 IW_EVENT_CAPA_MASK(SIOCGIWSCAN)); 8918 range->event_capa[1] = IW_EVENT_CAPA_K_1; 8919 8920 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | 8921 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; 8922 8923 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE; 8924 8925 IPW_DEBUG_WX("GET Range\n"); 8926 return 0; 8927 } 8928 8929 static int ipw_wx_set_wap(struct net_device *dev, 8930 struct iw_request_info *info, 8931 union iwreq_data *wrqu, char *extra) 8932 { 8933 struct ipw_priv *priv = libipw_priv(dev); 8934 8935 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 8936 return -EINVAL; 8937 mutex_lock(&priv->mutex); 8938 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) || 8939 is_zero_ether_addr(wrqu->ap_addr.sa_data)) { 8940 /* we disable mandatory BSSID association */ 8941 IPW_DEBUG_WX("Setting AP BSSID to ANY\n"); 8942 priv->config &= ~CFG_STATIC_BSSID; 8943 IPW_DEBUG_ASSOC("Attempting to associate with new " 
8944 "parameters.\n"); 8945 ipw_associate(priv); 8946 mutex_unlock(&priv->mutex); 8947 return 0; 8948 } 8949 8950 priv->config |= CFG_STATIC_BSSID; 8951 if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) { 8952 IPW_DEBUG_WX("BSSID set to current BSSID.\n"); 8953 mutex_unlock(&priv->mutex); 8954 return 0; 8955 } 8956 8957 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n", 8958 wrqu->ap_addr.sa_data); 8959 8960 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); 8961 8962 /* Network configuration changed -- force [re]association */ 8963 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n"); 8964 if (!ipw_disassociate(priv)) 8965 ipw_associate(priv); 8966 8967 mutex_unlock(&priv->mutex); 8968 return 0; 8969 } 8970 8971 static int ipw_wx_get_wap(struct net_device *dev, 8972 struct iw_request_info *info, 8973 union iwreq_data *wrqu, char *extra) 8974 { 8975 struct ipw_priv *priv = libipw_priv(dev); 8976 8977 /* If we are associated, trying to associate, or have a statically 8978 * configured BSSID then return that; otherwise return ANY */ 8979 mutex_lock(&priv->mutex); 8980 if (priv->config & CFG_STATIC_BSSID || 8981 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 8982 wrqu->ap_addr.sa_family = ARPHRD_ETHER; 8983 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); 8984 } else 8985 eth_zero_addr(wrqu->ap_addr.sa_data); 8986 8987 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", 8988 wrqu->ap_addr.sa_data); 8989 mutex_unlock(&priv->mutex); 8990 return 0; 8991 } 8992 8993 static int ipw_wx_set_essid(struct net_device *dev, 8994 struct iw_request_info *info, 8995 union iwreq_data *wrqu, char *extra) 8996 { 8997 struct ipw_priv *priv = libipw_priv(dev); 8998 int length; 8999 9000 mutex_lock(&priv->mutex); 9001 9002 if (!wrqu->essid.flags) 9003 { 9004 IPW_DEBUG_WX("Setting ESSID to ANY\n"); 9005 ipw_disassociate(priv); 9006 priv->config &= ~CFG_STATIC_ESSID; 9007 ipw_associate(priv); 9008 mutex_unlock(&priv->mutex); 9009 return 0; 9010 } 9011 9012 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE); 9013 9014 priv->config |= CFG_STATIC_ESSID; 9015 9016 if (priv->essid_len == length && !memcmp(priv->essid, extra, length) 9017 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) { 9018 IPW_DEBUG_WX("ESSID set to current ESSID.\n"); 9019 mutex_unlock(&priv->mutex); 9020 return 0; 9021 } 9022 9023 IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length); 9024 9025 priv->essid_len = length; 9026 memcpy(priv->essid, extra, priv->essid_len); 9027 9028 /* Network configuration changed -- force [re]association */ 9029 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n"); 9030 if (!ipw_disassociate(priv)) 9031 ipw_associate(priv); 9032 9033 mutex_unlock(&priv->mutex); 9034 return 0; 9035 } 9036 9037 static int ipw_wx_get_essid(struct net_device *dev, 9038 struct iw_request_info *info, 9039 union iwreq_data *wrqu, char *extra) 9040 { 9041 struct ipw_priv *priv = libipw_priv(dev); 9042 9043 /* If we are associated, trying to associate, or have a statically 9044 * configured ESSID then return that; otherwise return ANY */ 9045 mutex_lock(&priv->mutex); 9046 if (priv->config & CFG_STATIC_ESSID || 9047 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 9048 IPW_DEBUG_WX("Getting essid: '%*pE'\n", 9049 priv->essid_len, priv->essid); 9050 memcpy(extra, priv->essid, priv->essid_len); 9051 wrqu->essid.length = priv->essid_len; 9052 wrqu->essid.flags = 1; /* active */ 9053 } else { 9054 IPW_DEBUG_WX("Getting essid: ANY\n"); 9055 wrqu->essid.length 
= 0; 9056 wrqu->essid.flags = 0; /* active */ 9057 } 9058 mutex_unlock(&priv->mutex); 9059 return 0; 9060 } 9061 9062 static int ipw_wx_set_nick(struct net_device *dev, 9063 struct iw_request_info *info, 9064 union iwreq_data *wrqu, char *extra) 9065 { 9066 struct ipw_priv *priv = libipw_priv(dev); 9067 9068 IPW_DEBUG_WX("Setting nick to '%s'\n", extra); 9069 if (wrqu->data.length > IW_ESSID_MAX_SIZE) 9070 return -E2BIG; 9071 mutex_lock(&priv->mutex); 9072 wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick)); 9073 memset(priv->nick, 0, sizeof(priv->nick)); 9074 memcpy(priv->nick, extra, wrqu->data.length); 9075 IPW_DEBUG_TRACE("<<\n"); 9076 mutex_unlock(&priv->mutex); 9077 return 0; 9078 9079 } 9080 9081 static int ipw_wx_get_nick(struct net_device *dev, 9082 struct iw_request_info *info, 9083 union iwreq_data *wrqu, char *extra) 9084 { 9085 struct ipw_priv *priv = libipw_priv(dev); 9086 IPW_DEBUG_WX("Getting nick\n"); 9087 mutex_lock(&priv->mutex); 9088 wrqu->data.length = strlen(priv->nick); 9089 memcpy(extra, priv->nick, wrqu->data.length); 9090 wrqu->data.flags = 1; /* active */ 9091 mutex_unlock(&priv->mutex); 9092 return 0; 9093 } 9094 9095 static int ipw_wx_set_sens(struct net_device *dev, 9096 struct iw_request_info *info, 9097 union iwreq_data *wrqu, char *extra) 9098 { 9099 struct ipw_priv *priv = libipw_priv(dev); 9100 int err = 0; 9101 9102 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value); 9103 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value); 9104 mutex_lock(&priv->mutex); 9105 9106 if (wrqu->sens.fixed == 0) 9107 { 9108 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; 9109 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; 9110 goto out; 9111 } 9112 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) || 9113 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) { 9114 err = -EINVAL; 9115 goto out; 9116 } 9117 9118 priv->roaming_threshold = wrqu->sens.value; 9119 priv->disassociate_threshold = 3*wrqu->sens.value; 9120 out: 9121 mutex_unlock(&priv->mutex); 9122 return err; 9123 } 9124 9125 static int ipw_wx_get_sens(struct net_device *dev, 9126 struct iw_request_info *info, 9127 union iwreq_data *wrqu, char *extra) 9128 { 9129 struct ipw_priv *priv = libipw_priv(dev); 9130 mutex_lock(&priv->mutex); 9131 wrqu->sens.fixed = 1; 9132 wrqu->sens.value = priv->roaming_threshold; 9133 mutex_unlock(&priv->mutex); 9134 9135 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n", 9136 wrqu->power.disabled ? 
"OFF" : "ON", wrqu->power.value); 9137 9138 return 0; 9139 } 9140 9141 static int ipw_wx_set_rate(struct net_device *dev, 9142 struct iw_request_info *info, 9143 union iwreq_data *wrqu, char *extra) 9144 { 9145 /* TODO: We should use semaphores or locks for access to priv */ 9146 struct ipw_priv *priv = libipw_priv(dev); 9147 u32 target_rate = wrqu->bitrate.value; 9148 u32 fixed, mask; 9149 9150 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */ 9151 /* value = X, fixed = 1 means only rate X */ 9152 /* value = X, fixed = 0 means all rates lower equal X */ 9153 9154 if (target_rate == -1) { 9155 fixed = 0; 9156 mask = LIBIPW_DEFAULT_RATES_MASK; 9157 /* Now we should reassociate */ 9158 goto apply; 9159 } 9160 9161 mask = 0; 9162 fixed = wrqu->bitrate.fixed; 9163 9164 if (target_rate == 1000000 || !fixed) 9165 mask |= LIBIPW_CCK_RATE_1MB_MASK; 9166 if (target_rate == 1000000) 9167 goto apply; 9168 9169 if (target_rate == 2000000 || !fixed) 9170 mask |= LIBIPW_CCK_RATE_2MB_MASK; 9171 if (target_rate == 2000000) 9172 goto apply; 9173 9174 if (target_rate == 5500000 || !fixed) 9175 mask |= LIBIPW_CCK_RATE_5MB_MASK; 9176 if (target_rate == 5500000) 9177 goto apply; 9178 9179 if (target_rate == 6000000 || !fixed) 9180 mask |= LIBIPW_OFDM_RATE_6MB_MASK; 9181 if (target_rate == 6000000) 9182 goto apply; 9183 9184 if (target_rate == 9000000 || !fixed) 9185 mask |= LIBIPW_OFDM_RATE_9MB_MASK; 9186 if (target_rate == 9000000) 9187 goto apply; 9188 9189 if (target_rate == 11000000 || !fixed) 9190 mask |= LIBIPW_CCK_RATE_11MB_MASK; 9191 if (target_rate == 11000000) 9192 goto apply; 9193 9194 if (target_rate == 12000000 || !fixed) 9195 mask |= LIBIPW_OFDM_RATE_12MB_MASK; 9196 if (target_rate == 12000000) 9197 goto apply; 9198 9199 if (target_rate == 18000000 || !fixed) 9200 mask |= LIBIPW_OFDM_RATE_18MB_MASK; 9201 if (target_rate == 18000000) 9202 goto apply; 9203 9204 if (target_rate == 24000000 || !fixed) 9205 mask |= LIBIPW_OFDM_RATE_24MB_MASK; 9206 if (target_rate == 24000000) 9207 goto apply; 9208 9209 if (target_rate == 36000000 || !fixed) 9210 mask |= LIBIPW_OFDM_RATE_36MB_MASK; 9211 if (target_rate == 36000000) 9212 goto apply; 9213 9214 if (target_rate == 48000000 || !fixed) 9215 mask |= LIBIPW_OFDM_RATE_48MB_MASK; 9216 if (target_rate == 48000000) 9217 goto apply; 9218 9219 if (target_rate == 54000000 || !fixed) 9220 mask |= LIBIPW_OFDM_RATE_54MB_MASK; 9221 if (target_rate == 54000000) 9222 goto apply; 9223 9224 IPW_DEBUG_WX("invalid rate specified, returning error\n"); 9225 return -EINVAL; 9226 9227 apply: 9228 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n", 9229 mask, fixed ? 
"fixed" : "sub-rates"); 9230 mutex_lock(&priv->mutex); 9231 if (mask == LIBIPW_DEFAULT_RATES_MASK) { 9232 priv->config &= ~CFG_FIXED_RATE; 9233 ipw_set_fixed_rate(priv, priv->ieee->mode); 9234 } else 9235 priv->config |= CFG_FIXED_RATE; 9236 9237 if (priv->rates_mask == mask) { 9238 IPW_DEBUG_WX("Mask set to current mask.\n"); 9239 mutex_unlock(&priv->mutex); 9240 return 0; 9241 } 9242 9243 priv->rates_mask = mask; 9244 9245 /* Network configuration changed -- force [re]association */ 9246 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n"); 9247 if (!ipw_disassociate(priv)) 9248 ipw_associate(priv); 9249 9250 mutex_unlock(&priv->mutex); 9251 return 0; 9252 } 9253 9254 static int ipw_wx_get_rate(struct net_device *dev, 9255 struct iw_request_info *info, 9256 union iwreq_data *wrqu, char *extra) 9257 { 9258 struct ipw_priv *priv = libipw_priv(dev); 9259 mutex_lock(&priv->mutex); 9260 wrqu->bitrate.value = priv->last_rate; 9261 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0; 9262 mutex_unlock(&priv->mutex); 9263 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value); 9264 return 0; 9265 } 9266 9267 static int ipw_wx_set_rts(struct net_device *dev, 9268 struct iw_request_info *info, 9269 union iwreq_data *wrqu, char *extra) 9270 { 9271 struct ipw_priv *priv = libipw_priv(dev); 9272 mutex_lock(&priv->mutex); 9273 if (wrqu->rts.disabled || !wrqu->rts.fixed) 9274 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 9275 else { 9276 if (wrqu->rts.value < MIN_RTS_THRESHOLD || 9277 wrqu->rts.value > MAX_RTS_THRESHOLD) { 9278 mutex_unlock(&priv->mutex); 9279 return -EINVAL; 9280 } 9281 priv->rts_threshold = wrqu->rts.value; 9282 } 9283 9284 ipw_send_rts_threshold(priv, priv->rts_threshold); 9285 mutex_unlock(&priv->mutex); 9286 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold); 9287 return 0; 9288 } 9289 9290 static int ipw_wx_get_rts(struct net_device *dev, 9291 struct iw_request_info *info, 9292 union iwreq_data *wrqu, char *extra) 9293 { 9294 struct ipw_priv *priv = libipw_priv(dev); 9295 mutex_lock(&priv->mutex); 9296 wrqu->rts.value = priv->rts_threshold; 9297 wrqu->rts.fixed = 0; /* no auto select */ 9298 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); 9299 mutex_unlock(&priv->mutex); 9300 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value); 9301 return 0; 9302 } 9303 9304 static int ipw_wx_set_txpow(struct net_device *dev, 9305 struct iw_request_info *info, 9306 union iwreq_data *wrqu, char *extra) 9307 { 9308 struct ipw_priv *priv = libipw_priv(dev); 9309 int err = 0; 9310 9311 mutex_lock(&priv->mutex); 9312 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) { 9313 err = -EINPROGRESS; 9314 goto out; 9315 } 9316 9317 if (!wrqu->power.fixed) 9318 wrqu->power.value = IPW_TX_POWER_DEFAULT; 9319 9320 if (wrqu->power.flags != IW_TXPOW_DBM) { 9321 err = -EINVAL; 9322 goto out; 9323 } 9324 9325 if ((wrqu->power.value > IPW_TX_POWER_MAX) || 9326 (wrqu->power.value < IPW_TX_POWER_MIN)) { 9327 err = -EINVAL; 9328 goto out; 9329 } 9330 9331 priv->tx_power = wrqu->power.value; 9332 err = ipw_set_tx_power(priv); 9333 out: 9334 mutex_unlock(&priv->mutex); 9335 return err; 9336 } 9337 9338 static int ipw_wx_get_txpow(struct net_device *dev, 9339 struct iw_request_info *info, 9340 union iwreq_data *wrqu, char *extra) 9341 { 9342 struct ipw_priv *priv = libipw_priv(dev); 9343 mutex_lock(&priv->mutex); 9344 wrqu->power.value = priv->tx_power; 9345 wrqu->power.fixed = 1; 9346 wrqu->power.flags = IW_TXPOW_DBM; 9347 wrqu->power.disabled = (priv->status & 
STATUS_RF_KILL_MASK) ? 1 : 0; 9348 mutex_unlock(&priv->mutex); 9349 9350 IPW_DEBUG_WX("GET TX Power -> %s %d\n", 9351 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); 9352 9353 return 0; 9354 } 9355 9356 static int ipw_wx_set_frag(struct net_device *dev, 9357 struct iw_request_info *info, 9358 union iwreq_data *wrqu, char *extra) 9359 { 9360 struct ipw_priv *priv = libipw_priv(dev); 9361 mutex_lock(&priv->mutex); 9362 if (wrqu->frag.disabled || !wrqu->frag.fixed) 9363 priv->ieee->fts = DEFAULT_FTS; 9364 else { 9365 if (wrqu->frag.value < MIN_FRAG_THRESHOLD || 9366 wrqu->frag.value > MAX_FRAG_THRESHOLD) { 9367 mutex_unlock(&priv->mutex); 9368 return -EINVAL; 9369 } 9370 9371 priv->ieee->fts = wrqu->frag.value & ~0x1; 9372 } 9373 9374 ipw_send_frag_threshold(priv, wrqu->frag.value); 9375 mutex_unlock(&priv->mutex); 9376 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value); 9377 return 0; 9378 } 9379 9380 static int ipw_wx_get_frag(struct net_device *dev, 9381 struct iw_request_info *info, 9382 union iwreq_data *wrqu, char *extra) 9383 { 9384 struct ipw_priv *priv = libipw_priv(dev); 9385 mutex_lock(&priv->mutex); 9386 wrqu->frag.value = priv->ieee->fts; 9387 wrqu->frag.fixed = 0; /* no auto select */ 9388 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); 9389 mutex_unlock(&priv->mutex); 9390 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value); 9391 9392 return 0; 9393 } 9394 9395 static int ipw_wx_set_retry(struct net_device *dev, 9396 struct iw_request_info *info, 9397 union iwreq_data *wrqu, char *extra) 9398 { 9399 struct ipw_priv *priv = libipw_priv(dev); 9400 9401 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled) 9402 return -EINVAL; 9403 9404 if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) 9405 return 0; 9406 9407 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255) 9408 return -EINVAL; 9409 9410 mutex_lock(&priv->mutex); 9411 if (wrqu->retry.flags & IW_RETRY_SHORT) 9412 priv->short_retry_limit = (u8) wrqu->retry.value; 9413 else if (wrqu->retry.flags & IW_RETRY_LONG) 9414 priv->long_retry_limit = (u8) wrqu->retry.value; 9415 else { 9416 priv->short_retry_limit = (u8) wrqu->retry.value; 9417 priv->long_retry_limit = (u8) wrqu->retry.value; 9418 } 9419 9420 ipw_send_retry_limit(priv, priv->short_retry_limit, 9421 priv->long_retry_limit); 9422 mutex_unlock(&priv->mutex); 9423 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n", 9424 priv->short_retry_limit, priv->long_retry_limit); 9425 return 0; 9426 } 9427 9428 static int ipw_wx_get_retry(struct net_device *dev, 9429 struct iw_request_info *info, 9430 union iwreq_data *wrqu, char *extra) 9431 { 9432 struct ipw_priv *priv = libipw_priv(dev); 9433 9434 mutex_lock(&priv->mutex); 9435 wrqu->retry.disabled = 0; 9436 9437 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { 9438 mutex_unlock(&priv->mutex); 9439 return -EINVAL; 9440 } 9441 9442 if (wrqu->retry.flags & IW_RETRY_LONG) { 9443 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG; 9444 wrqu->retry.value = priv->long_retry_limit; 9445 } else if (wrqu->retry.flags & IW_RETRY_SHORT) { 9446 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT; 9447 wrqu->retry.value = priv->short_retry_limit; 9448 } else { 9449 wrqu->retry.flags = IW_RETRY_LIMIT; 9450 wrqu->retry.value = priv->short_retry_limit; 9451 } 9452 mutex_unlock(&priv->mutex); 9453 9454 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value); 9455 9456 return 0; 9457 } 9458 9459 static int ipw_wx_set_scan(struct net_device *dev, 9460 struct iw_request_info *info, 9461 union 
iwreq_data *wrqu, char *extra) 9462 { 9463 struct ipw_priv *priv = libipw_priv(dev); 9464 struct iw_scan_req *req = (struct iw_scan_req *)extra; 9465 struct delayed_work *work = NULL; 9466 9467 mutex_lock(&priv->mutex); 9468 9469 priv->user_requested_scan = 1; 9470 9471 if (wrqu->data.length == sizeof(struct iw_scan_req)) { 9472 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { 9473 int len = min((int)req->essid_len, 9474 (int)sizeof(priv->direct_scan_ssid)); 9475 memcpy(priv->direct_scan_ssid, req->essid, len); 9476 priv->direct_scan_ssid_len = len; 9477 work = &priv->request_direct_scan; 9478 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { 9479 work = &priv->request_passive_scan; 9480 } 9481 } else { 9482 /* Normal active broadcast scan */ 9483 work = &priv->request_scan; 9484 } 9485 9486 mutex_unlock(&priv->mutex); 9487 9488 IPW_DEBUG_WX("Start scan\n"); 9489 9490 schedule_delayed_work(work, 0); 9491 9492 return 0; 9493 } 9494 9495 static int ipw_wx_get_scan(struct net_device *dev, 9496 struct iw_request_info *info, 9497 union iwreq_data *wrqu, char *extra) 9498 { 9499 struct ipw_priv *priv = libipw_priv(dev); 9500 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra); 9501 } 9502 9503 static int ipw_wx_set_encode(struct net_device *dev, 9504 struct iw_request_info *info, 9505 union iwreq_data *wrqu, char *key) 9506 { 9507 struct ipw_priv *priv = libipw_priv(dev); 9508 int ret; 9509 u32 cap = priv->capability; 9510 9511 mutex_lock(&priv->mutex); 9512 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key); 9513 9514 /* In IBSS mode, we need to notify the firmware to update 9515 * the beacon info after we changed the capability. */ 9516 if (cap != priv->capability && 9517 priv->ieee->iw_mode == IW_MODE_ADHOC && 9518 priv->status & STATUS_ASSOCIATED) 9519 ipw_disassociate(priv); 9520 9521 mutex_unlock(&priv->mutex); 9522 return ret; 9523 } 9524 9525 static int ipw_wx_get_encode(struct net_device *dev, 9526 struct iw_request_info *info, 9527 union iwreq_data *wrqu, char *key) 9528 { 9529 struct ipw_priv *priv = libipw_priv(dev); 9530 return libipw_wx_get_encode(priv->ieee, info, wrqu, key); 9531 } 9532 9533 static int ipw_wx_set_power(struct net_device *dev, 9534 struct iw_request_info *info, 9535 union iwreq_data *wrqu, char *extra) 9536 { 9537 struct ipw_priv *priv = libipw_priv(dev); 9538 int err; 9539 mutex_lock(&priv->mutex); 9540 if (wrqu->power.disabled) { 9541 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); 9542 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM); 9543 if (err) { 9544 IPW_DEBUG_WX("failed setting power mode.\n"); 9545 mutex_unlock(&priv->mutex); 9546 return err; 9547 } 9548 IPW_DEBUG_WX("SET Power Management Mode -> off\n"); 9549 mutex_unlock(&priv->mutex); 9550 return 0; 9551 } 9552 9553 switch (wrqu->power.flags & IW_POWER_MODE) { 9554 case IW_POWER_ON: /* If not specified */ 9555 case IW_POWER_MODE: /* If set all mask */ 9556 case IW_POWER_ALL_R: /* If explicitly state all */ 9557 break; 9558 default: /* Otherwise we don't support it */ 9559 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", 9560 wrqu->power.flags); 9561 mutex_unlock(&priv->mutex); 9562 return -EOPNOTSUPP; 9563 } 9564 9565 /* If the user hasn't specified a power management mode yet, default 9566 * to BATTERY */ 9567 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC) 9568 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY; 9569 else 9570 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode; 9571 9572 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); 9573 if 
(err) { 9574 IPW_DEBUG_WX("failed setting power mode.\n"); 9575 mutex_unlock(&priv->mutex); 9576 return err; 9577 } 9578 9579 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); 9580 mutex_unlock(&priv->mutex); 9581 return 0; 9582 } 9583 9584 static int ipw_wx_get_power(struct net_device *dev, 9585 struct iw_request_info *info, 9586 union iwreq_data *wrqu, char *extra) 9587 { 9588 struct ipw_priv *priv = libipw_priv(dev); 9589 mutex_lock(&priv->mutex); 9590 if (!(priv->power_mode & IPW_POWER_ENABLED)) 9591 wrqu->power.disabled = 1; 9592 else 9593 wrqu->power.disabled = 0; 9594 9595 mutex_unlock(&priv->mutex); 9596 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); 9597 9598 return 0; 9599 } 9600 9601 static int ipw_wx_set_powermode(struct net_device *dev, 9602 struct iw_request_info *info, 9603 union iwreq_data *wrqu, char *extra) 9604 { 9605 struct ipw_priv *priv = libipw_priv(dev); 9606 int mode = *(int *)extra; 9607 int err; 9608 9609 mutex_lock(&priv->mutex); 9610 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) 9611 mode = IPW_POWER_AC; 9612 9613 if (IPW_POWER_LEVEL(priv->power_mode) != mode) { 9614 err = ipw_send_power_mode(priv, mode); 9615 if (err) { 9616 IPW_DEBUG_WX("failed setting power mode.\n"); 9617 mutex_unlock(&priv->mutex); 9618 return err; 9619 } 9620 priv->power_mode = IPW_POWER_ENABLED | mode; 9621 } 9622 mutex_unlock(&priv->mutex); 9623 return 0; 9624 } 9625 9626 #define MAX_WX_STRING 80 9627 static int ipw_wx_get_powermode(struct net_device *dev, 9628 struct iw_request_info *info, 9629 union iwreq_data *wrqu, char *extra) 9630 { 9631 struct ipw_priv *priv = libipw_priv(dev); 9632 int level = IPW_POWER_LEVEL(priv->power_mode); 9633 char *p = extra; 9634 9635 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level); 9636 9637 switch (level) { 9638 case IPW_POWER_AC: 9639 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)"); 9640 break; 9641 case IPW_POWER_BATTERY: 9642 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)"); 9643 break; 9644 default: 9645 p += snprintf(p, MAX_WX_STRING - (p - extra), 9646 "(Timeout %dms, Period %dms)", 9647 timeout_duration[level - 1] / 1000, 9648 period_duration[level - 1] / 1000); 9649 } 9650 9651 if (!(priv->power_mode & IPW_POWER_ENABLED)) 9652 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF"); 9653 9654 wrqu->data.length = p - extra + 1; 9655 9656 return 0; 9657 } 9658 9659 static int ipw_wx_set_wireless_mode(struct net_device *dev, 9660 struct iw_request_info *info, 9661 union iwreq_data *wrqu, char *extra) 9662 { 9663 struct ipw_priv *priv = libipw_priv(dev); 9664 int mode = *(int *)extra; 9665 u8 band = 0, modulation = 0; 9666 9667 if (mode == 0 || mode & ~IEEE_MODE_MASK) { 9668 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode); 9669 return -EINVAL; 9670 } 9671 mutex_lock(&priv->mutex); 9672 if (priv->adapter == IPW_2915ABG) { 9673 priv->ieee->abg_true = 1; 9674 if (mode & IEEE_A) { 9675 band |= LIBIPW_52GHZ_BAND; 9676 modulation |= LIBIPW_OFDM_MODULATION; 9677 } else 9678 priv->ieee->abg_true = 0; 9679 } else { 9680 if (mode & IEEE_A) { 9681 IPW_WARNING("Attempt to set 2200BG into " 9682 "802.11a mode\n"); 9683 mutex_unlock(&priv->mutex); 9684 return -EINVAL; 9685 } 9686 9687 priv->ieee->abg_true = 0; 9688 } 9689 9690 if (mode & IEEE_B) { 9691 band |= LIBIPW_24GHZ_BAND; 9692 modulation |= LIBIPW_CCK_MODULATION; 9693 } else 9694 priv->ieee->abg_true = 0; 9695 9696 if (mode & IEEE_G) { 9697 band |= LIBIPW_24GHZ_BAND; 9698 modulation |= LIBIPW_OFDM_MODULATION; 9699 } 
else 9700 priv->ieee->abg_true = 0; 9701 9702 priv->ieee->mode = mode; 9703 priv->ieee->freq_band = band; 9704 priv->ieee->modulation = modulation; 9705 init_supported_rates(priv, &priv->rates); 9706 9707 /* Network configuration changed -- force [re]association */ 9708 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n"); 9709 if (!ipw_disassociate(priv)) { 9710 ipw_send_supported_rates(priv, &priv->rates); 9711 ipw_associate(priv); 9712 } 9713 9714 /* Update the band LEDs */ 9715 ipw_led_band_on(priv); 9716 9717 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", 9718 mode & IEEE_A ? 'a' : '.', 9719 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.'); 9720 mutex_unlock(&priv->mutex); 9721 return 0; 9722 } 9723 9724 static int ipw_wx_get_wireless_mode(struct net_device *dev, 9725 struct iw_request_info *info, 9726 union iwreq_data *wrqu, char *extra) 9727 { 9728 struct ipw_priv *priv = libipw_priv(dev); 9729 mutex_lock(&priv->mutex); 9730 switch (priv->ieee->mode) { 9731 case IEEE_A: 9732 strncpy(extra, "802.11a (1)", MAX_WX_STRING); 9733 break; 9734 case IEEE_B: 9735 strncpy(extra, "802.11b (2)", MAX_WX_STRING); 9736 break; 9737 case IEEE_A | IEEE_B: 9738 strncpy(extra, "802.11ab (3)", MAX_WX_STRING); 9739 break; 9740 case IEEE_G: 9741 strncpy(extra, "802.11g (4)", MAX_WX_STRING); 9742 break; 9743 case IEEE_A | IEEE_G: 9744 strncpy(extra, "802.11ag (5)", MAX_WX_STRING); 9745 break; 9746 case IEEE_B | IEEE_G: 9747 strncpy(extra, "802.11bg (6)", MAX_WX_STRING); 9748 break; 9749 case IEEE_A | IEEE_B | IEEE_G: 9750 strncpy(extra, "802.11abg (7)", MAX_WX_STRING); 9751 break; 9752 default: 9753 strncpy(extra, "unknown", MAX_WX_STRING); 9754 break; 9755 } 9756 extra[MAX_WX_STRING - 1] = '\0'; 9757 9758 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); 9759 9760 wrqu->data.length = strlen(extra) + 1; 9761 mutex_unlock(&priv->mutex); 9762 9763 return 0; 9764 } 9765 9766 static int ipw_wx_set_preamble(struct net_device *dev, 9767 struct iw_request_info *info, 9768 union iwreq_data *wrqu, char *extra) 9769 { 9770 struct ipw_priv *priv = libipw_priv(dev); 9771 int mode = *(int *)extra; 9772 mutex_lock(&priv->mutex); 9773 /* Switching from SHORT -> LONG requires a disassociation */ 9774 if (mode == 1) { 9775 if (!(priv->config & CFG_PREAMBLE_LONG)) { 9776 priv->config |= CFG_PREAMBLE_LONG; 9777 9778 /* Network configuration changed -- force [re]association */ 9779 IPW_DEBUG_ASSOC 9780 ("[re]association triggered due to preamble change.\n"); 9781 if (!ipw_disassociate(priv)) 9782 ipw_associate(priv); 9783 } 9784 goto done; 9785 } 9786 9787 if (mode == 0) { 9788 priv->config &= ~CFG_PREAMBLE_LONG; 9789 goto done; 9790 } 9791 mutex_unlock(&priv->mutex); 9792 return -EINVAL; 9793 9794 done: 9795 mutex_unlock(&priv->mutex); 9796 return 0; 9797 } 9798 9799 static int ipw_wx_get_preamble(struct net_device *dev, 9800 struct iw_request_info *info, 9801 union iwreq_data *wrqu, char *extra) 9802 { 9803 struct ipw_priv *priv = libipw_priv(dev); 9804 mutex_lock(&priv->mutex); 9805 if (priv->config & CFG_PREAMBLE_LONG) 9806 snprintf(wrqu->name, IFNAMSIZ, "long (1)"); 9807 else 9808 snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); 9809 mutex_unlock(&priv->mutex); 9810 return 0; 9811 } 9812 9813 #ifdef CONFIG_IPW2200_MONITOR 9814 static int ipw_wx_set_monitor(struct net_device *dev, 9815 struct iw_request_info *info, 9816 union iwreq_data *wrqu, char *extra) 9817 { 9818 struct ipw_priv *priv = libipw_priv(dev); 9819 int *parms = (int *)extra; 9820 int enable = (parms[0] > 0); 9821 mutex_lock(&priv->mutex); 9822 
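/* Private 'monitor' ioctl: parms[0] != 0 enables monitor mode on the channel given in parms[1]; 0 switches back to a normal interface. Changing the link type (radiotap/802.11 vs. ethernet) requires an adapter restart, which is scheduled below. */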
IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); 9823 if (enable) { 9824 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 9825 #ifdef CONFIG_IPW2200_RADIOTAP 9826 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 9827 #else 9828 priv->net_dev->type = ARPHRD_IEEE80211; 9829 #endif 9830 schedule_work(&priv->adapter_restart); 9831 } 9832 9833 ipw_set_channel(priv, parms[1]); 9834 } else { 9835 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 9836 mutex_unlock(&priv->mutex); 9837 return 0; 9838 } 9839 priv->net_dev->type = ARPHRD_ETHER; 9840 schedule_work(&priv->adapter_restart); 9841 } 9842 mutex_unlock(&priv->mutex); 9843 return 0; 9844 } 9845 9846 #endif /* CONFIG_IPW2200_MONITOR */ 9847 9848 static int ipw_wx_reset(struct net_device *dev, 9849 struct iw_request_info *info, 9850 union iwreq_data *wrqu, char *extra) 9851 { 9852 struct ipw_priv *priv = libipw_priv(dev); 9853 IPW_DEBUG_WX("RESET\n"); 9854 schedule_work(&priv->adapter_restart); 9855 return 0; 9856 } 9857 9858 static int ipw_wx_sw_reset(struct net_device *dev, 9859 struct iw_request_info *info, 9860 union iwreq_data *wrqu, char *extra) 9861 { 9862 struct ipw_priv *priv = libipw_priv(dev); 9863 union iwreq_data wrqu_sec = { 9864 .encoding = { 9865 .flags = IW_ENCODE_DISABLED, 9866 }, 9867 }; 9868 int ret; 9869 9870 IPW_DEBUG_WX("SW_RESET\n"); 9871 9872 mutex_lock(&priv->mutex); 9873 9874 ret = ipw_sw_reset(priv, 2); 9875 if (!ret) { 9876 free_firmware(); 9877 ipw_adapter_restart(priv); 9878 } 9879 9880 /* The SW reset bit might have been toggled on by the 'disable' 9881 * module parameter, so take appropriate action */ 9882 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW); 9883 9884 mutex_unlock(&priv->mutex); 9885 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL); 9886 mutex_lock(&priv->mutex); 9887 9888 if (!(priv->status & STATUS_RF_KILL_MASK)) { 9889 /* Configuration likely changed -- force [re]association */ 9890 IPW_DEBUG_ASSOC("[re]association triggered due to sw " 9891 "reset.\n"); 9892 if (!ipw_disassociate(priv)) 9893 ipw_associate(priv); 9894 } 9895 9896 mutex_unlock(&priv->mutex); 9897 9898 return 0; 9899 } 9900 9901 /* Rebase the WE IOCTLs to zero for the handler array */ 9902 static iw_handler ipw_wx_handlers[] = { 9903 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname), 9904 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq), 9905 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq), 9906 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode), 9907 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode), 9908 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens), 9909 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens), 9910 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range), 9911 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap), 9912 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap), 9913 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan), 9914 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan), 9915 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid), 9916 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid), 9917 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick), 9918 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick), 9919 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate), 9920 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate), 9921 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts), 9922 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts), 9923 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag), 9924 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag), 9925 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow), 9926 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow), 9927 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry), 9928 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry), 9929 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode), 9930 
IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode), 9931 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power), 9932 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power), 9933 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy), 9934 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy), 9935 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy), 9936 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy), 9937 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie), 9938 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie), 9939 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme), 9940 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth), 9941 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth), 9942 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext), 9943 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext), 9944 }; 9945 9946 enum { 9947 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV, 9948 IPW_PRIV_GET_POWER, 9949 IPW_PRIV_SET_MODE, 9950 IPW_PRIV_GET_MODE, 9951 IPW_PRIV_SET_PREAMBLE, 9952 IPW_PRIV_GET_PREAMBLE, 9953 IPW_PRIV_RESET, 9954 IPW_PRIV_SW_RESET, 9955 #ifdef CONFIG_IPW2200_MONITOR 9956 IPW_PRIV_SET_MONITOR, 9957 #endif 9958 }; 9959 9960 static struct iw_priv_args ipw_priv_args[] = { 9961 { 9962 .cmd = IPW_PRIV_SET_POWER, 9963 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 9964 .name = "set_power"}, 9965 { 9966 .cmd = IPW_PRIV_GET_POWER, 9967 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 9968 .name = "get_power"}, 9969 { 9970 .cmd = IPW_PRIV_SET_MODE, 9971 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 9972 .name = "set_mode"}, 9973 { 9974 .cmd = IPW_PRIV_GET_MODE, 9975 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 9976 .name = "get_mode"}, 9977 { 9978 .cmd = IPW_PRIV_SET_PREAMBLE, 9979 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 9980 .name = "set_preamble"}, 9981 { 9982 .cmd = IPW_PRIV_GET_PREAMBLE, 9983 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, 9984 .name = "get_preamble"}, 9985 { 9986 IPW_PRIV_RESET, 9987 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"}, 9988 { 9989 IPW_PRIV_SW_RESET, 9990 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"}, 9991 #ifdef CONFIG_IPW2200_MONITOR 9992 { 9993 IPW_PRIV_SET_MONITOR, 9994 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"}, 9995 #endif /* CONFIG_IPW2200_MONITOR */ 9996 }; 9997 9998 static iw_handler ipw_priv_handler[] = { 9999 ipw_wx_set_powermode, 10000 ipw_wx_get_powermode, 10001 ipw_wx_set_wireless_mode, 10002 ipw_wx_get_wireless_mode, 10003 ipw_wx_set_preamble, 10004 ipw_wx_get_preamble, 10005 ipw_wx_reset, 10006 ipw_wx_sw_reset, 10007 #ifdef CONFIG_IPW2200_MONITOR 10008 ipw_wx_set_monitor, 10009 #endif 10010 }; 10011 10012 static const struct iw_handler_def ipw_wx_handler_def = { 10013 .standard = ipw_wx_handlers, 10014 .num_standard = ARRAY_SIZE(ipw_wx_handlers), 10015 .num_private = ARRAY_SIZE(ipw_priv_handler), 10016 .num_private_args = ARRAY_SIZE(ipw_priv_args), 10017 .private = ipw_priv_handler, 10018 .private_args = ipw_priv_args, 10019 .get_wireless_stats = ipw_get_wireless_stats, 10020 }; 10021 10022 /* 10023 * Get wireless statistics. 10024 * Called by /proc/net/wireless 10025 * Also called by SIOCGIWSTATS 10026 */ 10027 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev) 10028 { 10029 struct ipw_priv *priv = libipw_priv(dev); 10030 struct iw_statistics *wstats; 10031 10032 wstats = &priv->wstats; 10033 10034 /* if hw is disabled, then ipw_get_ordinal() can't be called. 10035 * netdev->get_wireless_stats seems to be called before fw is 10036 * initialized. 
STATUS_ASSOCIATED will only be set if the hw is up 10037 * and associated; if not associated, the values are all meaningless 10038 * anyway, so set them all to zero and INVALID */ 10039 if (!(priv->status & STATUS_ASSOCIATED)) { 10040 wstats->miss.beacon = 0; 10041 wstats->discard.retries = 0; 10042 wstats->qual.qual = 0; 10043 wstats->qual.level = 0; 10044 wstats->qual.noise = 0; 10045 wstats->qual.updated = 7; 10046 wstats->qual.updated |= IW_QUAL_NOISE_INVALID | 10047 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; 10048 return wstats; 10049 } 10050 10051 wstats->qual.qual = priv->quality; 10052 wstats->qual.level = priv->exp_avg_rssi; 10053 wstats->qual.noise = priv->exp_avg_noise; 10054 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | 10055 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM; 10056 10057 wstats->miss.beacon = average_value(&priv->average_missed_beacons); 10058 wstats->discard.retries = priv->last_tx_failures; 10059 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable; 10060 10061 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len)) 10062 goto fail_get_ordinal; 10063 wstats->discard.retries += tx_retry; */ 10064 10065 return wstats; 10066 } 10067 10068 /* net device stuff */ 10069 10070 static void init_sys_config(struct ipw_sys_config *sys_config) 10071 { 10072 memset(sys_config, 0, sizeof(struct ipw_sys_config)); 10073 sys_config->bt_coexistence = 0; 10074 sys_config->answer_broadcast_ssid_probe = 0; 10075 sys_config->accept_all_data_frames = 0; 10076 sys_config->accept_non_directed_frames = 1; 10077 sys_config->exclude_unicast_unencrypted = 0; 10078 sys_config->disable_unicast_decryption = 1; 10079 sys_config->exclude_multicast_unencrypted = 0; 10080 sys_config->disable_multicast_decryption = 1; 10081 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B) 10082 antenna = CFG_SYS_ANTENNA_BOTH; 10083 sys_config->antenna_diversity = antenna; 10084 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ 10085 sys_config->dot11g_auto_detection = 0; 10086 sys_config->enable_cts_to_self = 0; 10087 sys_config->bt_coexist_collision_thr = 0; 10088 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */ 10089 sys_config->silence_threshold = 0x1e; 10090 } 10091 10092 static int ipw_net_open(struct net_device *dev) 10093 { 10094 IPW_DEBUG_INFO("dev->open\n"); 10095 netif_start_queue(dev); 10096 return 0; 10097 } 10098 10099 static int ipw_net_stop(struct net_device *dev) 10100 { 10101 IPW_DEBUG_INFO("dev->close\n"); 10102 netif_stop_queue(dev); 10103 return 0; 10104 } 10105 10106 /* 10107 todo: 10108 10109 modify to send one tfd per fragment instead of using chunking. otherwise 10110 we need to heavily modify the libipw_skb_to_txb.
10111 */ 10112 10113 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, 10114 int pri) 10115 { 10116 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *) 10117 txb->fragments[0]->data; 10118 int i = 0; 10119 struct tfd_frame *tfd; 10120 #ifdef CONFIG_IPW2200_QOS 10121 int tx_id = ipw_get_tx_queue_number(priv, pri); 10122 struct clx2_tx_queue *txq = &priv->txq[tx_id]; 10123 #else 10124 struct clx2_tx_queue *txq = &priv->txq[0]; 10125 #endif 10126 struct clx2_queue *q = &txq->q; 10127 u8 id, hdr_len, unicast; 10128 int fc; 10129 10130 if (!(priv->status & STATUS_ASSOCIATED)) 10131 goto drop; 10132 10133 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 10134 switch (priv->ieee->iw_mode) { 10135 case IW_MODE_ADHOC: 10136 unicast = !is_multicast_ether_addr(hdr->addr1); 10137 id = ipw_find_station(priv, hdr->addr1); 10138 if (id == IPW_INVALID_STATION) { 10139 id = ipw_add_station(priv, hdr->addr1); 10140 if (id == IPW_INVALID_STATION) { 10141 IPW_WARNING("Attempt to send data to " 10142 "invalid cell: %pM\n", 10143 hdr->addr1); 10144 goto drop; 10145 } 10146 } 10147 break; 10148 10149 case IW_MODE_INFRA: 10150 default: 10151 unicast = !is_multicast_ether_addr(hdr->addr3); 10152 id = 0; 10153 break; 10154 } 10155 10156 tfd = &txq->bd[q->first_empty]; 10157 txq->txb[q->first_empty] = txb; 10158 memset(tfd, 0, sizeof(*tfd)); 10159 tfd->u.data.station_number = id; 10160 10161 tfd->control_flags.message_type = TX_FRAME_TYPE; 10162 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; 10163 10164 tfd->u.data.cmd_id = DINO_CMD_TX; 10165 tfd->u.data.len = cpu_to_le16(txb->payload_size); 10166 10167 if (priv->assoc_request.ieee_mode == IPW_B_MODE) 10168 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK; 10169 else 10170 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM; 10171 10172 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE) 10173 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE; 10174 10175 fc = le16_to_cpu(hdr->frame_ctl); 10176 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS); 10177 10178 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len); 10179 10180 if (likely(unicast)) 10181 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD; 10182 10183 if (txb->encrypted && !priv->ieee->host_encrypt) { 10184 switch (priv->ieee->sec.level) { 10185 case SEC_LEVEL_3: 10186 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10187 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10188 /* XXX: ACK flag must be set for CCMP even if it 10189 * is a multicast/broadcast packet, because CCMP 10190 * group communication encrypted by GTK is 10191 * actually done by the AP. 
*/ 10192 if (!unicast) 10193 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD; 10194 10195 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; 10196 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM; 10197 tfd->u.data.key_index = 0; 10198 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE; 10199 break; 10200 case SEC_LEVEL_2: 10201 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10202 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10203 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; 10204 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP; 10205 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE; 10206 break; 10207 case SEC_LEVEL_1: 10208 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10209 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10210 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx; 10211 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <= 10212 40) 10213 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit; 10214 else 10215 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit; 10216 break; 10217 case SEC_LEVEL_0: 10218 break; 10219 default: 10220 printk(KERN_ERR "Unknown security level %d\n", 10221 priv->ieee->sec.level); 10222 break; 10223 } 10224 } else 10225 /* No hardware encryption */ 10226 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP; 10227 10228 #ifdef CONFIG_IPW2200_QOS 10229 if (fc & IEEE80211_STYPE_QOS_DATA) 10230 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data)); 10231 #endif /* CONFIG_IPW2200_QOS */ 10232 10233 /* payload */ 10234 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2), 10235 txb->nr_frags)); 10236 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n", 10237 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks)); 10238 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) { 10239 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n", 10240 i, le32_to_cpu(tfd->u.data.num_chunks), 10241 txb->fragments[i]->len - hdr_len); 10242 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n", 10243 i, tfd->u.data.num_chunks, 10244 txb->fragments[i]->len - hdr_len); 10245 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len, 10246 txb->fragments[i]->len - hdr_len); 10247 10248 tfd->u.data.chunk_ptr[i] = 10249 cpu_to_le32(pci_map_single 10250 (priv->pci_dev, 10251 txb->fragments[i]->data + hdr_len, 10252 txb->fragments[i]->len - hdr_len, 10253 PCI_DMA_TODEVICE)); 10254 tfd->u.data.chunk_len[i] = 10255 cpu_to_le16(txb->fragments[i]->len - hdr_len); 10256 } 10257 10258 if (i != txb->nr_frags) { 10259 struct sk_buff *skb; 10260 u16 remaining_bytes = 0; 10261 int j; 10262 10263 for (j = i; j < txb->nr_frags; j++) 10264 remaining_bytes += txb->fragments[j]->len - hdr_len; 10265 10266 printk(KERN_INFO "Trying to reallocate for %d bytes\n", 10267 remaining_bytes); 10268 skb = alloc_skb(remaining_bytes, GFP_ATOMIC); 10269 if (skb != NULL) { 10270 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes); 10271 for (j = i; j < txb->nr_frags; j++) { 10272 int size = txb->fragments[j]->len - hdr_len; 10273 10274 printk(KERN_INFO "Adding frag %d %d...\n", 10275 j, size); 10276 skb_put_data(skb, 10277 txb->fragments[j]->data + hdr_len, 10278 size); 10279 } 10280 dev_kfree_skb_any(txb->fragments[i]); 10281 txb->fragments[i] = skb; 10282 tfd->u.data.chunk_ptr[i] = 10283 cpu_to_le32(pci_map_single 10284 (priv->pci_dev, skb->data, 10285 remaining_bytes, 10286 PCI_DMA_TODEVICE)); 10287 10288 le32_add_cpu(&tfd->u.data.num_chunks, 1); 10289 } 10290 } 10291 10292 /* kick DMA */ 10293 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); 10294 ipw_write32(priv, q->reg_w, q->first_empty); 
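/* If fewer than high_mark slots remain free in the TX ring, stop the netdev queue so the stack holds further frames until space is reclaimed. */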
10295 10296 if (ipw_tx_queue_space(q) < q->high_mark) 10297 netif_stop_queue(priv->net_dev); 10298 10299 return NETDEV_TX_OK; 10300 10301 drop: 10302 IPW_DEBUG_DROP("Silently dropping Tx packet.\n"); 10303 libipw_txb_free(txb); 10304 return NETDEV_TX_OK; 10305 } 10306 10307 static int ipw_net_is_queue_full(struct net_device *dev, int pri) 10308 { 10309 struct ipw_priv *priv = libipw_priv(dev); 10310 #ifdef CONFIG_IPW2200_QOS 10311 int tx_id = ipw_get_tx_queue_number(priv, pri); 10312 struct clx2_tx_queue *txq = &priv->txq[tx_id]; 10313 #else 10314 struct clx2_tx_queue *txq = &priv->txq[0]; 10315 #endif /* CONFIG_IPW2200_QOS */ 10316 10317 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark) 10318 return 1; 10319 10320 return 0; 10321 } 10322 10323 #ifdef CONFIG_IPW2200_PROMISCUOUS 10324 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, 10325 struct libipw_txb *txb) 10326 { 10327 struct libipw_rx_stats dummystats; 10328 struct ieee80211_hdr *hdr; 10329 u8 n; 10330 u16 filter = priv->prom_priv->filter; 10331 int hdr_only = 0; 10332 10333 if (filter & IPW_PROM_NO_TX) 10334 return; 10335 10336 memset(&dummystats, 0, sizeof(dummystats)); 10337 10338 /* Filtering of fragment chains is done against the first fragment */ 10339 hdr = (void *)txb->fragments[0]->data; 10340 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) { 10341 if (filter & IPW_PROM_NO_MGMT) 10342 return; 10343 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 10344 hdr_only = 1; 10345 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) { 10346 if (filter & IPW_PROM_NO_CTL) 10347 return; 10348 if (filter & IPW_PROM_CTL_HEADER_ONLY) 10349 hdr_only = 1; 10350 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) { 10351 if (filter & IPW_PROM_NO_DATA) 10352 return; 10353 if (filter & IPW_PROM_DATA_HEADER_ONLY) 10354 hdr_only = 1; 10355 } 10356 10357 for(n=0; n<txb->nr_frags; ++n) { 10358 struct sk_buff *src = txb->fragments[n]; 10359 struct sk_buff *dst; 10360 struct ieee80211_radiotap_header *rt_hdr; 10361 int len; 10362 10363 if (hdr_only) { 10364 hdr = (void *)src->data; 10365 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control)); 10366 } else 10367 len = src->len; 10368 10369 dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC); 10370 if (!dst) 10371 continue; 10372 10373 rt_hdr = skb_put(dst, sizeof(*rt_hdr)); 10374 10375 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION; 10376 rt_hdr->it_pad = 0; 10377 rt_hdr->it_present = 0; /* after all, it's just an idea */ 10378 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL); 10379 10380 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16( 10381 ieee80211chan2mhz(priv->channel)); 10382 if (priv->channel > 14) /* 802.11a */ 10383 *(__le16*)skb_put(dst, sizeof(u16)) = 10384 cpu_to_le16(IEEE80211_CHAN_OFDM | 10385 IEEE80211_CHAN_5GHZ); 10386 else if (priv->ieee->mode == IEEE_B) /* 802.11b */ 10387 *(__le16*)skb_put(dst, sizeof(u16)) = 10388 cpu_to_le16(IEEE80211_CHAN_CCK | 10389 IEEE80211_CHAN_2GHZ); 10390 else /* 802.11g */ 10391 *(__le16*)skb_put(dst, sizeof(u16)) = 10392 cpu_to_le16(IEEE80211_CHAN_OFDM | 10393 IEEE80211_CHAN_2GHZ); 10394 10395 rt_hdr->it_len = cpu_to_le16(dst->len); 10396 10397 skb_copy_from_linear_data(src, skb_put(dst, len), len); 10398 10399 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats)) 10400 dev_kfree_skb_any(dst); 10401 } 10402 } 10403 #endif 10404 10405 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb, 10406 struct net_device *dev, int pri) 10407 { 10408 struct ipw_priv *priv = 
libipw_priv(dev); 10409 unsigned long flags; 10410 netdev_tx_t ret; 10411 10412 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size); 10413 spin_lock_irqsave(&priv->lock, flags); 10414 10415 #ifdef CONFIG_IPW2200_PROMISCUOUS 10416 if (rtap_iface && netif_running(priv->prom_net_dev)) 10417 ipw_handle_promiscuous_tx(priv, txb); 10418 #endif 10419 10420 ret = ipw_tx_skb(priv, txb, pri); 10421 if (ret == NETDEV_TX_OK) 10422 __ipw_led_activity_on(priv); 10423 spin_unlock_irqrestore(&priv->lock, flags); 10424 10425 return ret; 10426 } 10427 10428 static void ipw_net_set_multicast_list(struct net_device *dev) 10429 { 10430 10431 } 10432 10433 static int ipw_net_set_mac_address(struct net_device *dev, void *p) 10434 { 10435 struct ipw_priv *priv = libipw_priv(dev); 10436 struct sockaddr *addr = p; 10437 10438 if (!is_valid_ether_addr(addr->sa_data)) 10439 return -EADDRNOTAVAIL; 10440 mutex_lock(&priv->mutex); 10441 priv->config |= CFG_CUSTOM_MAC; 10442 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); 10443 printk(KERN_INFO "%s: Setting MAC to %pM\n", 10444 priv->net_dev->name, priv->mac_addr); 10445 schedule_work(&priv->adapter_restart); 10446 mutex_unlock(&priv->mutex); 10447 return 0; 10448 } 10449 10450 static void ipw_ethtool_get_drvinfo(struct net_device *dev, 10451 struct ethtool_drvinfo *info) 10452 { 10453 struct ipw_priv *p = libipw_priv(dev); 10454 char vers[64]; 10455 char date[32]; 10456 u32 len; 10457 10458 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 10459 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 10460 10461 len = sizeof(vers); 10462 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); 10463 len = sizeof(date); 10464 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len); 10465 10466 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)", 10467 vers, date); 10468 strlcpy(info->bus_info, pci_name(p->pci_dev), 10469 sizeof(info->bus_info)); 10470 } 10471 10472 static u32 ipw_ethtool_get_link(struct net_device *dev) 10473 { 10474 struct ipw_priv *priv = libipw_priv(dev); 10475 return (priv->status & STATUS_ASSOCIATED) != 0; 10476 } 10477 10478 static int ipw_ethtool_get_eeprom_len(struct net_device *dev) 10479 { 10480 return IPW_EEPROM_IMAGE_SIZE; 10481 } 10482 10483 static int ipw_ethtool_get_eeprom(struct net_device *dev, 10484 struct ethtool_eeprom *eeprom, u8 * bytes) 10485 { 10486 struct ipw_priv *p = libipw_priv(dev); 10487 10488 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) 10489 return -EINVAL; 10490 mutex_lock(&p->mutex); 10491 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len); 10492 mutex_unlock(&p->mutex); 10493 return 0; 10494 } 10495 10496 static int ipw_ethtool_set_eeprom(struct net_device *dev, 10497 struct ethtool_eeprom *eeprom, u8 * bytes) 10498 { 10499 struct ipw_priv *p = libipw_priv(dev); 10500 int i; 10501 10502 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) 10503 return -EINVAL; 10504 mutex_lock(&p->mutex); 10505 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len); 10506 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++) 10507 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]); 10508 mutex_unlock(&p->mutex); 10509 return 0; 10510 } 10511 10512 static const struct ethtool_ops ipw_ethtool_ops = { 10513 .get_link = ipw_ethtool_get_link, 10514 .get_drvinfo = ipw_ethtool_get_drvinfo, 10515 .get_eeprom_len = ipw_ethtool_get_eeprom_len, 10516 .get_eeprom = ipw_ethtool_get_eeprom, 10517 .set_eeprom = ipw_ethtool_set_eeprom, 10518 }; 10519 10520 static irqreturn_t ipw_isr(int irq, void *data) 10521 { 10522 
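/* Hard interrupt handler: read and ack the pending INTA bits, mask further interrupts, then hand the cached bits to the tasklet for processing. */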
struct ipw_priv *priv = data; 10523 u32 inta, inta_mask; 10524 10525 if (!priv) 10526 return IRQ_NONE; 10527 10528 spin_lock(&priv->irq_lock); 10529 10530 if (!(priv->status & STATUS_INT_ENABLED)) { 10531 /* IRQ is disabled */ 10532 goto none; 10533 } 10534 10535 inta = ipw_read32(priv, IPW_INTA_RW); 10536 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); 10537 10538 if (inta == 0xFFFFFFFF) { 10539 /* Hardware disappeared */ 10540 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n"); 10541 goto none; 10542 } 10543 10544 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) { 10545 /* Shared interrupt */ 10546 goto none; 10547 } 10548 10549 /* tell the device to stop sending interrupts */ 10550 __ipw_disable_interrupts(priv); 10551 10552 /* ack current interrupts */ 10553 inta &= (IPW_INTA_MASK_ALL & inta_mask); 10554 ipw_write32(priv, IPW_INTA_RW, inta); 10555 10556 /* Cache INTA value for our tasklet */ 10557 priv->isr_inta = inta; 10558 10559 tasklet_schedule(&priv->irq_tasklet); 10560 10561 spin_unlock(&priv->irq_lock); 10562 10563 return IRQ_HANDLED; 10564 none: 10565 spin_unlock(&priv->irq_lock); 10566 return IRQ_NONE; 10567 } 10568 10569 static void ipw_rf_kill(void *adapter) 10570 { 10571 struct ipw_priv *priv = adapter; 10572 unsigned long flags; 10573 10574 spin_lock_irqsave(&priv->lock, flags); 10575 10576 if (rf_kill_active(priv)) { 10577 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); 10578 schedule_delayed_work(&priv->rf_kill, 2 * HZ); 10579 goto exit_unlock; 10580 } 10581 10582 /* RF Kill is now disabled, so bring the device back up */ 10583 10584 if (!(priv->status & STATUS_RF_KILL_MASK)) { 10585 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting " 10586 "device\n"); 10587 10588 /* we can not do an adapter restart while inside an irq lock */ 10589 schedule_work(&priv->adapter_restart); 10590 } else 10591 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " 10592 "enabled\n"); 10593 10594 exit_unlock: 10595 spin_unlock_irqrestore(&priv->lock, flags); 10596 } 10597 10598 static void ipw_bg_rf_kill(struct work_struct *work) 10599 { 10600 struct ipw_priv *priv = 10601 container_of(work, struct ipw_priv, rf_kill.work); 10602 mutex_lock(&priv->mutex); 10603 ipw_rf_kill(priv); 10604 mutex_unlock(&priv->mutex); 10605 } 10606 10607 static void ipw_link_up(struct ipw_priv *priv) 10608 { 10609 priv->last_seq_num = -1; 10610 priv->last_frag_num = -1; 10611 priv->last_packet_time = 0; 10612 10613 netif_carrier_on(priv->net_dev); 10614 10615 cancel_delayed_work(&priv->request_scan); 10616 cancel_delayed_work(&priv->request_direct_scan); 10617 cancel_delayed_work(&priv->request_passive_scan); 10618 cancel_delayed_work(&priv->scan_event); 10619 ipw_reset_stats(priv); 10620 /* Ensure the rate is updated immediately */ 10621 priv->last_rate = ipw_get_current_rate(priv); 10622 ipw_gather_stats(priv); 10623 ipw_led_link_up(priv); 10624 notify_wx_assoc_event(priv); 10625 10626 if (priv->config & CFG_BACKGROUND_SCAN) 10627 schedule_delayed_work(&priv->request_scan, HZ); 10628 } 10629 10630 static void ipw_bg_link_up(struct work_struct *work) 10631 { 10632 struct ipw_priv *priv = 10633 container_of(work, struct ipw_priv, link_up); 10634 mutex_lock(&priv->mutex); 10635 ipw_link_up(priv); 10636 mutex_unlock(&priv->mutex); 10637 } 10638 10639 static void ipw_link_down(struct ipw_priv *priv) 10640 { 10641 ipw_led_link_down(priv); 10642 netif_carrier_off(priv->net_dev); 10643 notify_wx_assoc_event(priv); 10644 10645 /* Cancel any queued work ... 
*/ 10646 cancel_delayed_work(&priv->request_scan); 10647 cancel_delayed_work(&priv->request_direct_scan); 10648 cancel_delayed_work(&priv->request_passive_scan); 10649 cancel_delayed_work(&priv->adhoc_check); 10650 cancel_delayed_work(&priv->gather_stats); 10651 10652 ipw_reset_stats(priv); 10653 10654 if (!(priv->status & STATUS_EXIT_PENDING)) { 10655 /* Queue up another scan... */ 10656 schedule_delayed_work(&priv->request_scan, 0); 10657 } else 10658 cancel_delayed_work(&priv->scan_event); 10659 } 10660 10661 static void ipw_bg_link_down(struct work_struct *work) 10662 { 10663 struct ipw_priv *priv = 10664 container_of(work, struct ipw_priv, link_down); 10665 mutex_lock(&priv->mutex); 10666 ipw_link_down(priv); 10667 mutex_unlock(&priv->mutex); 10668 } 10669 10670 static int ipw_setup_deferred_work(struct ipw_priv *priv) 10671 { 10672 int ret = 0; 10673 10674 init_waitqueue_head(&priv->wait_command_queue); 10675 init_waitqueue_head(&priv->wait_state); 10676 10677 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check); 10678 INIT_WORK(&priv->associate, ipw_bg_associate); 10679 INIT_WORK(&priv->disassociate, ipw_bg_disassociate); 10680 INIT_WORK(&priv->system_config, ipw_system_config); 10681 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish); 10682 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart); 10683 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill); 10684 INIT_WORK(&priv->up, ipw_bg_up); 10685 INIT_WORK(&priv->down, ipw_bg_down); 10686 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); 10687 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan); 10688 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan); 10689 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); 10690 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); 10691 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); 10692 INIT_WORK(&priv->roam, ipw_bg_roam); 10693 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check); 10694 INIT_WORK(&priv->link_up, ipw_bg_link_up); 10695 INIT_WORK(&priv->link_down, ipw_bg_link_down); 10696 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on); 10697 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off); 10698 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off); 10699 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network); 10700 10701 #ifdef CONFIG_IPW2200_QOS 10702 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate); 10703 #endif /* CONFIG_IPW2200_QOS */ 10704 10705 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 10706 ipw_irq_tasklet, (unsigned long)priv); 10707 10708 return ret; 10709 } 10710 10711 static void shim__set_security(struct net_device *dev, 10712 struct libipw_security *sec) 10713 { 10714 struct ipw_priv *priv = libipw_priv(dev); 10715 int i; 10716 for (i = 0; i < 4; i++) { 10717 if (sec->flags & (1 << i)) { 10718 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i]; 10719 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i]; 10720 if (sec->key_sizes[i] == 0) 10721 priv->ieee->sec.flags &= ~(1 << i); 10722 else { 10723 memcpy(priv->ieee->sec.keys[i], sec->keys[i], 10724 sec->key_sizes[i]); 10725 priv->ieee->sec.flags |= (1 << i); 10726 } 10727 priv->status |= STATUS_SECURITY_UPDATED; 10728 } else if (sec->level != SEC_LEVEL_1) 10729 priv->ieee->sec.flags &= ~(1 << i); 10730 } 10731 10732 if (sec->flags & SEC_ACTIVE_KEY) { 10733 if (sec->active_key <= 3) { 10734 priv->ieee->sec.active_key = sec->active_key; 10735 priv->ieee->sec.flags |= SEC_ACTIVE_KEY; 10736 } else 10737 
priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; 10738 priv->status |= STATUS_SECURITY_UPDATED; 10739 } else 10740 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; 10741 10742 if ((sec->flags & SEC_AUTH_MODE) && 10743 (priv->ieee->sec.auth_mode != sec->auth_mode)) { 10744 priv->ieee->sec.auth_mode = sec->auth_mode; 10745 priv->ieee->sec.flags |= SEC_AUTH_MODE; 10746 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY) 10747 priv->capability |= CAP_SHARED_KEY; 10748 else 10749 priv->capability &= ~CAP_SHARED_KEY; 10750 priv->status |= STATUS_SECURITY_UPDATED; 10751 } 10752 10753 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) { 10754 priv->ieee->sec.flags |= SEC_ENABLED; 10755 priv->ieee->sec.enabled = sec->enabled; 10756 priv->status |= STATUS_SECURITY_UPDATED; 10757 if (sec->enabled) 10758 priv->capability |= CAP_PRIVACY_ON; 10759 else 10760 priv->capability &= ~CAP_PRIVACY_ON; 10761 } 10762 10763 if (sec->flags & SEC_ENCRYPT) 10764 priv->ieee->sec.encrypt = sec->encrypt; 10765 10766 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) { 10767 priv->ieee->sec.level = sec->level; 10768 priv->ieee->sec.flags |= SEC_LEVEL; 10769 priv->status |= STATUS_SECURITY_UPDATED; 10770 } 10771 10772 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT)) 10773 ipw_set_hwcrypto_keys(priv); 10774 10775 /* To match current functionality of ipw2100 (which works well w/ 10776 * various supplicants, we don't force a disassociate if the 10777 * privacy capability changes ... */ 10778 #if 0 10779 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) && 10780 (((priv->assoc_request.capability & 10781 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) || 10782 (!(priv->assoc_request.capability & 10783 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) { 10784 IPW_DEBUG_ASSOC("Disassociating due to capability " 10785 "change.\n"); 10786 ipw_disassociate(priv); 10787 } 10788 #endif 10789 } 10790 10791 static int init_supported_rates(struct ipw_priv *priv, 10792 struct ipw_supported_rates *rates) 10793 { 10794 /* TODO: Mask out rates based on priv->rates_mask */ 10795 10796 memset(rates, 0, sizeof(*rates)); 10797 /* configure supported rates */ 10798 switch (priv->ieee->freq_band) { 10799 case LIBIPW_52GHZ_BAND: 10800 rates->ieee_mode = IPW_A_MODE; 10801 rates->purpose = IPW_RATE_CAPABILITIES; 10802 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION, 10803 LIBIPW_OFDM_DEFAULT_RATES_MASK); 10804 break; 10805 10806 default: /* Mixed or 2.4Ghz */ 10807 rates->ieee_mode = IPW_G_MODE; 10808 rates->purpose = IPW_RATE_CAPABILITIES; 10809 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION, 10810 LIBIPW_CCK_DEFAULT_RATES_MASK); 10811 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) { 10812 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION, 10813 LIBIPW_OFDM_DEFAULT_RATES_MASK); 10814 } 10815 break; 10816 } 10817 10818 return 0; 10819 } 10820 10821 static int ipw_config(struct ipw_priv *priv) 10822 { 10823 /* This is only called from ipw_up, which resets/reloads the firmware 10824 so, we don't need to first disable the card before we configure 10825 it */ 10826 if (ipw_set_tx_power(priv)) 10827 goto error; 10828 10829 /* initialize adapter address */ 10830 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr)) 10831 goto error; 10832 10833 /* set basic system config settings */ 10834 init_sys_config(&priv->sys_config); 10835 10836 /* Support Bluetooth if we have BT h/w on board, and user wants to. 
10837 * Does not support BT priority yet (don't abort or defer our Tx) */ 10838 if (bt_coexist) { 10839 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY]; 10840 10841 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG) 10842 priv->sys_config.bt_coexistence 10843 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL; 10844 if (bt_caps & EEPROM_SKU_CAP_BT_OOB) 10845 priv->sys_config.bt_coexistence 10846 |= CFG_BT_COEXISTENCE_OOB; 10847 } 10848 10849 #ifdef CONFIG_IPW2200_PROMISCUOUS 10850 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { 10851 priv->sys_config.accept_all_data_frames = 1; 10852 priv->sys_config.accept_non_directed_frames = 1; 10853 priv->sys_config.accept_all_mgmt_bcpr = 1; 10854 priv->sys_config.accept_all_mgmt_frames = 1; 10855 } 10856 #endif 10857 10858 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 10859 priv->sys_config.answer_broadcast_ssid_probe = 1; 10860 else 10861 priv->sys_config.answer_broadcast_ssid_probe = 0; 10862 10863 if (ipw_send_system_config(priv)) 10864 goto error; 10865 10866 init_supported_rates(priv, &priv->rates); 10867 if (ipw_send_supported_rates(priv, &priv->rates)) 10868 goto error; 10869 10870 /* Set request-to-send threshold */ 10871 if (priv->rts_threshold) { 10872 if (ipw_send_rts_threshold(priv, priv->rts_threshold)) 10873 goto error; 10874 } 10875 #ifdef CONFIG_IPW2200_QOS 10876 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n"); 10877 ipw_qos_activate(priv, NULL); 10878 #endif /* CONFIG_IPW2200_QOS */ 10879 10880 if (ipw_set_random_seed(priv)) 10881 goto error; 10882 10883 /* final state transition to the RUN state */ 10884 if (ipw_send_host_complete(priv)) 10885 goto error; 10886 10887 priv->status |= STATUS_INIT; 10888 10889 ipw_led_init(priv); 10890 ipw_led_radio_on(priv); 10891 priv->notif_missed_beacons = 0; 10892 10893 /* Set hardware WEP key if it is configured. */ 10894 if ((priv->capability & CAP_PRIVACY_ON) && 10895 (priv->ieee->sec.level == SEC_LEVEL_1) && 10896 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt)) 10897 ipw_set_hwcrypto_keys(priv); 10898 10899 return 0; 10900 10901 error: 10902 return -EIO; 10903 } 10904 10905 /* 10906 * NOTE: 10907 * 10908 * These tables have been tested in conjunction with the 10909 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters. 10910 * 10911 * Altering these values, using them on other hardware, or in geographies 10912 * not intended for resale of the above mentioned Intel adapters has 10913 * not been tested. 10914 * 10915 * Remember to update the table in README.ipw2200 when changing this 10916 * table.
10917 * 10918 */ 10919 static const struct libipw_geo ipw_geos[] = { 10920 { /* Restricted */ 10921 "---", 10922 .bg_channels = 11, 10923 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10924 {2427, 4}, {2432, 5}, {2437, 6}, 10925 {2442, 7}, {2447, 8}, {2452, 9}, 10926 {2457, 10}, {2462, 11}}, 10927 }, 10928 10929 { /* Custom US/Canada */ 10930 "ZZF", 10931 .bg_channels = 11, 10932 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10933 {2427, 4}, {2432, 5}, {2437, 6}, 10934 {2442, 7}, {2447, 8}, {2452, 9}, 10935 {2457, 10}, {2462, 11}}, 10936 .a_channels = 8, 10937 .a = {{5180, 36}, 10938 {5200, 40}, 10939 {5220, 44}, 10940 {5240, 48}, 10941 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 10942 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 10943 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 10944 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}}, 10945 }, 10946 10947 { /* Rest of World */ 10948 "ZZD", 10949 .bg_channels = 13, 10950 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10951 {2427, 4}, {2432, 5}, {2437, 6}, 10952 {2442, 7}, {2447, 8}, {2452, 9}, 10953 {2457, 10}, {2462, 11}, {2467, 12}, 10954 {2472, 13}}, 10955 }, 10956 10957 { /* Custom USA & Europe & High */ 10958 "ZZA", 10959 .bg_channels = 11, 10960 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10961 {2427, 4}, {2432, 5}, {2437, 6}, 10962 {2442, 7}, {2447, 8}, {2452, 9}, 10963 {2457, 10}, {2462, 11}}, 10964 .a_channels = 13, 10965 .a = {{5180, 36}, 10966 {5200, 40}, 10967 {5220, 44}, 10968 {5240, 48}, 10969 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 10970 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 10971 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 10972 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 10973 {5745, 149}, 10974 {5765, 153}, 10975 {5785, 157}, 10976 {5805, 161}, 10977 {5825, 165}}, 10978 }, 10979 10980 { /* Custom NA & Europe */ 10981 "ZZB", 10982 .bg_channels = 11, 10983 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10984 {2427, 4}, {2432, 5}, {2437, 6}, 10985 {2442, 7}, {2447, 8}, {2452, 9}, 10986 {2457, 10}, {2462, 11}}, 10987 .a_channels = 13, 10988 .a = {{5180, 36}, 10989 {5200, 40}, 10990 {5220, 44}, 10991 {5240, 48}, 10992 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 10993 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 10994 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 10995 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 10996 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 10997 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 10998 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 10999 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 11000 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 11001 }, 11002 11003 { /* Custom Japan */ 11004 "ZZC", 11005 .bg_channels = 11, 11006 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11007 {2427, 4}, {2432, 5}, {2437, 6}, 11008 {2442, 7}, {2447, 8}, {2452, 9}, 11009 {2457, 10}, {2462, 11}}, 11010 .a_channels = 4, 11011 .a = {{5170, 34}, {5190, 38}, 11012 {5210, 42}, {5230, 46}}, 11013 }, 11014 11015 { /* Custom */ 11016 "ZZM", 11017 .bg_channels = 11, 11018 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11019 {2427, 4}, {2432, 5}, {2437, 6}, 11020 {2442, 7}, {2447, 8}, {2452, 9}, 11021 {2457, 10}, {2462, 11}}, 11022 }, 11023 11024 { /* Europe */ 11025 "ZZE", 11026 .bg_channels = 13, 11027 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11028 {2427, 4}, {2432, 5}, {2437, 6}, 11029 {2442, 7}, {2447, 8}, {2452, 9}, 11030 {2457, 10}, {2462, 11}, {2467, 12}, 11031 {2472, 13}}, 11032 .a_channels = 19, 11033 .a = {{5180, 36}, 11034 {5200, 40}, 11035 {5220, 44}, 11036 {5240, 48}, 11037 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11038 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11039 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11040 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11041 {5500, 100, LIBIPW_CH_PASSIVE_ONLY}, 11042 {5520, 104, 
LIBIPW_CH_PASSIVE_ONLY}, 11043 {5540, 108, LIBIPW_CH_PASSIVE_ONLY}, 11044 {5560, 112, LIBIPW_CH_PASSIVE_ONLY}, 11045 {5580, 116, LIBIPW_CH_PASSIVE_ONLY}, 11046 {5600, 120, LIBIPW_CH_PASSIVE_ONLY}, 11047 {5620, 124, LIBIPW_CH_PASSIVE_ONLY}, 11048 {5640, 128, LIBIPW_CH_PASSIVE_ONLY}, 11049 {5660, 132, LIBIPW_CH_PASSIVE_ONLY}, 11050 {5680, 136, LIBIPW_CH_PASSIVE_ONLY}, 11051 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}}, 11052 }, 11053 11054 { /* Custom Japan */ 11055 "ZZJ", 11056 .bg_channels = 14, 11057 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11058 {2427, 4}, {2432, 5}, {2437, 6}, 11059 {2442, 7}, {2447, 8}, {2452, 9}, 11060 {2457, 10}, {2462, 11}, {2467, 12}, 11061 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}}, 11062 .a_channels = 4, 11063 .a = {{5170, 34}, {5190, 38}, 11064 {5210, 42}, {5230, 46}}, 11065 }, 11066 11067 { /* Rest of World */ 11068 "ZZR", 11069 .bg_channels = 14, 11070 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11071 {2427, 4}, {2432, 5}, {2437, 6}, 11072 {2442, 7}, {2447, 8}, {2452, 9}, 11073 {2457, 10}, {2462, 11}, {2467, 12}, 11074 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY | 11075 LIBIPW_CH_PASSIVE_ONLY}}, 11076 }, 11077 11078 { /* High Band */ 11079 "ZZH", 11080 .bg_channels = 13, 11081 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11082 {2427, 4}, {2432, 5}, {2437, 6}, 11083 {2442, 7}, {2447, 8}, {2452, 9}, 11084 {2457, 10}, {2462, 11}, 11085 {2467, 12, LIBIPW_CH_PASSIVE_ONLY}, 11086 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}}, 11087 .a_channels = 4, 11088 .a = {{5745, 149}, {5765, 153}, 11089 {5785, 157}, {5805, 161}}, 11090 }, 11091 11092 { /* Custom Europe */ 11093 "ZZG", 11094 .bg_channels = 13, 11095 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11096 {2427, 4}, {2432, 5}, {2437, 6}, 11097 {2442, 7}, {2447, 8}, {2452, 9}, 11098 {2457, 10}, {2462, 11}, 11099 {2467, 12}, {2472, 13}}, 11100 .a_channels = 4, 11101 .a = {{5180, 36}, {5200, 40}, 11102 {5220, 44}, {5240, 48}}, 11103 }, 11104 11105 { /* Europe */ 11106 "ZZK", 11107 .bg_channels = 13, 11108 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11109 {2427, 4}, {2432, 5}, {2437, 6}, 11110 {2442, 7}, {2447, 8}, {2452, 9}, 11111 {2457, 10}, {2462, 11}, 11112 {2467, 12, LIBIPW_CH_PASSIVE_ONLY}, 11113 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}}, 11114 .a_channels = 24, 11115 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY}, 11116 {5200, 40, LIBIPW_CH_PASSIVE_ONLY}, 11117 {5220, 44, LIBIPW_CH_PASSIVE_ONLY}, 11118 {5240, 48, LIBIPW_CH_PASSIVE_ONLY}, 11119 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11120 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11121 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11122 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11123 {5500, 100, LIBIPW_CH_PASSIVE_ONLY}, 11124 {5520, 104, LIBIPW_CH_PASSIVE_ONLY}, 11125 {5540, 108, LIBIPW_CH_PASSIVE_ONLY}, 11126 {5560, 112, LIBIPW_CH_PASSIVE_ONLY}, 11127 {5580, 116, LIBIPW_CH_PASSIVE_ONLY}, 11128 {5600, 120, LIBIPW_CH_PASSIVE_ONLY}, 11129 {5620, 124, LIBIPW_CH_PASSIVE_ONLY}, 11130 {5640, 128, LIBIPW_CH_PASSIVE_ONLY}, 11131 {5660, 132, LIBIPW_CH_PASSIVE_ONLY}, 11132 {5680, 136, LIBIPW_CH_PASSIVE_ONLY}, 11133 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}, 11134 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 11135 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 11136 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 11137 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 11138 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 11139 }, 11140 11141 { /* Europe */ 11142 "ZZL", 11143 .bg_channels = 11, 11144 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11145 {2427, 4}, {2432, 5}, {2437, 6}, 11146 {2442, 7}, {2447, 8}, {2452, 9}, 11147 {2457, 10}, {2462, 11}}, 11148 .a_channels = 13, 11149 .a = {{5180, 36, 
LIBIPW_CH_PASSIVE_ONLY}, 11150 {5200, 40, LIBIPW_CH_PASSIVE_ONLY}, 11151 {5220, 44, LIBIPW_CH_PASSIVE_ONLY}, 11152 {5240, 48, LIBIPW_CH_PASSIVE_ONLY}, 11153 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11154 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11155 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11156 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11157 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 11158 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 11159 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 11160 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 11161 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 11162 } 11163 }; 11164 11165 static void ipw_set_geo(struct ipw_priv *priv) 11166 { 11167 int j; 11168 11169 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) { 11170 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE], 11171 ipw_geos[j].name, 3)) 11172 break; 11173 } 11174 11175 if (j == ARRAY_SIZE(ipw_geos)) { 11176 IPW_WARNING("SKU [%c%c%c] not recognized.\n", 11177 priv->eeprom[EEPROM_COUNTRY_CODE + 0], 11178 priv->eeprom[EEPROM_COUNTRY_CODE + 1], 11179 priv->eeprom[EEPROM_COUNTRY_CODE + 2]); 11180 j = 0; 11181 } 11182 11183 libipw_set_geo(priv->ieee, &ipw_geos[j]); 11184 } 11185 11186 #define MAX_HW_RESTARTS 5 11187 static int ipw_up(struct ipw_priv *priv) 11188 { 11189 int rc, i; 11190 11191 /* Age scan list entries found before suspend */ 11192 if (priv->suspend_time) { 11193 libipw_networks_age(priv->ieee, priv->suspend_time); 11194 priv->suspend_time = 0; 11195 } 11196 11197 if (priv->status & STATUS_EXIT_PENDING) 11198 return -EIO; 11199 11200 if (cmdlog && !priv->cmdlog) { 11201 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog), 11202 GFP_KERNEL); 11203 if (priv->cmdlog == NULL) { 11204 IPW_ERROR("Error allocating %d command log entries.\n", 11205 cmdlog); 11206 return -ENOMEM; 11207 } else { 11208 priv->cmdlog_len = cmdlog; 11209 } 11210 } 11211 11212 for (i = 0; i < MAX_HW_RESTARTS; i++) { 11213 /* Load the microcode, firmware, and eeprom. 11214 * Also start the clocks. */ 11215 rc = ipw_load(priv); 11216 if (rc) { 11217 IPW_ERROR("Unable to load firmware: %d\n", rc); 11218 return rc; 11219 } 11220 11221 ipw_init_ordinals(priv); 11222 if (!(priv->config & CFG_CUSTOM_MAC)) 11223 eeprom_parse_mac(priv, priv->mac_addr); 11224 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); 11225 11226 ipw_set_geo(priv); 11227 11228 if (priv->status & STATUS_RF_KILL_SW) { 11229 IPW_WARNING("Radio disabled by module parameter.\n"); 11230 return 0; 11231 } else if (rf_kill_active(priv)) { 11232 IPW_WARNING("Radio Frequency Kill Switch is On:\n" 11233 "Kill switch must be turned off for " 11234 "wireless networking to work.\n"); 11235 schedule_delayed_work(&priv->rf_kill, 2 * HZ); 11236 return 0; 11237 } 11238 11239 rc = ipw_config(priv); 11240 if (!rc) { 11241 IPW_DEBUG_INFO("Configured device on count %i\n", i); 11242 11243 /* If configured to try and auto-associate, kick 11244 * off a scan.
*/ 11245 schedule_delayed_work(&priv->request_scan, 0); 11246 11247 return 0; 11248 } 11249 11250 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc); 11251 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n", 11252 i, MAX_HW_RESTARTS); 11253 11254 /* We had an error bringing up the hardware, so take it 11255 * all the way back down so we can try again */ 11256 ipw_down(priv); 11257 } 11258 11259 /* tried to restart and config the device for as long as our 11260 * patience could withstand */ 11261 IPW_ERROR("Unable to initialize device after %d attempts.\n", i); 11262 11263 return -EIO; 11264 } 11265 11266 static void ipw_bg_up(struct work_struct *work) 11267 { 11268 struct ipw_priv *priv = 11269 container_of(work, struct ipw_priv, up); 11270 mutex_lock(&priv->mutex); 11271 ipw_up(priv); 11272 mutex_unlock(&priv->mutex); 11273 } 11274 11275 static void ipw_deinit(struct ipw_priv *priv) 11276 { 11277 int i; 11278 11279 if (priv->status & STATUS_SCANNING) { 11280 IPW_DEBUG_INFO("Aborting scan during shutdown.\n"); 11281 ipw_abort_scan(priv); 11282 } 11283 11284 if (priv->status & STATUS_ASSOCIATED) { 11285 IPW_DEBUG_INFO("Disassociating during shutdown.\n"); 11286 ipw_disassociate(priv); 11287 } 11288 11289 ipw_led_shutdown(priv); 11290 11291 /* Wait up to 1s for status to change to not scanning and not 11292 * associated (disassociation can take a while for a full 802.11 11293 * exchange) */ 11294 for (i = 1000; i && (priv->status & 11295 (STATUS_DISASSOCIATING | 11296 STATUS_ASSOCIATED | STATUS_SCANNING)); i--) 11297 udelay(10); 11298 11299 if (priv->status & (STATUS_DISASSOCIATING | 11300 STATUS_ASSOCIATED | STATUS_SCANNING)) 11301 IPW_DEBUG_INFO("Still associated or scanning...\n"); 11302 else 11303 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i); 11304 11305 /* Attempt to disable the card */ 11306 ipw_send_card_disable(priv, 0); 11307 11308 priv->status &= ~STATUS_INIT; 11309 } 11310 11311 static void ipw_down(struct ipw_priv *priv) 11312 { 11313 int exit_pending = priv->status & STATUS_EXIT_PENDING; 11314 11315 priv->status |= STATUS_EXIT_PENDING; 11316 11317 if (ipw_is_init(priv)) 11318 ipw_deinit(priv); 11319 11320 /* Wipe out the EXIT_PENDING status bit if we are not actually 11321 * exiting the module */ 11322 if (!exit_pending) 11323 priv->status &= ~STATUS_EXIT_PENDING; 11324 11325 /* tell the device to stop sending interrupts */ 11326 ipw_disable_interrupts(priv); 11327 11328 /* Clear all bits but the RF Kill */ 11329 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING; 11330 netif_carrier_off(priv->net_dev); 11331 11332 ipw_stop_nic(priv); 11333 11334 ipw_led_radio_off(priv); 11335 } 11336 11337 static void ipw_bg_down(struct work_struct *work) 11338 { 11339 struct ipw_priv *priv = 11340 container_of(work, struct ipw_priv, down); 11341 mutex_lock(&priv->mutex); 11342 ipw_down(priv); 11343 mutex_unlock(&priv->mutex); 11344 } 11345 11346 static int ipw_wdev_init(struct net_device *dev) 11347 { 11348 int i, rc = 0; 11349 struct ipw_priv *priv = libipw_priv(dev); 11350 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 11351 struct wireless_dev *wdev = &priv->ieee->wdev; 11352 11353 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); 11354 11355 /* fill-out priv->ieee->bg_band */ 11356 if (geo->bg_channels) { 11357 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band; 11358 11359 bg_band->band = NL80211_BAND_2GHZ; 11360 bg_band->n_channels = geo->bg_channels; 11361 bg_band->channels = kcalloc(geo->bg_channels, 11362 sizeof(struct
ieee80211_channel), 11363 GFP_KERNEL); 11364 if (!bg_band->channels) { 11365 rc = -ENOMEM; 11366 goto out; 11367 } 11368 /* translate geo->bg to bg_band.channels */ 11369 for (i = 0; i < geo->bg_channels; i++) { 11370 bg_band->channels[i].band = NL80211_BAND_2GHZ; 11371 bg_band->channels[i].center_freq = geo->bg[i].freq; 11372 bg_band->channels[i].hw_value = geo->bg[i].channel; 11373 bg_band->channels[i].max_power = geo->bg[i].max_power; 11374 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) 11375 bg_band->channels[i].flags |= 11376 IEEE80211_CHAN_NO_IR; 11377 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS) 11378 bg_band->channels[i].flags |= 11379 IEEE80211_CHAN_NO_IR; 11380 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT) 11381 bg_band->channels[i].flags |= 11382 IEEE80211_CHAN_RADAR; 11383 /* No equivalent for LIBIPW_CH_80211H_RULES, 11384 LIBIPW_CH_UNIFORM_SPREADING, or 11385 LIBIPW_CH_B_ONLY... */ 11386 } 11387 /* point at bitrate info */ 11388 bg_band->bitrates = ipw2200_bg_rates; 11389 bg_band->n_bitrates = ipw2200_num_bg_rates; 11390 11391 wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band; 11392 } 11393 11394 /* fill-out priv->ieee->a_band */ 11395 if (geo->a_channels) { 11396 struct ieee80211_supported_band *a_band = &priv->ieee->a_band; 11397 11398 a_band->band = NL80211_BAND_5GHZ; 11399 a_band->n_channels = geo->a_channels; 11400 a_band->channels = kcalloc(geo->a_channels, 11401 sizeof(struct ieee80211_channel), 11402 GFP_KERNEL); 11403 if (!a_band->channels) { 11404 rc = -ENOMEM; 11405 goto out; 11406 } 11407 /* translate geo->a to a_band.channels */ 11408 for (i = 0; i < geo->a_channels; i++) { 11409 a_band->channels[i].band = NL80211_BAND_5GHZ; 11410 a_band->channels[i].center_freq = geo->a[i].freq; 11411 a_band->channels[i].hw_value = geo->a[i].channel; 11412 a_band->channels[i].max_power = geo->a[i].max_power; 11413 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) 11414 a_band->channels[i].flags |= 11415 IEEE80211_CHAN_NO_IR; 11416 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS) 11417 a_band->channels[i].flags |= 11418 IEEE80211_CHAN_NO_IR; 11419 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT) 11420 a_band->channels[i].flags |= 11421 IEEE80211_CHAN_RADAR; 11422 /* No equivalent for LIBIPW_CH_80211H_RULES, 11423 LIBIPW_CH_UNIFORM_SPREADING, or 11424 LIBIPW_CH_B_ONLY... */ 11425 } 11426 /* point at bitrate info */ 11427 a_band->bitrates = ipw2200_a_rates; 11428 a_band->n_bitrates = ipw2200_num_a_rates; 11429 11430 wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band; 11431 } 11432 11433 wdev->wiphy->cipher_suites = ipw_cipher_suites; 11434 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites); 11435 11436 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); 11437 11438 /* With that information in place, we can now register the wiphy... 
*/ 11439 if (wiphy_register(wdev->wiphy)) 11440 rc = -EIO; 11441 out: 11442 return rc; 11443 } 11444 11445 /* PCI driver stuff */ 11446 static const struct pci_device_id card_ids[] = { 11447 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0}, 11448 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0}, 11449 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0}, 11450 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0}, 11451 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0}, 11452 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0}, 11453 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0}, 11454 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0}, 11455 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0}, 11456 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0}, 11457 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0}, 11458 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0}, 11459 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0}, 11460 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0}, 11461 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0}, 11462 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0}, 11463 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0}, 11464 {PCI_VDEVICE(INTEL, 0x104f), 0}, 11465 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */ 11466 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */ 11467 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */ 11468 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */ 11469 11470 /* required last entry */ 11471 {0,} 11472 }; 11473 11474 MODULE_DEVICE_TABLE(pci, card_ids); 11475 11476 static struct attribute *ipw_sysfs_entries[] = { 11477 &dev_attr_rf_kill.attr, 11478 &dev_attr_direct_dword.attr, 11479 &dev_attr_indirect_byte.attr, 11480 &dev_attr_indirect_dword.attr, 11481 &dev_attr_mem_gpio_reg.attr, 11482 &dev_attr_command_event_reg.attr, 11483 &dev_attr_nic_type.attr, 11484 &dev_attr_status.attr, 11485 &dev_attr_cfg.attr, 11486 &dev_attr_error.attr, 11487 &dev_attr_event_log.attr, 11488 &dev_attr_cmd_log.attr, 11489 &dev_attr_eeprom_delay.attr, 11490 &dev_attr_ucode_version.attr, 11491 &dev_attr_rtc.attr, 11492 &dev_attr_scan_age.attr, 11493 &dev_attr_led.attr, 11494 &dev_attr_speed_scan.attr, 11495 &dev_attr_net_stats.attr, 11496 &dev_attr_channels.attr, 11497 #ifdef CONFIG_IPW2200_PROMISCUOUS 11498 &dev_attr_rtap_iface.attr, 11499 &dev_attr_rtap_filter.attr, 11500 #endif 11501 NULL 11502 }; 11503 11504 static const struct attribute_group ipw_attribute_group = { 11505 .name = NULL, /* put in device directory */ 11506 .attrs = ipw_sysfs_entries, 11507 }; 11508 11509 #ifdef CONFIG_IPW2200_PROMISCUOUS 11510 static int ipw_prom_open(struct net_device *dev) 11511 { 11512 struct ipw_prom_priv *prom_priv = libipw_priv(dev); 11513 struct ipw_priv *priv = prom_priv->priv; 11514 11515 IPW_DEBUG_INFO("prom dev->open\n"); 11516 netif_carrier_off(dev); 11517 11518 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 11519 priv->sys_config.accept_all_data_frames = 1; 11520 priv->sys_config.accept_non_directed_frames = 1; 11521 priv->sys_config.accept_all_mgmt_bcpr = 1; 11522 priv->sys_config.accept_all_mgmt_frames = 1; 11523 11524 ipw_send_system_config(priv); 11525 } 11526 11527 return 0; 11528 } 11529 11530 static int ipw_prom_stop(struct net_device *dev) 11531 { 11532 struct ipw_prom_priv *prom_priv = libipw_priv(dev); 11533 struct ipw_priv *priv = prom_priv->priv; 11534 11535 IPW_DEBUG_INFO("prom dev->stop\n"); 11536 11537 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 11538 priv->sys_config.accept_all_data_frames = 0; 
		priv->sys_config.accept_non_directed_frames = 0;
		priv->sys_config.accept_all_mgmt_bcpr = 0;
		priv->sys_config.accept_all_mgmt_frames = 0;

		ipw_send_system_config(priv);
	}

	return 0;
}

static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	IPW_DEBUG_INFO("prom dev->xmit\n");
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops ipw_prom_netdev_ops = {
	.ndo_open = ipw_prom_open,
	.ndo_stop = ipw_prom_stop,
	.ndo_start_xmit = ipw_prom_hard_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static int ipw_prom_alloc(struct ipw_priv *priv)
{
	int rc = 0;

	if (priv->prom_net_dev)
		return -EPERM;

	priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
	if (priv->prom_net_dev == NULL)
		return -ENOMEM;

	priv->prom_priv = libipw_priv(priv->prom_net_dev);
	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
	priv->prom_priv->priv = priv;

	strcpy(priv->prom_net_dev->name, "rtap%d");
	memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);

	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;

	priv->prom_net_dev->min_mtu = 68;
	priv->prom_net_dev->max_mtu = LIBIPW_DATA_LEN;

	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);

	rc = register_netdev(priv->prom_net_dev);
	if (rc) {
		free_libipw(priv->prom_net_dev, 1);
		priv->prom_net_dev = NULL;
		return rc;
	}

	return 0;
}

static void ipw_prom_free(struct ipw_priv *priv)
{
	if (!priv->prom_net_dev)
		return;

	unregister_netdev(priv->prom_net_dev);
	free_libipw(priv->prom_net_dev, 1);

	priv->prom_net_dev = NULL;
}

#endif

static const struct net_device_ops ipw_netdev_ops = {
	.ndo_open = ipw_net_open,
	.ndo_stop = ipw_net_stop,
	.ndo_set_rx_mode = ipw_net_set_multicast_list,
	.ndo_set_mac_address = ipw_net_set_mac_address,
	.ndo_start_xmit = libipw_xmit,
	.ndo_validate_addr = eth_validate_addr,
};

static int ipw_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int i;

	net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = libipw_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);

	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
	ipw_debug_level = debug;
	spin_lock_init(&priv->irq_lock);
	spin_lock_init(&priv->lock);
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	mutex_init(&priv->mutex);
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_libipw;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		printk(KERN_WARNING
DRV_NAME ": No suitable DMA available.\n"); 11664 goto out_pci_disable_device; 11665 } 11666 11667 pci_set_drvdata(pdev, priv); 11668 11669 err = pci_request_regions(pdev, DRV_NAME); 11670 if (err) 11671 goto out_pci_disable_device; 11672 11673 /* We disable the RETRY_TIMEOUT register (0x41) to keep 11674 * PCI Tx retries from interfering with C3 CPU state */ 11675 pci_read_config_dword(pdev, 0x40, &val); 11676 if ((val & 0x0000ff00) != 0) 11677 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 11678 11679 length = pci_resource_len(pdev, 0); 11680 priv->hw_len = length; 11681 11682 base = pci_ioremap_bar(pdev, 0); 11683 if (!base) { 11684 err = -ENODEV; 11685 goto out_pci_release_regions; 11686 } 11687 11688 priv->hw_base = base; 11689 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length); 11690 IPW_DEBUG_INFO("pci_resource_base = %p\n", base); 11691 11692 err = ipw_setup_deferred_work(priv); 11693 if (err) { 11694 IPW_ERROR("Unable to setup deferred work\n"); 11695 goto out_iounmap; 11696 } 11697 11698 ipw_sw_reset(priv, 1); 11699 11700 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv); 11701 if (err) { 11702 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); 11703 goto out_iounmap; 11704 } 11705 11706 SET_NETDEV_DEV(net_dev, &pdev->dev); 11707 11708 mutex_lock(&priv->mutex); 11709 11710 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit; 11711 priv->ieee->set_security = shim__set_security; 11712 priv->ieee->is_queue_full = ipw_net_is_queue_full; 11713 11714 #ifdef CONFIG_IPW2200_QOS 11715 priv->ieee->is_qos_active = ipw_is_qos_active; 11716 priv->ieee->handle_probe_response = ipw_handle_beacon; 11717 priv->ieee->handle_beacon = ipw_handle_probe_response; 11718 priv->ieee->handle_assoc_response = ipw_handle_assoc_response; 11719 #endif /* CONFIG_IPW2200_QOS */ 11720 11721 priv->ieee->perfect_rssi = -20; 11722 priv->ieee->worst_rssi = -85; 11723 11724 net_dev->netdev_ops = &ipw_netdev_ops; 11725 priv->wireless_data.spy_data = &priv->ieee->spy_data; 11726 net_dev->wireless_data = &priv->wireless_data; 11727 net_dev->wireless_handlers = &ipw_wx_handler_def; 11728 net_dev->ethtool_ops = &ipw_ethtool_ops; 11729 11730 net_dev->min_mtu = 68; 11731 net_dev->max_mtu = LIBIPW_DATA_LEN; 11732 11733 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); 11734 if (err) { 11735 IPW_ERROR("failed to create sysfs device attributes\n"); 11736 mutex_unlock(&priv->mutex); 11737 goto out_release_irq; 11738 } 11739 11740 if (ipw_up(priv)) { 11741 mutex_unlock(&priv->mutex); 11742 err = -EIO; 11743 goto out_remove_sysfs; 11744 } 11745 11746 mutex_unlock(&priv->mutex); 11747 11748 err = ipw_wdev_init(net_dev); 11749 if (err) { 11750 IPW_ERROR("failed to register wireless device\n"); 11751 goto out_remove_sysfs; 11752 } 11753 11754 err = register_netdev(net_dev); 11755 if (err) { 11756 IPW_ERROR("failed to register network device\n"); 11757 goto out_unregister_wiphy; 11758 } 11759 11760 #ifdef CONFIG_IPW2200_PROMISCUOUS 11761 if (rtap_iface) { 11762 err = ipw_prom_alloc(priv); 11763 if (err) { 11764 IPW_ERROR("Failed to register promiscuous network " 11765 "device (error %d).\n", err); 11766 unregister_netdev(priv->net_dev); 11767 goto out_unregister_wiphy; 11768 } 11769 } 11770 #endif 11771 11772 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg " 11773 "channels, %d 802.11a channels)\n", 11774 priv->ieee->geo.name, priv->ieee->geo.bg_channels, 11775 priv->ieee->geo.a_channels); 11776 11777 return 0; 11778 11779 out_unregister_wiphy: 11780 
	wiphy_unregister(priv->ieee->wdev.wiphy);
	kfree(priv->ieee->a_band.channels);
	kfree(priv->ieee->bg_band.channels);
out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
out_release_irq:
	free_irq(pdev->irq, priv);
out_iounmap:
	iounmap(priv->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_free_libipw:
	free_libipw(priv->net_dev, 0);
out:
	return err;
}

static void ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	if (!priv)
		return;

	mutex_lock(&priv->mutex);

	priv->status |= STATUS_EXIT_PENDING;
	ipw_down(priv);
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	mutex_unlock(&priv->mutex);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	if (priv->cmdlog) {
		kfree(priv->cmdlog);
		priv->cmdlog = NULL;
	}

	/* make sure all works are inactive */
	cancel_delayed_work_sync(&priv->adhoc_check);
	cancel_work_sync(&priv->associate);
	cancel_work_sync(&priv->disassociate);
	cancel_work_sync(&priv->system_config);
	cancel_work_sync(&priv->rx_replenish);
	cancel_work_sync(&priv->adapter_restart);
	cancel_delayed_work_sync(&priv->rf_kill);
	cancel_work_sync(&priv->up);
	cancel_work_sync(&priv->down);
	cancel_delayed_work_sync(&priv->request_scan);
	cancel_delayed_work_sync(&priv->request_direct_scan);
	cancel_delayed_work_sync(&priv->request_passive_scan);
	cancel_delayed_work_sync(&priv->scan_event);
	cancel_delayed_work_sync(&priv->gather_stats);
	cancel_work_sync(&priv->abort_scan);
	cancel_work_sync(&priv->roam);
	cancel_delayed_work_sync(&priv->scan_check);
	cancel_work_sync(&priv->link_up);
	cancel_work_sync(&priv->link_down);
	cancel_delayed_work_sync(&priv->led_link_on);
	cancel_delayed_work_sync(&priv->led_link_off);
	cancel_delayed_work_sync(&priv->led_act_off);
	cancel_work_sync(&priv->merge_networks);

	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ipw_ibss_seq, list));
		}
	}

	kfree(priv->error);
	priv->error = NULL;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	ipw_prom_free(priv);
#endif

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* wiphy_unregister needs to be here, before free_libipw */
	wiphy_unregister(priv->ieee->wdev.wiphy);
	kfree(priv->ieee->a_band.channels);
	kfree(priv->ieee->bg_band.channels);
	free_libipw(priv->net_dev, 0);
	free_firmware();
}

#ifdef CONFIG_PM
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	priv->suspend_at = get_seconds();

	return 0;
}

static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	int err;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	priv->suspend_time = get_seconds() - priv->suspend_at;

	/* Bring the device back up */
	schedule_work(&priv->up);

	return 0;
}
#endif

static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	pci_disable_device(pdev);
}

/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = ipw_pci_remove,
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
	.shutdown = ipw_pci_shutdown,
};

static int __init ipw_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_register_driver(&ipw_driver);
	if (ret) {
		IPW_ERROR("Unable to initialize PCI module\n");
		return ret;
	}

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
	if (ret) {
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
		return ret;
	}

	return ret;
}

static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}

module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param_named(led, led_support, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param_named(channel, default_channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

#ifdef CONFIG_IPW2200_PROMISCUOUS
module_param(rtap_iface, int, 0444);
MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
#endif

#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");

module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");

module_param(qos_no_ack_mask, int, 0444);
MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");

module_param(burst_duration_CCK, int, 0444);
MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");

module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
#endif				/* CONFIG_IPW2200_QOS */

#ifdef CONFIG_IPW2200_MONITOR
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_param(bt_coexist, int, 0444);
MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");

module_param(hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");

module_param(cmdlog, int, 0444);
MODULE_PARM_DESC(cmdlog,
		 "allocate a ring buffer for logging firmware commands");
module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");

module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna (0=both [default], 1=Main, 2=slow_diversity [choose the one with lower background noise], 3=Aux)");

module_exit(ipw_exit);
module_init(ipw_init);
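
/*
 * Illustrative usage sketch, not part of the driver proper: the knobs
 * documented above are ordinary module parameters, read once at load time
 * (permissions are 0444, so they are read-only via sysfs afterwards).
 * A hypothetical invocation, with arbitrary example values, could look like:
 *
 *	modprobe ipw2200 mode=0 channel=6 led=1 hwcrypto=0 debug=0
 *
 * Changing a value therefore requires unloading and reloading the module.
 */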