// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************

  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.

  802.11 status code portion of this file from ethereal-0.10.6:
    Copyright 2000, Axis Communications AB
    Ethereal - Network traffic analyzer
    By Gerald Combs <gerald@ethereal.com>
    Copyright 1998 Gerald Combs


  Contact Information:
  Intel Linux Wireless <ilw@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************/

#include <linux/sched.h>
#include <linux/slab.h>
#include <net/cfg80211-wext.h>
#include "ipw2200.h"
#include "ipw.h"


#ifndef KBUILD_EXTMOD
#define VK "k"
#else
#define VK
#endif

#ifdef CONFIG_IPW2200_DEBUG
#define VD "d"
#else
#define VD
#endif

#ifdef CONFIG_IPW2200_MONITOR
#define VM "m"
#else
#define VM
#endif

#ifdef CONFIG_IPW2200_PROMISCUOUS
#define VP "p"
#else
#define VP
#endif

#ifdef CONFIG_IPW2200_RADIOTAP
#define VR "r"
#else
#define VR
#endif

#ifdef CONFIG_IPW2200_QOS
#define VQ "q"
#else
#define VQ
#endif

#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT   "Copyright(c) 2003-2006 Intel Corporation"
#define DRV_VERSION     IPW2200_VERSION

#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("ipw2200-ibss.fw");
#ifdef CONFIG_IPW2200_MONITOR
MODULE_FIRMWARE("ipw2200-sniffer.fw");
#endif
MODULE_FIRMWARE("ipw2200-bss.fw");

static int cmdlog = 0;
static int debug = 0;
static int default_channel = 0;
static int network_mode = 0;

static u32 ipw_debug_level;
static int associate;
static int auto_create = 1;
static int led_support = 1;
static int disable = 0;
static int bt_coexist = 0;
static int hwcrypto = 0;
static int roaming = 1;
static const char ipw_modes[] = {
        'a', 'b', 'g', '?'
};
static int antenna = CFG_SYS_ANTENNA_BOTH;

#ifdef CONFIG_IPW2200_PROMISCUOUS
static int rtap_iface = 0;      /* def: 0 -- do not create rtap interface */
#endif

static struct ieee80211_rate ipw2200_rates[] = {
        { .bitrate = 10 },
        { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
        { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
        { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
        { .bitrate = 60 },
        { .bitrate = 90 },
        { .bitrate = 120 },
        { .bitrate = 180 },
        { .bitrate = 240 },
        { .bitrate = 360 },
        { .bitrate = 480 },
        { .bitrate = 540 }
};

#define ipw2200_a_rates         (ipw2200_rates + 4)
#define ipw2200_num_a_rates     8
#define ipw2200_bg_rates        (ipw2200_rates + 0)
#define ipw2200_num_bg_rates    12

/* Ugly macro to convert literal channel numbers into their MHz equivalents.
 * There are certainly some conditions that will break this (like feeding it
 * '30'), but they shouldn't arise since nothing talks on channel 30. */
#define ieee80211chan2mhz(x) \
        (((x) <= 14) ? \
        (((x) == 14) ?
2484 : ((x) * 5) + 2407) : \ 127 ((x) + 1000) * 5) 128 129 #ifdef CONFIG_IPW2200_QOS 130 static int qos_enable = 0; 131 static int qos_burst_enable = 0; 132 static int qos_no_ack_mask = 0; 133 static int burst_duration_CCK = 0; 134 static int burst_duration_OFDM = 0; 135 136 static struct libipw_qos_parameters def_qos_parameters_OFDM = { 137 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM, 138 QOS_TX3_CW_MIN_OFDM}, 139 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM, 140 QOS_TX3_CW_MAX_OFDM}, 141 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS}, 142 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM}, 143 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM, 144 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM} 145 }; 146 147 static struct libipw_qos_parameters def_qos_parameters_CCK = { 148 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK, 149 QOS_TX3_CW_MIN_CCK}, 150 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK, 151 QOS_TX3_CW_MAX_CCK}, 152 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS}, 153 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM}, 154 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK, 155 QOS_TX3_TXOP_LIMIT_CCK} 156 }; 157 158 static struct libipw_qos_parameters def_parameters_OFDM = { 159 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM, 160 DEF_TX3_CW_MIN_OFDM}, 161 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM, 162 DEF_TX3_CW_MAX_OFDM}, 163 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS}, 164 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM}, 165 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM, 166 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM} 167 }; 168 169 static struct libipw_qos_parameters def_parameters_CCK = { 170 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK, 171 DEF_TX3_CW_MIN_CCK}, 172 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK, 173 DEF_TX3_CW_MAX_CCK}, 174 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS}, 175 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM}, 176 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK, 177 DEF_TX3_TXOP_LIMIT_CCK} 178 }; 179 180 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; 181 182 static int from_priority_to_tx_queue[] = { 183 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1, 184 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4 185 }; 186 187 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv); 188 189 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters 190 *qos_param); 191 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element 192 *qos_param); 193 #endif /* CONFIG_IPW2200_QOS */ 194 195 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev); 196 static void ipw_remove_current_network(struct ipw_priv *priv); 197 static void ipw_rx(struct ipw_priv *priv); 198 static int ipw_queue_tx_reclaim(struct ipw_priv *priv, 199 struct clx2_tx_queue *txq, int qindex); 200 static int ipw_queue_reset(struct ipw_priv *priv); 201 202 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, 203 int len, int sync); 204 205 static void ipw_tx_queue_free(struct ipw_priv *); 206 207 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *); 208 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); 209 static void ipw_rx_queue_replenish(void *); 210 static int 
ipw_up(struct ipw_priv *); 211 static void ipw_bg_up(struct work_struct *work); 212 static void ipw_down(struct ipw_priv *); 213 static void ipw_bg_down(struct work_struct *work); 214 static int ipw_config(struct ipw_priv *); 215 static int init_supported_rates(struct ipw_priv *priv, 216 struct ipw_supported_rates *prates); 217 static void ipw_set_hwcrypto_keys(struct ipw_priv *); 218 static void ipw_send_wep_keys(struct ipw_priv *, int); 219 220 static int snprint_line(char *buf, size_t count, 221 const u8 * data, u32 len, u32 ofs) 222 { 223 int out, i, j, l; 224 char c; 225 226 out = scnprintf(buf, count, "%08X", ofs); 227 228 for (l = 0, i = 0; i < 2; i++) { 229 out += scnprintf(buf + out, count - out, " "); 230 for (j = 0; j < 8 && l < len; j++, l++) 231 out += scnprintf(buf + out, count - out, "%02X ", 232 data[(i * 8 + j)]); 233 for (; j < 8; j++) 234 out += scnprintf(buf + out, count - out, " "); 235 } 236 237 out += scnprintf(buf + out, count - out, " "); 238 for (l = 0, i = 0; i < 2; i++) { 239 out += scnprintf(buf + out, count - out, " "); 240 for (j = 0; j < 8 && l < len; j++, l++) { 241 c = data[(i * 8 + j)]; 242 if (!isascii(c) || !isprint(c)) 243 c = '.'; 244 245 out += scnprintf(buf + out, count - out, "%c", c); 246 } 247 248 for (; j < 8; j++) 249 out += scnprintf(buf + out, count - out, " "); 250 } 251 252 return out; 253 } 254 255 static void printk_buf(int level, const u8 * data, u32 len) 256 { 257 char line[81]; 258 u32 ofs = 0; 259 if (!(ipw_debug_level & level)) 260 return; 261 262 while (len) { 263 snprint_line(line, sizeof(line), &data[ofs], 264 min(len, 16U), ofs); 265 printk(KERN_DEBUG "%s\n", line); 266 ofs += 16; 267 len -= min(len, 16U); 268 } 269 } 270 271 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len) 272 { 273 size_t out = size; 274 u32 ofs = 0; 275 int total = 0; 276 277 while (size && len) { 278 out = snprint_line(output, size, &data[ofs], 279 min_t(size_t, len, 16U), ofs); 280 281 ofs += 16; 282 output += out; 283 size -= out; 284 len -= min_t(size_t, len, 16U); 285 total += out; 286 } 287 return total; 288 } 289 290 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ 291 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg); 292 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b) 293 294 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ 295 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg); 296 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b) 297 298 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 299 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); 300 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) 301 { 302 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__, 303 __LINE__, (u32) (b), (u32) (c)); 304 _ipw_write_reg8(a, b, c); 305 } 306 307 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 308 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); 309 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) 310 { 311 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__, 312 __LINE__, (u32) (b), (u32) (c)); 313 _ipw_write_reg16(a, b, c); 314 } 315 316 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 317 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); 318 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) 319 { 320 IPW_DEBUG_IO("%s %d: 
write_indirect32(0x%08X, 0x%08X)\n", __FILE__, 321 __LINE__, (u32) (b), (u32) (c)); 322 _ipw_write_reg32(a, b, c); 323 } 324 325 /* 8-bit direct write (low 4K) */ 326 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs, 327 u8 val) 328 { 329 writeb(val, ipw->hw_base + ofs); 330 } 331 332 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 333 #define ipw_write8(ipw, ofs, val) do { \ 334 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \ 335 __LINE__, (u32)(ofs), (u32)(val)); \ 336 _ipw_write8(ipw, ofs, val); \ 337 } while (0) 338 339 /* 16-bit direct write (low 4K) */ 340 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs, 341 u16 val) 342 { 343 writew(val, ipw->hw_base + ofs); 344 } 345 346 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 347 #define ipw_write16(ipw, ofs, val) do { \ 348 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \ 349 __LINE__, (u32)(ofs), (u32)(val)); \ 350 _ipw_write16(ipw, ofs, val); \ 351 } while (0) 352 353 /* 32-bit direct write (low 4K) */ 354 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs, 355 u32 val) 356 { 357 writel(val, ipw->hw_base + ofs); 358 } 359 360 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 361 #define ipw_write32(ipw, ofs, val) do { \ 362 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \ 363 __LINE__, (u32)(ofs), (u32)(val)); \ 364 _ipw_write32(ipw, ofs, val); \ 365 } while (0) 366 367 /* 8-bit direct read (low 4K) */ 368 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs) 369 { 370 return readb(ipw->hw_base + ofs); 371 } 372 373 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 374 #define ipw_read8(ipw, ofs) ({ \ 375 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \ 376 (u32)(ofs)); \ 377 _ipw_read8(ipw, ofs); \ 378 }) 379 380 /* 16-bit direct read (low 4K) */ 381 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs) 382 { 383 return readw(ipw->hw_base + ofs); 384 } 385 386 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 387 #define ipw_read16(ipw, ofs) ({ \ 388 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \ 389 (u32)(ofs)); \ 390 _ipw_read16(ipw, ofs); \ 391 }) 392 393 /* 32-bit direct read (low 4K) */ 394 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs) 395 { 396 return readl(ipw->hw_base + ofs); 397 } 398 399 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 400 #define ipw_read32(ipw, ofs) ({ \ 401 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \ 402 (u32)(ofs)); \ 403 _ipw_read32(ipw, ofs); \ 404 }) 405 406 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); 407 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ 408 #define ipw_read_indirect(a, b, c, d) ({ \ 409 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \ 410 __LINE__, (u32)(b), (u32)(d)); \ 411 _ipw_read_indirect(a, b, c, d); \ 412 }) 413 414 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ 415 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data, 416 int num); 417 #define ipw_write_indirect(a, b, c, d) do { \ 418 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \ 419 __LINE__, (u32)(b), (u32)(d)); \ 420 _ipw_write_indirect(a, b, c, d); \ 421 } while (0) 422 423 /* 32-bit indirect write (above 
4K) */ 424 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) 425 { 426 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value); 427 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); 428 _ipw_write32(priv, IPW_INDIRECT_DATA, value); 429 } 430 431 /* 8-bit indirect write (above 4K) */ 432 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) 433 { 434 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ 435 u32 dif_len = reg - aligned_addr; 436 437 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 438 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 439 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value); 440 } 441 442 /* 16-bit indirect write (above 4K) */ 443 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value) 444 { 445 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ 446 u32 dif_len = (reg - aligned_addr) & (~0x1ul); 447 448 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 449 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 450 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value); 451 } 452 453 /* 8-bit indirect read (above 4K) */ 454 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) 455 { 456 u32 word; 457 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); 458 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg); 459 word = _ipw_read32(priv, IPW_INDIRECT_DATA); 460 return (word >> ((reg & 0x3) * 8)) & 0xff; 461 } 462 463 /* 32-bit indirect read (above 4K) */ 464 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) 465 { 466 u32 value; 467 468 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg); 469 470 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); 471 value = _ipw_read32(priv, IPW_INDIRECT_DATA); 472 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value); 473 return value; 474 } 475 476 /* General purpose, no alignment requirement, iterative (multi-byte) read, */ 477 /* for area above 1st 4K of SRAM/reg space */ 478 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, 479 int num) 480 { 481 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ 482 u32 dif_len = addr - aligned_addr; 483 u32 i; 484 485 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num); 486 487 if (num <= 0) { 488 return; 489 } 490 491 /* Read the first dword (or portion) byte by byte */ 492 if (unlikely(dif_len)) { 493 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 494 /* Start reading at aligned_addr + dif_len */ 495 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--) 496 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i); 497 aligned_addr += 4; 498 } 499 500 /* Read all of the middle dwords as dwords, with auto-increment */ 501 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); 502 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) 503 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA); 504 505 /* Read the last dword (or portion) byte by byte */ 506 if (unlikely(num)) { 507 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 508 for (i = 0; num > 0; i++, num--) 509 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i); 510 } 511 } 512 513 /* General purpose, no alignment requirement, iterative (multi-byte) write, */ 514 /* for area above 1st 4K of SRAM/reg space */ 515 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, 516 int num) 517 { 518 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ 519 u32 dif_len = addr - aligned_addr; 520 u32 i; 521 522 IPW_DEBUG_IO("addr = %i, buf = %p, 
num = %i\n", addr, buf, num); 523 524 if (num <= 0) { 525 return; 526 } 527 528 /* Write the first dword (or portion) byte by byte */ 529 if (unlikely(dif_len)) { 530 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 531 /* Start writing at aligned_addr + dif_len */ 532 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++) 533 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); 534 aligned_addr += 4; 535 } 536 537 /* Write all of the middle dwords as dwords, with auto-increment */ 538 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); 539 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) 540 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf); 541 542 /* Write the last dword (or portion) byte by byte */ 543 if (unlikely(num)) { 544 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 545 for (i = 0; num > 0; i++, num--, buf++) 546 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); 547 } 548 } 549 550 /* General purpose, no alignment requirement, iterative (multi-byte) write, */ 551 /* for 1st 4K of SRAM/regs space */ 552 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf, 553 int num) 554 { 555 memcpy_toio((priv->hw_base + addr), buf, num); 556 } 557 558 /* Set bit(s) in low 4K of SRAM/regs */ 559 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask) 560 { 561 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask); 562 } 563 564 /* Clear bit(s) in low 4K of SRAM/regs */ 565 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask) 566 { 567 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask); 568 } 569 570 static inline void __ipw_enable_interrupts(struct ipw_priv *priv) 571 { 572 if (priv->status & STATUS_INT_ENABLED) 573 return; 574 priv->status |= STATUS_INT_ENABLED; 575 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL); 576 } 577 578 static inline void __ipw_disable_interrupts(struct ipw_priv *priv) 579 { 580 if (!(priv->status & STATUS_INT_ENABLED)) 581 return; 582 priv->status &= ~STATUS_INT_ENABLED; 583 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 584 } 585 586 static inline void ipw_enable_interrupts(struct ipw_priv *priv) 587 { 588 unsigned long flags; 589 590 spin_lock_irqsave(&priv->irq_lock, flags); 591 __ipw_enable_interrupts(priv); 592 spin_unlock_irqrestore(&priv->irq_lock, flags); 593 } 594 595 static inline void ipw_disable_interrupts(struct ipw_priv *priv) 596 { 597 unsigned long flags; 598 599 spin_lock_irqsave(&priv->irq_lock, flags); 600 __ipw_disable_interrupts(priv); 601 spin_unlock_irqrestore(&priv->irq_lock, flags); 602 } 603 604 static char *ipw_error_desc(u32 val) 605 { 606 switch (val) { 607 case IPW_FW_ERROR_OK: 608 return "ERROR_OK"; 609 case IPW_FW_ERROR_FAIL: 610 return "ERROR_FAIL"; 611 case IPW_FW_ERROR_MEMORY_UNDERFLOW: 612 return "MEMORY_UNDERFLOW"; 613 case IPW_FW_ERROR_MEMORY_OVERFLOW: 614 return "MEMORY_OVERFLOW"; 615 case IPW_FW_ERROR_BAD_PARAM: 616 return "BAD_PARAM"; 617 case IPW_FW_ERROR_BAD_CHECKSUM: 618 return "BAD_CHECKSUM"; 619 case IPW_FW_ERROR_NMI_INTERRUPT: 620 return "NMI_INTERRUPT"; 621 case IPW_FW_ERROR_BAD_DATABASE: 622 return "BAD_DATABASE"; 623 case IPW_FW_ERROR_ALLOC_FAIL: 624 return "ALLOC_FAIL"; 625 case IPW_FW_ERROR_DMA_UNDERRUN: 626 return "DMA_UNDERRUN"; 627 case IPW_FW_ERROR_DMA_STATUS: 628 return "DMA_STATUS"; 629 case IPW_FW_ERROR_DINO_ERROR: 630 return "DINO_ERROR"; 631 case IPW_FW_ERROR_EEPROM_ERROR: 632 return "EEPROM_ERROR"; 633 case IPW_FW_ERROR_SYSASSERT: 634 return "SYSASSERT"; 635 case IPW_FW_ERROR_FATAL_ERROR: 636 return "FATAL_ERROR"; 
        default:
                return "UNKNOWN_ERROR";
        }
}

static void ipw_dump_error_log(struct ipw_priv *priv,
                               struct ipw_fw_error *error)
{
        u32 i;

        if (!error) {
                IPW_ERROR("Error allocating and capturing error log. "
                          "Nothing to dump.\n");
                return;
        }

        IPW_ERROR("Start IPW Error Log Dump:\n");
        IPW_ERROR("Status: 0x%08X, Config: %08X\n",
                  error->status, error->config);

        for (i = 0; i < error->elem_len; i++)
                IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
                          ipw_error_desc(error->elem[i].desc),
                          error->elem[i].time,
                          error->elem[i].blink1,
                          error->elem[i].blink2,
                          error->elem[i].link1,
                          error->elem[i].link2, error->elem[i].data);
        for (i = 0; i < error->log_len; i++)
                IPW_ERROR("%i\t0x%08x\t%i\n",
                          error->log[i].time,
                          error->log[i].data, error->log[i].event);
}

static inline int ipw_is_init(struct ipw_priv *priv)
{
        return (priv->status & STATUS_INIT) ? 1 : 0;
}

static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
{
        u32 addr, field_info, field_len, field_count, total_len;

        IPW_DEBUG_ORD("ordinal = %i\n", ord);

        if (!priv || !val || !len) {
                IPW_DEBUG_ORD("Invalid argument\n");
                return -EINVAL;
        }

        /* verify device ordinal tables have been initialized */
        if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
                IPW_DEBUG_ORD("Access ordinals before initialization\n");
                return -EINVAL;
        }

        switch (IPW_ORD_TABLE_ID_MASK & ord) {
        case IPW_ORD_TABLE_0_MASK:
                /*
                 * TABLE 0: Direct access to a table of 32 bit values
                 *
                 * This is a very simple table with the data directly
                 * read from the table
                 */

                /* remove the table id from the ordinal */
                ord &= IPW_ORD_TABLE_VALUE_MASK;

                /* boundary check */
                if (ord > priv->table0_len) {
                        IPW_DEBUG_ORD("ordinal value (%i) longer than "
                                      "max (%i)\n", ord, priv->table0_len);
                        return -EINVAL;
                }

                /* verify we have enough room to store the value */
                if (*len < sizeof(u32)) {
                        IPW_DEBUG_ORD("ordinal buffer length too small, "
                                      "need %zd\n", sizeof(u32));
                        return -EINVAL;
                }

                IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
                              ord, priv->table0_addr + (ord << 2));

                *len = sizeof(u32);
                ord <<= 2;
                *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
                break;

        case IPW_ORD_TABLE_1_MASK:
                /*
                 * TABLE 1: Indirect access to a table of 32 bit values
                 *
                 * This is a fairly large table of u32 values each
                 * representing starting addr for the data (which is
                 * also a u32)
                 */

                /* remove the table id from the ordinal */
                ord &= IPW_ORD_TABLE_VALUE_MASK;

                /* boundary check */
                if (ord > priv->table1_len) {
                        IPW_DEBUG_ORD("ordinal value too long\n");
                        return -EINVAL;
                }

                /* verify we have enough room to store the value */
                if (*len < sizeof(u32)) {
                        IPW_DEBUG_ORD("ordinal buffer length too small, "
                                      "need %zd\n", sizeof(u32));
                        return -EINVAL;
                }

                *((u32 *) val) =
                    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
                *len = sizeof(u32);
                break;

        case IPW_ORD_TABLE_2_MASK:
                /*
                 * TABLE 2: Indirect access to a table of variable sized values
                 *
                 * This table consists of six values, each containing
                 *     - dword containing the starting offset of the data
                 *     - dword containing the length in the first 16bits
                 *       and the
count in the second 16bits 765 */ 766 767 /* remove the table id from the ordinal */ 768 ord &= IPW_ORD_TABLE_VALUE_MASK; 769 770 /* boundary check */ 771 if (ord > priv->table2_len) { 772 IPW_DEBUG_ORD("ordinal value too long\n"); 773 return -EINVAL; 774 } 775 776 /* get the address of statistic */ 777 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3)); 778 779 /* get the second DW of statistics ; 780 * two 16-bit words - first is length, second is count */ 781 field_info = 782 ipw_read_reg32(priv, 783 priv->table2_addr + (ord << 3) + 784 sizeof(u32)); 785 786 /* get each entry length */ 787 field_len = *((u16 *) & field_info); 788 789 /* get number of entries */ 790 field_count = *(((u16 *) & field_info) + 1); 791 792 /* abort if not enough memory */ 793 total_len = field_len * field_count; 794 if (total_len > *len) { 795 *len = total_len; 796 return -EINVAL; 797 } 798 799 *len = total_len; 800 if (!total_len) 801 return 0; 802 803 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, " 804 "field_info = 0x%08x\n", 805 addr, total_len, field_info); 806 ipw_read_indirect(priv, addr, val, total_len); 807 break; 808 809 default: 810 IPW_DEBUG_ORD("Invalid ordinal!\n"); 811 return -EINVAL; 812 813 } 814 815 return 0; 816 } 817 818 static void ipw_init_ordinals(struct ipw_priv *priv) 819 { 820 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER; 821 priv->table0_len = ipw_read32(priv, priv->table0_addr); 822 823 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n", 824 priv->table0_addr, priv->table0_len); 825 826 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1); 827 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr); 828 829 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n", 830 priv->table1_addr, priv->table1_len); 831 832 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2); 833 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr); 834 priv->table2_len &= 0x0000ffff; /* use first two bytes */ 835 836 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n", 837 priv->table2_addr, priv->table2_len); 838 839 } 840 841 static u32 ipw_register_toggle(u32 reg) 842 { 843 reg &= ~IPW_START_STANDBY; 844 if (reg & IPW_GATE_ODMA) 845 reg &= ~IPW_GATE_ODMA; 846 if (reg & IPW_GATE_IDMA) 847 reg &= ~IPW_GATE_IDMA; 848 if (reg & IPW_GATE_ADMA) 849 reg &= ~IPW_GATE_ADMA; 850 return reg; 851 } 852 853 /* 854 * LED behavior: 855 * - On radio ON, turn on any LEDs that require to be on during start 856 * - On initialization, start unassociated blink 857 * - On association, disable unassociated blink 858 * - On disassociation, start unassociated blink 859 * - On radio OFF, turn off any LEDs started during radio on 860 * 861 */ 862 #define LD_TIME_LINK_ON msecs_to_jiffies(300) 863 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700) 864 #define LD_TIME_ACT_ON msecs_to_jiffies(250) 865 866 static void ipw_led_link_on(struct ipw_priv *priv) 867 { 868 unsigned long flags; 869 u32 led; 870 871 /* If configured to not use LEDs, or nic_type is 1, 872 * then we don't toggle a LINK led */ 873 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1) 874 return; 875 876 spin_lock_irqsave(&priv->lock, flags); 877 878 if (!(priv->status & STATUS_RF_KILL_MASK) && 879 !(priv->status & STATUS_LED_LINK_ON)) { 880 IPW_DEBUG_LED("Link LED On\n"); 881 led = ipw_read_reg32(priv, IPW_EVENT_REG); 882 led |= priv->led_association_on; 883 884 led = ipw_register_toggle(led); 885 886 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 887 ipw_write_reg32(priv, IPW_EVENT_REG, led); 888 889 priv->status |= 
STATUS_LED_LINK_ON;

                /* If we aren't associated, schedule turning the LED off */
                if (!(priv->status & STATUS_ASSOCIATED))
                        schedule_delayed_work(&priv->led_link_off,
                                              LD_TIME_LINK_ON);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipw_bg_led_link_on(struct work_struct *work)
{
        struct ipw_priv *priv =
                container_of(work, struct ipw_priv, led_link_on.work);
        mutex_lock(&priv->mutex);
        ipw_led_link_on(priv);
        mutex_unlock(&priv->mutex);
}

static void ipw_led_link_off(struct ipw_priv *priv)
{
        unsigned long flags;
        u32 led;

        /* If configured not to use LEDs, or nic type is 1,
         * then we don't toggle the LINK led. */
        if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
                return;

        spin_lock_irqsave(&priv->lock, flags);

        if (priv->status & STATUS_LED_LINK_ON) {
                led = ipw_read_reg32(priv, IPW_EVENT_REG);
                led &= priv->led_association_off;
                led = ipw_register_toggle(led);

                IPW_DEBUG_LED("Reg: 0x%08X\n", led);
                ipw_write_reg32(priv, IPW_EVENT_REG, led);

                IPW_DEBUG_LED("Link LED Off\n");

                priv->status &= ~STATUS_LED_LINK_ON;

                /* If we aren't associated and the radio is on, schedule
                 * turning the LED on (blink while unassociated) */
                if (!(priv->status & STATUS_RF_KILL_MASK) &&
                    !(priv->status & STATUS_ASSOCIATED))
                        schedule_delayed_work(&priv->led_link_on,
                                              LD_TIME_LINK_OFF);

        }

        spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipw_bg_led_link_off(struct work_struct *work)
{
        struct ipw_priv *priv =
                container_of(work, struct ipw_priv, led_link_off.work);
        mutex_lock(&priv->mutex);
        ipw_led_link_off(priv);
        mutex_unlock(&priv->mutex);
}

static void __ipw_led_activity_on(struct ipw_priv *priv)
{
        u32 led;

        if (priv->config & CFG_NO_LED)
                return;

        if (priv->status & STATUS_RF_KILL_MASK)
                return;

        if (!(priv->status & STATUS_LED_ACT_ON)) {
                led = ipw_read_reg32(priv, IPW_EVENT_REG);
                led |= priv->led_activity_on;

                led = ipw_register_toggle(led);

                IPW_DEBUG_LED("Reg: 0x%08X\n", led);
                ipw_write_reg32(priv, IPW_EVENT_REG, led);

                IPW_DEBUG_LED("Activity LED On\n");

                priv->status |= STATUS_LED_ACT_ON;

                cancel_delayed_work(&priv->led_act_off);
                schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
        } else {
                /* Reschedule LED off for full time period */
                cancel_delayed_work(&priv->led_act_off);
                schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
        }
}

#if 0
void ipw_led_activity_on(struct ipw_priv *priv)
{
        unsigned long flags;
        spin_lock_irqsave(&priv->lock, flags);
        __ipw_led_activity_on(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
}
#endif  /*  0  */

static void ipw_led_activity_off(struct ipw_priv *priv)
{
        unsigned long flags;
        u32 led;

        if (priv->config & CFG_NO_LED)
                return;

        spin_lock_irqsave(&priv->lock, flags);

        if (priv->status & STATUS_LED_ACT_ON) {
                led = ipw_read_reg32(priv, IPW_EVENT_REG);
                led &= priv->led_activity_off;

                led = ipw_register_toggle(led);

                IPW_DEBUG_LED("Reg: 0x%08X\n", led);
                ipw_write_reg32(priv, IPW_EVENT_REG, led);

                IPW_DEBUG_LED("Activity LED Off\n");

                priv->status &= ~STATUS_LED_ACT_ON;
        }

        spin_unlock_irqrestore(&priv->lock, flags);
}

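/*
 * Deferred-work wrappers: the ipw_bg_*() handlers in this file (including
 * ipw_bg_led_activity_off() below) all follow the same pattern -- recover
 * the ipw_priv from the embedded work_struct with container_of(), take
 * priv->mutex, and then call the plain ipw_*() helper that does the work.
 */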
static void ipw_bg_led_activity_off(struct work_struct *work) 1024 { 1025 struct ipw_priv *priv = 1026 container_of(work, struct ipw_priv, led_act_off.work); 1027 mutex_lock(&priv->mutex); 1028 ipw_led_activity_off(priv); 1029 mutex_unlock(&priv->mutex); 1030 } 1031 1032 static void ipw_led_band_on(struct ipw_priv *priv) 1033 { 1034 unsigned long flags; 1035 u32 led; 1036 1037 /* Only nic type 1 supports mode LEDs */ 1038 if (priv->config & CFG_NO_LED || 1039 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network) 1040 return; 1041 1042 spin_lock_irqsave(&priv->lock, flags); 1043 1044 led = ipw_read_reg32(priv, IPW_EVENT_REG); 1045 if (priv->assoc_network->mode == IEEE_A) { 1046 led |= priv->led_ofdm_on; 1047 led &= priv->led_association_off; 1048 IPW_DEBUG_LED("Mode LED On: 802.11a\n"); 1049 } else if (priv->assoc_network->mode == IEEE_G) { 1050 led |= priv->led_ofdm_on; 1051 led |= priv->led_association_on; 1052 IPW_DEBUG_LED("Mode LED On: 802.11g\n"); 1053 } else { 1054 led &= priv->led_ofdm_off; 1055 led |= priv->led_association_on; 1056 IPW_DEBUG_LED("Mode LED On: 802.11b\n"); 1057 } 1058 1059 led = ipw_register_toggle(led); 1060 1061 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 1062 ipw_write_reg32(priv, IPW_EVENT_REG, led); 1063 1064 spin_unlock_irqrestore(&priv->lock, flags); 1065 } 1066 1067 static void ipw_led_band_off(struct ipw_priv *priv) 1068 { 1069 unsigned long flags; 1070 u32 led; 1071 1072 /* Only nic type 1 supports mode LEDs */ 1073 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1) 1074 return; 1075 1076 spin_lock_irqsave(&priv->lock, flags); 1077 1078 led = ipw_read_reg32(priv, IPW_EVENT_REG); 1079 led &= priv->led_ofdm_off; 1080 led &= priv->led_association_off; 1081 1082 led = ipw_register_toggle(led); 1083 1084 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 1085 ipw_write_reg32(priv, IPW_EVENT_REG, led); 1086 1087 spin_unlock_irqrestore(&priv->lock, flags); 1088 } 1089 1090 static void ipw_led_radio_on(struct ipw_priv *priv) 1091 { 1092 ipw_led_link_on(priv); 1093 } 1094 1095 static void ipw_led_radio_off(struct ipw_priv *priv) 1096 { 1097 ipw_led_activity_off(priv); 1098 ipw_led_link_off(priv); 1099 } 1100 1101 static void ipw_led_link_up(struct ipw_priv *priv) 1102 { 1103 /* Set the Link Led on for all nic types */ 1104 ipw_led_link_on(priv); 1105 } 1106 1107 static void ipw_led_link_down(struct ipw_priv *priv) 1108 { 1109 ipw_led_activity_off(priv); 1110 ipw_led_link_off(priv); 1111 1112 if (priv->status & STATUS_RF_KILL_MASK) 1113 ipw_led_radio_off(priv); 1114 } 1115 1116 static void ipw_led_init(struct ipw_priv *priv) 1117 { 1118 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE]; 1119 1120 /* Set the default PINs for the link and activity leds */ 1121 priv->led_activity_on = IPW_ACTIVITY_LED; 1122 priv->led_activity_off = ~(IPW_ACTIVITY_LED); 1123 1124 priv->led_association_on = IPW_ASSOCIATED_LED; 1125 priv->led_association_off = ~(IPW_ASSOCIATED_LED); 1126 1127 /* Set the default PINs for the OFDM leds */ 1128 priv->led_ofdm_on = IPW_OFDM_LED; 1129 priv->led_ofdm_off = ~(IPW_OFDM_LED); 1130 1131 switch (priv->nic_type) { 1132 case EEPROM_NIC_TYPE_1: 1133 /* In this NIC type, the LEDs are reversed.... 
*/ 1134 priv->led_activity_on = IPW_ASSOCIATED_LED; 1135 priv->led_activity_off = ~(IPW_ASSOCIATED_LED); 1136 priv->led_association_on = IPW_ACTIVITY_LED; 1137 priv->led_association_off = ~(IPW_ACTIVITY_LED); 1138 1139 if (!(priv->config & CFG_NO_LED)) 1140 ipw_led_band_on(priv); 1141 1142 /* And we don't blink link LEDs for this nic, so 1143 * just return here */ 1144 return; 1145 1146 case EEPROM_NIC_TYPE_3: 1147 case EEPROM_NIC_TYPE_2: 1148 case EEPROM_NIC_TYPE_4: 1149 case EEPROM_NIC_TYPE_0: 1150 break; 1151 1152 default: 1153 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n", 1154 priv->nic_type); 1155 priv->nic_type = EEPROM_NIC_TYPE_0; 1156 break; 1157 } 1158 1159 if (!(priv->config & CFG_NO_LED)) { 1160 if (priv->status & STATUS_ASSOCIATED) 1161 ipw_led_link_on(priv); 1162 else 1163 ipw_led_link_off(priv); 1164 } 1165 } 1166 1167 static void ipw_led_shutdown(struct ipw_priv *priv) 1168 { 1169 ipw_led_activity_off(priv); 1170 ipw_led_link_off(priv); 1171 ipw_led_band_off(priv); 1172 cancel_delayed_work(&priv->led_link_on); 1173 cancel_delayed_work(&priv->led_link_off); 1174 cancel_delayed_work(&priv->led_act_off); 1175 } 1176 1177 /* 1178 * The following adds a new attribute to the sysfs representation 1179 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/) 1180 * used for controlling the debug level. 1181 * 1182 * See the level definitions in ipw for details. 1183 */ 1184 static ssize_t debug_level_show(struct device_driver *d, char *buf) 1185 { 1186 return sprintf(buf, "0x%08X\n", ipw_debug_level); 1187 } 1188 1189 static ssize_t debug_level_store(struct device_driver *d, const char *buf, 1190 size_t count) 1191 { 1192 char *p = (char *)buf; 1193 u32 val; 1194 1195 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { 1196 p++; 1197 if (p[0] == 'x' || p[0] == 'X') 1198 p++; 1199 val = simple_strtoul(p, &p, 16); 1200 } else 1201 val = simple_strtoul(p, &p, 10); 1202 if (p == buf) 1203 printk(KERN_INFO DRV_NAME 1204 ": %s is not in hex or decimal form.\n", buf); 1205 else 1206 ipw_debug_level = val; 1207 1208 return strnlen(buf, count); 1209 } 1210 static DRIVER_ATTR_RW(debug_level); 1211 1212 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv) 1213 { 1214 /* length = 1st dword in log */ 1215 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG)); 1216 } 1217 1218 static void ipw_capture_event_log(struct ipw_priv *priv, 1219 u32 log_len, struct ipw_event *log) 1220 { 1221 u32 base; 1222 1223 if (log_len) { 1224 base = ipw_read32(priv, IPW_EVENT_LOG); 1225 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32), 1226 (u8 *) log, sizeof(*log) * log_len); 1227 } 1228 } 1229 1230 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv) 1231 { 1232 struct ipw_fw_error *error; 1233 u32 log_len = ipw_get_event_log_len(priv); 1234 u32 base = ipw_read32(priv, IPW_ERROR_LOG); 1235 u32 elem_len = ipw_read_reg32(priv, base); 1236 1237 error = kmalloc(sizeof(*error) + 1238 sizeof(*error->elem) * elem_len + 1239 sizeof(*error->log) * log_len, GFP_ATOMIC); 1240 if (!error) { 1241 IPW_ERROR("Memory allocation for firmware error log " 1242 "failed.\n"); 1243 return NULL; 1244 } 1245 error->jiffies = jiffies; 1246 error->status = priv->status; 1247 error->config = priv->config; 1248 error->elem_len = elem_len; 1249 error->log_len = log_len; 1250 error->elem = (struct ipw_error_elem *)error->payload; 1251 error->log = (struct ipw_event *)(error->elem + elem_len); 1252 1253 ipw_capture_event_log(priv, log_len, error->log); 1254 1255 if 
(elem_len) 1256 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem, 1257 sizeof(*error->elem) * elem_len); 1258 1259 return error; 1260 } 1261 1262 static ssize_t show_event_log(struct device *d, 1263 struct device_attribute *attr, char *buf) 1264 { 1265 struct ipw_priv *priv = dev_get_drvdata(d); 1266 u32 log_len = ipw_get_event_log_len(priv); 1267 u32 log_size; 1268 struct ipw_event *log; 1269 u32 len = 0, i; 1270 1271 /* not using min() because of its strict type checking */ 1272 log_size = PAGE_SIZE / sizeof(*log) > log_len ? 1273 sizeof(*log) * log_len : PAGE_SIZE; 1274 log = kzalloc(log_size, GFP_KERNEL); 1275 if (!log) { 1276 IPW_ERROR("Unable to allocate memory for log\n"); 1277 return 0; 1278 } 1279 log_len = log_size / sizeof(*log); 1280 ipw_capture_event_log(priv, log_len, log); 1281 1282 len += scnprintf(buf + len, PAGE_SIZE - len, "%08X", log_len); 1283 for (i = 0; i < log_len; i++) 1284 len += scnprintf(buf + len, PAGE_SIZE - len, 1285 "\n%08X%08X%08X", 1286 log[i].time, log[i].event, log[i].data); 1287 len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); 1288 kfree(log); 1289 return len; 1290 } 1291 1292 static DEVICE_ATTR(event_log, 0444, show_event_log, NULL); 1293 1294 static ssize_t show_error(struct device *d, 1295 struct device_attribute *attr, char *buf) 1296 { 1297 struct ipw_priv *priv = dev_get_drvdata(d); 1298 u32 len = 0, i; 1299 if (!priv->error) 1300 return 0; 1301 len += scnprintf(buf + len, PAGE_SIZE - len, 1302 "%08lX%08X%08X%08X", 1303 priv->error->jiffies, 1304 priv->error->status, 1305 priv->error->config, priv->error->elem_len); 1306 for (i = 0; i < priv->error->elem_len; i++) 1307 len += scnprintf(buf + len, PAGE_SIZE - len, 1308 "\n%08X%08X%08X%08X%08X%08X%08X", 1309 priv->error->elem[i].time, 1310 priv->error->elem[i].desc, 1311 priv->error->elem[i].blink1, 1312 priv->error->elem[i].blink2, 1313 priv->error->elem[i].link1, 1314 priv->error->elem[i].link2, 1315 priv->error->elem[i].data); 1316 1317 len += scnprintf(buf + len, PAGE_SIZE - len, 1318 "\n%08X", priv->error->log_len); 1319 for (i = 0; i < priv->error->log_len; i++) 1320 len += scnprintf(buf + len, PAGE_SIZE - len, 1321 "\n%08X%08X%08X", 1322 priv->error->log[i].time, 1323 priv->error->log[i].event, 1324 priv->error->log[i].data); 1325 len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); 1326 return len; 1327 } 1328 1329 static ssize_t clear_error(struct device *d, 1330 struct device_attribute *attr, 1331 const char *buf, size_t count) 1332 { 1333 struct ipw_priv *priv = dev_get_drvdata(d); 1334 1335 kfree(priv->error); 1336 priv->error = NULL; 1337 return count; 1338 } 1339 1340 static DEVICE_ATTR(error, 0644, show_error, clear_error); 1341 1342 static ssize_t show_cmd_log(struct device *d, 1343 struct device_attribute *attr, char *buf) 1344 { 1345 struct ipw_priv *priv = dev_get_drvdata(d); 1346 u32 len = 0, i; 1347 if (!priv->cmdlog) 1348 return 0; 1349 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len; 1350 (i != priv->cmdlog_pos) && (len < PAGE_SIZE); 1351 i = (i + 1) % priv->cmdlog_len) { 1352 len += 1353 scnprintf(buf + len, PAGE_SIZE - len, 1354 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies, 1355 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd, 1356 priv->cmdlog[i].cmd.len); 1357 len += 1358 snprintk_buf(buf + len, PAGE_SIZE - len, 1359 (u8 *) priv->cmdlog[i].cmd.param, 1360 priv->cmdlog[i].cmd.len); 1361 len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); 1362 } 1363 len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); 1364 return len; 1365 } 1366 1367 static 
DEVICE_ATTR(cmd_log, 0444, show_cmd_log, NULL); 1368 1369 #ifdef CONFIG_IPW2200_PROMISCUOUS 1370 static void ipw_prom_free(struct ipw_priv *priv); 1371 static int ipw_prom_alloc(struct ipw_priv *priv); 1372 static ssize_t store_rtap_iface(struct device *d, 1373 struct device_attribute *attr, 1374 const char *buf, size_t count) 1375 { 1376 struct ipw_priv *priv = dev_get_drvdata(d); 1377 int rc = 0; 1378 1379 if (count < 1) 1380 return -EINVAL; 1381 1382 switch (buf[0]) { 1383 case '0': 1384 if (!rtap_iface) 1385 return count; 1386 1387 if (netif_running(priv->prom_net_dev)) { 1388 IPW_WARNING("Interface is up. Cannot unregister.\n"); 1389 return count; 1390 } 1391 1392 ipw_prom_free(priv); 1393 rtap_iface = 0; 1394 break; 1395 1396 case '1': 1397 if (rtap_iface) 1398 return count; 1399 1400 rc = ipw_prom_alloc(priv); 1401 if (!rc) 1402 rtap_iface = 1; 1403 break; 1404 1405 default: 1406 return -EINVAL; 1407 } 1408 1409 if (rc) { 1410 IPW_ERROR("Failed to register promiscuous network " 1411 "device (error %d).\n", rc); 1412 } 1413 1414 return count; 1415 } 1416 1417 static ssize_t show_rtap_iface(struct device *d, 1418 struct device_attribute *attr, 1419 char *buf) 1420 { 1421 struct ipw_priv *priv = dev_get_drvdata(d); 1422 if (rtap_iface) 1423 return sprintf(buf, "%s", priv->prom_net_dev->name); 1424 else { 1425 buf[0] = '-'; 1426 buf[1] = '1'; 1427 buf[2] = '\0'; 1428 return 3; 1429 } 1430 } 1431 1432 static DEVICE_ATTR(rtap_iface, 0600, show_rtap_iface, store_rtap_iface); 1433 1434 static ssize_t store_rtap_filter(struct device *d, 1435 struct device_attribute *attr, 1436 const char *buf, size_t count) 1437 { 1438 struct ipw_priv *priv = dev_get_drvdata(d); 1439 1440 if (!priv->prom_priv) { 1441 IPW_ERROR("Attempting to set filter without " 1442 "rtap_iface enabled.\n"); 1443 return -EPERM; 1444 } 1445 1446 priv->prom_priv->filter = simple_strtol(buf, NULL, 0); 1447 1448 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n", 1449 BIT_ARG16(priv->prom_priv->filter)); 1450 1451 return count; 1452 } 1453 1454 static ssize_t show_rtap_filter(struct device *d, 1455 struct device_attribute *attr, 1456 char *buf) 1457 { 1458 struct ipw_priv *priv = dev_get_drvdata(d); 1459 return sprintf(buf, "0x%04X", 1460 priv->prom_priv ? priv->prom_priv->filter : 0); 1461 } 1462 1463 static DEVICE_ATTR(rtap_filter, 0600, show_rtap_filter, store_rtap_filter); 1464 #endif 1465 1466 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, 1467 char *buf) 1468 { 1469 struct ipw_priv *priv = dev_get_drvdata(d); 1470 return sprintf(buf, "%d\n", priv->ieee->scan_age); 1471 } 1472 1473 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, 1474 const char *buf, size_t count) 1475 { 1476 struct ipw_priv *priv = dev_get_drvdata(d); 1477 struct net_device *dev = priv->net_dev; 1478 char buffer[] = "00000000"; 1479 unsigned long len = 1480 (sizeof(buffer) - 1) > count ? 
count : sizeof(buffer) - 1; 1481 unsigned long val; 1482 char *p = buffer; 1483 1484 IPW_DEBUG_INFO("enter\n"); 1485 1486 strncpy(buffer, buf, len); 1487 buffer[len] = 0; 1488 1489 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { 1490 p++; 1491 if (p[0] == 'x' || p[0] == 'X') 1492 p++; 1493 val = simple_strtoul(p, &p, 16); 1494 } else 1495 val = simple_strtoul(p, &p, 10); 1496 if (p == buffer) { 1497 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name); 1498 } else { 1499 priv->ieee->scan_age = val; 1500 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age); 1501 } 1502 1503 IPW_DEBUG_INFO("exit\n"); 1504 return len; 1505 } 1506 1507 static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age); 1508 1509 static ssize_t show_led(struct device *d, struct device_attribute *attr, 1510 char *buf) 1511 { 1512 struct ipw_priv *priv = dev_get_drvdata(d); 1513 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1); 1514 } 1515 1516 static ssize_t store_led(struct device *d, struct device_attribute *attr, 1517 const char *buf, size_t count) 1518 { 1519 struct ipw_priv *priv = dev_get_drvdata(d); 1520 1521 IPW_DEBUG_INFO("enter\n"); 1522 1523 if (count == 0) 1524 return 0; 1525 1526 if (*buf == 0) { 1527 IPW_DEBUG_LED("Disabling LED control.\n"); 1528 priv->config |= CFG_NO_LED; 1529 ipw_led_shutdown(priv); 1530 } else { 1531 IPW_DEBUG_LED("Enabling LED control.\n"); 1532 priv->config &= ~CFG_NO_LED; 1533 ipw_led_init(priv); 1534 } 1535 1536 IPW_DEBUG_INFO("exit\n"); 1537 return count; 1538 } 1539 1540 static DEVICE_ATTR(led, 0644, show_led, store_led); 1541 1542 static ssize_t show_status(struct device *d, 1543 struct device_attribute *attr, char *buf) 1544 { 1545 struct ipw_priv *p = dev_get_drvdata(d); 1546 return sprintf(buf, "0x%08x\n", (int)p->status); 1547 } 1548 1549 static DEVICE_ATTR(status, 0444, show_status, NULL); 1550 1551 static ssize_t show_cfg(struct device *d, struct device_attribute *attr, 1552 char *buf) 1553 { 1554 struct ipw_priv *p = dev_get_drvdata(d); 1555 return sprintf(buf, "0x%08x\n", (int)p->config); 1556 } 1557 1558 static DEVICE_ATTR(cfg, 0444, show_cfg, NULL); 1559 1560 static ssize_t show_nic_type(struct device *d, 1561 struct device_attribute *attr, char *buf) 1562 { 1563 struct ipw_priv *priv = dev_get_drvdata(d); 1564 return sprintf(buf, "TYPE: %d\n", priv->nic_type); 1565 } 1566 1567 static DEVICE_ATTR(nic_type, 0444, show_nic_type, NULL); 1568 1569 static ssize_t show_ucode_version(struct device *d, 1570 struct device_attribute *attr, char *buf) 1571 { 1572 u32 len = sizeof(u32), tmp = 0; 1573 struct ipw_priv *p = dev_get_drvdata(d); 1574 1575 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len)) 1576 return 0; 1577 1578 return sprintf(buf, "0x%08x\n", tmp); 1579 } 1580 1581 static DEVICE_ATTR(ucode_version, 0644, show_ucode_version, NULL); 1582 1583 static ssize_t show_rtc(struct device *d, struct device_attribute *attr, 1584 char *buf) 1585 { 1586 u32 len = sizeof(u32), tmp = 0; 1587 struct ipw_priv *p = dev_get_drvdata(d); 1588 1589 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len)) 1590 return 0; 1591 1592 return sprintf(buf, "0x%08x\n", tmp); 1593 } 1594 1595 static DEVICE_ATTR(rtc, 0644, show_rtc, NULL); 1596 1597 /* 1598 * Add a device attribute to view/control the delay between eeprom 1599 * operations. 
 */
static ssize_t show_eeprom_delay(struct device *d,
                                 struct device_attribute *attr, char *buf)
{
        struct ipw_priv *p = dev_get_drvdata(d);
        int n = p->eeprom_delay;
        return sprintf(buf, "%i\n", n);
}
static ssize_t store_eeprom_delay(struct device *d,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct ipw_priv *p = dev_get_drvdata(d);
        sscanf(buf, "%i", &p->eeprom_delay);
        return strnlen(buf, count);
}

static DEVICE_ATTR(eeprom_delay, 0644, show_eeprom_delay, store_eeprom_delay);

static ssize_t show_command_event_reg(struct device *d,
                                      struct device_attribute *attr, char *buf)
{
        u32 reg = 0;
        struct ipw_priv *p = dev_get_drvdata(d);

        reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
        return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_command_event_reg(struct device *d,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
{
        u32 reg;
        struct ipw_priv *p = dev_get_drvdata(d);

        sscanf(buf, "%x", &reg);
        ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
        return strnlen(buf, count);
}

static DEVICE_ATTR(command_event_reg, 0644,
                   show_command_event_reg, store_command_event_reg);

static ssize_t show_mem_gpio_reg(struct device *d,
                                 struct device_attribute *attr, char *buf)
{
        u32 reg = 0;
        struct ipw_priv *p = dev_get_drvdata(d);

        reg = ipw_read_reg32(p, 0x301100);
        return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_mem_gpio_reg(struct device *d,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        u32 reg;
        struct ipw_priv *p = dev_get_drvdata(d);

        sscanf(buf, "%x", &reg);
        ipw_write_reg32(p, 0x301100, reg);
        return strnlen(buf, count);
}

static DEVICE_ATTR(mem_gpio_reg, 0644, show_mem_gpio_reg, store_mem_gpio_reg);

static ssize_t show_indirect_dword(struct device *d,
                                   struct device_attribute *attr, char *buf)
{
        u32 reg = 0;
        struct ipw_priv *priv = dev_get_drvdata(d);

        if (priv->status & STATUS_INDIRECT_DWORD)
                reg = ipw_read_reg32(priv, priv->indirect_dword);
        else
                reg = 0;

        return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_indirect_dword(struct device *d,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        struct ipw_priv *priv = dev_get_drvdata(d);

        sscanf(buf, "%x", &priv->indirect_dword);
        priv->status |= STATUS_INDIRECT_DWORD;
        return strnlen(buf, count);
}

static DEVICE_ATTR(indirect_dword, 0644,
                   show_indirect_dword, store_indirect_dword);

static ssize_t show_indirect_byte(struct device *d,
                                  struct device_attribute *attr, char *buf)
{
        u8 reg = 0;
        struct ipw_priv *priv = dev_get_drvdata(d);

        if (priv->status & STATUS_INDIRECT_BYTE)
                reg = ipw_read_reg8(priv, priv->indirect_byte);
        else
                reg = 0;

        return sprintf(buf, "0x%02x\n", reg);
}
static ssize_t store_indirect_byte(struct device *d,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct ipw_priv *priv = dev_get_drvdata(d);

        sscanf(buf, "%x", &priv->indirect_byte);
        priv->status |= STATUS_INDIRECT_BYTE;
        return strnlen(buf, count);
}

static DEVICE_ATTR(indirect_byte, 0644,
                   show_indirect_byte, store_indirect_byte);

static ssize_t show_direct_dword(struct device *d,
                                 struct device_attribute *attr, char *buf)
{
        u32 reg = 0;
        struct ipw_priv *priv = dev_get_drvdata(d);

        if (priv->status & STATUS_DIRECT_DWORD)
                reg = ipw_read32(priv, priv->direct_dword);
        else
                reg = 0;

        return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_direct_dword(struct device *d,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct ipw_priv *priv = dev_get_drvdata(d);

        sscanf(buf, "%x", &priv->direct_dword);
        priv->status |= STATUS_DIRECT_DWORD;
        return strnlen(buf, count);
}

static DEVICE_ATTR(direct_dword, 0644, show_direct_dword, store_direct_dword);

static int rf_kill_active(struct ipw_priv *priv)
{
        if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
                priv->status |= STATUS_RF_KILL_HW;
                wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
        } else {
                priv->status &= ~STATUS_RF_KILL_HW;
                wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
        }

        return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
}

static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
                            char *buf)
{
        /* 0 - RF kill not enabled
           1 - SW based RF kill active (sysfs)
           2 - HW based RF kill active
           3 - Both HW and SW based RF kill active */
        struct ipw_priv *priv = dev_get_drvdata(d);
        int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
            (rf_kill_active(priv) ? 0x2 : 0x0);
        return sprintf(buf, "%i\n", val);
}

static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
{
        if ((disable_radio ? 1 : 0) ==
            ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
                return 0;

        IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
                          disable_radio ?
"OFF" : "ON"); 1780 1781 if (disable_radio) { 1782 priv->status |= STATUS_RF_KILL_SW; 1783 1784 cancel_delayed_work(&priv->request_scan); 1785 cancel_delayed_work(&priv->request_direct_scan); 1786 cancel_delayed_work(&priv->request_passive_scan); 1787 cancel_delayed_work(&priv->scan_event); 1788 schedule_work(&priv->down); 1789 } else { 1790 priv->status &= ~STATUS_RF_KILL_SW; 1791 if (rf_kill_active(priv)) { 1792 IPW_DEBUG_RF_KILL("Can not turn radio back on - " 1793 "disabled by HW switch\n"); 1794 /* Make sure the RF_KILL check timer is running */ 1795 cancel_delayed_work(&priv->rf_kill); 1796 schedule_delayed_work(&priv->rf_kill, 1797 round_jiffies_relative(2 * HZ)); 1798 } else 1799 schedule_work(&priv->up); 1800 } 1801 1802 return 1; 1803 } 1804 1805 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, 1806 const char *buf, size_t count) 1807 { 1808 struct ipw_priv *priv = dev_get_drvdata(d); 1809 1810 ipw_radio_kill_sw(priv, buf[0] == '1'); 1811 1812 return count; 1813 } 1814 1815 static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill); 1816 1817 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr, 1818 char *buf) 1819 { 1820 struct ipw_priv *priv = dev_get_drvdata(d); 1821 int pos = 0, len = 0; 1822 if (priv->config & CFG_SPEED_SCAN) { 1823 while (priv->speed_scan[pos] != 0) 1824 len += sprintf(&buf[len], "%d ", 1825 priv->speed_scan[pos++]); 1826 return len + sprintf(&buf[len], "\n"); 1827 } 1828 1829 return sprintf(buf, "0\n"); 1830 } 1831 1832 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr, 1833 const char *buf, size_t count) 1834 { 1835 struct ipw_priv *priv = dev_get_drvdata(d); 1836 int channel, pos = 0; 1837 const char *p = buf; 1838 1839 /* list of space separated channels to scan, optionally ending with 0 */ 1840 while ((channel = simple_strtol(p, NULL, 0))) { 1841 if (pos == MAX_SPEED_SCAN - 1) { 1842 priv->speed_scan[pos] = 0; 1843 break; 1844 } 1845 1846 if (libipw_is_valid_channel(priv->ieee, channel)) 1847 priv->speed_scan[pos++] = channel; 1848 else 1849 IPW_WARNING("Skipping invalid channel request: %d\n", 1850 channel); 1851 p = strchr(p, ' '); 1852 if (!p) 1853 break; 1854 while (*p == ' ' || *p == '\t') 1855 p++; 1856 } 1857 1858 if (pos == 0) 1859 priv->config &= ~CFG_SPEED_SCAN; 1860 else { 1861 priv->speed_scan_pos = 0; 1862 priv->config |= CFG_SPEED_SCAN; 1863 } 1864 1865 return count; 1866 } 1867 1868 static DEVICE_ATTR(speed_scan, 0644, show_speed_scan, store_speed_scan); 1869 1870 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr, 1871 char *buf) 1872 { 1873 struct ipw_priv *priv = dev_get_drvdata(d); 1874 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? 
'1' : '0'); 1875 } 1876 1877 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr, 1878 const char *buf, size_t count) 1879 { 1880 struct ipw_priv *priv = dev_get_drvdata(d); 1881 if (buf[0] == '1') 1882 priv->config |= CFG_NET_STATS; 1883 else 1884 priv->config &= ~CFG_NET_STATS; 1885 1886 return count; 1887 } 1888 1889 static DEVICE_ATTR(net_stats, 0644, show_net_stats, store_net_stats); 1890 1891 static ssize_t show_channels(struct device *d, 1892 struct device_attribute *attr, 1893 char *buf) 1894 { 1895 struct ipw_priv *priv = dev_get_drvdata(d); 1896 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 1897 int len = 0, i; 1898 1899 len = sprintf(&buf[len], 1900 "Displaying %d channels in 2.4Ghz band " 1901 "(802.11bg):\n", geo->bg_channels); 1902 1903 for (i = 0; i < geo->bg_channels; i++) { 1904 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n", 1905 geo->bg[i].channel, 1906 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ? 1907 " (radar spectrum)" : "", 1908 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) || 1909 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)) 1910 ? "" : ", IBSS", 1911 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ? 1912 "passive only" : "active/passive", 1913 geo->bg[i].flags & LIBIPW_CH_B_ONLY ? 1914 "B" : "B/G"); 1915 } 1916 1917 len += sprintf(&buf[len], 1918 "Displaying %d channels in 5.2Ghz band " 1919 "(802.11a):\n", geo->a_channels); 1920 for (i = 0; i < geo->a_channels; i++) { 1921 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n", 1922 geo->a[i].channel, 1923 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ? 1924 " (radar spectrum)" : "", 1925 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) || 1926 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)) 1927 ? "" : ", IBSS", 1928 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ? 1929 "passive only" : "active/passive"); 1930 } 1931 1932 return len; 1933 } 1934 1935 static DEVICE_ATTR(channels, 0400, show_channels, NULL); 1936 1937 static void notify_wx_assoc_event(struct ipw_priv *priv) 1938 { 1939 union iwreq_data wrqu; 1940 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1941 if (priv->status & STATUS_ASSOCIATED) 1942 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); 1943 else 1944 eth_zero_addr(wrqu.ap_addr.sa_data); 1945 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); 1946 } 1947 1948 static void ipw_irq_tasklet(unsigned long data) 1949 { 1950 struct ipw_priv *priv = (struct ipw_priv *)data; 1951 u32 inta, inta_mask, handled = 0; 1952 unsigned long flags; 1953 int rc = 0; 1954 1955 spin_lock_irqsave(&priv->irq_lock, flags); 1956 1957 inta = ipw_read32(priv, IPW_INTA_RW); 1958 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); 1959 1960 if (inta == 0xFFFFFFFF) { 1961 /* Hardware disappeared */ 1962 IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n"); 1963 /* Only handle the cached INTA values */ 1964 inta = 0; 1965 } 1966 inta &= (IPW_INTA_MASK_ALL & inta_mask); 1967 1968 /* Add any cached INTA values that need to be handled */ 1969 inta |= priv->isr_inta; 1970 1971 spin_unlock_irqrestore(&priv->irq_lock, flags); 1972 1973 spin_lock_irqsave(&priv->lock, flags); 1974 1975 /* handle all the justifications for the interrupt */ 1976 if (inta & IPW_INTA_BIT_RX_TRANSFER) { 1977 ipw_rx(priv); 1978 handled |= IPW_INTA_BIT_RX_TRANSFER; 1979 } 1980 1981 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) { 1982 IPW_DEBUG_HC("Command completed.\n"); 1983 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1); 1984 priv->status &= ~STATUS_HCMD_ACTIVE; 1985 wake_up_interruptible(&priv->wait_command_queue); 1986 handled |= IPW_INTA_BIT_TX_CMD_QUEUE; 1987 } 
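        /*
         * TX queue interrupts: each of the four bits handled below reclaims
         * completed descriptors for its corresponding hardware queue via
         * ipw_queue_tx_reclaim().
         */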
1988 1989 if (inta & IPW_INTA_BIT_TX_QUEUE_1) { 1990 IPW_DEBUG_TX("TX_QUEUE_1\n"); 1991 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0); 1992 handled |= IPW_INTA_BIT_TX_QUEUE_1; 1993 } 1994 1995 if (inta & IPW_INTA_BIT_TX_QUEUE_2) { 1996 IPW_DEBUG_TX("TX_QUEUE_2\n"); 1997 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1); 1998 handled |= IPW_INTA_BIT_TX_QUEUE_2; 1999 } 2000 2001 if (inta & IPW_INTA_BIT_TX_QUEUE_3) { 2002 IPW_DEBUG_TX("TX_QUEUE_3\n"); 2003 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2); 2004 handled |= IPW_INTA_BIT_TX_QUEUE_3; 2005 } 2006 2007 if (inta & IPW_INTA_BIT_TX_QUEUE_4) { 2008 IPW_DEBUG_TX("TX_QUEUE_4\n"); 2009 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3); 2010 handled |= IPW_INTA_BIT_TX_QUEUE_4; 2011 } 2012 2013 if (inta & IPW_INTA_BIT_STATUS_CHANGE) { 2014 IPW_WARNING("STATUS_CHANGE\n"); 2015 handled |= IPW_INTA_BIT_STATUS_CHANGE; 2016 } 2017 2018 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) { 2019 IPW_WARNING("TX_PERIOD_EXPIRED\n"); 2020 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED; 2021 } 2022 2023 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) { 2024 IPW_WARNING("HOST_CMD_DONE\n"); 2025 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE; 2026 } 2027 2028 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) { 2029 IPW_WARNING("FW_INITIALIZATION_DONE\n"); 2030 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE; 2031 } 2032 2033 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) { 2034 IPW_WARNING("PHY_OFF_DONE\n"); 2035 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE; 2036 } 2037 2038 if (inta & IPW_INTA_BIT_RF_KILL_DONE) { 2039 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n"); 2040 priv->status |= STATUS_RF_KILL_HW; 2041 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true); 2042 wake_up_interruptible(&priv->wait_command_queue); 2043 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); 2044 cancel_delayed_work(&priv->request_scan); 2045 cancel_delayed_work(&priv->request_direct_scan); 2046 cancel_delayed_work(&priv->request_passive_scan); 2047 cancel_delayed_work(&priv->scan_event); 2048 schedule_work(&priv->link_down); 2049 schedule_delayed_work(&priv->rf_kill, 2 * HZ); 2050 handled |= IPW_INTA_BIT_RF_KILL_DONE; 2051 } 2052 2053 if (inta & IPW_INTA_BIT_FATAL_ERROR) { 2054 IPW_WARNING("Firmware error detected. Restarting.\n"); 2055 if (priv->error) { 2056 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n"); 2057 if (ipw_debug_level & IPW_DL_FW_ERRORS) { 2058 struct ipw_fw_error *error = 2059 ipw_alloc_error_log(priv); 2060 ipw_dump_error_log(priv, error); 2061 kfree(error); 2062 } 2063 } else { 2064 priv->error = ipw_alloc_error_log(priv); 2065 if (priv->error) 2066 IPW_DEBUG_FW("Sysfs 'error' log captured.\n"); 2067 else 2068 IPW_DEBUG_FW("Error allocating sysfs 'error' " 2069 "log.\n"); 2070 if (ipw_debug_level & IPW_DL_FW_ERRORS) 2071 ipw_dump_error_log(priv, priv->error); 2072 } 2073 2074 /* XXX: If hardware encryption is for WPA/WPA2, 2075 * we have to notify the supplicant. */ 2076 if (priv->ieee->sec.encrypt) { 2077 priv->status &= ~STATUS_ASSOCIATED; 2078 notify_wx_assoc_event(priv); 2079 } 2080 2081 /* Keep the restart process from trying to send host 2082 * commands by clearing the INIT status bit */ 2083 priv->status &= ~STATUS_INIT; 2084 2085 /* Cancel currently queued command. 
*/ 2086 priv->status &= ~STATUS_HCMD_ACTIVE; 2087 wake_up_interruptible(&priv->wait_command_queue); 2088 2089 schedule_work(&priv->adapter_restart); 2090 handled |= IPW_INTA_BIT_FATAL_ERROR; 2091 } 2092 2093 if (inta & IPW_INTA_BIT_PARITY_ERROR) { 2094 IPW_ERROR("Parity error\n"); 2095 handled |= IPW_INTA_BIT_PARITY_ERROR; 2096 } 2097 2098 if (handled != inta) { 2099 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); 2100 } 2101 2102 spin_unlock_irqrestore(&priv->lock, flags); 2103 2104 /* enable all interrupts */ 2105 ipw_enable_interrupts(priv); 2106 } 2107 2108 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x 2109 static char *get_cmd_string(u8 cmd) 2110 { 2111 switch (cmd) { 2112 IPW_CMD(HOST_COMPLETE); 2113 IPW_CMD(POWER_DOWN); 2114 IPW_CMD(SYSTEM_CONFIG); 2115 IPW_CMD(MULTICAST_ADDRESS); 2116 IPW_CMD(SSID); 2117 IPW_CMD(ADAPTER_ADDRESS); 2118 IPW_CMD(PORT_TYPE); 2119 IPW_CMD(RTS_THRESHOLD); 2120 IPW_CMD(FRAG_THRESHOLD); 2121 IPW_CMD(POWER_MODE); 2122 IPW_CMD(WEP_KEY); 2123 IPW_CMD(TGI_TX_KEY); 2124 IPW_CMD(SCAN_REQUEST); 2125 IPW_CMD(SCAN_REQUEST_EXT); 2126 IPW_CMD(ASSOCIATE); 2127 IPW_CMD(SUPPORTED_RATES); 2128 IPW_CMD(SCAN_ABORT); 2129 IPW_CMD(TX_FLUSH); 2130 IPW_CMD(QOS_PARAMETERS); 2131 IPW_CMD(DINO_CONFIG); 2132 IPW_CMD(RSN_CAPABILITIES); 2133 IPW_CMD(RX_KEY); 2134 IPW_CMD(CARD_DISABLE); 2135 IPW_CMD(SEED_NUMBER); 2136 IPW_CMD(TX_POWER); 2137 IPW_CMD(COUNTRY_INFO); 2138 IPW_CMD(AIRONET_INFO); 2139 IPW_CMD(AP_TX_POWER); 2140 IPW_CMD(CCKM_INFO); 2141 IPW_CMD(CCX_VER_INFO); 2142 IPW_CMD(SET_CALIBRATION); 2143 IPW_CMD(SENSITIVITY_CALIB); 2144 IPW_CMD(RETRY_LIMIT); 2145 IPW_CMD(IPW_PRE_POWER_DOWN); 2146 IPW_CMD(VAP_BEACON_TEMPLATE); 2147 IPW_CMD(VAP_DTIM_PERIOD); 2148 IPW_CMD(EXT_SUPPORTED_RATES); 2149 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT); 2150 IPW_CMD(VAP_QUIET_INTERVALS); 2151 IPW_CMD(VAP_CHANNEL_SWITCH); 2152 IPW_CMD(VAP_MANDATORY_CHANNELS); 2153 IPW_CMD(VAP_CELL_PWR_LIMIT); 2154 IPW_CMD(VAP_CF_PARAM_SET); 2155 IPW_CMD(VAP_SET_BEACONING_STATE); 2156 IPW_CMD(MEASUREMENT); 2157 IPW_CMD(POWER_CAPABILITY); 2158 IPW_CMD(SUPPORTED_CHANNELS); 2159 IPW_CMD(TPC_REPORT); 2160 IPW_CMD(WME_INFO); 2161 IPW_CMD(PRODUCTION_COMMAND); 2162 default: 2163 return "UNKNOWN"; 2164 } 2165 } 2166 2167 #define HOST_COMPLETE_TIMEOUT HZ 2168 2169 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) 2170 { 2171 int rc = 0; 2172 unsigned long flags; 2173 unsigned long now, end; 2174 2175 spin_lock_irqsave(&priv->lock, flags); 2176 if (priv->status & STATUS_HCMD_ACTIVE) { 2177 IPW_ERROR("Failed to send %s: Already sending a command.\n", 2178 get_cmd_string(cmd->cmd)); 2179 spin_unlock_irqrestore(&priv->lock, flags); 2180 return -EAGAIN; 2181 } 2182 2183 priv->status |= STATUS_HCMD_ACTIVE; 2184 2185 if (priv->cmdlog) { 2186 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies; 2187 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd; 2188 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len; 2189 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param, 2190 cmd->len); 2191 priv->cmdlog[priv->cmdlog_pos].retcode = -1; 2192 } 2193 2194 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n", 2195 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len, 2196 priv->status); 2197 2198 #ifndef DEBUG_CMD_WEP_KEY 2199 if (cmd->cmd == IPW_CMD_WEP_KEY) 2200 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n"); 2201 else 2202 #endif 2203 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len); 2204 2205 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0); 2206 if (rc) { 2207 priv->status &= 
~STATUS_HCMD_ACTIVE; 2208 IPW_ERROR("Failed to send %s: Reason %d\n", 2209 get_cmd_string(cmd->cmd), rc); 2210 spin_unlock_irqrestore(&priv->lock, flags); 2211 goto exit; 2212 } 2213 spin_unlock_irqrestore(&priv->lock, flags); 2214 2215 now = jiffies; 2216 end = now + HOST_COMPLETE_TIMEOUT; 2217 again: 2218 rc = wait_event_interruptible_timeout(priv->wait_command_queue, 2219 !(priv-> 2220 status & STATUS_HCMD_ACTIVE), 2221 end - now); 2222 if (rc < 0) { 2223 now = jiffies; 2224 if (time_before(now, end)) 2225 goto again; 2226 rc = 0; 2227 } 2228 2229 if (rc == 0) { 2230 spin_lock_irqsave(&priv->lock, flags); 2231 if (priv->status & STATUS_HCMD_ACTIVE) { 2232 IPW_ERROR("Failed to send %s: Command timed out.\n", 2233 get_cmd_string(cmd->cmd)); 2234 priv->status &= ~STATUS_HCMD_ACTIVE; 2235 spin_unlock_irqrestore(&priv->lock, flags); 2236 rc = -EIO; 2237 goto exit; 2238 } 2239 spin_unlock_irqrestore(&priv->lock, flags); 2240 } else 2241 rc = 0; 2242 2243 if (priv->status & STATUS_RF_KILL_HW) { 2244 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n", 2245 get_cmd_string(cmd->cmd)); 2246 rc = -EIO; 2247 goto exit; 2248 } 2249 2250 exit: 2251 if (priv->cmdlog) { 2252 priv->cmdlog[priv->cmdlog_pos++].retcode = rc; 2253 priv->cmdlog_pos %= priv->cmdlog_len; 2254 } 2255 return rc; 2256 } 2257 2258 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command) 2259 { 2260 struct host_cmd cmd = { 2261 .cmd = command, 2262 }; 2263 2264 return __ipw_send_cmd(priv, &cmd); 2265 } 2266 2267 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len, 2268 void *data) 2269 { 2270 struct host_cmd cmd = { 2271 .cmd = command, 2272 .len = len, 2273 .param = data, 2274 }; 2275 2276 return __ipw_send_cmd(priv, &cmd); 2277 } 2278 2279 static int ipw_send_host_complete(struct ipw_priv *priv) 2280 { 2281 if (!priv) { 2282 IPW_ERROR("Invalid args\n"); 2283 return -1; 2284 } 2285 2286 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE); 2287 } 2288 2289 static int ipw_send_system_config(struct ipw_priv *priv) 2290 { 2291 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, 2292 sizeof(priv->sys_config), 2293 &priv->sys_config); 2294 } 2295 2296 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) 2297 { 2298 if (!priv || !ssid) { 2299 IPW_ERROR("Invalid args\n"); 2300 return -1; 2301 } 2302 2303 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE), 2304 ssid); 2305 } 2306 2307 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) 2308 { 2309 if (!priv || !mac) { 2310 IPW_ERROR("Invalid args\n"); 2311 return -1; 2312 } 2313 2314 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n", 2315 priv->net_dev->name, mac); 2316 2317 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); 2318 } 2319 2320 static void ipw_adapter_restart(void *adapter) 2321 { 2322 struct ipw_priv *priv = adapter; 2323 2324 if (priv->status & STATUS_RF_KILL_MASK) 2325 return; 2326 2327 ipw_down(priv); 2328 2329 if (priv->assoc_network && 2330 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS)) 2331 ipw_remove_current_network(priv); 2332 2333 if (ipw_up(priv)) { 2334 IPW_ERROR("Failed to up device\n"); 2335 return; 2336 } 2337 } 2338 2339 static void ipw_bg_adapter_restart(struct work_struct *work) 2340 { 2341 struct ipw_priv *priv = 2342 container_of(work, struct ipw_priv, adapter_restart); 2343 mutex_lock(&priv->mutex); 2344 ipw_adapter_restart(priv); 2345 mutex_unlock(&priv->mutex); 2346 } 2347 2348 static void ipw_abort_scan(struct ipw_priv *priv); 2349 
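/*
 * Host commands are sent synchronously: __ipw_send_cmd() sets
 * STATUS_HCMD_ACTIVE (or fails with -EAGAIN if a command is already in
 * flight), queues the command on the command TX queue and then sleeps on
 * wait_command_queue until the TX_CMD_QUEUE interrupt clears the bit, or
 * until HOST_COMPLETE_TIMEOUT (HZ, about one second) expires.  Most firmware
 * commands therefore reduce to thin wrappers around ipw_send_cmd_pdu() or
 * ipw_send_cmd_simple(), along the lines of this sketch (IPW_CMD_FOO and its
 * payload are illustrative only, not a real firmware command):
 *
 *	static int ipw_send_foo(struct ipw_priv *priv, u16 val)
 *	{
 *		__le16 v = cpu_to_le16(val);
 *
 *		return ipw_send_cmd_pdu(priv, IPW_CMD_FOO, sizeof(v), &v);
 *	}
 */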
2350 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) 2351 2352 static void ipw_scan_check(void *data) 2353 { 2354 struct ipw_priv *priv = data; 2355 2356 if (priv->status & STATUS_SCAN_ABORTING) { 2357 IPW_DEBUG_SCAN("Scan completion watchdog resetting " 2358 "adapter after (%dms).\n", 2359 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2360 schedule_work(&priv->adapter_restart); 2361 } else if (priv->status & STATUS_SCANNING) { 2362 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan " 2363 "after (%dms).\n", 2364 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2365 ipw_abort_scan(priv); 2366 schedule_delayed_work(&priv->scan_check, HZ); 2367 } 2368 } 2369 2370 static void ipw_bg_scan_check(struct work_struct *work) 2371 { 2372 struct ipw_priv *priv = 2373 container_of(work, struct ipw_priv, scan_check.work); 2374 mutex_lock(&priv->mutex); 2375 ipw_scan_check(priv); 2376 mutex_unlock(&priv->mutex); 2377 } 2378 2379 static int ipw_send_scan_request_ext(struct ipw_priv *priv, 2380 struct ipw_scan_request_ext *request) 2381 { 2382 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT, 2383 sizeof(*request), request); 2384 } 2385 2386 static int ipw_send_scan_abort(struct ipw_priv *priv) 2387 { 2388 if (!priv) { 2389 IPW_ERROR("Invalid args\n"); 2390 return -1; 2391 } 2392 2393 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT); 2394 } 2395 2396 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) 2397 { 2398 struct ipw_sensitivity_calib calib = { 2399 .beacon_rssi_raw = cpu_to_le16(sens), 2400 }; 2401 2402 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib), 2403 &calib); 2404 } 2405 2406 static int ipw_send_associate(struct ipw_priv *priv, 2407 struct ipw_associate *associate) 2408 { 2409 if (!priv || !associate) { 2410 IPW_ERROR("Invalid args\n"); 2411 return -1; 2412 } 2413 2414 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate), 2415 associate); 2416 } 2417 2418 static int ipw_send_supported_rates(struct ipw_priv *priv, 2419 struct ipw_supported_rates *rates) 2420 { 2421 if (!priv || !rates) { 2422 IPW_ERROR("Invalid args\n"); 2423 return -1; 2424 } 2425 2426 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates), 2427 rates); 2428 } 2429 2430 static int ipw_set_random_seed(struct ipw_priv *priv) 2431 { 2432 u32 val; 2433 2434 if (!priv) { 2435 IPW_ERROR("Invalid args\n"); 2436 return -1; 2437 } 2438 2439 get_random_bytes(&val, sizeof(val)); 2440 2441 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val); 2442 } 2443 2444 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) 2445 { 2446 __le32 v = cpu_to_le32(phy_off); 2447 if (!priv) { 2448 IPW_ERROR("Invalid args\n"); 2449 return -1; 2450 } 2451 2452 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v); 2453 } 2454 2455 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power) 2456 { 2457 if (!priv || !power) { 2458 IPW_ERROR("Invalid args\n"); 2459 return -1; 2460 } 2461 2462 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power); 2463 } 2464 2465 static int ipw_set_tx_power(struct ipw_priv *priv) 2466 { 2467 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 2468 struct ipw_tx_power tx_power; 2469 s8 max_power; 2470 int i; 2471 2472 memset(&tx_power, 0, sizeof(tx_power)); 2473 2474 /* configure device for 'G' band */ 2475 tx_power.ieee_mode = IPW_G_MODE; 2476 tx_power.num_channels = geo->bg_channels; 2477 for (i = 0; i < geo->bg_channels; i++) { 2478 max_power = geo->bg[i].max_power; 2479 
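		/* Use the lower of the geo table's per-channel maximum and the
		 * configured tx_power; a max_power of 0 means the geo table
		 * records no per-channel limit. */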
tx_power.channels_tx_power[i].channel_number = 2480 geo->bg[i].channel; 2481 tx_power.channels_tx_power[i].tx_power = max_power ? 2482 min(max_power, priv->tx_power) : priv->tx_power; 2483 } 2484 if (ipw_send_tx_power(priv, &tx_power)) 2485 return -EIO; 2486 2487 /* configure device to also handle 'B' band */ 2488 tx_power.ieee_mode = IPW_B_MODE; 2489 if (ipw_send_tx_power(priv, &tx_power)) 2490 return -EIO; 2491 2492 /* configure device to also handle 'A' band */ 2493 if (priv->ieee->abg_true) { 2494 tx_power.ieee_mode = IPW_A_MODE; 2495 tx_power.num_channels = geo->a_channels; 2496 for (i = 0; i < tx_power.num_channels; i++) { 2497 max_power = geo->a[i].max_power; 2498 tx_power.channels_tx_power[i].channel_number = 2499 geo->a[i].channel; 2500 tx_power.channels_tx_power[i].tx_power = max_power ? 2501 min(max_power, priv->tx_power) : priv->tx_power; 2502 } 2503 if (ipw_send_tx_power(priv, &tx_power)) 2504 return -EIO; 2505 } 2506 return 0; 2507 } 2508 2509 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) 2510 { 2511 struct ipw_rts_threshold rts_threshold = { 2512 .rts_threshold = cpu_to_le16(rts), 2513 }; 2514 2515 if (!priv) { 2516 IPW_ERROR("Invalid args\n"); 2517 return -1; 2518 } 2519 2520 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD, 2521 sizeof(rts_threshold), &rts_threshold); 2522 } 2523 2524 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) 2525 { 2526 struct ipw_frag_threshold frag_threshold = { 2527 .frag_threshold = cpu_to_le16(frag), 2528 }; 2529 2530 if (!priv) { 2531 IPW_ERROR("Invalid args\n"); 2532 return -1; 2533 } 2534 2535 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD, 2536 sizeof(frag_threshold), &frag_threshold); 2537 } 2538 2539 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) 2540 { 2541 __le32 param; 2542 2543 if (!priv) { 2544 IPW_ERROR("Invalid args\n"); 2545 return -1; 2546 } 2547 2548 /* If on battery, set to index 3; if on AC, set to CAM; otherwise use the 2549 * user-requested level */ 2550 switch (mode) { 2551 case IPW_POWER_BATTERY: 2552 param = cpu_to_le32(IPW_POWER_INDEX_3); 2553 break; 2554 case IPW_POWER_AC: 2555 param = cpu_to_le32(IPW_POWER_MODE_CAM); 2556 break; 2557 default: 2558 param = cpu_to_le32(mode); 2559 break; 2560 } 2561 2562 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), 2563 &param); 2564 } 2565 2566 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) 2567 { 2568 struct ipw_retry_limit retry_limit = { 2569 .short_retry_limit = slimit, 2570 .long_retry_limit = llimit 2571 }; 2572 2573 if (!priv) { 2574 IPW_ERROR("Invalid args\n"); 2575 return -1; 2576 } 2577 2578 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit), 2579 &retry_limit); 2580 } 2581 2582 /* 2583 * The IPW device contains a Microwire compatible EEPROM that stores 2584 * various data like the MAC address. Usually the firmware has exclusive 2585 * access to the eeprom, but during device initialization (before the 2586 * device driver has sent the HostComplete command to the firmware) the 2587 * device driver has read access to the EEPROM by way of indirect addressing 2588 * through a couple of memory mapped registers. 2589 * 2590 * The following is a simplified implementation for pulling data out of 2591 * the eeprom, along with some helper functions to find information in 2592 * the per device private data's copy of the eeprom.
2593 * 2594 * NOTE: To better understand how these functions work (i.e. what is a chip 2595 * select and why do we have to keep driving the eeprom clock?), read 2596 * just about any data sheet for a Microwire compatible EEPROM. 2597 */ 2598 2599 /* write a 32 bit value into the indirect accessor register */ 2600 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data) 2601 { 2602 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data); 2603 2604 /* the eeprom requires some time to complete the operation */ 2605 udelay(p->eeprom_delay); 2606 } 2607 2608 /* perform a chip select operation */ 2609 static void eeprom_cs(struct ipw_priv *priv) 2610 { 2611 eeprom_write_reg(priv, 0); 2612 eeprom_write_reg(priv, EEPROM_BIT_CS); 2613 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK); 2614 eeprom_write_reg(priv, EEPROM_BIT_CS); 2615 } 2616 2617 /* terminate a chip select operation */ 2618 static void eeprom_disable_cs(struct ipw_priv *priv) 2619 { 2620 eeprom_write_reg(priv, EEPROM_BIT_CS); 2621 eeprom_write_reg(priv, 0); 2622 eeprom_write_reg(priv, EEPROM_BIT_SK); 2623 } 2624 2625 /* push a single bit down to the eeprom */ 2626 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit) 2627 { 2628 int d = (bit ? EEPROM_BIT_DI : 0); 2629 eeprom_write_reg(p, EEPROM_BIT_CS | d); 2630 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK); 2631 } 2632 2633 /* push an opcode followed by an address down to the eeprom */ 2634 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr) 2635 { 2636 int i; 2637 2638 eeprom_cs(priv); 2639 eeprom_write_bit(priv, 1); 2640 eeprom_write_bit(priv, op & 2); 2641 eeprom_write_bit(priv, op & 1); 2642 for (i = 7; i >= 0; i--) { 2643 eeprom_write_bit(priv, addr & (1 << i)); 2644 } 2645 } 2646 2647 /* pull 16 bits off the eeprom, one bit at a time */ 2648 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr) 2649 { 2650 int i; 2651 u16 r = 0; 2652 2653 /* Send READ Opcode */ 2654 eeprom_op(priv, EEPROM_CMD_READ, addr); 2655 2656 /* Send dummy bit */ 2657 eeprom_write_reg(priv, EEPROM_BIT_CS); 2658 2659 /* Read the 16-bit value off the eeprom one bit at a time */ 2660 for (i = 0; i < 16; i++) { 2661 u32 data = 0; 2662 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK); 2663 eeprom_write_reg(priv, EEPROM_BIT_CS); 2664 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS); 2665 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0); 2666 } 2667 2668 /* Send another dummy bit */ 2669 eeprom_write_reg(priv, 0); 2670 eeprom_disable_cs(priv); 2671 2672 return r; 2673 } 2674 2675 /* helper function for pulling the mac address out of the private */ 2676 /* data's copy of the eeprom data */ 2677 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac) 2678 { 2679 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN); 2680 } 2681 2682 static void ipw_read_eeprom(struct ipw_priv *priv) 2683 { 2684 int i; 2685 __le16 *eeprom = (__le16 *) priv->eeprom; 2686 2687 IPW_DEBUG_TRACE(">>\n"); 2688 2689 /* read entire contents of eeprom into private buffer */ 2690 for (i = 0; i < 128; i++) 2691 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i)); 2692 2693 IPW_DEBUG_TRACE("<<\n"); 2694 } 2695 2696 /* 2697 * Either the device driver (i.e. the host) or the firmware can 2698 * load eeprom data into the designated region in SRAM. If neither 2699 * happens then the FW will shutdown with a fatal error. 2700 * 2701 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE 2702 * bit in the shared SRAM region needs to be non-zero.
2703 */ 2704 static void ipw_eeprom_init_sram(struct ipw_priv *priv) 2705 { 2706 int i; 2707 2708 IPW_DEBUG_TRACE(">>\n"); 2709 2710 /* 2711 If the data looks correct, then copy it to our private 2712 copy. Otherwise let the firmware know to perform the operation 2713 on its own. 2714 */ 2715 if (priv->eeprom[EEPROM_VERSION] != 0) { 2716 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n"); 2717 2718 /* write the eeprom data to sram */ 2719 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++) 2720 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]); 2721 2722 /* Do not load eeprom data on fatal error or suspend */ 2723 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); 2724 } else { 2725 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n"); 2726 2727 /* Load eeprom data on fatal error or suspend */ 2728 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1); 2729 } 2730 2731 IPW_DEBUG_TRACE("<<\n"); 2732 } 2733 2734 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count) 2735 { 2736 count >>= 2; 2737 if (!count) 2738 return; 2739 _ipw_write32(priv, IPW_AUTOINC_ADDR, start); 2740 while (count--) 2741 _ipw_write32(priv, IPW_AUTOINC_DATA, 0); 2742 } 2743 2744 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv) 2745 { 2746 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL, 2747 CB_NUMBER_OF_ELEMENTS_SMALL * 2748 sizeof(struct command_block)); 2749 } 2750 2751 static int ipw_fw_dma_enable(struct ipw_priv *priv) 2752 { /* start dma engine but no transfers yet */ 2753 2754 IPW_DEBUG_FW(">> :\n"); 2755 2756 /* Start the dma */ 2757 ipw_fw_dma_reset_command_blocks(priv); 2758 2759 /* Write CB base address */ 2760 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL); 2761 2762 IPW_DEBUG_FW("<< :\n"); 2763 return 0; 2764 } 2765 2766 static void ipw_fw_dma_abort(struct ipw_priv *priv) 2767 { 2768 u32 control = 0; 2769 2770 IPW_DEBUG_FW(">> :\n"); 2771 2772 /* set the Stop and Abort bit */ 2773 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT; 2774 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); 2775 priv->sram_desc.last_cb_index = 0; 2776 2777 IPW_DEBUG_FW("<<\n"); 2778 } 2779 2780 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, 2781 struct command_block *cb) 2782 { 2783 u32 address = 2784 IPW_SHARED_SRAM_DMA_CONTROL + 2785 (sizeof(struct command_block) * index); 2786 IPW_DEBUG_FW(">> :\n"); 2787 2788 ipw_write_indirect(priv, address, (u8 *) cb, 2789 (int)sizeof(struct command_block)); 2790 2791 IPW_DEBUG_FW("<< :\n"); 2792 return 0; 2793 2794 } 2795 2796 static int ipw_fw_dma_kick(struct ipw_priv *priv) 2797 { 2798 u32 control = 0; 2799 u32 index = 0; 2800 2801 IPW_DEBUG_FW(">> :\n"); 2802 2803 for (index = 0; index < priv->sram_desc.last_cb_index; index++) 2804 ipw_fw_dma_write_command_block(priv, index, 2805 &priv->sram_desc.cb_list[index]); 2806 2807 /* Enable the DMA in the CSR register */ 2808 ipw_clear_bit(priv, IPW_RESET_REG, 2809 IPW_RESET_REG_MASTER_DISABLED | 2810 IPW_RESET_REG_STOP_MASTER); 2811 2812 /* Set the Start bit. 
*/ 2813 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START; 2814 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); 2815 2816 IPW_DEBUG_FW("<< :\n"); 2817 return 0; 2818 } 2819 2820 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv) 2821 { 2822 u32 address; 2823 u32 register_value = 0; 2824 u32 cb_fields_address = 0; 2825 2826 IPW_DEBUG_FW(">> :\n"); 2827 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); 2828 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address); 2829 2830 /* Read the DMA Controlor register */ 2831 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL); 2832 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value); 2833 2834 /* Print the CB values */ 2835 cb_fields_address = address; 2836 register_value = ipw_read_reg32(priv, cb_fields_address); 2837 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value); 2838 2839 cb_fields_address += sizeof(u32); 2840 register_value = ipw_read_reg32(priv, cb_fields_address); 2841 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value); 2842 2843 cb_fields_address += sizeof(u32); 2844 register_value = ipw_read_reg32(priv, cb_fields_address); 2845 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n", 2846 register_value); 2847 2848 cb_fields_address += sizeof(u32); 2849 register_value = ipw_read_reg32(priv, cb_fields_address); 2850 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value); 2851 2852 IPW_DEBUG_FW(">> :\n"); 2853 } 2854 2855 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv) 2856 { 2857 u32 current_cb_address = 0; 2858 u32 current_cb_index = 0; 2859 2860 IPW_DEBUG_FW("<< :\n"); 2861 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); 2862 2863 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) / 2864 sizeof(struct command_block); 2865 2866 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n", 2867 current_cb_index, current_cb_address); 2868 2869 IPW_DEBUG_FW(">> :\n"); 2870 return current_cb_index; 2871 2872 } 2873 2874 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv, 2875 u32 src_address, 2876 u32 dest_address, 2877 u32 length, 2878 int interrupt_enabled, int is_last) 2879 { 2880 2881 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC | 2882 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG | 2883 CB_DEST_SIZE_LONG; 2884 struct command_block *cb; 2885 u32 last_cb_element = 0; 2886 2887 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n", 2888 src_address, dest_address, length); 2889 2890 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL) 2891 return -1; 2892 2893 last_cb_element = priv->sram_desc.last_cb_index; 2894 cb = &priv->sram_desc.cb_list[last_cb_element]; 2895 priv->sram_desc.last_cb_index++; 2896 2897 /* Calculate the new CB control word */ 2898 if (interrupt_enabled) 2899 control |= CB_INT_ENABLED; 2900 2901 if (is_last) 2902 control |= CB_LAST_VALID; 2903 2904 control |= length; 2905 2906 /* Calculate the CB Element's checksum value */ 2907 cb->status = control ^ src_address ^ dest_address; 2908 2909 /* Copy the Source and Destination addresses */ 2910 cb->dest_addr = dest_address; 2911 cb->source_addr = src_address; 2912 2913 /* Copy the Control Word last */ 2914 cb->control = control; 2915 2916 return 0; 2917 } 2918 2919 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address, 2920 int nr, u32 dest_address, u32 len) 2921 { 2922 int ret, i; 2923 u32 size; 2924 2925 
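	/*
	 * Queue one DMA command block per CB_MAX_LENGTH-sized piece of the
	 * buffer: piece i is sourced from src_address[i] and lands at
	 * dest_address + i * CB_MAX_LENGTH.
	 */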
IPW_DEBUG_FW(">>\n"); 2926 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n", 2927 nr, dest_address, len); 2928 2929 for (i = 0; i < nr; i++) { 2930 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH); 2931 ret = ipw_fw_dma_add_command_block(priv, src_address[i], 2932 dest_address + 2933 i * CB_MAX_LENGTH, size, 2934 0, 0); 2935 if (ret) { 2936 IPW_DEBUG_FW_INFO(": Failed\n"); 2937 return -1; 2938 } else 2939 IPW_DEBUG_FW_INFO(": Added new cb\n"); 2940 } 2941 2942 IPW_DEBUG_FW("<<\n"); 2943 return 0; 2944 } 2945 2946 static int ipw_fw_dma_wait(struct ipw_priv *priv) 2947 { 2948 u32 current_index = 0, previous_index; 2949 u32 watchdog = 0; 2950 2951 IPW_DEBUG_FW(">> :\n"); 2952 2953 current_index = ipw_fw_dma_command_block_index(priv); 2954 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n", 2955 (int)priv->sram_desc.last_cb_index); 2956 2957 while (current_index < priv->sram_desc.last_cb_index) { 2958 udelay(50); 2959 previous_index = current_index; 2960 current_index = ipw_fw_dma_command_block_index(priv); 2961 2962 if (previous_index < current_index) { 2963 watchdog = 0; 2964 continue; 2965 } 2966 if (++watchdog > 400) { 2967 IPW_DEBUG_FW_INFO("Timeout\n"); 2968 ipw_fw_dma_dump_command_block(priv); 2969 ipw_fw_dma_abort(priv); 2970 return -1; 2971 } 2972 } 2973 2974 ipw_fw_dma_abort(priv); 2975 2976 /*Disable the DMA in the CSR register */ 2977 ipw_set_bit(priv, IPW_RESET_REG, 2978 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER); 2979 2980 IPW_DEBUG_FW("<< dmaWaitSync\n"); 2981 return 0; 2982 } 2983 2984 static void ipw_remove_current_network(struct ipw_priv *priv) 2985 { 2986 struct list_head *element, *safe; 2987 struct libipw_network *network = NULL; 2988 unsigned long flags; 2989 2990 spin_lock_irqsave(&priv->ieee->lock, flags); 2991 list_for_each_safe(element, safe, &priv->ieee->network_list) { 2992 network = list_entry(element, struct libipw_network, list); 2993 if (ether_addr_equal(network->bssid, priv->bssid)) { 2994 list_del(element); 2995 list_add_tail(&network->list, 2996 &priv->ieee->network_free_list); 2997 } 2998 } 2999 spin_unlock_irqrestore(&priv->ieee->lock, flags); 3000 } 3001 3002 /** 3003 * Check that card is still alive. 3004 * Reads debug register from domain0. 3005 * If card is present, pre-defined value should 3006 * be found there. 3007 * 3008 * @param priv 3009 * @return 1 if card is present, 0 otherwise 3010 */ 3011 static inline int ipw_alive(struct ipw_priv *priv) 3012 { 3013 return ipw_read32(priv, 0x90) == 0xd55555d5; 3014 } 3015 3016 /* timeout in msec, attempted in 10-msec quanta */ 3017 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, 3018 int timeout) 3019 { 3020 int i = 0; 3021 3022 do { 3023 if ((ipw_read32(priv, addr) & mask) == mask) 3024 return i; 3025 mdelay(10); 3026 i += 10; 3027 } while (i < timeout); 3028 3029 return -ETIME; 3030 } 3031 3032 /* These functions load the firmware and micro code for the operation of 3033 * the ipw hardware. It assumes the buffer has all the bits for the 3034 * image and the caller is handling the memory allocation and clean up. 3035 */ 3036 3037 static int ipw_stop_master(struct ipw_priv *priv) 3038 { 3039 int rc; 3040 3041 IPW_DEBUG_TRACE(">>\n"); 3042 /* stop master. 
typical delay - 0 */ 3043 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); 3044 3045 /* timeout is in msec, polled in 10-msec quanta */ 3046 rc = ipw_poll_bit(priv, IPW_RESET_REG, 3047 IPW_RESET_REG_MASTER_DISABLED, 100); 3048 if (rc < 0) { 3049 IPW_ERROR("wait for stop master failed after 100ms\n"); 3050 return -1; 3051 } 3052 3053 IPW_DEBUG_INFO("stop master %dms\n", rc); 3054 3055 return rc; 3056 } 3057 3058 static void ipw_arc_release(struct ipw_priv *priv) 3059 { 3060 IPW_DEBUG_TRACE(">>\n"); 3061 mdelay(5); 3062 3063 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); 3064 3065 /* no one knows timing, for safety add some delay */ 3066 mdelay(5); 3067 } 3068 3069 struct fw_chunk { 3070 __le32 address; 3071 __le32 length; 3072 }; 3073 3074 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) 3075 { 3076 int rc = 0, i, addr; 3077 u8 cr = 0; 3078 __le16 *image; 3079 3080 image = (__le16 *) data; 3081 3082 IPW_DEBUG_TRACE(">>\n"); 3083 3084 rc = ipw_stop_master(priv); 3085 3086 if (rc < 0) 3087 return rc; 3088 3089 for (addr = IPW_SHARED_LOWER_BOUND; 3090 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) { 3091 ipw_write32(priv, addr, 0); 3092 } 3093 3094 /* no ucode (yet) */ 3095 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive)); 3096 /* destroy DMA queues */ 3097 /* reset sequence */ 3098 3099 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON); 3100 ipw_arc_release(priv); 3101 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF); 3102 mdelay(1); 3103 3104 /* reset PHY */ 3105 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN); 3106 mdelay(1); 3107 3108 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0); 3109 mdelay(1); 3110 3111 /* enable ucode store */ 3112 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0); 3113 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS); 3114 mdelay(1); 3115 3116 /* write ucode */ 3117 /** 3118 * @bug 3119 * Do NOT set indirect address register once and then 3120 * store data to indirect data register in the loop. 3121 * It seems very reasonable, but in this case DINO do not 3122 * accept ucode. It is essential to set address each time. 3123 */ 3124 /* load new ipw uCode */ 3125 for (i = 0; i < len / 2; i++) 3126 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE, 3127 le16_to_cpu(image[i])); 3128 3129 /* enable DINO */ 3130 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); 3131 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM); 3132 3133 /* this is where the igx / win driver deveates from the VAP driver. */ 3134 3135 /* wait for alive response */ 3136 for (i = 0; i < 100; i++) { 3137 /* poll for incoming data */ 3138 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS); 3139 if (cr & DINO_RXFIFO_DATA) 3140 break; 3141 mdelay(1); 3142 } 3143 3144 if (cr & DINO_RXFIFO_DATA) { 3145 /* alive_command_responce size is NOT multiple of 4 */ 3146 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4]; 3147 3148 for (i = 0; i < ARRAY_SIZE(response_buffer); i++) 3149 response_buffer[i] = 3150 cpu_to_le32(ipw_read_reg32(priv, 3151 IPW_BASEBAND_RX_FIFO_READ)); 3152 memcpy(&priv->dino_alive, response_buffer, 3153 sizeof(priv->dino_alive)); 3154 if (priv->dino_alive.alive_command == 1 3155 && priv->dino_alive.ucode_valid == 1) { 3156 rc = 0; 3157 IPW_DEBUG_INFO 3158 ("Microcode OK, rev. %d (0x%x) dev. 
%d (0x%x) " 3159 "of %02d/%02d/%02d %02d:%02d\n", 3160 priv->dino_alive.software_revision, 3161 priv->dino_alive.software_revision, 3162 priv->dino_alive.device_identifier, 3163 priv->dino_alive.device_identifier, 3164 priv->dino_alive.time_stamp[0], 3165 priv->dino_alive.time_stamp[1], 3166 priv->dino_alive.time_stamp[2], 3167 priv->dino_alive.time_stamp[3], 3168 priv->dino_alive.time_stamp[4]); 3169 } else { 3170 IPW_DEBUG_INFO("Microcode is not alive\n"); 3171 rc = -EINVAL; 3172 } 3173 } else { 3174 IPW_DEBUG_INFO("No alive response from DINO\n"); 3175 rc = -ETIME; 3176 } 3177 3178 /* disable DINO, otherwise for some reason 3179 firmware have problem getting alive resp. */ 3180 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); 3181 3182 return rc; 3183 } 3184 3185 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) 3186 { 3187 int ret = -1; 3188 int offset = 0; 3189 struct fw_chunk *chunk; 3190 int total_nr = 0; 3191 int i; 3192 struct dma_pool *pool; 3193 void **virts; 3194 dma_addr_t *phys; 3195 3196 IPW_DEBUG_TRACE("<< :\n"); 3197 3198 virts = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(void *), 3199 GFP_KERNEL); 3200 if (!virts) 3201 return -ENOMEM; 3202 3203 phys = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(dma_addr_t), 3204 GFP_KERNEL); 3205 if (!phys) { 3206 kfree(virts); 3207 return -ENOMEM; 3208 } 3209 pool = dma_pool_create("ipw2200", &priv->pci_dev->dev, CB_MAX_LENGTH, 0, 3210 0); 3211 if (!pool) { 3212 IPW_ERROR("dma_pool_create failed\n"); 3213 kfree(phys); 3214 kfree(virts); 3215 return -ENOMEM; 3216 } 3217 3218 /* Start the Dma */ 3219 ret = ipw_fw_dma_enable(priv); 3220 3221 /* the DMA is already ready this would be a bug. */ 3222 BUG_ON(priv->sram_desc.last_cb_index > 0); 3223 3224 do { 3225 u32 chunk_len; 3226 u8 *start; 3227 int size; 3228 int nr = 0; 3229 3230 chunk = (struct fw_chunk *)(data + offset); 3231 offset += sizeof(struct fw_chunk); 3232 chunk_len = le32_to_cpu(chunk->length); 3233 start = data + offset; 3234 3235 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH; 3236 for (i = 0; i < nr; i++) { 3237 virts[total_nr] = dma_pool_alloc(pool, GFP_KERNEL, 3238 &phys[total_nr]); 3239 if (!virts[total_nr]) { 3240 ret = -ENOMEM; 3241 goto out; 3242 } 3243 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH, 3244 CB_MAX_LENGTH); 3245 memcpy(virts[total_nr], start, size); 3246 start += size; 3247 total_nr++; 3248 /* We don't support fw chunk larger than 64*8K */ 3249 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL); 3250 } 3251 3252 /* build DMA packet and queue up for sending */ 3253 /* dma to chunk->address, the chunk->length bytes from data + 3254 * offeset*/ 3255 /* Dma loading */ 3256 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr], 3257 nr, le32_to_cpu(chunk->address), 3258 chunk_len); 3259 if (ret) { 3260 IPW_DEBUG_INFO("dmaAddBuffer Failed\n"); 3261 goto out; 3262 } 3263 3264 offset += chunk_len; 3265 } while (offset < len); 3266 3267 /* Run the DMA and wait for the answer */ 3268 ret = ipw_fw_dma_kick(priv); 3269 if (ret) { 3270 IPW_ERROR("dmaKick Failed\n"); 3271 goto out; 3272 } 3273 3274 ret = ipw_fw_dma_wait(priv); 3275 if (ret) { 3276 IPW_ERROR("dmaWaitSync Failed\n"); 3277 goto out; 3278 } 3279 out: 3280 for (i = 0; i < total_nr; i++) 3281 dma_pool_free(pool, virts[i], phys[i]); 3282 3283 dma_pool_destroy(pool); 3284 kfree(phys); 3285 kfree(virts); 3286 3287 return ret; 3288 } 3289 3290 /* stop nic */ 3291 static int ipw_stop_nic(struct ipw_priv *priv) 3292 { 3293 int rc = 0; 3294 3295 /* stop */ 3296 
ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); 3297 3298 rc = ipw_poll_bit(priv, IPW_RESET_REG, 3299 IPW_RESET_REG_MASTER_DISABLED, 500); 3300 if (rc < 0) { 3301 IPW_ERROR("wait for reg master disabled failed after 500ms\n"); 3302 return rc; 3303 } 3304 3305 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); 3306 3307 return rc; 3308 } 3309 3310 static void ipw_start_nic(struct ipw_priv *priv) 3311 { 3312 IPW_DEBUG_TRACE(">>\n"); 3313 3314 /* prvHwStartNic release ARC */ 3315 ipw_clear_bit(priv, IPW_RESET_REG, 3316 IPW_RESET_REG_MASTER_DISABLED | 3317 IPW_RESET_REG_STOP_MASTER | 3318 CBD_RESET_REG_PRINCETON_RESET); 3319 3320 /* enable power management */ 3321 ipw_set_bit(priv, IPW_GP_CNTRL_RW, 3322 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY); 3323 3324 IPW_DEBUG_TRACE("<<\n"); 3325 } 3326 3327 static int ipw_init_nic(struct ipw_priv *priv) 3328 { 3329 int rc; 3330 3331 IPW_DEBUG_TRACE(">>\n"); 3332 /* reset */ 3333 /*prvHwInitNic */ 3334 /* set "initialization complete" bit to move adapter to D0 state */ 3335 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE); 3336 3337 /* low-level PLL activation */ 3338 ipw_write32(priv, IPW_READ_INT_REGISTER, 3339 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER); 3340 3341 /* wait for clock stabilization */ 3342 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW, 3343 IPW_GP_CNTRL_BIT_CLOCK_READY, 250); 3344 if (rc < 0) 3345 IPW_DEBUG_INFO("FAILED wait for clock stablization\n"); 3346 3347 /* assert SW reset */ 3348 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET); 3349 3350 udelay(10); 3351 3352 /* set "initialization complete" bit to move adapter to D0 state */ 3353 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE); 3354 3355 IPW_DEBUG_TRACE(">>\n"); 3356 return 0; 3357 } 3358 3359 /* Call this function from process context, it will sleep in request_firmware. 3360 * Probe is an ok place to call this from. 3361 */ 3362 static int ipw_reset_nic(struct ipw_priv *priv) 3363 { 3364 int rc = 0; 3365 unsigned long flags; 3366 3367 IPW_DEBUG_TRACE(">>\n"); 3368 3369 rc = ipw_init_nic(priv); 3370 3371 spin_lock_irqsave(&priv->lock, flags); 3372 /* Clear the 'host command active' bit... 
*/ 3373 priv->status &= ~STATUS_HCMD_ACTIVE; 3374 wake_up_interruptible(&priv->wait_command_queue); 3375 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING); 3376 wake_up_interruptible(&priv->wait_state); 3377 spin_unlock_irqrestore(&priv->lock, flags); 3378 3379 IPW_DEBUG_TRACE("<<\n"); 3380 return rc; 3381 } 3382 3383 3384 struct ipw_fw { 3385 __le32 ver; 3386 __le32 boot_size; 3387 __le32 ucode_size; 3388 __le32 fw_size; 3389 u8 data[0]; 3390 }; 3391 3392 static int ipw_get_fw(struct ipw_priv *priv, 3393 const struct firmware **raw, const char *name) 3394 { 3395 struct ipw_fw *fw; 3396 int rc; 3397 3398 /* ask firmware_class module to get the boot firmware off disk */ 3399 rc = request_firmware(raw, name, &priv->pci_dev->dev); 3400 if (rc < 0) { 3401 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc); 3402 return rc; 3403 } 3404 3405 if ((*raw)->size < sizeof(*fw)) { 3406 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size); 3407 return -EINVAL; 3408 } 3409 3410 fw = (void *)(*raw)->data; 3411 3412 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) + 3413 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) { 3414 IPW_ERROR("%s is too small or corrupt (%zd)\n", 3415 name, (*raw)->size); 3416 return -EINVAL; 3417 } 3418 3419 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n", 3420 name, 3421 le32_to_cpu(fw->ver) >> 16, 3422 le32_to_cpu(fw->ver) & 0xff, 3423 (*raw)->size - sizeof(*fw)); 3424 return 0; 3425 } 3426 3427 #define IPW_RX_BUF_SIZE (3000) 3428 3429 static void ipw_rx_queue_reset(struct ipw_priv *priv, 3430 struct ipw_rx_queue *rxq) 3431 { 3432 unsigned long flags; 3433 int i; 3434 3435 spin_lock_irqsave(&rxq->lock, flags); 3436 3437 INIT_LIST_HEAD(&rxq->rx_free); 3438 INIT_LIST_HEAD(&rxq->rx_used); 3439 3440 /* Fill the rx_used queue with _all_ of the Rx buffers */ 3441 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 3442 /* In the reset function, these buffers may have been allocated 3443 * to an SKB, so we need to unmap and free potential storage */ 3444 if (rxq->pool[i].skb != NULL) { 3445 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 3446 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 3447 dev_kfree_skb(rxq->pool[i].skb); 3448 rxq->pool[i].skb = NULL; 3449 } 3450 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 3451 } 3452 3453 /* Set us so that we have processed and used all buffers, but have 3454 * not restocked the Rx queue with fresh buffers */ 3455 rxq->read = rxq->write = 0; 3456 rxq->free_count = 0; 3457 spin_unlock_irqrestore(&rxq->lock, flags); 3458 } 3459 3460 #ifdef CONFIG_PM 3461 static int fw_loaded = 0; 3462 static const struct firmware *raw = NULL; 3463 3464 static void free_firmware(void) 3465 { 3466 if (fw_loaded) { 3467 release_firmware(raw); 3468 raw = NULL; 3469 fw_loaded = 0; 3470 } 3471 } 3472 #else 3473 #define free_firmware() do {} while (0) 3474 #endif 3475 3476 static int ipw_load(struct ipw_priv *priv) 3477 { 3478 #ifndef CONFIG_PM 3479 const struct firmware *raw = NULL; 3480 #endif 3481 struct ipw_fw *fw; 3482 u8 *boot_img, *ucode_img, *fw_img; 3483 u8 *name = NULL; 3484 int rc = 0, retries = 3; 3485 3486 switch (priv->ieee->iw_mode) { 3487 case IW_MODE_ADHOC: 3488 name = "ipw2200-ibss.fw"; 3489 break; 3490 #ifdef CONFIG_IPW2200_MONITOR 3491 case IW_MODE_MONITOR: 3492 name = "ipw2200-sniffer.fw"; 3493 break; 3494 #endif 3495 case IW_MODE_INFRA: 3496 name = "ipw2200-bss.fw"; 3497 break; 3498 } 3499 3500 if (!name) { 3501 rc = -EINVAL; 3502 goto error; 3503 } 3504 3505 #ifdef CONFIG_PM 3506 if 
(!fw_loaded) { 3507 #endif 3508 rc = ipw_get_fw(priv, &raw, name); 3509 if (rc < 0) 3510 goto error; 3511 #ifdef CONFIG_PM 3512 } 3513 #endif 3514 3515 fw = (void *)raw->data; 3516 boot_img = &fw->data[0]; 3517 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)]; 3518 fw_img = &fw->data[le32_to_cpu(fw->boot_size) + 3519 le32_to_cpu(fw->ucode_size)]; 3520 3521 if (!priv->rxq) 3522 priv->rxq = ipw_rx_queue_alloc(priv); 3523 else 3524 ipw_rx_queue_reset(priv, priv->rxq); 3525 if (!priv->rxq) { 3526 IPW_ERROR("Unable to initialize Rx queue\n"); 3527 rc = -ENOMEM; 3528 goto error; 3529 } 3530 3531 retry: 3532 /* Ensure interrupts are disabled */ 3533 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 3534 priv->status &= ~STATUS_INT_ENABLED; 3535 3536 /* ack pending interrupts */ 3537 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3538 3539 ipw_stop_nic(priv); 3540 3541 rc = ipw_reset_nic(priv); 3542 if (rc < 0) { 3543 IPW_ERROR("Unable to reset NIC\n"); 3544 goto error; 3545 } 3546 3547 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND, 3548 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); 3549 3550 /* DMA the initial boot firmware into the device */ 3551 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size)); 3552 if (rc < 0) { 3553 IPW_ERROR("Unable to load boot firmware: %d\n", rc); 3554 goto error; 3555 } 3556 3557 /* kick start the device */ 3558 ipw_start_nic(priv); 3559 3560 /* wait for the device to finish its initial startup sequence */ 3561 rc = ipw_poll_bit(priv, IPW_INTA_RW, 3562 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); 3563 if (rc < 0) { 3564 IPW_ERROR("device failed to boot initial fw image\n"); 3565 goto error; 3566 } 3567 IPW_DEBUG_INFO("initial device response after %dms\n", rc); 3568 3569 /* ack fw init done interrupt */ 3570 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); 3571 3572 /* DMA the ucode into the device */ 3573 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size)); 3574 if (rc < 0) { 3575 IPW_ERROR("Unable to load ucode: %d\n", rc); 3576 goto error; 3577 } 3578 3579 /* stop nic */ 3580 ipw_stop_nic(priv); 3581 3582 /* DMA bss firmware into the device */ 3583 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size)); 3584 if (rc < 0) { 3585 IPW_ERROR("Unable to load firmware: %d\n", rc); 3586 goto error; 3587 } 3588 #ifdef CONFIG_PM 3589 fw_loaded = 1; 3590 #endif 3591 3592 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); 3593 3594 rc = ipw_queue_reset(priv); 3595 if (rc < 0) { 3596 IPW_ERROR("Unable to initialize queues\n"); 3597 goto error; 3598 } 3599 3600 /* Ensure interrupts are disabled */ 3601 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 3602 /* ack pending interrupts */ 3603 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3604 3605 /* kick start the device */ 3606 ipw_start_nic(priv); 3607 3608 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) { 3609 if (retries > 0) { 3610 IPW_WARNING("Parity error. 
Retrying init.\n"); 3611 retries--; 3612 goto retry; 3613 } 3614 3615 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n"); 3616 rc = -EIO; 3617 goto error; 3618 } 3619 3620 /* wait for the device */ 3621 rc = ipw_poll_bit(priv, IPW_INTA_RW, 3622 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); 3623 if (rc < 0) { 3624 IPW_ERROR("device failed to start within 500ms\n"); 3625 goto error; 3626 } 3627 IPW_DEBUG_INFO("device response after %dms\n", rc); 3628 3629 /* ack fw init done interrupt */ 3630 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); 3631 3632 /* read eeprom data */ 3633 priv->eeprom_delay = 1; 3634 ipw_read_eeprom(priv); 3635 /* initialize the eeprom region of sram */ 3636 ipw_eeprom_init_sram(priv); 3637 3638 /* enable interrupts */ 3639 ipw_enable_interrupts(priv); 3640 3641 /* Ensure our queue has valid packets */ 3642 ipw_rx_queue_replenish(priv); 3643 3644 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read); 3645 3646 /* ack pending interrupts */ 3647 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3648 3649 #ifndef CONFIG_PM 3650 release_firmware(raw); 3651 #endif 3652 return 0; 3653 3654 error: 3655 if (priv->rxq) { 3656 ipw_rx_queue_free(priv, priv->rxq); 3657 priv->rxq = NULL; 3658 } 3659 ipw_tx_queue_free(priv); 3660 release_firmware(raw); 3661 #ifdef CONFIG_PM 3662 fw_loaded = 0; 3663 raw = NULL; 3664 #endif 3665 3666 return rc; 3667 } 3668 3669 /** 3670 * DMA services 3671 * 3672 * Theory of operation 3673 * 3674 * A queue is a circular buffer with 'Read' and 'Write' pointers. 3675 * Two empty entries are always kept in the buffer to protect against overflow. 3676 * 3677 * For a Tx queue, there are low mark and high mark limits. If, after queuing 3678 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped. 3679 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes > high mark, 3680 * the Tx queue is resumed. 3681 * 3682 * The IPW operates with six queues: one receive queue in the device's 3683 * sram, one transmit queue for sending commands to the device firmware, 3684 * and four transmit queues for data. 3685 * 3686 * The four transmit queues allow for performing quality of service (qos) 3687 * transmissions as per the 802.11 protocol. Currently Linux does not 3688 * provide a mechanism to the user for utilizing prioritized queues, so 3689 * we only utilize the first data transmit queue (queue1). 3690 */ 3691 3692 /** 3693 * Driver allocates buffers of this size for Rx 3694 */ 3695 3696 /** 3697 * ipw_rx_queue_space - Return number of free slots available in queue. 3698 */ 3699 static int ipw_rx_queue_space(const struct ipw_rx_queue *q) 3700 { 3701 int s = q->read - q->write; 3702 if (s <= 0) 3703 s += RX_QUEUE_SIZE; 3704 /* keep some buffer to not confuse full and empty queue */ 3705 s -= 2; 3706 if (s < 0) 3707 s = 0; 3708 return s; 3709 } 3710 3711 static inline int ipw_tx_queue_space(const struct clx2_queue *q) 3712 { 3713 int s = q->last_used - q->first_empty; 3714 if (s <= 0) 3715 s += q->n_bd; 3716 s -= 2; /* keep some reserve to not confuse empty and full situations */ 3717 if (s < 0) 3718 s = 0; 3719 return s; 3720 } 3721 3722 static inline int ipw_queue_inc_wrap(int index, int n_bd) 3723 { 3724 return (++index == n_bd) ? 0 : index; 3725 } 3726 3727 /** 3728 * Initialize common DMA queue structure 3729 * 3730 * @param q queue to init 3731 * @param count Number of BD's to allocate.
Should be power of 2 3732 * @param read_register Address for 'read' register 3733 * (not offset within BAR, full address) 3734 * @param write_register Address for 'write' register 3735 * (not offset within BAR, full address) 3736 * @param base_register Address for 'base' register 3737 * (not offset within BAR, full address) 3738 * @param size Address for 'size' register 3739 * (not offset within BAR, full address) 3740 */ 3741 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q, 3742 int count, u32 read, u32 write, u32 base, u32 size) 3743 { 3744 q->n_bd = count; 3745 3746 q->low_mark = q->n_bd / 4; 3747 if (q->low_mark < 4) 3748 q->low_mark = 4; 3749 3750 q->high_mark = q->n_bd / 8; 3751 if (q->high_mark < 2) 3752 q->high_mark = 2; 3753 3754 q->first_empty = q->last_used = 0; 3755 q->reg_r = read; 3756 q->reg_w = write; 3757 3758 ipw_write32(priv, base, q->dma_addr); 3759 ipw_write32(priv, size, count); 3760 ipw_write32(priv, read, 0); 3761 ipw_write32(priv, write, 0); 3762 3763 _ipw_read32(priv, 0x90); 3764 } 3765 3766 static int ipw_queue_tx_init(struct ipw_priv *priv, 3767 struct clx2_tx_queue *q, 3768 int count, u32 read, u32 write, u32 base, u32 size) 3769 { 3770 struct pci_dev *dev = priv->pci_dev; 3771 3772 q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL); 3773 if (!q->txb) { 3774 IPW_ERROR("vmalloc for auxiliary BD structures failed\n"); 3775 return -ENOMEM; 3776 } 3777 3778 q->bd = 3779 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr); 3780 if (!q->bd) { 3781 IPW_ERROR("pci_alloc_consistent(%zd) failed\n", 3782 sizeof(q->bd[0]) * count); 3783 kfree(q->txb); 3784 q->txb = NULL; 3785 return -ENOMEM; 3786 } 3787 3788 ipw_queue_init(priv, &q->q, count, read, write, base, size); 3789 return 0; 3790 } 3791 3792 /** 3793 * Free one TFD, those at index [txq->q.last_used]. 3794 * Do NOT advance any indexes 3795 * 3796 * @param dev 3797 * @param txq 3798 */ 3799 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv, 3800 struct clx2_tx_queue *txq) 3801 { 3802 struct tfd_frame *bd = &txq->bd[txq->q.last_used]; 3803 struct pci_dev *dev = priv->pci_dev; 3804 int i; 3805 3806 /* classify bd */ 3807 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE) 3808 /* nothing to cleanup after for host commands */ 3809 return; 3810 3811 /* sanity check */ 3812 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) { 3813 IPW_ERROR("Too many chunks: %i\n", 3814 le32_to_cpu(bd->u.data.num_chunks)); 3815 /** @todo issue fatal error, it is quite serious situation */ 3816 return; 3817 } 3818 3819 /* unmap chunks if any */ 3820 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) { 3821 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]), 3822 le16_to_cpu(bd->u.data.chunk_len[i]), 3823 PCI_DMA_TODEVICE); 3824 if (txq->txb[txq->q.last_used]) { 3825 libipw_txb_free(txq->txb[txq->q.last_used]); 3826 txq->txb[txq->q.last_used] = NULL; 3827 } 3828 } 3829 } 3830 3831 /** 3832 * Deallocate DMA queue. 3833 * 3834 * Empty queue by removing and destroying all BD's. 3835 * Free all buffers. 
3836 * 3837 * @param dev 3838 * @param q 3839 */ 3840 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq) 3841 { 3842 struct clx2_queue *q = &txq->q; 3843 struct pci_dev *dev = priv->pci_dev; 3844 3845 if (q->n_bd == 0) 3846 return; 3847 3848 /* first, empty all BD's */ 3849 for (; q->first_empty != q->last_used; 3850 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { 3851 ipw_queue_tx_free_tfd(priv, txq); 3852 } 3853 3854 /* free buffers belonging to queue itself */ 3855 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd, 3856 q->dma_addr); 3857 kfree(txq->txb); 3858 3859 /* 0 fill whole structure */ 3860 memset(txq, 0, sizeof(*txq)); 3861 } 3862 3863 /** 3864 * Destroy all DMA queues and structures 3865 * 3866 * @param priv 3867 */ 3868 static void ipw_tx_queue_free(struct ipw_priv *priv) 3869 { 3870 /* Tx CMD queue */ 3871 ipw_queue_tx_free(priv, &priv->txq_cmd); 3872 3873 /* Tx queues */ 3874 ipw_queue_tx_free(priv, &priv->txq[0]); 3875 ipw_queue_tx_free(priv, &priv->txq[1]); 3876 ipw_queue_tx_free(priv, &priv->txq[2]); 3877 ipw_queue_tx_free(priv, &priv->txq[3]); 3878 } 3879 3880 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid) 3881 { 3882 /* First 3 bytes are manufacturer */ 3883 bssid[0] = priv->mac_addr[0]; 3884 bssid[1] = priv->mac_addr[1]; 3885 bssid[2] = priv->mac_addr[2]; 3886 3887 /* Last bytes are random */ 3888 get_random_bytes(&bssid[3], ETH_ALEN - 3); 3889 3890 bssid[0] &= 0xfe; /* clear multicast bit */ 3891 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */ 3892 } 3893 3894 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) 3895 { 3896 struct ipw_station_entry entry; 3897 int i; 3898 3899 for (i = 0; i < priv->num_stations; i++) { 3900 if (ether_addr_equal(priv->stations[i], bssid)) { 3901 /* Another node is active in network */ 3902 priv->missed_adhoc_beacons = 0; 3903 if (!(priv->config & CFG_STATIC_CHANNEL)) 3904 /* when other nodes drop out, we drop out */ 3905 priv->config &= ~CFG_ADHOC_PERSIST; 3906 3907 return i; 3908 } 3909 } 3910 3911 if (i == MAX_STATIONS) 3912 return IPW_INVALID_STATION; 3913 3914 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid); 3915 3916 entry.reserved = 0; 3917 entry.support_mode = 0; 3918 memcpy(entry.mac_addr, bssid, ETH_ALEN); 3919 memcpy(priv->stations[i], bssid, ETH_ALEN); 3920 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry), 3921 &entry, sizeof(entry)); 3922 priv->num_stations++; 3923 3924 return i; 3925 } 3926 3927 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid) 3928 { 3929 int i; 3930 3931 for (i = 0; i < priv->num_stations; i++) 3932 if (ether_addr_equal(priv->stations[i], bssid)) 3933 return i; 3934 3935 return IPW_INVALID_STATION; 3936 } 3937 3938 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) 3939 { 3940 int err; 3941 3942 if (priv->status & STATUS_ASSOCIATING) { 3943 IPW_DEBUG_ASSOC("Disassociating while associating.\n"); 3944 schedule_work(&priv->disassociate); 3945 return; 3946 } 3947 3948 if (!(priv->status & STATUS_ASSOCIATED)) { 3949 IPW_DEBUG_ASSOC("Disassociating while not associated.\n"); 3950 return; 3951 } 3952 3953 IPW_DEBUG_ASSOC("Disassociation attempt from %pM " 3954 "on channel %d.\n", 3955 priv->assoc_request.bssid, 3956 priv->assoc_request.channel); 3957 3958 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED); 3959 priv->status |= STATUS_DISASSOCIATING; 3960 3961 if (quiet) 3962 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET; 3963 else 3964 
priv->assoc_request.assoc_type = HC_DISASSOCIATE; 3965 3966 err = ipw_send_associate(priv, &priv->assoc_request); 3967 if (err) { 3968 IPW_DEBUG_HC("Attempt to send [dis]associate command " 3969 "failed.\n"); 3970 return; 3971 } 3972 3973 } 3974 3975 static int ipw_disassociate(void *data) 3976 { 3977 struct ipw_priv *priv = data; 3978 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) 3979 return 0; 3980 ipw_send_disassociate(data, 0); 3981 netif_carrier_off(priv->net_dev); 3982 return 1; 3983 } 3984 3985 static void ipw_bg_disassociate(struct work_struct *work) 3986 { 3987 struct ipw_priv *priv = 3988 container_of(work, struct ipw_priv, disassociate); 3989 mutex_lock(&priv->mutex); 3990 ipw_disassociate(priv); 3991 mutex_unlock(&priv->mutex); 3992 } 3993 3994 static void ipw_system_config(struct work_struct *work) 3995 { 3996 struct ipw_priv *priv = 3997 container_of(work, struct ipw_priv, system_config); 3998 3999 #ifdef CONFIG_IPW2200_PROMISCUOUS 4000 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { 4001 priv->sys_config.accept_all_data_frames = 1; 4002 priv->sys_config.accept_non_directed_frames = 1; 4003 priv->sys_config.accept_all_mgmt_bcpr = 1; 4004 priv->sys_config.accept_all_mgmt_frames = 1; 4005 } 4006 #endif 4007 4008 ipw_send_system_config(priv); 4009 } 4010 4011 struct ipw_status_code { 4012 u16 status; 4013 const char *reason; 4014 }; 4015 4016 static const struct ipw_status_code ipw_status_codes[] = { 4017 {0x00, "Successful"}, 4018 {0x01, "Unspecified failure"}, 4019 {0x0A, "Cannot support all requested capabilities in the " 4020 "Capability information field"}, 4021 {0x0B, "Reassociation denied due to inability to confirm that " 4022 "association exists"}, 4023 {0x0C, "Association denied due to reason outside the scope of this " 4024 "standard"}, 4025 {0x0D, 4026 "Responding station does not support the specified authentication " 4027 "algorithm"}, 4028 {0x0E, 4029 "Received an Authentication frame with authentication sequence " 4030 "transaction sequence number out of expected sequence"}, 4031 {0x0F, "Authentication rejected because of challenge failure"}, 4032 {0x10, "Authentication rejected due to timeout waiting for next " 4033 "frame in sequence"}, 4034 {0x11, "Association denied because AP is unable to handle additional " 4035 "associated stations"}, 4036 {0x12, 4037 "Association denied due to requesting station not supporting all " 4038 "of the datarates in the BSSBasicServiceSet Parameter"}, 4039 {0x13, 4040 "Association denied due to requesting station not supporting " 4041 "short preamble operation"}, 4042 {0x14, 4043 "Association denied due to requesting station not supporting " 4044 "PBCC encoding"}, 4045 {0x15, 4046 "Association denied due to requesting station not supporting " 4047 "channel agility"}, 4048 {0x19, 4049 "Association denied due to requesting station not supporting " 4050 "short slot operation"}, 4051 {0x1A, 4052 "Association denied due to requesting station not supporting " 4053 "DSSS-OFDM operation"}, 4054 {0x28, "Invalid Information Element"}, 4055 {0x29, "Group Cipher is not valid"}, 4056 {0x2A, "Pairwise Cipher is not valid"}, 4057 {0x2B, "AKMP is not valid"}, 4058 {0x2C, "Unsupported RSN IE version"}, 4059 {0x2D, "Invalid RSN IE Capabilities"}, 4060 {0x2E, "Cipher suite is rejected per security policy"}, 4061 }; 4062 4063 static const char *ipw_get_status_code(u16 status) 4064 { 4065 int i; 4066 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++) 4067 if (ipw_status_codes[i].status == (status & 0xff)) 4068 return 
ipw_status_codes[i].reason; 4069 return "Unknown status value."; 4070 } 4071 4072 static inline void average_init(struct average *avg) 4073 { 4074 memset(avg, 0, sizeof(*avg)); 4075 } 4076 4077 #define DEPTH_RSSI 8 4078 #define DEPTH_NOISE 16 4079 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth) 4080 { 4081 return ((depth-1)*prev_avg + val)/depth; 4082 } 4083 4084 static void average_add(struct average *avg, s16 val) 4085 { 4086 avg->sum -= avg->entries[avg->pos]; 4087 avg->sum += val; 4088 avg->entries[avg->pos++] = val; 4089 if (unlikely(avg->pos == AVG_ENTRIES)) { 4090 avg->init = 1; 4091 avg->pos = 0; 4092 } 4093 } 4094 4095 static s16 average_value(struct average *avg) 4096 { 4097 if (!unlikely(avg->init)) { 4098 if (avg->pos) 4099 return avg->sum / avg->pos; 4100 return 0; 4101 } 4102 4103 return avg->sum / AVG_ENTRIES; 4104 } 4105 4106 static void ipw_reset_stats(struct ipw_priv *priv) 4107 { 4108 u32 len = sizeof(u32); 4109 4110 priv->quality = 0; 4111 4112 average_init(&priv->average_missed_beacons); 4113 priv->exp_avg_rssi = -60; 4114 priv->exp_avg_noise = -85 + 0x100; 4115 4116 priv->last_rate = 0; 4117 priv->last_missed_beacons = 0; 4118 priv->last_rx_packets = 0; 4119 priv->last_tx_packets = 0; 4120 priv->last_tx_failures = 0; 4121 4122 /* Firmware managed, reset only when NIC is restarted, so we have to 4123 * normalize on the current value */ 4124 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, 4125 &priv->last_rx_err, &len); 4126 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, 4127 &priv->last_tx_failures, &len); 4128 4129 /* Driver managed, reset with each association */ 4130 priv->missed_adhoc_beacons = 0; 4131 priv->missed_beacons = 0; 4132 priv->tx_packets = 0; 4133 priv->rx_packets = 0; 4134 4135 } 4136 4137 static u32 ipw_get_max_rate(struct ipw_priv *priv) 4138 { 4139 u32 i = 0x80000000; 4140 u32 mask = priv->rates_mask; 4141 /* If currently associated in B mode, restrict the maximum 4142 * rate match to B rates */ 4143 if (priv->assoc_request.ieee_mode == IPW_B_MODE) 4144 mask &= LIBIPW_CCK_RATES_MASK; 4145 4146 /* TODO: Verify that the rate is supported by the current rates 4147 * list. 
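 * As written, the loop below simply picks the highest bit set in the
 * (possibly CCK-restricted) rates_mask, so it may report a rate that the
 * currently associated network never advertised.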
*/ 4148 4149 while (i && !(mask & i)) 4150 i >>= 1; 4151 switch (i) { 4152 case LIBIPW_CCK_RATE_1MB_MASK: 4153 return 1000000; 4154 case LIBIPW_CCK_RATE_2MB_MASK: 4155 return 2000000; 4156 case LIBIPW_CCK_RATE_5MB_MASK: 4157 return 5500000; 4158 case LIBIPW_OFDM_RATE_6MB_MASK: 4159 return 6000000; 4160 case LIBIPW_OFDM_RATE_9MB_MASK: 4161 return 9000000; 4162 case LIBIPW_CCK_RATE_11MB_MASK: 4163 return 11000000; 4164 case LIBIPW_OFDM_RATE_12MB_MASK: 4165 return 12000000; 4166 case LIBIPW_OFDM_RATE_18MB_MASK: 4167 return 18000000; 4168 case LIBIPW_OFDM_RATE_24MB_MASK: 4169 return 24000000; 4170 case LIBIPW_OFDM_RATE_36MB_MASK: 4171 return 36000000; 4172 case LIBIPW_OFDM_RATE_48MB_MASK: 4173 return 48000000; 4174 case LIBIPW_OFDM_RATE_54MB_MASK: 4175 return 54000000; 4176 } 4177 4178 if (priv->ieee->mode == IEEE_B) 4179 return 11000000; 4180 else 4181 return 54000000; 4182 } 4183 4184 static u32 ipw_get_current_rate(struct ipw_priv *priv) 4185 { 4186 u32 rate, len = sizeof(rate); 4187 int err; 4188 4189 if (!(priv->status & STATUS_ASSOCIATED)) 4190 return 0; 4191 4192 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) { 4193 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate, 4194 &len); 4195 if (err) { 4196 IPW_DEBUG_INFO("failed querying ordinals.\n"); 4197 return 0; 4198 } 4199 } else 4200 return ipw_get_max_rate(priv); 4201 4202 switch (rate) { 4203 case IPW_TX_RATE_1MB: 4204 return 1000000; 4205 case IPW_TX_RATE_2MB: 4206 return 2000000; 4207 case IPW_TX_RATE_5MB: 4208 return 5500000; 4209 case IPW_TX_RATE_6MB: 4210 return 6000000; 4211 case IPW_TX_RATE_9MB: 4212 return 9000000; 4213 case IPW_TX_RATE_11MB: 4214 return 11000000; 4215 case IPW_TX_RATE_12MB: 4216 return 12000000; 4217 case IPW_TX_RATE_18MB: 4218 return 18000000; 4219 case IPW_TX_RATE_24MB: 4220 return 24000000; 4221 case IPW_TX_RATE_36MB: 4222 return 36000000; 4223 case IPW_TX_RATE_48MB: 4224 return 48000000; 4225 case IPW_TX_RATE_54MB: 4226 return 54000000; 4227 } 4228 4229 return 0; 4230 } 4231 4232 #define IPW_STATS_INTERVAL (2 * HZ) 4233 static void ipw_gather_stats(struct ipw_priv *priv) 4234 { 4235 u32 rx_err, rx_err_delta, rx_packets_delta; 4236 u32 tx_failures, tx_failures_delta, tx_packets_delta; 4237 u32 missed_beacons_percent, missed_beacons_delta; 4238 u32 quality = 0; 4239 u32 len = sizeof(u32); 4240 s16 rssi; 4241 u32 beacon_quality, signal_quality, tx_quality, rx_quality, 4242 rate_quality; 4243 u32 max_rate; 4244 4245 if (!(priv->status & STATUS_ASSOCIATED)) { 4246 priv->quality = 0; 4247 return; 4248 } 4249 4250 /* Update the statistics */ 4251 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS, 4252 &priv->missed_beacons, &len); 4253 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons; 4254 priv->last_missed_beacons = priv->missed_beacons; 4255 if (priv->assoc_request.beacon_interval) { 4256 missed_beacons_percent = missed_beacons_delta * 4257 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) / 4258 (IPW_STATS_INTERVAL * 10); 4259 } else { 4260 missed_beacons_percent = 0; 4261 } 4262 average_add(&priv->average_missed_beacons, missed_beacons_percent); 4263 4264 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len); 4265 rx_err_delta = rx_err - priv->last_rx_err; 4266 priv->last_rx_err = rx_err; 4267 4268 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len); 4269 tx_failures_delta = tx_failures - priv->last_tx_failures; 4270 priv->last_tx_failures = tx_failures; 4271 4272 rx_packets_delta = priv->rx_packets - priv->last_rx_packets; 4273 
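	/* Snapshot the driver-managed packet counters so the next pass
	 * computes deltas covering only the most recent IPW_STATS_INTERVAL. */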
priv->last_rx_packets = priv->rx_packets; 4274 4275 tx_packets_delta = priv->tx_packets - priv->last_tx_packets; 4276 priv->last_tx_packets = priv->tx_packets; 4277 4278 /* Calculate quality based on the following: 4279 * 4280 * Missed beacon: 100% = 0, 0% = 70% missed 4281 * Rate: 60% = 1Mbs, 100% = Max 4282 * Rx and Tx errors represent a straight % of total Rx/Tx 4283 * RSSI: 100% = > -50, 0% = < -80 4284 * Rx errors: 100% = 0, 0% = 50% missed 4285 * 4286 * The lowest computed quality is used. 4287 * 4288 */ 4289 #define BEACON_THRESHOLD 5 4290 beacon_quality = 100 - missed_beacons_percent; 4291 if (beacon_quality < BEACON_THRESHOLD) 4292 beacon_quality = 0; 4293 else 4294 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 / 4295 (100 - BEACON_THRESHOLD); 4296 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n", 4297 beacon_quality, missed_beacons_percent); 4298 4299 priv->last_rate = ipw_get_current_rate(priv); 4300 max_rate = ipw_get_max_rate(priv); 4301 rate_quality = priv->last_rate * 40 / max_rate + 60; 4302 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n", 4303 rate_quality, priv->last_rate / 1000000); 4304 4305 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta) 4306 rx_quality = 100 - (rx_err_delta * 100) / 4307 (rx_packets_delta + rx_err_delta); 4308 else 4309 rx_quality = 100; 4310 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n", 4311 rx_quality, rx_err_delta, rx_packets_delta); 4312 4313 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta) 4314 tx_quality = 100 - (tx_failures_delta * 100) / 4315 (tx_packets_delta + tx_failures_delta); 4316 else 4317 tx_quality = 100; 4318 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n", 4319 tx_quality, tx_failures_delta, tx_packets_delta); 4320 4321 rssi = priv->exp_avg_rssi; 4322 signal_quality = 4323 (100 * 4324 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * 4325 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) - 4326 (priv->ieee->perfect_rssi - rssi) * 4327 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) + 4328 62 * (priv->ieee->perfect_rssi - rssi))) / 4329 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * 4330 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi)); 4331 if (signal_quality > 100) 4332 signal_quality = 100; 4333 else if (signal_quality < 1) 4334 signal_quality = 0; 4335 4336 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n", 4337 signal_quality, rssi); 4338 4339 quality = min(rx_quality, signal_quality); 4340 quality = min(tx_quality, quality); 4341 quality = min(rate_quality, quality); 4342 quality = min(beacon_quality, quality); 4343 if (quality == beacon_quality) 4344 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n", 4345 quality); 4346 if (quality == rate_quality) 4347 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n", 4348 quality); 4349 if (quality == tx_quality) 4350 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n", 4351 quality); 4352 if (quality == rx_quality) 4353 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n", 4354 quality); 4355 if (quality == signal_quality) 4356 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n", 4357 quality); 4358 4359 priv->quality = quality; 4360 4361 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL); 4362 } 4363 4364 static void ipw_bg_gather_stats(struct work_struct *work) 4365 { 4366 struct ipw_priv *priv = 4367 container_of(work, struct ipw_priv, gather_stats.work); 4368 mutex_lock(&priv->mutex); 4369 ipw_gather_stats(priv); 4370 
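	/* ipw_gather_stats() re-arms its own delayed work while associated,
	 * so this worker only needs to run a single pass under the mutex. */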
mutex_unlock(&priv->mutex); 4371 } 4372 4373 /* Missed beacon behavior: 4374 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam. 4375 * roaming_threshold -> disassociate_threshold, scan and roam for better signal. 4376 * Above disassociate threshold, give up and stop scanning. 4377 * Roaming is disabled if disassociate_threshold <= roaming_threshold */ 4378 static void ipw_handle_missed_beacon(struct ipw_priv *priv, 4379 int missed_count) 4380 { 4381 priv->notif_missed_beacons = missed_count; 4382 4383 if (missed_count > priv->disassociate_threshold && 4384 priv->status & STATUS_ASSOCIATED) { 4385 /* If associated and we've hit the missed 4386 * beacon threshold, disassociate, turn 4387 * off roaming, and abort any active scans */ 4388 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 4389 IPW_DL_STATE | IPW_DL_ASSOC, 4390 "Missed beacon: %d - disassociate\n", missed_count); 4391 priv->status &= ~STATUS_ROAMING; 4392 if (priv->status & STATUS_SCANNING) { 4393 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 4394 IPW_DL_STATE, 4395 "Aborting scan with missed beacon.\n"); 4396 schedule_work(&priv->abort_scan); 4397 } 4398 4399 schedule_work(&priv->disassociate); 4400 return; 4401 } 4402 4403 if (priv->status & STATUS_ROAMING) { 4404 /* If we are currently roaming, then just 4405 * print a debug statement... */ 4406 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4407 "Missed beacon: %d - roam in progress\n", 4408 missed_count); 4409 return; 4410 } 4411 4412 if (roaming && 4413 (missed_count > priv->roaming_threshold && 4414 missed_count <= priv->disassociate_threshold)) { 4415 /* If we are not already roaming, set the ROAM 4416 * bit in the status and kick off a scan. 4417 * This can happen several times before we reach 4418 * disassociate_threshold. */ 4419 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4420 "Missed beacon: %d - initiate " 4421 "roaming\n", missed_count); 4422 if (!(priv->status & STATUS_ROAMING)) { 4423 priv->status |= STATUS_ROAMING; 4424 if (!(priv->status & STATUS_SCANNING)) 4425 schedule_delayed_work(&priv->request_scan, 0); 4426 } 4427 return; 4428 } 4429 4430 if (priv->status & STATUS_SCANNING && 4431 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) { 4432 /* Stop scan to keep fw from getting 4433 * stuck (only if we aren't roaming -- 4434 * otherwise we'll never scan more than 2 or 3 4435 * channels..) */ 4436 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE, 4437 "Aborting scan with missed beacon.\n"); 4438 schedule_work(&priv->abort_scan); 4439 } 4440 4441 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); 4442 } 4443 4444 static void ipw_scan_event(struct work_struct *work) 4445 { 4446 union iwreq_data wrqu; 4447 4448 struct ipw_priv *priv = 4449 container_of(work, struct ipw_priv, scan_event.work); 4450 4451 wrqu.data.length = 0; 4452 wrqu.data.flags = 0; 4453 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); 4454 } 4455 4456 static void handle_scan_event(struct ipw_priv *priv) 4457 { 4458 /* Only userspace-requested scan completion events go out immediately */ 4459 if (!priv->user_requested_scan) { 4460 schedule_delayed_work(&priv->scan_event, 4461 round_jiffies_relative(msecs_to_jiffies(4000))); 4462 } else { 4463 priv->user_requested_scan = 0; 4464 mod_delayed_work(system_wq, &priv->scan_event, 0); 4465 } 4466 } 4467 4468 /** 4469 * Handle host notification packet. 
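 * Dispatches on notif->subtype; most of the handlers validate the payload
 * size against the expected notification structure before using it.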
4470 * Called from interrupt routine 4471 */ 4472 static void ipw_rx_notification(struct ipw_priv *priv, 4473 struct ipw_rx_notification *notif) 4474 { 4475 u16 size = le16_to_cpu(notif->size); 4476 4477 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size); 4478 4479 switch (notif->subtype) { 4480 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{ 4481 struct notif_association *assoc = ¬if->u.assoc; 4482 4483 switch (assoc->state) { 4484 case CMAS_ASSOCIATED:{ 4485 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4486 IPW_DL_ASSOC, 4487 "associated: '%*pE' %pM\n", 4488 priv->essid_len, priv->essid, 4489 priv->bssid); 4490 4491 switch (priv->ieee->iw_mode) { 4492 case IW_MODE_INFRA: 4493 memcpy(priv->ieee->bssid, 4494 priv->bssid, ETH_ALEN); 4495 break; 4496 4497 case IW_MODE_ADHOC: 4498 memcpy(priv->ieee->bssid, 4499 priv->bssid, ETH_ALEN); 4500 4501 /* clear out the station table */ 4502 priv->num_stations = 0; 4503 4504 IPW_DEBUG_ASSOC 4505 ("queueing adhoc check\n"); 4506 schedule_delayed_work( 4507 &priv->adhoc_check, 4508 le16_to_cpu(priv-> 4509 assoc_request. 4510 beacon_interval)); 4511 break; 4512 } 4513 4514 priv->status &= ~STATUS_ASSOCIATING; 4515 priv->status |= STATUS_ASSOCIATED; 4516 schedule_work(&priv->system_config); 4517 4518 #ifdef CONFIG_IPW2200_QOS 4519 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ 4520 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control)) 4521 if ((priv->status & STATUS_AUTH) && 4522 (IPW_GET_PACKET_STYPE(¬if->u.raw) 4523 == IEEE80211_STYPE_ASSOC_RESP)) { 4524 if ((sizeof 4525 (struct 4526 libipw_assoc_response) 4527 <= size) 4528 && (size <= 2314)) { 4529 struct 4530 libipw_rx_stats 4531 stats = { 4532 .len = size - 1, 4533 }; 4534 4535 IPW_DEBUG_QOS 4536 ("QoS Associate " 4537 "size %d\n", size); 4538 libipw_rx_mgt(priv-> 4539 ieee, 4540 (struct 4541 libipw_hdr_4addr 4542 *) 4543 ¬if->u.raw, &stats); 4544 } 4545 } 4546 #endif 4547 4548 schedule_work(&priv->link_up); 4549 4550 break; 4551 } 4552 4553 case CMAS_AUTHENTICATED:{ 4554 if (priv-> 4555 status & (STATUS_ASSOCIATED | 4556 STATUS_AUTH)) { 4557 struct notif_authenticate *auth 4558 = ¬if->u.auth; 4559 IPW_DEBUG(IPW_DL_NOTIF | 4560 IPW_DL_STATE | 4561 IPW_DL_ASSOC, 4562 "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n", 4563 priv->essid_len, 4564 priv->essid, 4565 priv->bssid, 4566 le16_to_cpu(auth->status), 4567 ipw_get_status_code 4568 (le16_to_cpu 4569 (auth->status))); 4570 4571 priv->status &= 4572 ~(STATUS_ASSOCIATING | 4573 STATUS_AUTH | 4574 STATUS_ASSOCIATED); 4575 4576 schedule_work(&priv->link_down); 4577 break; 4578 } 4579 4580 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4581 IPW_DL_ASSOC, 4582 "authenticated: '%*pE' %pM\n", 4583 priv->essid_len, priv->essid, 4584 priv->bssid); 4585 break; 4586 } 4587 4588 case CMAS_INIT:{ 4589 if (priv->status & STATUS_AUTH) { 4590 struct 4591 libipw_assoc_response 4592 *resp; 4593 resp = 4594 (struct 4595 libipw_assoc_response 4596 *)¬if->u.raw; 4597 IPW_DEBUG(IPW_DL_NOTIF | 4598 IPW_DL_STATE | 4599 IPW_DL_ASSOC, 4600 "association failed (0x%04X): %s\n", 4601 le16_to_cpu(resp->status), 4602 ipw_get_status_code 4603 (le16_to_cpu 4604 (resp->status))); 4605 } 4606 4607 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4608 IPW_DL_ASSOC, 4609 "disassociated: '%*pE' %pM\n", 4610 priv->essid_len, priv->essid, 4611 priv->bssid); 4612 4613 priv->status &= 4614 ~(STATUS_DISASSOCIATING | 4615 STATUS_ASSOCIATING | 4616 STATUS_ASSOCIATED | STATUS_AUTH); 4617 if (priv->assoc_network 4618 && (priv->assoc_network-> 4619 capability & 4620 WLAN_CAPABILITY_IBSS)) 4621 
ipw_remove_current_network 4622 (priv); 4623 4624 schedule_work(&priv->link_down); 4625 4626 break; 4627 } 4628 4629 case CMAS_RX_ASSOC_RESP: 4630 break; 4631 4632 default: 4633 IPW_ERROR("assoc: unknown (%d)\n", 4634 assoc->state); 4635 break; 4636 } 4637 4638 break; 4639 } 4640 4641 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{ 4642 struct notif_authenticate *auth = ¬if->u.auth; 4643 switch (auth->state) { 4644 case CMAS_AUTHENTICATED: 4645 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4646 "authenticated: '%*pE' %pM\n", 4647 priv->essid_len, priv->essid, 4648 priv->bssid); 4649 priv->status |= STATUS_AUTH; 4650 break; 4651 4652 case CMAS_INIT: 4653 if (priv->status & STATUS_AUTH) { 4654 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4655 IPW_DL_ASSOC, 4656 "authentication failed (0x%04X): %s\n", 4657 le16_to_cpu(auth->status), 4658 ipw_get_status_code(le16_to_cpu 4659 (auth-> 4660 status))); 4661 } 4662 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4663 IPW_DL_ASSOC, 4664 "deauthenticated: '%*pE' %pM\n", 4665 priv->essid_len, priv->essid, 4666 priv->bssid); 4667 4668 priv->status &= ~(STATUS_ASSOCIATING | 4669 STATUS_AUTH | 4670 STATUS_ASSOCIATED); 4671 4672 schedule_work(&priv->link_down); 4673 break; 4674 4675 case CMAS_TX_AUTH_SEQ_1: 4676 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4677 IPW_DL_ASSOC, "AUTH_SEQ_1\n"); 4678 break; 4679 case CMAS_RX_AUTH_SEQ_2: 4680 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4681 IPW_DL_ASSOC, "AUTH_SEQ_2\n"); 4682 break; 4683 case CMAS_AUTH_SEQ_1_PASS: 4684 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4685 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n"); 4686 break; 4687 case CMAS_AUTH_SEQ_1_FAIL: 4688 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4689 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n"); 4690 break; 4691 case CMAS_TX_AUTH_SEQ_3: 4692 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4693 IPW_DL_ASSOC, "AUTH_SEQ_3\n"); 4694 break; 4695 case CMAS_RX_AUTH_SEQ_4: 4696 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4697 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n"); 4698 break; 4699 case CMAS_AUTH_SEQ_2_PASS: 4700 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4701 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n"); 4702 break; 4703 case CMAS_AUTH_SEQ_2_FAIL: 4704 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4705 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n"); 4706 break; 4707 case CMAS_TX_ASSOC: 4708 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4709 IPW_DL_ASSOC, "TX_ASSOC\n"); 4710 break; 4711 case CMAS_RX_ASSOC_RESP: 4712 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4713 IPW_DL_ASSOC, "RX_ASSOC_RESP\n"); 4714 4715 break; 4716 case CMAS_ASSOCIATED: 4717 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4718 IPW_DL_ASSOC, "ASSOCIATED\n"); 4719 break; 4720 default: 4721 IPW_DEBUG_NOTIF("auth: failure - %d\n", 4722 auth->state); 4723 break; 4724 } 4725 break; 4726 } 4727 4728 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{ 4729 struct notif_channel_result *x = 4730 ¬if->u.channel_result; 4731 4732 if (size == sizeof(*x)) { 4733 IPW_DEBUG_SCAN("Scan result for channel %d\n", 4734 x->channel_num); 4735 } else { 4736 IPW_DEBUG_SCAN("Scan result of wrong size %d " 4737 "(should be %zd)\n", 4738 size, sizeof(*x)); 4739 } 4740 break; 4741 } 4742 4743 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{ 4744 struct notif_scan_complete *x = ¬if->u.scan_complete; 4745 if (size == sizeof(*x)) { 4746 IPW_DEBUG_SCAN 4747 ("Scan completed: type %d, %d channels, " 4748 "%d status\n", x->scan_type, 4749 x->num_channels, x->status); 4750 } else { 4751 IPW_ERROR("Scan completed of wrong size %d " 4752 "(should be %zd)\n", 4753 size, sizeof(*x)); 4754 } 4755 4756 priv->status &= 4757 ~(STATUS_SCANNING | 
STATUS_SCAN_ABORTING); 4758 4759 wake_up_interruptible(&priv->wait_state); 4760 cancel_delayed_work(&priv->scan_check); 4761 4762 if (priv->status & STATUS_EXIT_PENDING) 4763 break; 4764 4765 priv->ieee->scans++; 4766 4767 #ifdef CONFIG_IPW2200_MONITOR 4768 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 4769 priv->status |= STATUS_SCAN_FORCED; 4770 schedule_delayed_work(&priv->request_scan, 0); 4771 break; 4772 } 4773 priv->status &= ~STATUS_SCAN_FORCED; 4774 #endif /* CONFIG_IPW2200_MONITOR */ 4775 4776 /* Do queued direct scans first */ 4777 if (priv->status & STATUS_DIRECT_SCAN_PENDING) 4778 schedule_delayed_work(&priv->request_direct_scan, 0); 4779 4780 if (!(priv->status & (STATUS_ASSOCIATED | 4781 STATUS_ASSOCIATING | 4782 STATUS_ROAMING | 4783 STATUS_DISASSOCIATING))) 4784 schedule_work(&priv->associate); 4785 else if (priv->status & STATUS_ROAMING) { 4786 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) 4787 /* If a scan completed and we are in roam mode, then 4788 * the scan that completed was the one requested as a 4789 * result of entering roam... so, schedule the 4790 * roam work */ 4791 schedule_work(&priv->roam); 4792 else 4793 /* Don't schedule if we aborted the scan */ 4794 priv->status &= ~STATUS_ROAMING; 4795 } else if (priv->status & STATUS_SCAN_PENDING) 4796 schedule_delayed_work(&priv->request_scan, 0); 4797 else if (priv->config & CFG_BACKGROUND_SCAN 4798 && priv->status & STATUS_ASSOCIATED) 4799 schedule_delayed_work(&priv->request_scan, 4800 round_jiffies_relative(HZ)); 4801 4802 /* Send an empty event to user space. 4803 * We don't send the received data on the event because 4804 * it would require us to do complex transcoding, and 4805 * we want to minimise the work done in the irq handler 4806 * Use a request to extract the data. 4807 * Also, we generate this even for any scan, regardless 4808 * on how the scan was initiated. User space can just 4809 * sync on periodic scan to get fresh data... 
4810 * Jean II */ 4811 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) 4812 handle_scan_event(priv); 4813 break; 4814 } 4815 4816 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{ 4817 struct notif_frag_length *x = ¬if->u.frag_len; 4818 4819 if (size == sizeof(*x)) 4820 IPW_ERROR("Frag length: %d\n", 4821 le16_to_cpu(x->frag_length)); 4822 else 4823 IPW_ERROR("Frag length of wrong size %d " 4824 "(should be %zd)\n", 4825 size, sizeof(*x)); 4826 break; 4827 } 4828 4829 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{ 4830 struct notif_link_deterioration *x = 4831 ¬if->u.link_deterioration; 4832 4833 if (size == sizeof(*x)) { 4834 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4835 "link deterioration: type %d, cnt %d\n", 4836 x->silence_notification_type, 4837 x->silence_count); 4838 memcpy(&priv->last_link_deterioration, x, 4839 sizeof(*x)); 4840 } else { 4841 IPW_ERROR("Link Deterioration of wrong size %d " 4842 "(should be %zd)\n", 4843 size, sizeof(*x)); 4844 } 4845 break; 4846 } 4847 4848 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{ 4849 IPW_ERROR("Dino config\n"); 4850 if (priv->hcmd 4851 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG) 4852 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n"); 4853 4854 break; 4855 } 4856 4857 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{ 4858 struct notif_beacon_state *x = ¬if->u.beacon_state; 4859 if (size != sizeof(*x)) { 4860 IPW_ERROR 4861 ("Beacon state of wrong size %d (should " 4862 "be %zd)\n", size, sizeof(*x)); 4863 break; 4864 } 4865 4866 if (le32_to_cpu(x->state) == 4867 HOST_NOTIFICATION_STATUS_BEACON_MISSING) 4868 ipw_handle_missed_beacon(priv, 4869 le32_to_cpu(x-> 4870 number)); 4871 4872 break; 4873 } 4874 4875 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{ 4876 struct notif_tgi_tx_key *x = ¬if->u.tgi_tx_key; 4877 if (size == sizeof(*x)) { 4878 IPW_ERROR("TGi Tx Key: state 0x%02x sec type " 4879 "0x%02x station %d\n", 4880 x->key_state, x->security_type, 4881 x->station_index); 4882 break; 4883 } 4884 4885 IPW_ERROR 4886 ("TGi Tx Key of wrong size %d (should be %zd)\n", 4887 size, sizeof(*x)); 4888 break; 4889 } 4890 4891 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{ 4892 struct notif_calibration *x = ¬if->u.calibration; 4893 4894 if (size == sizeof(*x)) { 4895 memcpy(&priv->calib, x, sizeof(*x)); 4896 IPW_DEBUG_INFO("TODO: Calibration\n"); 4897 break; 4898 } 4899 4900 IPW_ERROR 4901 ("Calibration of wrong size %d (should be %zd)\n", 4902 size, sizeof(*x)); 4903 break; 4904 } 4905 4906 case HOST_NOTIFICATION_NOISE_STATS:{ 4907 if (size == sizeof(u32)) { 4908 priv->exp_avg_noise = 4909 exponential_average(priv->exp_avg_noise, 4910 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff), 4911 DEPTH_NOISE); 4912 break; 4913 } 4914 4915 IPW_ERROR 4916 ("Noise stat is wrong size %d (should be %zd)\n", 4917 size, sizeof(u32)); 4918 break; 4919 } 4920 4921 default: 4922 IPW_DEBUG_NOTIF("Unknown notification: " 4923 "subtype=%d,flags=0x%2x,size=%d\n", 4924 notif->subtype, notif->flags, size); 4925 } 4926 } 4927 4928 /** 4929 * Destroys all DMA structures and initialise them again 4930 * 4931 * @param priv 4932 * @return error code 4933 */ 4934 static int ipw_queue_reset(struct ipw_priv *priv) 4935 { 4936 int rc = 0; 4937 /** @todo customize queue sizes */ 4938 int nTx = 64, nTxCmd = 8; 4939 ipw_tx_queue_free(priv); 4940 /* Tx CMD queue */ 4941 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd, 4942 IPW_TX_CMD_QUEUE_READ_INDEX, 4943 IPW_TX_CMD_QUEUE_WRITE_INDEX, 4944 IPW_TX_CMD_QUEUE_BD_BASE, 4945 IPW_TX_CMD_QUEUE_BD_SIZE); 4946 if (rc) { 4947 IPW_ERROR("Tx Cmd queue init 
failed\n"); 4948 goto error; 4949 } 4950 /* Tx queue(s) */ 4951 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx, 4952 IPW_TX_QUEUE_0_READ_INDEX, 4953 IPW_TX_QUEUE_0_WRITE_INDEX, 4954 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE); 4955 if (rc) { 4956 IPW_ERROR("Tx 0 queue init failed\n"); 4957 goto error; 4958 } 4959 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx, 4960 IPW_TX_QUEUE_1_READ_INDEX, 4961 IPW_TX_QUEUE_1_WRITE_INDEX, 4962 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE); 4963 if (rc) { 4964 IPW_ERROR("Tx 1 queue init failed\n"); 4965 goto error; 4966 } 4967 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx, 4968 IPW_TX_QUEUE_2_READ_INDEX, 4969 IPW_TX_QUEUE_2_WRITE_INDEX, 4970 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE); 4971 if (rc) { 4972 IPW_ERROR("Tx 2 queue init failed\n"); 4973 goto error; 4974 } 4975 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx, 4976 IPW_TX_QUEUE_3_READ_INDEX, 4977 IPW_TX_QUEUE_3_WRITE_INDEX, 4978 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE); 4979 if (rc) { 4980 IPW_ERROR("Tx 3 queue init failed\n"); 4981 goto error; 4982 } 4983 /* statistics */ 4984 priv->rx_bufs_min = 0; 4985 priv->rx_pend_max = 0; 4986 return rc; 4987 4988 error: 4989 ipw_tx_queue_free(priv); 4990 return rc; 4991 } 4992 4993 /** 4994 * Reclaim Tx queue entries no more used by NIC. 4995 * 4996 * When FW advances 'R' index, all entries between old and 4997 * new 'R' index need to be reclaimed. As result, some free space 4998 * forms. If there is enough free space (> low mark), wake Tx queue. 4999 * 5000 * @note Need to protect against garbage in 'R' index 5001 * @param priv 5002 * @param txq 5003 * @param qindex 5004 * @return Number of used entries remains in the queue 5005 */ 5006 static int ipw_queue_tx_reclaim(struct ipw_priv *priv, 5007 struct clx2_tx_queue *txq, int qindex) 5008 { 5009 u32 hw_tail; 5010 int used; 5011 struct clx2_queue *q = &txq->q; 5012 5013 hw_tail = ipw_read32(priv, q->reg_r); 5014 if (hw_tail >= q->n_bd) { 5015 IPW_ERROR 5016 ("Read index for DMA queue (%d) is out of range [0-%d)\n", 5017 hw_tail, q->n_bd); 5018 goto done; 5019 } 5020 for (; q->last_used != hw_tail; 5021 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { 5022 ipw_queue_tx_free_tfd(priv, txq); 5023 priv->tx_packets++; 5024 } 5025 done: 5026 if ((ipw_tx_queue_space(q) > q->low_mark) && 5027 (qindex >= 0)) 5028 netif_wake_queue(priv->net_dev); 5029 used = q->first_empty - q->last_used; 5030 if (used < 0) 5031 used += q->n_bd; 5032 5033 return used; 5034 } 5035 5036 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, 5037 int len, int sync) 5038 { 5039 struct clx2_tx_queue *txq = &priv->txq_cmd; 5040 struct clx2_queue *q = &txq->q; 5041 struct tfd_frame *tfd; 5042 5043 if (ipw_tx_queue_space(q) < (sync ? 
1 : 2)) { 5044 IPW_ERROR("No space for Tx\n"); 5045 return -EBUSY; 5046 } 5047 5048 tfd = &txq->bd[q->first_empty]; 5049 txq->txb[q->first_empty] = NULL; 5050 5051 memset(tfd, 0, sizeof(*tfd)); 5052 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE; 5053 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; 5054 priv->hcmd_seq++; 5055 tfd->u.cmd.index = hcmd; 5056 tfd->u.cmd.length = len; 5057 memcpy(tfd->u.cmd.payload, buf, len); 5058 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); 5059 ipw_write32(priv, q->reg_w, q->first_empty); 5060 _ipw_read32(priv, 0x90); 5061 5062 return 0; 5063 } 5064 5065 /* 5066 * Rx theory of operation 5067 * 5068 * The host allocates 32 DMA target addresses and passes the host address 5069 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is 5070 * 0 to 31 5071 * 5072 * Rx Queue Indexes 5073 * The host/firmware share two index registers for managing the Rx buffers. 5074 * 5075 * The READ index maps to the first position that the firmware may be writing 5076 * to -- the driver can read up to (but not including) this position and get 5077 * good data. 5078 * The READ index is managed by the firmware once the card is enabled. 5079 * 5080 * The WRITE index maps to the last position the driver has read from -- the 5081 * position preceding WRITE is the last slot the firmware can place a packet. 5082 * 5083 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 5084 * WRITE = READ. 5085 * 5086 * During initialization the host sets up the READ queue position to the first 5087 * INDEX position, and WRITE to the last (READ - 1 wrapped) 5088 * 5089 * When the firmware places a packet in a buffer it will advance the READ index 5090 * and fire the RX interrupt. The driver can then query the READ index and 5091 * process as many packets as possible, moving the WRITE index forward as it 5092 * resets the Rx queue buffers with new memory. 5093 * 5094 * The management in the driver is as follows: 5095 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When 5096 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 5097 * to replensish the ipw->rxq->rx_free. 5098 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the 5099 * ipw->rxq is replenished and the READ INDEX is updated (updating the 5100 * 'processed' and 'read' driver indexes as well) 5101 * + A received packet is processed and handed to the kernel network stack, 5102 * detached from the ipw->rxq. The driver 'processed' index is updated. 5103 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free 5104 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ 5105 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there 5106 * were enough free buffers and RX_STALLED is set it is cleared. 5107 * 5108 * 5109 * Driver sequence: 5110 * 5111 * ipw_rx_queue_alloc() Allocates rx_free 5112 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls 5113 * ipw_rx_queue_restock 5114 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx 5115 * queue, updates firmware pointers, and updates 5116 * the WRITE index. If insufficient rx_free buffers 5117 * are available, schedules ipw_rx_queue_replenish 5118 * 5119 * -- enable interrupts -- 5120 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the 5121 * READ INDEX, detaching the SKB from the pool. 5122 * Moves the packet buffer from queue to rx_used. 
5123 * Calls ipw_rx_queue_restock to refill any empty 5124 * slots. 5125 * ... 5126 * 5127 */ 5128 5129 /* 5130 * If there are slots in the RX queue that need to be restocked, 5131 * and we have free pre-allocated buffers, fill the ranks as much 5132 * as we can pulling from rx_free. 5133 * 5134 * This moves the 'write' index forward to catch up with 'processed', and 5135 * also updates the memory address in the firmware to reference the new 5136 * target buffer. 5137 */ 5138 static void ipw_rx_queue_restock(struct ipw_priv *priv) 5139 { 5140 struct ipw_rx_queue *rxq = priv->rxq; 5141 struct list_head *element; 5142 struct ipw_rx_mem_buffer *rxb; 5143 unsigned long flags; 5144 int write; 5145 5146 spin_lock_irqsave(&rxq->lock, flags); 5147 write = rxq->write; 5148 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 5149 element = rxq->rx_free.next; 5150 rxb = list_entry(element, struct ipw_rx_mem_buffer, list); 5151 list_del(element); 5152 5153 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE, 5154 rxb->dma_addr); 5155 rxq->queue[rxq->write] = rxb; 5156 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE; 5157 rxq->free_count--; 5158 } 5159 spin_unlock_irqrestore(&rxq->lock, flags); 5160 5161 /* If the pre-allocated buffer pool is dropping low, schedule to 5162 * refill it */ 5163 if (rxq->free_count <= RX_LOW_WATERMARK) 5164 schedule_work(&priv->rx_replenish); 5165 5166 /* If we've added more space for the firmware to place data, tell it */ 5167 if (write != rxq->write) 5168 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write); 5169 } 5170 5171 /* 5172 * Move all used packet from rx_used to rx_free, allocating a new SKB for each. 5173 * Also restock the Rx queue via ipw_rx_queue_restock. 5174 * 5175 * This is called as a scheduled work item (except for during initialization) 5176 */ 5177 static void ipw_rx_queue_replenish(void *data) 5178 { 5179 struct ipw_priv *priv = data; 5180 struct ipw_rx_queue *rxq = priv->rxq; 5181 struct list_head *element; 5182 struct ipw_rx_mem_buffer *rxb; 5183 unsigned long flags; 5184 5185 spin_lock_irqsave(&rxq->lock, flags); 5186 while (!list_empty(&rxq->rx_used)) { 5187 element = rxq->rx_used.next; 5188 rxb = list_entry(element, struct ipw_rx_mem_buffer, list); 5189 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC); 5190 if (!rxb->skb) { 5191 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n", 5192 priv->net_dev->name); 5193 /* We don't reschedule replenish work here -- we will 5194 * call the restock method and if it still needs 5195 * more buffers it will schedule replenish */ 5196 break; 5197 } 5198 list_del(element); 5199 5200 rxb->dma_addr = 5201 pci_map_single(priv->pci_dev, rxb->skb->data, 5202 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 5203 5204 list_add_tail(&rxb->list, &rxq->rx_free); 5205 rxq->free_count++; 5206 } 5207 spin_unlock_irqrestore(&rxq->lock, flags); 5208 5209 ipw_rx_queue_restock(priv); 5210 } 5211 5212 static void ipw_bg_rx_queue_replenish(struct work_struct *work) 5213 { 5214 struct ipw_priv *priv = 5215 container_of(work, struct ipw_priv, rx_replenish); 5216 mutex_lock(&priv->mutex); 5217 ipw_rx_queue_replenish(priv); 5218 mutex_unlock(&priv->mutex); 5219 } 5220 5221 /* Assumes that the skb field of the buffers in 'pool' is kept accurate. 
5222 * If an SKB has been detached, the POOL needs to have its SKB set to NULL 5223 * This free routine walks the list of POOL entries and if SKB is set to 5224 * non NULL it is unmapped and freed 5225 */ 5226 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq) 5227 { 5228 int i; 5229 5230 if (!rxq) 5231 return; 5232 5233 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 5234 if (rxq->pool[i].skb != NULL) { 5235 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 5236 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 5237 dev_kfree_skb(rxq->pool[i].skb); 5238 } 5239 } 5240 5241 kfree(rxq); 5242 } 5243 5244 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv) 5245 { 5246 struct ipw_rx_queue *rxq; 5247 int i; 5248 5249 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL); 5250 if (unlikely(!rxq)) { 5251 IPW_ERROR("memory allocation failed\n"); 5252 return NULL; 5253 } 5254 spin_lock_init(&rxq->lock); 5255 INIT_LIST_HEAD(&rxq->rx_free); 5256 INIT_LIST_HEAD(&rxq->rx_used); 5257 5258 /* Fill the rx_used queue with _all_ of the Rx buffers */ 5259 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 5260 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 5261 5262 /* Set us so that we have processed and used all buffers, but have 5263 * not restocked the Rx queue with fresh buffers */ 5264 rxq->read = rxq->write = 0; 5265 rxq->free_count = 0; 5266 5267 return rxq; 5268 } 5269 5270 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate) 5271 { 5272 rate &= ~LIBIPW_BASIC_RATE_MASK; 5273 if (ieee_mode == IEEE_A) { 5274 switch (rate) { 5275 case LIBIPW_OFDM_RATE_6MB: 5276 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 5277 1 : 0; 5278 case LIBIPW_OFDM_RATE_9MB: 5279 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 5280 1 : 0; 5281 case LIBIPW_OFDM_RATE_12MB: 5282 return priv-> 5283 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0; 5284 case LIBIPW_OFDM_RATE_18MB: 5285 return priv-> 5286 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0; 5287 case LIBIPW_OFDM_RATE_24MB: 5288 return priv-> 5289 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0; 5290 case LIBIPW_OFDM_RATE_36MB: 5291 return priv-> 5292 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0; 5293 case LIBIPW_OFDM_RATE_48MB: 5294 return priv-> 5295 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0; 5296 case LIBIPW_OFDM_RATE_54MB: 5297 return priv-> 5298 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0; 5299 default: 5300 return 0; 5301 } 5302 } 5303 5304 /* B and G mixed */ 5305 switch (rate) { 5306 case LIBIPW_CCK_RATE_1MB: 5307 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0; 5308 case LIBIPW_CCK_RATE_2MB: 5309 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0; 5310 case LIBIPW_CCK_RATE_5MB: 5311 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0; 5312 case LIBIPW_CCK_RATE_11MB: 5313 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0; 5314 } 5315 5316 /* If we are limited to B modulations, bail at this point */ 5317 if (ieee_mode == IEEE_B) 5318 return 0; 5319 5320 /* G */ 5321 switch (rate) { 5322 case LIBIPW_OFDM_RATE_6MB: 5323 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0; 5324 case LIBIPW_OFDM_RATE_9MB: 5325 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0; 5326 case LIBIPW_OFDM_RATE_12MB: 5327 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0; 5328 case LIBIPW_OFDM_RATE_18MB: 5329 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 
1 : 0; 5330 case LIBIPW_OFDM_RATE_24MB: 5331 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0; 5332 case LIBIPW_OFDM_RATE_36MB: 5333 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0; 5334 case LIBIPW_OFDM_RATE_48MB: 5335 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0; 5336 case LIBIPW_OFDM_RATE_54MB: 5337 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0; 5338 } 5339 5340 return 0; 5341 } 5342 5343 static int ipw_compatible_rates(struct ipw_priv *priv, 5344 const struct libipw_network *network, 5345 struct ipw_supported_rates *rates) 5346 { 5347 int num_rates, i; 5348 5349 memset(rates, 0, sizeof(*rates)); 5350 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES); 5351 rates->num_rates = 0; 5352 for (i = 0; i < num_rates; i++) { 5353 if (!ipw_is_rate_in_mask(priv, network->mode, 5354 network->rates[i])) { 5355 5356 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) { 5357 IPW_DEBUG_SCAN("Adding masked mandatory " 5358 "rate %02X\n", 5359 network->rates[i]); 5360 rates->supported_rates[rates->num_rates++] = 5361 network->rates[i]; 5362 continue; 5363 } 5364 5365 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", 5366 network->rates[i], priv->rates_mask); 5367 continue; 5368 } 5369 5370 rates->supported_rates[rates->num_rates++] = network->rates[i]; 5371 } 5372 5373 num_rates = min(network->rates_ex_len, 5374 (u8) (IPW_MAX_RATES - num_rates)); 5375 for (i = 0; i < num_rates; i++) { 5376 if (!ipw_is_rate_in_mask(priv, network->mode, 5377 network->rates_ex[i])) { 5378 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) { 5379 IPW_DEBUG_SCAN("Adding masked mandatory " 5380 "rate %02X\n", 5381 network->rates_ex[i]); 5382 rates->supported_rates[rates->num_rates++] = 5383 network->rates[i]; 5384 continue; 5385 } 5386 5387 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", 5388 network->rates_ex[i], priv->rates_mask); 5389 continue; 5390 } 5391 5392 rates->supported_rates[rates->num_rates++] = 5393 network->rates_ex[i]; 5394 } 5395 5396 return 1; 5397 } 5398 5399 static void ipw_copy_rates(struct ipw_supported_rates *dest, 5400 const struct ipw_supported_rates *src) 5401 { 5402 u8 i; 5403 for (i = 0; i < src->num_rates; i++) 5404 dest->supported_rates[i] = src->supported_rates[i]; 5405 dest->num_rates = src->num_rates; 5406 } 5407 5408 /* TODO: Look at sniffed packets in the air to determine if the basic rate 5409 * mask should ever be used -- right now all callers to add the scan rates are 5410 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */ 5411 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates, 5412 u8 modulation, u32 rate_mask) 5413 { 5414 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ? 5415 LIBIPW_BASIC_RATE_MASK : 0; 5416 5417 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK) 5418 rates->supported_rates[rates->num_rates++] = 5419 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB; 5420 5421 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK) 5422 rates->supported_rates[rates->num_rates++] = 5423 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB; 5424 5425 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK) 5426 rates->supported_rates[rates->num_rates++] = basic_mask | 5427 LIBIPW_CCK_RATE_5MB; 5428 5429 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK) 5430 rates->supported_rates[rates->num_rates++] = basic_mask | 5431 LIBIPW_CCK_RATE_11MB; 5432 } 5433 5434 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates, 5435 u8 modulation, u32 rate_mask) 5436 { 5437 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ? 
5438 LIBIPW_BASIC_RATE_MASK : 0; 5439 5440 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK) 5441 rates->supported_rates[rates->num_rates++] = basic_mask | 5442 LIBIPW_OFDM_RATE_6MB; 5443 5444 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK) 5445 rates->supported_rates[rates->num_rates++] = 5446 LIBIPW_OFDM_RATE_9MB; 5447 5448 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK) 5449 rates->supported_rates[rates->num_rates++] = basic_mask | 5450 LIBIPW_OFDM_RATE_12MB; 5451 5452 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK) 5453 rates->supported_rates[rates->num_rates++] = 5454 LIBIPW_OFDM_RATE_18MB; 5455 5456 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK) 5457 rates->supported_rates[rates->num_rates++] = basic_mask | 5458 LIBIPW_OFDM_RATE_24MB; 5459 5460 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK) 5461 rates->supported_rates[rates->num_rates++] = 5462 LIBIPW_OFDM_RATE_36MB; 5463 5464 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK) 5465 rates->supported_rates[rates->num_rates++] = 5466 LIBIPW_OFDM_RATE_48MB; 5467 5468 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK) 5469 rates->supported_rates[rates->num_rates++] = 5470 LIBIPW_OFDM_RATE_54MB; 5471 } 5472 5473 struct ipw_network_match { 5474 struct libipw_network *network; 5475 struct ipw_supported_rates rates; 5476 }; 5477 5478 static int ipw_find_adhoc_network(struct ipw_priv *priv, 5479 struct ipw_network_match *match, 5480 struct libipw_network *network, 5481 int roaming) 5482 { 5483 struct ipw_supported_rates rates; 5484 5485 /* Verify that this network's capability is compatible with the 5486 * current mode (AdHoc or Infrastructure) */ 5487 if ((priv->ieee->iw_mode == IW_MODE_ADHOC && 5488 !(network->capability & WLAN_CAPABILITY_IBSS))) { 5489 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n", 5490 network->ssid_len, network->ssid, 5491 network->bssid); 5492 return 0; 5493 } 5494 5495 if (unlikely(roaming)) { 5496 /* If we are roaming, then ensure check if this is a valid 5497 * network to try and roam to */ 5498 if ((network->ssid_len != match->network->ssid_len) || 5499 memcmp(network->ssid, match->network->ssid, 5500 network->ssid_len)) { 5501 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n", 5502 network->ssid_len, network->ssid, 5503 network->bssid); 5504 return 0; 5505 } 5506 } else { 5507 /* If an ESSID has been configured then compare the broadcast 5508 * ESSID to ours */ 5509 if ((priv->config & CFG_STATIC_ESSID) && 5510 ((network->ssid_len != priv->essid_len) || 5511 memcmp(network->ssid, priv->essid, 5512 min(network->ssid_len, priv->essid_len)))) { 5513 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n", 5514 network->ssid_len, network->ssid, 5515 network->bssid, priv->essid_len, 5516 priv->essid); 5517 return 0; 5518 } 5519 } 5520 5521 /* If the old network rate is better than this one, don't bother 5522 * testing everything else. */ 5523 5524 if (network->time_stamp[0] < match->network->time_stamp[0]) { 5525 IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n", 5526 match->network->ssid_len, match->network->ssid); 5527 return 0; 5528 } else if (network->time_stamp[1] < match->network->time_stamp[1]) { 5529 IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n", 5530 match->network->ssid_len, match->network->ssid); 5531 return 0; 5532 } 5533 5534 /* Now go through and see if the requested network is valid... 
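 * (scan age, static channel, privacy, BSSID, band/mode and rate
 * compatibility are each checked in turn below)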
*/ 5535 if (priv->ieee->scan_age != 0 && 5536 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5537 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n", 5538 network->ssid_len, network->ssid, 5539 network->bssid, 5540 jiffies_to_msecs(jiffies - 5541 network->last_scanned)); 5542 return 0; 5543 } 5544 5545 if ((priv->config & CFG_STATIC_CHANNEL) && 5546 (network->channel != priv->channel)) { 5547 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n", 5548 network->ssid_len, network->ssid, 5549 network->bssid, 5550 network->channel, priv->channel); 5551 return 0; 5552 } 5553 5554 /* Verify privacy compatibility */ 5555 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != 5556 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { 5557 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n", 5558 network->ssid_len, network->ssid, 5559 network->bssid, 5560 priv-> 5561 capability & CAP_PRIVACY_ON ? "on" : "off", 5562 network-> 5563 capability & WLAN_CAPABILITY_PRIVACY ? "on" : 5564 "off"); 5565 return 0; 5566 } 5567 5568 if (ether_addr_equal(network->bssid, priv->bssid)) { 5569 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n", 5570 network->ssid_len, network->ssid, 5571 network->bssid, priv->bssid); 5572 return 0; 5573 } 5574 5575 /* Filter out any incompatible freq / mode combinations */ 5576 if (!libipw_is_valid_mode(priv->ieee, network->mode)) { 5577 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n", 5578 network->ssid_len, network->ssid, 5579 network->bssid); 5580 return 0; 5581 } 5582 5583 /* Ensure that the rates supported by the driver are compatible with 5584 * this AP, including verification of basic rates (mandatory) */ 5585 if (!ipw_compatible_rates(priv, network, &rates)) { 5586 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n", 5587 network->ssid_len, network->ssid, 5588 network->bssid); 5589 return 0; 5590 } 5591 5592 if (rates.num_rates == 0) { 5593 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n", 5594 network->ssid_len, network->ssid, 5595 network->bssid); 5596 return 0; 5597 } 5598 5599 /* TODO: Perform any further minimal comparititive tests. We do not 5600 * want to put too much policy logic here; intelligent scan selection 5601 * should occur within a generic IEEE 802.11 user space tool. 
*/ 5602 5603 /* Set up 'new' AP to this network */ 5604 ipw_copy_rates(&match->rates, &rates); 5605 match->network = network; 5606 IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n", 5607 network->ssid_len, network->ssid, network->bssid); 5608 5609 return 1; 5610 } 5611 5612 static void ipw_merge_adhoc_network(struct work_struct *work) 5613 { 5614 struct ipw_priv *priv = 5615 container_of(work, struct ipw_priv, merge_networks); 5616 struct libipw_network *network = NULL; 5617 struct ipw_network_match match = { 5618 .network = priv->assoc_network 5619 }; 5620 5621 if ((priv->status & STATUS_ASSOCIATED) && 5622 (priv->ieee->iw_mode == IW_MODE_ADHOC)) { 5623 /* First pass through ROAM process -- look for a better 5624 * network */ 5625 unsigned long flags; 5626 5627 spin_lock_irqsave(&priv->ieee->lock, flags); 5628 list_for_each_entry(network, &priv->ieee->network_list, list) { 5629 if (network != priv->assoc_network) 5630 ipw_find_adhoc_network(priv, &match, network, 5631 1); 5632 } 5633 spin_unlock_irqrestore(&priv->ieee->lock, flags); 5634 5635 if (match.network == priv->assoc_network) { 5636 IPW_DEBUG_MERGE("No better ADHOC in this network to " 5637 "merge to.\n"); 5638 return; 5639 } 5640 5641 mutex_lock(&priv->mutex); 5642 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 5643 IPW_DEBUG_MERGE("remove network %*pE\n", 5644 priv->essid_len, priv->essid); 5645 ipw_remove_current_network(priv); 5646 } 5647 5648 ipw_disassociate(priv); 5649 priv->assoc_network = match.network; 5650 mutex_unlock(&priv->mutex); 5651 return; 5652 } 5653 } 5654 5655 static int ipw_best_network(struct ipw_priv *priv, 5656 struct ipw_network_match *match, 5657 struct libipw_network *network, int roaming) 5658 { 5659 struct ipw_supported_rates rates; 5660 5661 /* Verify that this network's capability is compatible with the 5662 * current mode (AdHoc or Infrastructure) */ 5663 if ((priv->ieee->iw_mode == IW_MODE_INFRA && 5664 !(network->capability & WLAN_CAPABILITY_ESS)) || 5665 (priv->ieee->iw_mode == IW_MODE_ADHOC && 5666 !(network->capability & WLAN_CAPABILITY_IBSS))) { 5667 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n", 5668 network->ssid_len, network->ssid, 5669 network->bssid); 5670 return 0; 5671 } 5672 5673 if (unlikely(roaming)) { 5674 /* If we are roaming, then ensure check if this is a valid 5675 * network to try and roam to */ 5676 if ((network->ssid_len != match->network->ssid_len) || 5677 memcmp(network->ssid, match->network->ssid, 5678 network->ssid_len)) { 5679 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n", 5680 network->ssid_len, network->ssid, 5681 network->bssid); 5682 return 0; 5683 } 5684 } else { 5685 /* If an ESSID has been configured then compare the broadcast 5686 * ESSID to ours */ 5687 if ((priv->config & CFG_STATIC_ESSID) && 5688 ((network->ssid_len != priv->essid_len) || 5689 memcmp(network->ssid, priv->essid, 5690 min(network->ssid_len, priv->essid_len)))) { 5691 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n", 5692 network->ssid_len, network->ssid, 5693 network->bssid, priv->essid_len, 5694 priv->essid); 5695 return 0; 5696 } 5697 } 5698 5699 /* If the old network rate is better than this one, don't bother 5700 * testing everything else. 
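 * (despite the wording, the comparison below is on reported RSSI,
 * not on rate)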
*/ 5701 if (match->network && match->network->stats.rssi > network->stats.rssi) { 5702 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n", 5703 network->ssid_len, network->ssid, 5704 network->bssid, match->network->ssid_len, 5705 match->network->ssid, match->network->bssid); 5706 return 0; 5707 } 5708 5709 /* If this network has already had an association attempt within the 5710 * last 3 seconds, do not try and associate again... */ 5711 if (network->last_associate && 5712 time_after(network->last_associate + (HZ * 3UL), jiffies)) { 5713 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n", 5714 network->ssid_len, network->ssid, 5715 network->bssid, 5716 jiffies_to_msecs(jiffies - 5717 network->last_associate)); 5718 return 0; 5719 } 5720 5721 /* Now go through and see if the requested network is valid... */ 5722 if (priv->ieee->scan_age != 0 && 5723 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5724 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n", 5725 network->ssid_len, network->ssid, 5726 network->bssid, 5727 jiffies_to_msecs(jiffies - 5728 network->last_scanned)); 5729 return 0; 5730 } 5731 5732 if ((priv->config & CFG_STATIC_CHANNEL) && 5733 (network->channel != priv->channel)) { 5734 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n", 5735 network->ssid_len, network->ssid, 5736 network->bssid, 5737 network->channel, priv->channel); 5738 return 0; 5739 } 5740 5741 /* Verify privacy compatibility */ 5742 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != 5743 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { 5744 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n", 5745 network->ssid_len, network->ssid, 5746 network->bssid, 5747 priv->capability & CAP_PRIVACY_ON ? "on" : 5748 "off", 5749 network->capability & 5750 WLAN_CAPABILITY_PRIVACY ? 
"on" : "off"); 5751 return 0; 5752 } 5753 5754 if ((priv->config & CFG_STATIC_BSSID) && 5755 !ether_addr_equal(network->bssid, priv->bssid)) { 5756 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n", 5757 network->ssid_len, network->ssid, 5758 network->bssid, priv->bssid); 5759 return 0; 5760 } 5761 5762 /* Filter out any incompatible freq / mode combinations */ 5763 if (!libipw_is_valid_mode(priv->ieee, network->mode)) { 5764 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n", 5765 network->ssid_len, network->ssid, 5766 network->bssid); 5767 return 0; 5768 } 5769 5770 /* Filter out invalid channel in current GEO */ 5771 if (!libipw_is_valid_channel(priv->ieee, network->channel)) { 5772 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n", 5773 network->ssid_len, network->ssid, 5774 network->bssid); 5775 return 0; 5776 } 5777 5778 /* Ensure that the rates supported by the driver are compatible with 5779 * this AP, including verification of basic rates (mandatory) */ 5780 if (!ipw_compatible_rates(priv, network, &rates)) { 5781 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n", 5782 network->ssid_len, network->ssid, 5783 network->bssid); 5784 return 0; 5785 } 5786 5787 if (rates.num_rates == 0) { 5788 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n", 5789 network->ssid_len, network->ssid, 5790 network->bssid); 5791 return 0; 5792 } 5793 5794 /* TODO: Perform any further minimal comparititive tests. We do not 5795 * want to put too much policy logic here; intelligent scan selection 5796 * should occur within a generic IEEE 802.11 user space tool. */ 5797 5798 /* Set up 'new' AP to this network */ 5799 ipw_copy_rates(&match->rates, &rates); 5800 match->network = network; 5801 5802 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n", 5803 network->ssid_len, network->ssid, network->bssid); 5804 5805 return 1; 5806 } 5807 5808 static void ipw_adhoc_create(struct ipw_priv *priv, 5809 struct libipw_network *network) 5810 { 5811 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 5812 int i; 5813 5814 /* 5815 * For the purposes of scanning, we can set our wireless mode 5816 * to trigger scans across combinations of bands, but when it 5817 * comes to creating a new ad-hoc network, we have tell the FW 5818 * exactly which band to use. 5819 * 5820 * We also have the possibility of an invalid channel for the 5821 * chossen band. Attempting to create a new ad-hoc network 5822 * with an invalid channel for wireless mode will trigger a 5823 * FW fatal error. 
5824 * 5825 */ 5826 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 5827 case LIBIPW_52GHZ_BAND: 5828 network->mode = IEEE_A; 5829 i = libipw_channel_to_index(priv->ieee, priv->channel); 5830 BUG_ON(i == -1); 5831 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) { 5832 IPW_WARNING("Overriding invalid channel\n"); 5833 priv->channel = geo->a[0].channel; 5834 } 5835 break; 5836 5837 case LIBIPW_24GHZ_BAND: 5838 if (priv->ieee->mode & IEEE_G) 5839 network->mode = IEEE_G; 5840 else 5841 network->mode = IEEE_B; 5842 i = libipw_channel_to_index(priv->ieee, priv->channel); 5843 BUG_ON(i == -1); 5844 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) { 5845 IPW_WARNING("Overriding invalid channel\n"); 5846 priv->channel = geo->bg[0].channel; 5847 } 5848 break; 5849 5850 default: 5851 IPW_WARNING("Overriding invalid channel\n"); 5852 if (priv->ieee->mode & IEEE_A) { 5853 network->mode = IEEE_A; 5854 priv->channel = geo->a[0].channel; 5855 } else if (priv->ieee->mode & IEEE_G) { 5856 network->mode = IEEE_G; 5857 priv->channel = geo->bg[0].channel; 5858 } else { 5859 network->mode = IEEE_B; 5860 priv->channel = geo->bg[0].channel; 5861 } 5862 break; 5863 } 5864 5865 network->channel = priv->channel; 5866 priv->config |= CFG_ADHOC_PERSIST; 5867 ipw_create_bssid(priv, network->bssid); 5868 network->ssid_len = priv->essid_len; 5869 memcpy(network->ssid, priv->essid, priv->essid_len); 5870 memset(&network->stats, 0, sizeof(network->stats)); 5871 network->capability = WLAN_CAPABILITY_IBSS; 5872 if (!(priv->config & CFG_PREAMBLE_LONG)) 5873 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE; 5874 if (priv->capability & CAP_PRIVACY_ON) 5875 network->capability |= WLAN_CAPABILITY_PRIVACY; 5876 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH); 5877 memcpy(network->rates, priv->rates.supported_rates, network->rates_len); 5878 network->rates_ex_len = priv->rates.num_rates - network->rates_len; 5879 memcpy(network->rates_ex, 5880 &priv->rates.supported_rates[network->rates_len], 5881 network->rates_ex_len); 5882 network->last_scanned = 0; 5883 network->flags = 0; 5884 network->last_associate = 0; 5885 network->time_stamp[0] = 0; 5886 network->time_stamp[1] = 0; 5887 network->beacon_interval = 100; /* Default */ 5888 network->listen_interval = 10; /* Default */ 5889 network->atim_window = 0; /* Default */ 5890 network->wpa_ie_len = 0; 5891 network->rsn_ie_len = 0; 5892 } 5893 5894 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index) 5895 { 5896 struct ipw_tgi_tx_key key; 5897 5898 if (!(priv->ieee->sec.flags & (1 << index))) 5899 return; 5900 5901 key.key_id = index; 5902 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH); 5903 key.security_type = type; 5904 key.station_index = 0; /* always 0 for BSS */ 5905 key.flags = 0; 5906 /* 0 for new key; previous value of counter (after fatal error) */ 5907 key.tx_counter[0] = cpu_to_le32(0); 5908 key.tx_counter[1] = cpu_to_le32(0); 5909 5910 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key); 5911 } 5912 5913 static void ipw_send_wep_keys(struct ipw_priv *priv, int type) 5914 { 5915 struct ipw_wep_key key; 5916 int i; 5917 5918 key.cmd_id = DINO_CMD_WEP_KEY; 5919 key.seq_num = 0; 5920 5921 /* Note: AES keys cannot be set for multiple times. 5922 * Only set it at the first time. 
*/ 5923 for (i = 0; i < 4; i++) { 5924 key.key_index = i | type; 5925 if (!(priv->ieee->sec.flags & (1 << i))) { 5926 key.key_size = 0; 5927 continue; 5928 } 5929 5930 key.key_size = priv->ieee->sec.key_sizes[i]; 5931 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size); 5932 5933 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key); 5934 } 5935 } 5936 5937 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level) 5938 { 5939 if (priv->ieee->host_encrypt) 5940 return; 5941 5942 switch (level) { 5943 case SEC_LEVEL_3: 5944 priv->sys_config.disable_unicast_decryption = 0; 5945 priv->ieee->host_decrypt = 0; 5946 break; 5947 case SEC_LEVEL_2: 5948 priv->sys_config.disable_unicast_decryption = 1; 5949 priv->ieee->host_decrypt = 1; 5950 break; 5951 case SEC_LEVEL_1: 5952 priv->sys_config.disable_unicast_decryption = 0; 5953 priv->ieee->host_decrypt = 0; 5954 break; 5955 case SEC_LEVEL_0: 5956 priv->sys_config.disable_unicast_decryption = 1; 5957 break; 5958 default: 5959 break; 5960 } 5961 } 5962 5963 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level) 5964 { 5965 if (priv->ieee->host_encrypt) 5966 return; 5967 5968 switch (level) { 5969 case SEC_LEVEL_3: 5970 priv->sys_config.disable_multicast_decryption = 0; 5971 break; 5972 case SEC_LEVEL_2: 5973 priv->sys_config.disable_multicast_decryption = 1; 5974 break; 5975 case SEC_LEVEL_1: 5976 priv->sys_config.disable_multicast_decryption = 0; 5977 break; 5978 case SEC_LEVEL_0: 5979 priv->sys_config.disable_multicast_decryption = 1; 5980 break; 5981 default: 5982 break; 5983 } 5984 } 5985 5986 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv) 5987 { 5988 switch (priv->ieee->sec.level) { 5989 case SEC_LEVEL_3: 5990 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY) 5991 ipw_send_tgi_tx_key(priv, 5992 DCT_FLAG_EXT_SECURITY_CCM, 5993 priv->ieee->sec.active_key); 5994 5995 if (!priv->ieee->host_mc_decrypt) 5996 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM); 5997 break; 5998 case SEC_LEVEL_2: 5999 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY) 6000 ipw_send_tgi_tx_key(priv, 6001 DCT_FLAG_EXT_SECURITY_TKIP, 6002 priv->ieee->sec.active_key); 6003 break; 6004 case SEC_LEVEL_1: 6005 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); 6006 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level); 6007 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level); 6008 break; 6009 case SEC_LEVEL_0: 6010 default: 6011 break; 6012 } 6013 } 6014 6015 static void ipw_adhoc_check(void *data) 6016 { 6017 struct ipw_priv *priv = data; 6018 6019 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold && 6020 !(priv->config & CFG_ADHOC_PERSIST)) { 6021 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 6022 IPW_DL_STATE | IPW_DL_ASSOC, 6023 "Missed beacon: %d - disassociate\n", 6024 priv->missed_adhoc_beacons); 6025 ipw_remove_current_network(priv); 6026 ipw_disassociate(priv); 6027 return; 6028 } 6029 6030 schedule_delayed_work(&priv->adhoc_check, 6031 le16_to_cpu(priv->assoc_request.beacon_interval)); 6032 } 6033 6034 static void ipw_bg_adhoc_check(struct work_struct *work) 6035 { 6036 struct ipw_priv *priv = 6037 container_of(work, struct ipw_priv, adhoc_check.work); 6038 mutex_lock(&priv->mutex); 6039 ipw_adhoc_check(priv); 6040 mutex_unlock(&priv->mutex); 6041 } 6042 6043 static void ipw_debug_config(struct ipw_priv *priv) 6044 { 6045 IPW_DEBUG_INFO("Scan completed, no valid APs matched " 6046 "[CFG 0x%08X]\n", priv->config); 6047 if (priv->config & CFG_STATIC_CHANNEL) 6048 IPW_DEBUG_INFO("Channel locked to %d\n", 
priv->channel); 6049 else 6050 IPW_DEBUG_INFO("Channel unlocked.\n"); 6051 if (priv->config & CFG_STATIC_ESSID) 6052 IPW_DEBUG_INFO("ESSID locked to '%*pE'\n", 6053 priv->essid_len, priv->essid); 6054 else 6055 IPW_DEBUG_INFO("ESSID unlocked.\n"); 6056 if (priv->config & CFG_STATIC_BSSID) 6057 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid); 6058 else 6059 IPW_DEBUG_INFO("BSSID unlocked.\n"); 6060 if (priv->capability & CAP_PRIVACY_ON) 6061 IPW_DEBUG_INFO("PRIVACY on\n"); 6062 else 6063 IPW_DEBUG_INFO("PRIVACY off\n"); 6064 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask); 6065 } 6066 6067 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode) 6068 { 6069 /* TODO: Verify that this works... */ 6070 struct ipw_fixed_rate fr; 6071 u32 reg; 6072 u16 mask = 0; 6073 u16 new_tx_rates = priv->rates_mask; 6074 6075 /* Identify 'current FW band' and match it with the fixed 6076 * Tx rates */ 6077 6078 switch (priv->ieee->freq_band) { 6079 case LIBIPW_52GHZ_BAND: /* A only */ 6080 /* IEEE_A */ 6081 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) { 6082 /* Invalid fixed rate mask */ 6083 IPW_DEBUG_WX 6084 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6085 new_tx_rates = 0; 6086 break; 6087 } 6088 6089 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A; 6090 break; 6091 6092 default: /* 2.4Ghz or Mixed */ 6093 /* IEEE_B */ 6094 if (mode == IEEE_B) { 6095 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) { 6096 /* Invalid fixed rate mask */ 6097 IPW_DEBUG_WX 6098 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6099 new_tx_rates = 0; 6100 } 6101 break; 6102 } 6103 6104 /* IEEE_G */ 6105 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK | 6106 LIBIPW_OFDM_RATES_MASK)) { 6107 /* Invalid fixed rate mask */ 6108 IPW_DEBUG_WX 6109 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6110 new_tx_rates = 0; 6111 break; 6112 } 6113 6114 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) { 6115 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1); 6116 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK; 6117 } 6118 6119 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) { 6120 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1); 6121 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK; 6122 } 6123 6124 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) { 6125 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1); 6126 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK; 6127 } 6128 6129 new_tx_rates |= mask; 6130 break; 6131 } 6132 6133 fr.tx_rates = cpu_to_le16(new_tx_rates); 6134 6135 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE); 6136 ipw_write_reg32(priv, reg, *(u32 *) & fr); 6137 } 6138 6139 static void ipw_abort_scan(struct ipw_priv *priv) 6140 { 6141 int err; 6142 6143 if (priv->status & STATUS_SCAN_ABORTING) { 6144 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n"); 6145 return; 6146 } 6147 priv->status |= STATUS_SCAN_ABORTING; 6148 6149 err = ipw_send_scan_abort(priv); 6150 if (err) 6151 IPW_DEBUG_HC("Request to abort scan failed.\n"); 6152 } 6153 6154 static void ipw_add_scan_channels(struct ipw_priv *priv, 6155 struct ipw_scan_request_ext *scan, 6156 int scan_type) 6157 { 6158 int channel_index = 0; 6159 const struct libipw_geo *geo; 6160 int i; 6161 6162 geo = libipw_get_geo(priv->ieee); 6163 6164 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) { 6165 int start = channel_index; 6166 for (i = 0; i < geo->a_channels; i++) { 6167 if ((priv->status & STATUS_ASSOCIATED) && 6168 geo->a[i].channel == priv->channel) 6169 continue; 6170 channel_index++; 6171 scan->channels_list[channel_index] = geo->a[i].channel; 6172 ipw_set_scan_type(scan, channel_index, 6173 
geo->a[i]. 6174 flags & LIBIPW_CH_PASSIVE_ONLY ? 6175 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN : 6176 scan_type); 6177 } 6178 6179 if (start != channel_index) { 6180 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) | 6181 (channel_index - start); 6182 channel_index++; 6183 } 6184 } 6185 6186 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) { 6187 int start = channel_index; 6188 if (priv->config & CFG_SPEED_SCAN) { 6189 int index; 6190 u8 channels[LIBIPW_24GHZ_CHANNELS] = { 6191 /* nop out the list */ 6192 [0] = 0 6193 }; 6194 6195 u8 channel; 6196 while (channel_index < IPW_SCAN_CHANNELS - 1) { 6197 channel = 6198 priv->speed_scan[priv->speed_scan_pos]; 6199 if (channel == 0) { 6200 priv->speed_scan_pos = 0; 6201 channel = priv->speed_scan[0]; 6202 } 6203 if ((priv->status & STATUS_ASSOCIATED) && 6204 channel == priv->channel) { 6205 priv->speed_scan_pos++; 6206 continue; 6207 } 6208 6209 /* If this channel has already been 6210 * added in scan, break from loop 6211 * and this will be the first channel 6212 * in the next scan. 6213 */ 6214 if (channels[channel - 1] != 0) 6215 break; 6216 6217 channels[channel - 1] = 1; 6218 priv->speed_scan_pos++; 6219 channel_index++; 6220 scan->channels_list[channel_index] = channel; 6221 index = 6222 libipw_channel_to_index(priv->ieee, channel); 6223 ipw_set_scan_type(scan, channel_index, 6224 geo->bg[index]. 6225 flags & 6226 LIBIPW_CH_PASSIVE_ONLY ? 6227 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN 6228 : scan_type); 6229 } 6230 } else { 6231 for (i = 0; i < geo->bg_channels; i++) { 6232 if ((priv->status & STATUS_ASSOCIATED) && 6233 geo->bg[i].channel == priv->channel) 6234 continue; 6235 channel_index++; 6236 scan->channels_list[channel_index] = 6237 geo->bg[i].channel; 6238 ipw_set_scan_type(scan, channel_index, 6239 geo->bg[i]. 6240 flags & 6241 LIBIPW_CH_PASSIVE_ONLY ? 6242 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN 6243 : scan_type); 6244 } 6245 } 6246 6247 if (start != channel_index) { 6248 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) | 6249 (channel_index - start); 6250 } 6251 } 6252 } 6253 6254 static int ipw_passive_dwell_time(struct ipw_priv *priv) 6255 { 6256 /* staying on passive channels longer than the DTIM interval during a 6257 * scan, while associated, causes the firmware to cancel the scan 6258 * without notification. Hence, don't stay on passive channels longer 6259 * than the beacon interval. 6260 */ 6261 if (priv->status & STATUS_ASSOCIATED 6262 && priv->assoc_network->beacon_interval > 10) 6263 return priv->assoc_network->beacon_interval - 10; 6264 else 6265 return 120; 6266 } 6267 6268 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct) 6269 { 6270 struct ipw_scan_request_ext scan; 6271 int err = 0, scan_type; 6272 6273 if (!(priv->status & STATUS_INIT) || 6274 (priv->status & STATUS_EXIT_PENDING)) 6275 return 0; 6276 6277 mutex_lock(&priv->mutex); 6278 6279 if (direct && (priv->direct_scan_ssid_len == 0)) { 6280 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n"); 6281 priv->status &= ~STATUS_DIRECT_SCAN_PENDING; 6282 goto done; 6283 } 6284 6285 if (priv->status & STATUS_SCANNING) { 6286 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n"); 6287 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : 6288 STATUS_SCAN_PENDING; 6289 goto done; 6290 } 6291 6292 if (!(priv->status & STATUS_SCAN_FORCED) && 6293 priv->status & STATUS_SCAN_ABORTING) { 6294 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n"); 6295 priv->status |= direct ? 
STATUS_DIRECT_SCAN_PENDING : 6296 STATUS_SCAN_PENDING; 6297 goto done; 6298 } 6299 6300 if (priv->status & STATUS_RF_KILL_MASK) { 6301 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n"); 6302 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : 6303 STATUS_SCAN_PENDING; 6304 goto done; 6305 } 6306 6307 memset(&scan, 0, sizeof(scan)); 6308 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee)); 6309 6310 if (type == IW_SCAN_TYPE_PASSIVE) { 6311 IPW_DEBUG_WX("use passive scanning\n"); 6312 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN; 6313 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6314 cpu_to_le16(ipw_passive_dwell_time(priv)); 6315 ipw_add_scan_channels(priv, &scan, scan_type); 6316 goto send_request; 6317 } 6318 6319 /* Use active scan by default. */ 6320 if (priv->config & CFG_SPEED_SCAN) 6321 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 6322 cpu_to_le16(30); 6323 else 6324 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 6325 cpu_to_le16(20); 6326 6327 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 6328 cpu_to_le16(20); 6329 6330 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6331 cpu_to_le16(ipw_passive_dwell_time(priv)); 6332 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); 6333 6334 #ifdef CONFIG_IPW2200_MONITOR 6335 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 6336 u8 channel; 6337 u8 band = 0; 6338 6339 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 6340 case LIBIPW_52GHZ_BAND: 6341 band = (u8) (IPW_A_MODE << 6) | 1; 6342 channel = priv->channel; 6343 break; 6344 6345 case LIBIPW_24GHZ_BAND: 6346 band = (u8) (IPW_B_MODE << 6) | 1; 6347 channel = priv->channel; 6348 break; 6349 6350 default: 6351 band = (u8) (IPW_B_MODE << 6) | 1; 6352 channel = 9; 6353 break; 6354 } 6355 6356 scan.channels_list[0] = band; 6357 scan.channels_list[1] = channel; 6358 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN); 6359 6360 /* NOTE: The card will sit on this channel for this time 6361 * period. Scan aborts are timing sensitive and frequently 6362 * result in firmware restarts. As such, it is best to 6363 * set a small dwell_time here and just keep re-issuing 6364 * scans. Otherwise fast channel hopping will not actually 6365 * hop channels. 6366 * 6367 * TODO: Move SPEED SCAN support to all modes and bands */ 6368 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6369 cpu_to_le16(2000); 6370 } else { 6371 #endif /* CONFIG_IPW2200_MONITOR */ 6372 /* Honor direct scans first, otherwise if we are roaming make 6373 * this a direct scan for the current network. 
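 * (The full_scan_index parity test below means that, while not
 * associated and with a static ESSID configured, roughly every
 * second scan is directed at that ESSID.)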
Finally, 6374 * ensure that every other scan is a fast channel hop scan */ 6375 if (direct) { 6376 err = ipw_send_ssid(priv, priv->direct_scan_ssid, 6377 priv->direct_scan_ssid_len); 6378 if (err) { 6379 IPW_DEBUG_HC("Attempt to send SSID command " 6380 "failed\n"); 6381 goto done; 6382 } 6383 6384 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; 6385 } else if ((priv->status & STATUS_ROAMING) 6386 || (!(priv->status & STATUS_ASSOCIATED) 6387 && (priv->config & CFG_STATIC_ESSID) 6388 && (le32_to_cpu(scan.full_scan_index) % 2))) { 6389 err = ipw_send_ssid(priv, priv->essid, priv->essid_len); 6390 if (err) { 6391 IPW_DEBUG_HC("Attempt to send SSID command " 6392 "failed.\n"); 6393 goto done; 6394 } 6395 6396 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; 6397 } else 6398 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN; 6399 6400 ipw_add_scan_channels(priv, &scan, scan_type); 6401 #ifdef CONFIG_IPW2200_MONITOR 6402 } 6403 #endif 6404 6405 send_request: 6406 err = ipw_send_scan_request_ext(priv, &scan); 6407 if (err) { 6408 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err); 6409 goto done; 6410 } 6411 6412 priv->status |= STATUS_SCANNING; 6413 if (direct) { 6414 priv->status &= ~STATUS_DIRECT_SCAN_PENDING; 6415 priv->direct_scan_ssid_len = 0; 6416 } else 6417 priv->status &= ~STATUS_SCAN_PENDING; 6418 6419 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG); 6420 done: 6421 mutex_unlock(&priv->mutex); 6422 return err; 6423 } 6424 6425 static void ipw_request_passive_scan(struct work_struct *work) 6426 { 6427 struct ipw_priv *priv = 6428 container_of(work, struct ipw_priv, request_passive_scan.work); 6429 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0); 6430 } 6431 6432 static void ipw_request_scan(struct work_struct *work) 6433 { 6434 struct ipw_priv *priv = 6435 container_of(work, struct ipw_priv, request_scan.work); 6436 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0); 6437 } 6438 6439 static void ipw_request_direct_scan(struct work_struct *work) 6440 { 6441 struct ipw_priv *priv = 6442 container_of(work, struct ipw_priv, request_direct_scan.work); 6443 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1); 6444 } 6445 6446 static void ipw_bg_abort_scan(struct work_struct *work) 6447 { 6448 struct ipw_priv *priv = 6449 container_of(work, struct ipw_priv, abort_scan); 6450 mutex_lock(&priv->mutex); 6451 ipw_abort_scan(priv); 6452 mutex_unlock(&priv->mutex); 6453 } 6454 6455 static int ipw_wpa_enable(struct ipw_priv *priv, int value) 6456 { 6457 /* This is called when wpa_supplicant loads and closes the driver 6458 * interface. 
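 * The value is simply cached in ieee->wpa_enabled here; nothing is
 * sent to the firmware at this point.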
*/ 6459 priv->ieee->wpa_enabled = value; 6460 return 0; 6461 } 6462 6463 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value) 6464 { 6465 struct libipw_device *ieee = priv->ieee; 6466 struct libipw_security sec = { 6467 .flags = SEC_AUTH_MODE, 6468 }; 6469 int ret = 0; 6470 6471 if (value & IW_AUTH_ALG_SHARED_KEY) { 6472 sec.auth_mode = WLAN_AUTH_SHARED_KEY; 6473 ieee->open_wep = 0; 6474 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { 6475 sec.auth_mode = WLAN_AUTH_OPEN; 6476 ieee->open_wep = 1; 6477 } else if (value & IW_AUTH_ALG_LEAP) { 6478 sec.auth_mode = WLAN_AUTH_LEAP; 6479 ieee->open_wep = 1; 6480 } else 6481 return -EINVAL; 6482 6483 if (ieee->set_security) 6484 ieee->set_security(ieee->dev, &sec); 6485 else 6486 ret = -EOPNOTSUPP; 6487 6488 return ret; 6489 } 6490 6491 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, 6492 int wpa_ie_len) 6493 { 6494 /* make sure WPA is enabled */ 6495 ipw_wpa_enable(priv, 1); 6496 } 6497 6498 static int ipw_set_rsn_capa(struct ipw_priv *priv, 6499 char *capabilities, int length) 6500 { 6501 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n"); 6502 6503 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length, 6504 capabilities); 6505 } 6506 6507 /* 6508 * WE-18 support 6509 */ 6510 6511 /* SIOCSIWGENIE */ 6512 static int ipw_wx_set_genie(struct net_device *dev, 6513 struct iw_request_info *info, 6514 union iwreq_data *wrqu, char *extra) 6515 { 6516 struct ipw_priv *priv = libipw_priv(dev); 6517 struct libipw_device *ieee = priv->ieee; 6518 u8 *buf; 6519 int err = 0; 6520 6521 if (wrqu->data.length > MAX_WPA_IE_LEN || 6522 (wrqu->data.length && extra == NULL)) 6523 return -EINVAL; 6524 6525 if (wrqu->data.length) { 6526 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL); 6527 if (buf == NULL) { 6528 err = -ENOMEM; 6529 goto out; 6530 } 6531 6532 kfree(ieee->wpa_ie); 6533 ieee->wpa_ie = buf; 6534 ieee->wpa_ie_len = wrqu->data.length; 6535 } else { 6536 kfree(ieee->wpa_ie); 6537 ieee->wpa_ie = NULL; 6538 ieee->wpa_ie_len = 0; 6539 } 6540 6541 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); 6542 out: 6543 return err; 6544 } 6545 6546 /* SIOCGIWGENIE */ 6547 static int ipw_wx_get_genie(struct net_device *dev, 6548 struct iw_request_info *info, 6549 union iwreq_data *wrqu, char *extra) 6550 { 6551 struct ipw_priv *priv = libipw_priv(dev); 6552 struct libipw_device *ieee = priv->ieee; 6553 int err = 0; 6554 6555 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) { 6556 wrqu->data.length = 0; 6557 goto out; 6558 } 6559 6560 if (wrqu->data.length < ieee->wpa_ie_len) { 6561 err = -E2BIG; 6562 goto out; 6563 } 6564 6565 wrqu->data.length = ieee->wpa_ie_len; 6566 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); 6567 6568 out: 6569 return err; 6570 } 6571 6572 static int wext_cipher2level(int cipher) 6573 { 6574 switch (cipher) { 6575 case IW_AUTH_CIPHER_NONE: 6576 return SEC_LEVEL_0; 6577 case IW_AUTH_CIPHER_WEP40: 6578 case IW_AUTH_CIPHER_WEP104: 6579 return SEC_LEVEL_1; 6580 case IW_AUTH_CIPHER_TKIP: 6581 return SEC_LEVEL_2; 6582 case IW_AUTH_CIPHER_CCMP: 6583 return SEC_LEVEL_3; 6584 default: 6585 return -1; 6586 } 6587 } 6588 6589 /* SIOCSIWAUTH */ 6590 static int ipw_wx_set_auth(struct net_device *dev, 6591 struct iw_request_info *info, 6592 union iwreq_data *wrqu, char *extra) 6593 { 6594 struct ipw_priv *priv = libipw_priv(dev); 6595 struct libipw_device *ieee = priv->ieee; 6596 struct iw_param *param = &wrqu->param; 6597 struct lib80211_crypt_data *crypt; 6598 unsigned long flags; 6599 int ret = 0; 6600 6601 
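	/*
	 * Summary of how the IW_AUTH parameters below are handled
	 * (derived from the switch that follows): WPA_VERSION and
	 * KEY_MGMT are accepted but ignored, CIPHER_PAIRWISE and
	 * CIPHER_GROUP are mapped to hardware decrypt levels,
	 * TKIP_COUNTERMEASURES toggles a flag on the active crypt
	 * ops, DROP_UNENCRYPTED drives the privacy capability via
	 * set_security(), 80211_AUTH_ALG selects open/shared/LEAP,
	 * and WPA_ENABLED also forces a disassociation so the new
	 * policy applies to the next association.
	 */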
switch (param->flags & IW_AUTH_INDEX) { 6602 case IW_AUTH_WPA_VERSION: 6603 break; 6604 case IW_AUTH_CIPHER_PAIRWISE: 6605 ipw_set_hw_decrypt_unicast(priv, 6606 wext_cipher2level(param->value)); 6607 break; 6608 case IW_AUTH_CIPHER_GROUP: 6609 ipw_set_hw_decrypt_multicast(priv, 6610 wext_cipher2level(param->value)); 6611 break; 6612 case IW_AUTH_KEY_MGMT: 6613 /* 6614 * ipw2200 does not use these parameters 6615 */ 6616 break; 6617 6618 case IW_AUTH_TKIP_COUNTERMEASURES: 6619 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; 6620 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags) 6621 break; 6622 6623 flags = crypt->ops->get_flags(crypt->priv); 6624 6625 if (param->value) 6626 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; 6627 else 6628 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; 6629 6630 crypt->ops->set_flags(flags, crypt->priv); 6631 6632 break; 6633 6634 case IW_AUTH_DROP_UNENCRYPTED:{ 6635 /* HACK: 6636 * 6637 * wpa_supplicant calls set_wpa_enabled when the driver 6638 * is loaded and unloaded, regardless of if WPA is being 6639 * used. No other calls are made which can be used to 6640 * determine if encryption will be used or not prior to 6641 * association being expected. If encryption is not being 6642 * used, drop_unencrypted is set to false, else true -- we 6643 * can use this to determine if the CAP_PRIVACY_ON bit should 6644 * be set. 6645 */ 6646 struct libipw_security sec = { 6647 .flags = SEC_ENABLED, 6648 .enabled = param->value, 6649 }; 6650 priv->ieee->drop_unencrypted = param->value; 6651 /* We only change SEC_LEVEL for open mode. Others 6652 * are set by ipw_wpa_set_encryption. 6653 */ 6654 if (!param->value) { 6655 sec.flags |= SEC_LEVEL; 6656 sec.level = SEC_LEVEL_0; 6657 } else { 6658 sec.flags |= SEC_LEVEL; 6659 sec.level = SEC_LEVEL_1; 6660 } 6661 if (priv->ieee->set_security) 6662 priv->ieee->set_security(priv->ieee->dev, &sec); 6663 break; 6664 } 6665 6666 case IW_AUTH_80211_AUTH_ALG: 6667 ret = ipw_wpa_set_auth_algs(priv, param->value); 6668 break; 6669 6670 case IW_AUTH_WPA_ENABLED: 6671 ret = ipw_wpa_enable(priv, param->value); 6672 ipw_disassociate(priv); 6673 break; 6674 6675 case IW_AUTH_RX_UNENCRYPTED_EAPOL: 6676 ieee->ieee802_1x = param->value; 6677 break; 6678 6679 case IW_AUTH_PRIVACY_INVOKED: 6680 ieee->privacy_invoked = param->value; 6681 break; 6682 6683 default: 6684 return -EOPNOTSUPP; 6685 } 6686 return ret; 6687 } 6688 6689 /* SIOCGIWAUTH */ 6690 static int ipw_wx_get_auth(struct net_device *dev, 6691 struct iw_request_info *info, 6692 union iwreq_data *wrqu, char *extra) 6693 { 6694 struct ipw_priv *priv = libipw_priv(dev); 6695 struct libipw_device *ieee = priv->ieee; 6696 struct lib80211_crypt_data *crypt; 6697 struct iw_param *param = &wrqu->param; 6698 6699 switch (param->flags & IW_AUTH_INDEX) { 6700 case IW_AUTH_WPA_VERSION: 6701 case IW_AUTH_CIPHER_PAIRWISE: 6702 case IW_AUTH_CIPHER_GROUP: 6703 case IW_AUTH_KEY_MGMT: 6704 /* 6705 * wpa_supplicant will control these internally 6706 */ 6707 return -EOPNOTSUPP; 6708 6709 case IW_AUTH_TKIP_COUNTERMEASURES: 6710 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; 6711 if (!crypt || !crypt->ops->get_flags) 6712 break; 6713 6714 param->value = (crypt->ops->get_flags(crypt->priv) & 6715 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 
1 : 0; 6716 6717 break; 6718 6719 case IW_AUTH_DROP_UNENCRYPTED: 6720 param->value = ieee->drop_unencrypted; 6721 break; 6722 6723 case IW_AUTH_80211_AUTH_ALG: 6724 param->value = ieee->sec.auth_mode; 6725 break; 6726 6727 case IW_AUTH_WPA_ENABLED: 6728 param->value = ieee->wpa_enabled; 6729 break; 6730 6731 case IW_AUTH_RX_UNENCRYPTED_EAPOL: 6732 param->value = ieee->ieee802_1x; 6733 break; 6734 6735 case IW_AUTH_ROAMING_CONTROL: 6736 case IW_AUTH_PRIVACY_INVOKED: 6737 param->value = ieee->privacy_invoked; 6738 break; 6739 6740 default: 6741 return -EOPNOTSUPP; 6742 } 6743 return 0; 6744 } 6745 6746 /* SIOCSIWENCODEEXT */ 6747 static int ipw_wx_set_encodeext(struct net_device *dev, 6748 struct iw_request_info *info, 6749 union iwreq_data *wrqu, char *extra) 6750 { 6751 struct ipw_priv *priv = libipw_priv(dev); 6752 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 6753 6754 if (hwcrypto) { 6755 if (ext->alg == IW_ENCODE_ALG_TKIP) { 6756 /* IPW HW can't build TKIP MIC, 6757 host decryption still needed */ 6758 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) 6759 priv->ieee->host_mc_decrypt = 1; 6760 else { 6761 priv->ieee->host_encrypt = 0; 6762 priv->ieee->host_encrypt_msdu = 1; 6763 priv->ieee->host_decrypt = 1; 6764 } 6765 } else { 6766 priv->ieee->host_encrypt = 0; 6767 priv->ieee->host_encrypt_msdu = 0; 6768 priv->ieee->host_decrypt = 0; 6769 priv->ieee->host_mc_decrypt = 0; 6770 } 6771 } 6772 6773 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra); 6774 } 6775 6776 /* SIOCGIWENCODEEXT */ 6777 static int ipw_wx_get_encodeext(struct net_device *dev, 6778 struct iw_request_info *info, 6779 union iwreq_data *wrqu, char *extra) 6780 { 6781 struct ipw_priv *priv = libipw_priv(dev); 6782 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra); 6783 } 6784 6785 /* SIOCSIWMLME */ 6786 static int ipw_wx_set_mlme(struct net_device *dev, 6787 struct iw_request_info *info, 6788 union iwreq_data *wrqu, char *extra) 6789 { 6790 struct ipw_priv *priv = libipw_priv(dev); 6791 struct iw_mlme *mlme = (struct iw_mlme *)extra; 6792 6793 switch (mlme->cmd) { 6794 case IW_MLME_DEAUTH: 6795 /* silently ignore */ 6796 break; 6797 6798 case IW_MLME_DISASSOC: 6799 ipw_disassociate(priv); 6800 break; 6801 6802 default: 6803 return -EOPNOTSUPP; 6804 } 6805 return 0; 6806 } 6807 6808 #ifdef CONFIG_IPW2200_QOS 6809 6810 /* QoS */ 6811 /* 6812 * get the modulation type of the current network or 6813 * the card current mode 6814 */ 6815 static u8 ipw_qos_current_mode(struct ipw_priv * priv) 6816 { 6817 u8 mode = 0; 6818 6819 if (priv->status & STATUS_ASSOCIATED) { 6820 unsigned long flags; 6821 6822 spin_lock_irqsave(&priv->ieee->lock, flags); 6823 mode = priv->assoc_network->mode; 6824 spin_unlock_irqrestore(&priv->ieee->lock, flags); 6825 } else { 6826 mode = priv->ieee->mode; 6827 } 6828 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode); 6829 return mode; 6830 } 6831 6832 /* 6833 * Handle management frame beacon and probe response 6834 */ 6835 static int ipw_qos_handle_probe_response(struct ipw_priv *priv, 6836 int active_network, 6837 struct libipw_network *network) 6838 { 6839 u32 size = sizeof(struct libipw_qos_parameters); 6840 6841 if (network->capability & WLAN_CAPABILITY_IBSS) 6842 network->qos_data.active = network->qos_data.supported; 6843 6844 if (network->flags & NETWORK_HAS_QOS_MASK) { 6845 if (active_network && 6846 (network->flags & NETWORK_HAS_QOS_PARAMETERS)) 6847 network->qos_data.active = network->qos_data.supported; 6848 6849 if ((network->qos_data.active == 1) && 
(active_network == 1) && 6850 (network->flags & NETWORK_HAS_QOS_PARAMETERS) && 6851 (network->qos_data.old_param_count != 6852 network->qos_data.param_count)) { 6853 network->qos_data.old_param_count = 6854 network->qos_data.param_count; 6855 schedule_work(&priv->qos_activate); 6856 IPW_DEBUG_QOS("QoS parameters change call " 6857 "qos_activate\n"); 6858 } 6859 } else { 6860 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B)) 6861 memcpy(&network->qos_data.parameters, 6862 &def_parameters_CCK, size); 6863 else 6864 memcpy(&network->qos_data.parameters, 6865 &def_parameters_OFDM, size); 6866 6867 if ((network->qos_data.active == 1) && (active_network == 1)) { 6868 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n"); 6869 schedule_work(&priv->qos_activate); 6870 } 6871 6872 network->qos_data.active = 0; 6873 network->qos_data.supported = 0; 6874 } 6875 if ((priv->status & STATUS_ASSOCIATED) && 6876 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) { 6877 if (!ether_addr_equal(network->bssid, priv->bssid)) 6878 if (network->capability & WLAN_CAPABILITY_IBSS) 6879 if ((network->ssid_len == 6880 priv->assoc_network->ssid_len) && 6881 !memcmp(network->ssid, 6882 priv->assoc_network->ssid, 6883 network->ssid_len)) { 6884 schedule_work(&priv->merge_networks); 6885 } 6886 } 6887 6888 return 0; 6889 } 6890 6891 /* 6892 * This function set up the firmware to support QoS. It sends 6893 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO 6894 */ 6895 static int ipw_qos_activate(struct ipw_priv *priv, 6896 struct libipw_qos_data *qos_network_data) 6897 { 6898 int err; 6899 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS]; 6900 struct libipw_qos_parameters *active_one = NULL; 6901 u32 size = sizeof(struct libipw_qos_parameters); 6902 u32 burst_duration; 6903 int i; 6904 u8 type; 6905 6906 type = ipw_qos_current_mode(priv); 6907 6908 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]); 6909 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size); 6910 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]); 6911 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size); 6912 6913 if (qos_network_data == NULL) { 6914 if (type == IEEE_B) { 6915 IPW_DEBUG_QOS("QoS activate network mode %d\n", type); 6916 active_one = &def_parameters_CCK; 6917 } else 6918 active_one = &def_parameters_OFDM; 6919 6920 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 6921 burst_duration = ipw_qos_get_burst_duration(priv); 6922 for (i = 0; i < QOS_QUEUE_NUM; i++) 6923 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] = 6924 cpu_to_le16(burst_duration); 6925 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 6926 if (type == IEEE_B) { 6927 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n", 6928 type); 6929 if (priv->qos_data.qos_enable == 0) 6930 active_one = &def_parameters_CCK; 6931 else 6932 active_one = priv->qos_data.def_qos_parm_CCK; 6933 } else { 6934 if (priv->qos_data.qos_enable == 0) 6935 active_one = &def_parameters_OFDM; 6936 else 6937 active_one = priv->qos_data.def_qos_parm_OFDM; 6938 } 6939 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 6940 } else { 6941 unsigned long flags; 6942 int active; 6943 6944 spin_lock_irqsave(&priv->ieee->lock, flags); 6945 active_one = &(qos_network_data->parameters); 6946 qos_network_data->old_param_count = 6947 qos_network_data->param_count; 6948 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 6949 active = qos_network_data->supported; 6950 spin_unlock_irqrestore(&priv->ieee->lock, flags); 6951 
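		/* If the BSS turned out not to support QoS (supported == 0),
		 * fall back to the locally configured burst duration as the
		 * TXOP limit for every queue, mirroring the "no network
		 * data" case above. */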
6952 if (active == 0) { 6953 burst_duration = ipw_qos_get_burst_duration(priv); 6954 for (i = 0; i < QOS_QUEUE_NUM; i++) 6955 qos_parameters[QOS_PARAM_SET_ACTIVE]. 6956 tx_op_limit[i] = cpu_to_le16(burst_duration); 6957 } 6958 } 6959 6960 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n"); 6961 err = ipw_send_qos_params_command(priv, &qos_parameters[0]); 6962 if (err) 6963 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n"); 6964 6965 return err; 6966 } 6967 6968 /* 6969 * send IPW_CMD_WME_INFO to the firmware 6970 */ 6971 static int ipw_qos_set_info_element(struct ipw_priv *priv) 6972 { 6973 int ret = 0; 6974 struct libipw_qos_information_element qos_info; 6975 6976 if (priv == NULL) 6977 return -1; 6978 6979 qos_info.elementID = QOS_ELEMENT_ID; 6980 qos_info.length = sizeof(struct libipw_qos_information_element) - 2; 6981 6982 qos_info.version = QOS_VERSION_1; 6983 qos_info.ac_info = 0; 6984 6985 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN); 6986 qos_info.qui_type = QOS_OUI_TYPE; 6987 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE; 6988 6989 ret = ipw_send_qos_info_command(priv, &qos_info); 6990 if (ret != 0) { 6991 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n"); 6992 } 6993 return ret; 6994 } 6995 6996 /* 6997 * Set the QoS parameter with the association request structure 6998 */ 6999 static int ipw_qos_association(struct ipw_priv *priv, 7000 struct libipw_network *network) 7001 { 7002 int err = 0; 7003 struct libipw_qos_data *qos_data = NULL; 7004 struct libipw_qos_data ibss_data = { 7005 .supported = 1, 7006 .active = 1, 7007 }; 7008 7009 switch (priv->ieee->iw_mode) { 7010 case IW_MODE_ADHOC: 7011 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS)); 7012 7013 qos_data = &ibss_data; 7014 break; 7015 7016 case IW_MODE_INFRA: 7017 qos_data = &network->qos_data; 7018 break; 7019 7020 default: 7021 BUG(); 7022 break; 7023 } 7024 7025 err = ipw_qos_activate(priv, qos_data); 7026 if (err) { 7027 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC; 7028 return err; 7029 } 7030 7031 if (priv->qos_data.qos_enable && qos_data->supported) { 7032 IPW_DEBUG_QOS("QoS will be enabled for this association\n"); 7033 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC; 7034 return ipw_qos_set_info_element(priv); 7035 } 7036 7037 return 0; 7038 } 7039 7040 /* 7041 * handling the beaconing responses. 
if we get different QoS setting 7042 * off the network from the associated setting, adjust the QoS 7043 * setting 7044 */ 7045 static int ipw_qos_association_resp(struct ipw_priv *priv, 7046 struct libipw_network *network) 7047 { 7048 int ret = 0; 7049 unsigned long flags; 7050 u32 size = sizeof(struct libipw_qos_parameters); 7051 int set_qos_param = 0; 7052 7053 if ((priv == NULL) || (network == NULL) || 7054 (priv->assoc_network == NULL)) 7055 return ret; 7056 7057 if (!(priv->status & STATUS_ASSOCIATED)) 7058 return ret; 7059 7060 if ((priv->ieee->iw_mode != IW_MODE_INFRA)) 7061 return ret; 7062 7063 spin_lock_irqsave(&priv->ieee->lock, flags); 7064 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) { 7065 memcpy(&priv->assoc_network->qos_data, &network->qos_data, 7066 sizeof(struct libipw_qos_data)); 7067 priv->assoc_network->qos_data.active = 1; 7068 if ((network->qos_data.old_param_count != 7069 network->qos_data.param_count)) { 7070 set_qos_param = 1; 7071 network->qos_data.old_param_count = 7072 network->qos_data.param_count; 7073 } 7074 7075 } else { 7076 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B)) 7077 memcpy(&priv->assoc_network->qos_data.parameters, 7078 &def_parameters_CCK, size); 7079 else 7080 memcpy(&priv->assoc_network->qos_data.parameters, 7081 &def_parameters_OFDM, size); 7082 priv->assoc_network->qos_data.active = 0; 7083 priv->assoc_network->qos_data.supported = 0; 7084 set_qos_param = 1; 7085 } 7086 7087 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7088 7089 if (set_qos_param == 1) 7090 schedule_work(&priv->qos_activate); 7091 7092 return ret; 7093 } 7094 7095 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv) 7096 { 7097 u32 ret = 0; 7098 7099 if (!priv) 7100 return 0; 7101 7102 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION)) 7103 ret = priv->qos_data.burst_duration_CCK; 7104 else 7105 ret = priv->qos_data.burst_duration_OFDM; 7106 7107 return ret; 7108 } 7109 7110 /* 7111 * Initialize the setting of QoS global 7112 */ 7113 static void ipw_qos_init(struct ipw_priv *priv, int enable, 7114 int burst_enable, u32 burst_duration_CCK, 7115 u32 burst_duration_OFDM) 7116 { 7117 priv->qos_data.qos_enable = enable; 7118 7119 if (priv->qos_data.qos_enable) { 7120 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK; 7121 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM; 7122 IPW_DEBUG_QOS("QoS is enabled\n"); 7123 } else { 7124 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK; 7125 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM; 7126 IPW_DEBUG_QOS("QoS is not enabled\n"); 7127 } 7128 7129 priv->qos_data.burst_enable = burst_enable; 7130 7131 if (burst_enable) { 7132 priv->qos_data.burst_duration_CCK = burst_duration_CCK; 7133 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM; 7134 } else { 7135 priv->qos_data.burst_duration_CCK = 0; 7136 priv->qos_data.burst_duration_OFDM = 0; 7137 } 7138 } 7139 7140 /* 7141 * map the packet priority to the right TX Queue 7142 */ 7143 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority) 7144 { 7145 if (priority > 7 || !priv->qos_data.qos_enable) 7146 priority = 0; 7147 7148 return from_priority_to_tx_queue[priority] - 1; 7149 } 7150 7151 static int ipw_is_qos_active(struct net_device *dev, 7152 struct sk_buff *skb) 7153 { 7154 struct ipw_priv *priv = libipw_priv(dev); 7155 struct libipw_qos_data *qos_data = NULL; 7156 int active, supported; 7157 u8 *daddr = skb->data + ETH_ALEN; 7158 int unicast = !is_multicast_ether_addr(daddr); 7159 7160 if 
(!(priv->status & STATUS_ASSOCIATED)) 7161 return 0; 7162 7163 qos_data = &priv->assoc_network->qos_data; 7164 7165 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7166 if (unicast == 0) 7167 qos_data->active = 0; 7168 else 7169 qos_data->active = qos_data->supported; 7170 } 7171 active = qos_data->active; 7172 supported = qos_data->supported; 7173 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d " 7174 "unicast %d\n", 7175 priv->qos_data.qos_enable, active, supported, unicast); 7176 if (active && priv->qos_data.qos_enable) 7177 return 1; 7178 7179 return 0; 7180 7181 } 7182 /* 7183 * add QoS parameter to the TX command 7184 */ 7185 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv, 7186 u16 priority, 7187 struct tfd_data *tfd) 7188 { 7189 int tx_queue_id = 0; 7190 7191 7192 tx_queue_id = from_priority_to_tx_queue[priority] - 1; 7193 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED; 7194 7195 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) { 7196 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD; 7197 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK); 7198 } 7199 return 0; 7200 } 7201 7202 /* 7203 * background support to run QoS activate functionality 7204 */ 7205 static void ipw_bg_qos_activate(struct work_struct *work) 7206 { 7207 struct ipw_priv *priv = 7208 container_of(work, struct ipw_priv, qos_activate); 7209 7210 mutex_lock(&priv->mutex); 7211 7212 if (priv->status & STATUS_ASSOCIATED) 7213 ipw_qos_activate(priv, &(priv->assoc_network->qos_data)); 7214 7215 mutex_unlock(&priv->mutex); 7216 } 7217 7218 static int ipw_handle_probe_response(struct net_device *dev, 7219 struct libipw_probe_response *resp, 7220 struct libipw_network *network) 7221 { 7222 struct ipw_priv *priv = libipw_priv(dev); 7223 int active_network = ((priv->status & STATUS_ASSOCIATED) && 7224 (network == priv->assoc_network)); 7225 7226 ipw_qos_handle_probe_response(priv, active_network, network); 7227 7228 return 0; 7229 } 7230 7231 static int ipw_handle_beacon(struct net_device *dev, 7232 struct libipw_beacon *resp, 7233 struct libipw_network *network) 7234 { 7235 struct ipw_priv *priv = libipw_priv(dev); 7236 int active_network = ((priv->status & STATUS_ASSOCIATED) && 7237 (network == priv->assoc_network)); 7238 7239 ipw_qos_handle_probe_response(priv, active_network, network); 7240 7241 return 0; 7242 } 7243 7244 static int ipw_handle_assoc_response(struct net_device *dev, 7245 struct libipw_assoc_response *resp, 7246 struct libipw_network *network) 7247 { 7248 struct ipw_priv *priv = libipw_priv(dev); 7249 ipw_qos_association_resp(priv, network); 7250 return 0; 7251 } 7252 7253 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters 7254 *qos_param) 7255 { 7256 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS, 7257 sizeof(*qos_param) * 3, qos_param); 7258 } 7259 7260 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element 7261 *qos_param) 7262 { 7263 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param), 7264 qos_param); 7265 } 7266 7267 #endif /* CONFIG_IPW2200_QOS */ 7268 7269 static int ipw_associate_network(struct ipw_priv *priv, 7270 struct libipw_network *network, 7271 struct ipw_supported_rates *rates, int roaming) 7272 { 7273 int err; 7274 7275 if (priv->config & CFG_FIXED_RATE) 7276 ipw_set_fixed_rate(priv, network->mode); 7277 7278 if (!(priv->config & CFG_STATIC_ESSID)) { 7279 priv->essid_len = min(network->ssid_len, 7280 (u8) IW_ESSID_MAX_SIZE); 7281 memcpy(priv->essid, 
network->ssid, priv->essid_len); 7282 } 7283 7284 network->last_associate = jiffies; 7285 7286 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request)); 7287 priv->assoc_request.channel = network->channel; 7288 priv->assoc_request.auth_key = 0; 7289 7290 if ((priv->capability & CAP_PRIVACY_ON) && 7291 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) { 7292 priv->assoc_request.auth_type = AUTH_SHARED_KEY; 7293 priv->assoc_request.auth_key = priv->ieee->sec.active_key; 7294 7295 if (priv->ieee->sec.level == SEC_LEVEL_1) 7296 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); 7297 7298 } else if ((priv->capability & CAP_PRIVACY_ON) && 7299 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP)) 7300 priv->assoc_request.auth_type = AUTH_LEAP; 7301 else 7302 priv->assoc_request.auth_type = AUTH_OPEN; 7303 7304 if (priv->ieee->wpa_ie_len) { 7305 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */ 7306 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie, 7307 priv->ieee->wpa_ie_len); 7308 } 7309 7310 /* 7311 * It is valid for our ieee device to support multiple modes, but 7312 * when it comes to associating to a given network we have to choose 7313 * just one mode. 7314 */ 7315 if (network->mode & priv->ieee->mode & IEEE_A) 7316 priv->assoc_request.ieee_mode = IPW_A_MODE; 7317 else if (network->mode & priv->ieee->mode & IEEE_G) 7318 priv->assoc_request.ieee_mode = IPW_G_MODE; 7319 else if (network->mode & priv->ieee->mode & IEEE_B) 7320 priv->assoc_request.ieee_mode = IPW_B_MODE; 7321 7322 priv->assoc_request.capability = cpu_to_le16(network->capability); 7323 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 7324 && !(priv->config & CFG_PREAMBLE_LONG)) { 7325 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE; 7326 } else { 7327 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE; 7328 7329 /* Clear the short preamble if we won't be supporting it */ 7330 priv->assoc_request.capability &= 7331 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE); 7332 } 7333 7334 /* Clear capability bits that aren't used in Ad Hoc */ 7335 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 7336 priv->assoc_request.capability &= 7337 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME); 7338 7339 IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n", 7340 roaming ? "Rea" : "A", 7341 priv->essid_len, priv->essid, 7342 network->channel, 7343 ipw_modes[priv->assoc_request.ieee_mode], 7344 rates->num_rates, 7345 (priv->assoc_request.preamble_length == 7346 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short", 7347 network->capability & 7348 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long", 7349 priv->capability & CAP_PRIVACY_ON ? "on " : "off", 7350 priv->capability & CAP_PRIVACY_ON ? 7351 (priv->capability & CAP_SHARED_KEY ? "(shared)" : 7352 "(open)") : "", 7353 priv->capability & CAP_PRIVACY_ON ? " key=" : "", 7354 priv->capability & CAP_PRIVACY_ON ? 7355 '1' + priv->ieee->sec.active_key : '.', 7356 priv->capability & CAP_PRIVACY_ON ? '.' 
: ' '); 7357 7358 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval); 7359 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 7360 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) { 7361 priv->assoc_request.assoc_type = HC_IBSS_START; 7362 priv->assoc_request.assoc_tsf_msw = 0; 7363 priv->assoc_request.assoc_tsf_lsw = 0; 7364 } else { 7365 if (unlikely(roaming)) 7366 priv->assoc_request.assoc_type = HC_REASSOCIATE; 7367 else 7368 priv->assoc_request.assoc_type = HC_ASSOCIATE; 7369 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]); 7370 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]); 7371 } 7372 7373 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN); 7374 7375 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7376 eth_broadcast_addr(priv->assoc_request.dest); 7377 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window); 7378 } else { 7379 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN); 7380 priv->assoc_request.atim_window = 0; 7381 } 7382 7383 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval); 7384 7385 err = ipw_send_ssid(priv, priv->essid, priv->essid_len); 7386 if (err) { 7387 IPW_DEBUG_HC("Attempt to send SSID command failed.\n"); 7388 return err; 7389 } 7390 7391 rates->ieee_mode = priv->assoc_request.ieee_mode; 7392 rates->purpose = IPW_RATE_CONNECT; 7393 ipw_send_supported_rates(priv, rates); 7394 7395 if (priv->assoc_request.ieee_mode == IPW_G_MODE) 7396 priv->sys_config.dot11g_auto_detection = 1; 7397 else 7398 priv->sys_config.dot11g_auto_detection = 0; 7399 7400 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 7401 priv->sys_config.answer_broadcast_ssid_probe = 1; 7402 else 7403 priv->sys_config.answer_broadcast_ssid_probe = 0; 7404 7405 err = ipw_send_system_config(priv); 7406 if (err) { 7407 IPW_DEBUG_HC("Attempt to send sys config command failed.\n"); 7408 return err; 7409 } 7410 7411 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi); 7412 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM); 7413 if (err) { 7414 IPW_DEBUG_HC("Attempt to send associate command failed.\n"); 7415 return err; 7416 } 7417 7418 /* 7419 * If preemption is enabled, it is possible for the association 7420 * to complete before we return from ipw_send_associate. Therefore 7421 * we have to be sure and update our priviate data first. 7422 */ 7423 priv->channel = network->channel; 7424 memcpy(priv->bssid, network->bssid, ETH_ALEN); 7425 priv->status |= STATUS_ASSOCIATING; 7426 priv->status &= ~STATUS_SECURITY_UPDATED; 7427 7428 priv->assoc_network = network; 7429 7430 #ifdef CONFIG_IPW2200_QOS 7431 ipw_qos_association(priv, network); 7432 #endif 7433 7434 err = ipw_send_associate(priv, &priv->assoc_request); 7435 if (err) { 7436 IPW_DEBUG_HC("Attempt to send associate command failed.\n"); 7437 return err; 7438 } 7439 7440 IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n", 7441 priv->essid_len, priv->essid, priv->bssid); 7442 7443 return 0; 7444 } 7445 7446 static void ipw_roam(void *data) 7447 { 7448 struct ipw_priv *priv = data; 7449 struct libipw_network *network = NULL; 7450 struct ipw_network_match match = { 7451 .network = priv->assoc_network 7452 }; 7453 7454 /* The roaming process is as follows: 7455 * 7456 * 1. Missed beacon threshold triggers the roaming process by 7457 * setting the status ROAM bit and requesting a scan. 7458 * 2. When the scan completes, it schedules the ROAM work 7459 * 3. 
The ROAM work looks at all of the known networks for one that 7460 * is a better network than the currently associated. If none 7461 * found, the ROAM process is over (ROAM bit cleared) 7462 * 4. If a better network is found, a disassociation request is 7463 * sent. 7464 * 5. When the disassociation completes, the roam work is again 7465 * scheduled. The second time through, the driver is no longer 7466 * associated, and the newly selected network is sent an 7467 * association request. 7468 * 6. At this point ,the roaming process is complete and the ROAM 7469 * status bit is cleared. 7470 */ 7471 7472 /* If we are no longer associated, and the roaming bit is no longer 7473 * set, then we are not actively roaming, so just return */ 7474 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING))) 7475 return; 7476 7477 if (priv->status & STATUS_ASSOCIATED) { 7478 /* First pass through ROAM process -- look for a better 7479 * network */ 7480 unsigned long flags; 7481 u8 rssi = priv->assoc_network->stats.rssi; 7482 priv->assoc_network->stats.rssi = -128; 7483 spin_lock_irqsave(&priv->ieee->lock, flags); 7484 list_for_each_entry(network, &priv->ieee->network_list, list) { 7485 if (network != priv->assoc_network) 7486 ipw_best_network(priv, &match, network, 1); 7487 } 7488 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7489 priv->assoc_network->stats.rssi = rssi; 7490 7491 if (match.network == priv->assoc_network) { 7492 IPW_DEBUG_ASSOC("No better APs in this network to " 7493 "roam to.\n"); 7494 priv->status &= ~STATUS_ROAMING; 7495 ipw_debug_config(priv); 7496 return; 7497 } 7498 7499 ipw_send_disassociate(priv, 1); 7500 priv->assoc_network = match.network; 7501 7502 return; 7503 } 7504 7505 /* Second pass through ROAM process -- request association */ 7506 ipw_compatible_rates(priv, priv->assoc_network, &match.rates); 7507 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1); 7508 priv->status &= ~STATUS_ROAMING; 7509 } 7510 7511 static void ipw_bg_roam(struct work_struct *work) 7512 { 7513 struct ipw_priv *priv = 7514 container_of(work, struct ipw_priv, roam); 7515 mutex_lock(&priv->mutex); 7516 ipw_roam(priv); 7517 mutex_unlock(&priv->mutex); 7518 } 7519 7520 static int ipw_associate(void *data) 7521 { 7522 struct ipw_priv *priv = data; 7523 7524 struct libipw_network *network = NULL; 7525 struct ipw_network_match match = { 7526 .network = NULL 7527 }; 7528 struct ipw_supported_rates *rates; 7529 struct list_head *element; 7530 unsigned long flags; 7531 7532 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 7533 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n"); 7534 return 0; 7535 } 7536 7537 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 7538 IPW_DEBUG_ASSOC("Not attempting association (already in " 7539 "progress)\n"); 7540 return 0; 7541 } 7542 7543 if (priv->status & STATUS_DISASSOCIATING) { 7544 IPW_DEBUG_ASSOC("Not attempting association (in disassociating)\n"); 7545 schedule_work(&priv->associate); 7546 return 0; 7547 } 7548 7549 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) { 7550 IPW_DEBUG_ASSOC("Not attempting association (scanning or not " 7551 "initialized)\n"); 7552 return 0; 7553 } 7554 7555 if (!(priv->config & CFG_ASSOCIATE) && 7556 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) { 7557 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n"); 7558 return 0; 7559 } 7560 7561 /* Protect our use of the network_list */ 7562 spin_lock_irqsave(&priv->ieee->lock, flags); 7563 list_for_each_entry(network, 
&priv->ieee->network_list, list) 7564 ipw_best_network(priv, &match, network, 0); 7565 7566 network = match.network; 7567 rates = &match.rates; 7568 7569 if (network == NULL && 7570 priv->ieee->iw_mode == IW_MODE_ADHOC && 7571 priv->config & CFG_ADHOC_CREATE && 7572 priv->config & CFG_STATIC_ESSID && 7573 priv->config & CFG_STATIC_CHANNEL) { 7574 /* Use oldest network if the free list is empty */ 7575 if (list_empty(&priv->ieee->network_free_list)) { 7576 struct libipw_network *oldest = NULL; 7577 struct libipw_network *target; 7578 7579 list_for_each_entry(target, &priv->ieee->network_list, list) { 7580 if ((oldest == NULL) || 7581 (target->last_scanned < oldest->last_scanned)) 7582 oldest = target; 7583 } 7584 7585 /* If there are no more slots, expire the oldest */ 7586 list_del(&oldest->list); 7587 target = oldest; 7588 IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n", 7589 target->ssid_len, target->ssid, 7590 target->bssid); 7591 list_add_tail(&target->list, 7592 &priv->ieee->network_free_list); 7593 } 7594 7595 element = priv->ieee->network_free_list.next; 7596 network = list_entry(element, struct libipw_network, list); 7597 ipw_adhoc_create(priv, network); 7598 rates = &priv->rates; 7599 list_del(element); 7600 list_add_tail(&network->list, &priv->ieee->network_list); 7601 } 7602 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7603 7604 /* If we reached the end of the list, then we don't have any valid 7605 * matching APs */ 7606 if (!network) { 7607 ipw_debug_config(priv); 7608 7609 if (!(priv->status & STATUS_SCANNING)) { 7610 if (!(priv->config & CFG_SPEED_SCAN)) 7611 schedule_delayed_work(&priv->request_scan, 7612 SCAN_INTERVAL); 7613 else 7614 schedule_delayed_work(&priv->request_scan, 0); 7615 } 7616 7617 return 0; 7618 } 7619 7620 ipw_associate_network(priv, network, rates, 0); 7621 7622 return 1; 7623 } 7624 7625 static void ipw_bg_associate(struct work_struct *work) 7626 { 7627 struct ipw_priv *priv = 7628 container_of(work, struct ipw_priv, associate); 7629 mutex_lock(&priv->mutex); 7630 ipw_associate(priv); 7631 mutex_unlock(&priv->mutex); 7632 } 7633 7634 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv, 7635 struct sk_buff *skb) 7636 { 7637 struct ieee80211_hdr *hdr; 7638 u16 fc; 7639 7640 hdr = (struct ieee80211_hdr *)skb->data; 7641 fc = le16_to_cpu(hdr->frame_control); 7642 if (!(fc & IEEE80211_FCTL_PROTECTED)) 7643 return; 7644 7645 fc &= ~IEEE80211_FCTL_PROTECTED; 7646 hdr->frame_control = cpu_to_le16(fc); 7647 switch (priv->ieee->sec.level) { 7648 case SEC_LEVEL_3: 7649 /* Remove CCMP HDR */ 7650 memmove(skb->data + LIBIPW_3ADDR_LEN, 7651 skb->data + LIBIPW_3ADDR_LEN + 8, 7652 skb->len - LIBIPW_3ADDR_LEN - 8); 7653 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */ 7654 break; 7655 case SEC_LEVEL_2: 7656 break; 7657 case SEC_LEVEL_1: 7658 /* Remove IV */ 7659 memmove(skb->data + LIBIPW_3ADDR_LEN, 7660 skb->data + LIBIPW_3ADDR_LEN + 4, 7661 skb->len - LIBIPW_3ADDR_LEN - 4); 7662 skb_trim(skb, skb->len - 8); /* IV + ICV */ 7663 break; 7664 case SEC_LEVEL_0: 7665 break; 7666 default: 7667 printk(KERN_ERR "Unknown security level %d\n", 7668 priv->ieee->sec.level); 7669 break; 7670 } 7671 } 7672 7673 static void ipw_handle_data_packet(struct ipw_priv *priv, 7674 struct ipw_rx_mem_buffer *rxb, 7675 struct libipw_rx_stats *stats) 7676 { 7677 struct net_device *dev = priv->net_dev; 7678 struct libipw_hdr_4addr *hdr; 7679 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 7680 7681 /* We received data from the HW, so 
stop the watchdog */ 7682 netif_trans_update(dev); 7683 7684 /* We only process data packets if the 7685 * interface is open */ 7686 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) > 7687 skb_tailroom(rxb->skb))) { 7688 dev->stats.rx_errors++; 7689 priv->wstats.discard.misc++; 7690 IPW_DEBUG_DROP("Corruption detected! Oh no!\n"); 7691 return; 7692 } else if (unlikely(!netif_running(priv->net_dev))) { 7693 dev->stats.rx_dropped++; 7694 priv->wstats.discard.misc++; 7695 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 7696 return; 7697 } 7698 7699 /* Advance skb->data to the start of the actual payload */ 7700 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data)); 7701 7702 /* Set the size of the skb to the size of the frame */ 7703 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length)); 7704 7705 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); 7706 7707 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */ 7708 hdr = (struct libipw_hdr_4addr *)rxb->skb->data; 7709 if (priv->ieee->iw_mode != IW_MODE_MONITOR && 7710 (is_multicast_ether_addr(hdr->addr1) ? 7711 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt)) 7712 ipw_rebuild_decrypted_skb(priv, rxb->skb); 7713 7714 if (!libipw_rx(priv->ieee, rxb->skb, stats)) 7715 dev->stats.rx_errors++; 7716 else { /* libipw_rx succeeded, so it now owns the SKB */ 7717 rxb->skb = NULL; 7718 __ipw_led_activity_on(priv); 7719 } 7720 } 7721 7722 #ifdef CONFIG_IPW2200_RADIOTAP 7723 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, 7724 struct ipw_rx_mem_buffer *rxb, 7725 struct libipw_rx_stats *stats) 7726 { 7727 struct net_device *dev = priv->net_dev; 7728 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 7729 struct ipw_rx_frame *frame = &pkt->u.frame; 7730 7731 /* initial pull of some data */ 7732 u16 received_channel = frame->received_channel; 7733 u8 antennaAndPhy = frame->antennaAndPhy; 7734 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */ 7735 u16 pktrate = frame->rate; 7736 7737 /* Magic struct that slots into the radiotap header -- no reason 7738 * to build this manually element by element, we can write it much 7739 * more efficiently than we can parse it. ORDER MATTERS HERE */ 7740 struct ipw_rt_hdr *ipw_rt; 7741 7742 unsigned short len = le16_to_cpu(pkt->u.frame.length); 7743 7744 /* We received data from the HW, so stop the watchdog */ 7745 netif_trans_update(dev); 7746 7747 /* We only process data packets if the 7748 * interface is open */ 7749 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) > 7750 skb_tailroom(rxb->skb))) { 7751 dev->stats.rx_errors++; 7752 priv->wstats.discard.misc++; 7753 IPW_DEBUG_DROP("Corruption detected! 
Oh no!\n"); 7754 return; 7755 } else if (unlikely(!netif_running(priv->net_dev))) { 7756 dev->stats.rx_dropped++; 7757 priv->wstats.discard.misc++; 7758 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 7759 return; 7760 } 7761 7762 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use 7763 * that now */ 7764 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) { 7765 /* FIXME: Should alloc bigger skb instead */ 7766 dev->stats.rx_dropped++; 7767 priv->wstats.discard.misc++; 7768 IPW_DEBUG_DROP("Dropping too large packet in monitor\n"); 7769 return; 7770 } 7771 7772 /* copy the frame itself */ 7773 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr), 7774 rxb->skb->data + IPW_RX_FRAME_SIZE, len); 7775 7776 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data; 7777 7778 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; 7779 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ 7780 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */ 7781 7782 /* Big bitfield of all the fields we provide in radiotap */ 7783 ipw_rt->rt_hdr.it_present = cpu_to_le32( 7784 (1 << IEEE80211_RADIOTAP_TSFT) | 7785 (1 << IEEE80211_RADIOTAP_FLAGS) | 7786 (1 << IEEE80211_RADIOTAP_RATE) | 7787 (1 << IEEE80211_RADIOTAP_CHANNEL) | 7788 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | 7789 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | 7790 (1 << IEEE80211_RADIOTAP_ANTENNA)); 7791 7792 /* Zero the flags, we'll add to them as we go */ 7793 ipw_rt->rt_flags = 0; 7794 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 | 7795 frame->parent_tsf[2] << 16 | 7796 frame->parent_tsf[1] << 8 | 7797 frame->parent_tsf[0]); 7798 7799 /* Convert signal to DBM */ 7800 ipw_rt->rt_dbmsignal = antsignal; 7801 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise); 7802 7803 /* Convert the channel data and set the flags */ 7804 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel)); 7805 if (received_channel > 14) { /* 802.11a */ 7806 ipw_rt->rt_chbitmask = 7807 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); 7808 } else if (antennaAndPhy & 32) { /* 802.11b */ 7809 ipw_rt->rt_chbitmask = 7810 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); 7811 } else { /* 802.11g */ 7812 ipw_rt->rt_chbitmask = 7813 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); 7814 } 7815 7816 /* set the rate in multiples of 500k/s */ 7817 switch (pktrate) { 7818 case IPW_TX_RATE_1MB: 7819 ipw_rt->rt_rate = 2; 7820 break; 7821 case IPW_TX_RATE_2MB: 7822 ipw_rt->rt_rate = 4; 7823 break; 7824 case IPW_TX_RATE_5MB: 7825 ipw_rt->rt_rate = 10; 7826 break; 7827 case IPW_TX_RATE_6MB: 7828 ipw_rt->rt_rate = 12; 7829 break; 7830 case IPW_TX_RATE_9MB: 7831 ipw_rt->rt_rate = 18; 7832 break; 7833 case IPW_TX_RATE_11MB: 7834 ipw_rt->rt_rate = 22; 7835 break; 7836 case IPW_TX_RATE_12MB: 7837 ipw_rt->rt_rate = 24; 7838 break; 7839 case IPW_TX_RATE_18MB: 7840 ipw_rt->rt_rate = 36; 7841 break; 7842 case IPW_TX_RATE_24MB: 7843 ipw_rt->rt_rate = 48; 7844 break; 7845 case IPW_TX_RATE_36MB: 7846 ipw_rt->rt_rate = 72; 7847 break; 7848 case IPW_TX_RATE_48MB: 7849 ipw_rt->rt_rate = 96; 7850 break; 7851 case IPW_TX_RATE_54MB: 7852 ipw_rt->rt_rate = 108; 7853 break; 7854 default: 7855 ipw_rt->rt_rate = 0; 7856 break; 7857 } 7858 7859 /* antenna number */ 7860 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? 
*/ 7861 7862 /* set the preamble flag if we have it */ 7863 if ((antennaAndPhy & 64)) 7864 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 7865 7866 /* Set the size of the skb to the size of the frame */ 7867 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr)); 7868 7869 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); 7870 7871 if (!libipw_rx(priv->ieee, rxb->skb, stats)) 7872 dev->stats.rx_errors++; 7873 else { /* libipw_rx succeeded, so it now owns the SKB */ 7874 rxb->skb = NULL; 7875 /* no LED during capture */ 7876 } 7877 } 7878 #endif 7879 7880 #ifdef CONFIG_IPW2200_PROMISCUOUS 7881 #define libipw_is_probe_response(fc) \ 7882 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \ 7883 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP ) 7884 7885 #define libipw_is_management(fc) \ 7886 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) 7887 7888 #define libipw_is_control(fc) \ 7889 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) 7890 7891 #define libipw_is_data(fc) \ 7892 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) 7893 7894 #define libipw_is_assoc_request(fc) \ 7895 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ) 7896 7897 #define libipw_is_reassoc_request(fc) \ 7898 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) 7899 7900 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, 7901 struct ipw_rx_mem_buffer *rxb, 7902 struct libipw_rx_stats *stats) 7903 { 7904 struct net_device *dev = priv->prom_net_dev; 7905 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 7906 struct ipw_rx_frame *frame = &pkt->u.frame; 7907 struct ipw_rt_hdr *ipw_rt; 7908 7909 /* First cache any information we need before we overwrite 7910 * the information provided in the skb from the hardware */ 7911 struct ieee80211_hdr *hdr; 7912 u16 channel = frame->received_channel; 7913 u8 phy_flags = frame->antennaAndPhy; 7914 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM; 7915 s8 noise = (s8) le16_to_cpu(frame->noise); 7916 u8 rate = frame->rate; 7917 unsigned short len = le16_to_cpu(pkt->u.frame.length); 7918 struct sk_buff *skb; 7919 int hdr_only = 0; 7920 u16 filter = priv->prom_priv->filter; 7921 7922 /* If the filter is set to not include Rx frames then return */ 7923 if (filter & IPW_PROM_NO_RX) 7924 return; 7925 7926 /* We received data from the HW, so stop the watchdog */ 7927 netif_trans_update(dev); 7928 7929 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { 7930 dev->stats.rx_errors++; 7931 IPW_DEBUG_DROP("Corruption detected! 
Oh no!\n"); 7932 return; 7933 } 7934 7935 /* We only process data packets if the interface is open */ 7936 if (unlikely(!netif_running(dev))) { 7937 dev->stats.rx_dropped++; 7938 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 7939 return; 7940 } 7941 7942 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use 7943 * that now */ 7944 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) { 7945 /* FIXME: Should alloc bigger skb instead */ 7946 dev->stats.rx_dropped++; 7947 IPW_DEBUG_DROP("Dropping too large packet in monitor\n"); 7948 return; 7949 } 7950 7951 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE; 7952 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) { 7953 if (filter & IPW_PROM_NO_MGMT) 7954 return; 7955 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 7956 hdr_only = 1; 7957 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) { 7958 if (filter & IPW_PROM_NO_CTL) 7959 return; 7960 if (filter & IPW_PROM_CTL_HEADER_ONLY) 7961 hdr_only = 1; 7962 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) { 7963 if (filter & IPW_PROM_NO_DATA) 7964 return; 7965 if (filter & IPW_PROM_DATA_HEADER_ONLY) 7966 hdr_only = 1; 7967 } 7968 7969 /* Copy the SKB since this is for the promiscuous side */ 7970 skb = skb_copy(rxb->skb, GFP_ATOMIC); 7971 if (skb == NULL) { 7972 IPW_ERROR("skb_clone failed for promiscuous copy.\n"); 7973 return; 7974 } 7975 7976 /* copy the frame data to write after where the radiotap header goes */ 7977 ipw_rt = (void *)skb->data; 7978 7979 if (hdr_only) 7980 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control)); 7981 7982 memcpy(ipw_rt->payload, hdr, len); 7983 7984 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; 7985 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ 7986 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */ 7987 7988 /* Set the size of the skb to the size of the frame */ 7989 skb_put(skb, sizeof(*ipw_rt) + len); 7990 7991 /* Big bitfield of all the fields we provide in radiotap */ 7992 ipw_rt->rt_hdr.it_present = cpu_to_le32( 7993 (1 << IEEE80211_RADIOTAP_TSFT) | 7994 (1 << IEEE80211_RADIOTAP_FLAGS) | 7995 (1 << IEEE80211_RADIOTAP_RATE) | 7996 (1 << IEEE80211_RADIOTAP_CHANNEL) | 7997 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | 7998 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | 7999 (1 << IEEE80211_RADIOTAP_ANTENNA)); 8000 8001 /* Zero the flags, we'll add to them as we go */ 8002 ipw_rt->rt_flags = 0; 8003 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 | 8004 frame->parent_tsf[2] << 16 | 8005 frame->parent_tsf[1] << 8 | 8006 frame->parent_tsf[0]); 8007 8008 /* Convert to DBM */ 8009 ipw_rt->rt_dbmsignal = signal; 8010 ipw_rt->rt_dbmnoise = noise; 8011 8012 /* Convert the channel data and set the flags */ 8013 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel)); 8014 if (channel > 14) { /* 802.11a */ 8015 ipw_rt->rt_chbitmask = 8016 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); 8017 } else if (phy_flags & (1 << 5)) { /* 802.11b */ 8018 ipw_rt->rt_chbitmask = 8019 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); 8020 } else { /* 802.11g */ 8021 ipw_rt->rt_chbitmask = 8022 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); 8023 } 8024 8025 /* set the rate in multiples of 500k/s */ 8026 switch (rate) { 8027 case IPW_TX_RATE_1MB: 8028 ipw_rt->rt_rate = 2; 8029 break; 8030 case IPW_TX_RATE_2MB: 8031 ipw_rt->rt_rate = 4; 8032 break; 8033 case IPW_TX_RATE_5MB: 8034 ipw_rt->rt_rate = 10; 8035 break; 8036 case IPW_TX_RATE_6MB: 8037 ipw_rt->rt_rate = 
12; 8038 break; 8039 case IPW_TX_RATE_9MB: 8040 ipw_rt->rt_rate = 18; 8041 break; 8042 case IPW_TX_RATE_11MB: 8043 ipw_rt->rt_rate = 22; 8044 break; 8045 case IPW_TX_RATE_12MB: 8046 ipw_rt->rt_rate = 24; 8047 break; 8048 case IPW_TX_RATE_18MB: 8049 ipw_rt->rt_rate = 36; 8050 break; 8051 case IPW_TX_RATE_24MB: 8052 ipw_rt->rt_rate = 48; 8053 break; 8054 case IPW_TX_RATE_36MB: 8055 ipw_rt->rt_rate = 72; 8056 break; 8057 case IPW_TX_RATE_48MB: 8058 ipw_rt->rt_rate = 96; 8059 break; 8060 case IPW_TX_RATE_54MB: 8061 ipw_rt->rt_rate = 108; 8062 break; 8063 default: 8064 ipw_rt->rt_rate = 0; 8065 break; 8066 } 8067 8068 /* antenna number */ 8069 ipw_rt->rt_antenna = (phy_flags & 3); 8070 8071 /* set the preamble flag if we have it */ 8072 if (phy_flags & (1 << 6)) 8073 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 8074 8075 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len); 8076 8077 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) { 8078 dev->stats.rx_errors++; 8079 dev_kfree_skb_any(skb); 8080 } 8081 } 8082 #endif 8083 8084 static int is_network_packet(struct ipw_priv *priv, 8085 struct libipw_hdr_4addr *header) 8086 { 8087 /* Filter incoming packets to determine if they are targeted toward 8088 * this network, discarding packets coming from ourselves */ 8089 switch (priv->ieee->iw_mode) { 8090 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */ 8091 /* packets from our adapter are dropped (echo) */ 8092 if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr)) 8093 return 0; 8094 8095 /* {broad,multi}cast packets to our BSSID go through */ 8096 if (is_multicast_ether_addr(header->addr1)) 8097 return ether_addr_equal(header->addr3, priv->bssid); 8098 8099 /* packets to our adapter go through */ 8100 return ether_addr_equal(header->addr1, 8101 priv->net_dev->dev_addr); 8102 8103 case IW_MODE_INFRA: /* Header: Dest. 
| BSSID | Source */ 8104 /* packets from our adapter are dropped (echo) */ 8105 if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr)) 8106 return 0; 8107 8108 /* {broad,multi}cast packets to our BSS go through */ 8109 if (is_multicast_ether_addr(header->addr1)) 8110 return ether_addr_equal(header->addr2, priv->bssid); 8111 8112 /* packets to our adapter go through */ 8113 return ether_addr_equal(header->addr1, 8114 priv->net_dev->dev_addr); 8115 } 8116 8117 return 1; 8118 } 8119 8120 #define IPW_PACKET_RETRY_TIME HZ 8121 8122 static int is_duplicate_packet(struct ipw_priv *priv, 8123 struct libipw_hdr_4addr *header) 8124 { 8125 u16 sc = le16_to_cpu(header->seq_ctl); 8126 u16 seq = WLAN_GET_SEQ_SEQ(sc); 8127 u16 frag = WLAN_GET_SEQ_FRAG(sc); 8128 u16 *last_seq, *last_frag; 8129 unsigned long *last_time; 8130 8131 switch (priv->ieee->iw_mode) { 8132 case IW_MODE_ADHOC: 8133 { 8134 struct list_head *p; 8135 struct ipw_ibss_seq *entry = NULL; 8136 u8 *mac = header->addr2; 8137 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE; 8138 8139 list_for_each(p, &priv->ibss_mac_hash[index]) { 8140 entry = 8141 list_entry(p, struct ipw_ibss_seq, list); 8142 if (ether_addr_equal(entry->mac, mac)) 8143 break; 8144 } 8145 if (p == &priv->ibss_mac_hash[index]) { 8146 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 8147 if (!entry) { 8148 IPW_ERROR 8149 ("Cannot malloc new mac entry\n"); 8150 return 0; 8151 } 8152 memcpy(entry->mac, mac, ETH_ALEN); 8153 entry->seq_num = seq; 8154 entry->frag_num = frag; 8155 entry->packet_time = jiffies; 8156 list_add(&entry->list, 8157 &priv->ibss_mac_hash[index]); 8158 return 0; 8159 } 8160 last_seq = &entry->seq_num; 8161 last_frag = &entry->frag_num; 8162 last_time = &entry->packet_time; 8163 break; 8164 } 8165 case IW_MODE_INFRA: 8166 last_seq = &priv->last_seq_num; 8167 last_frag = &priv->last_frag_num; 8168 last_time = &priv->last_packet_time; 8169 break; 8170 default: 8171 return 0; 8172 } 8173 if ((*last_seq == seq) && 8174 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) { 8175 if (*last_frag == frag) 8176 goto drop; 8177 if (*last_frag + 1 != frag) 8178 /* out-of-order fragment */ 8179 goto drop; 8180 } else 8181 *last_seq = seq; 8182 8183 *last_frag = frag; 8184 *last_time = jiffies; 8185 return 0; 8186 8187 drop: 8188 /* Comment this line now since we observed the card receives 8189 * duplicate packets but the FCTL_RETRY bit is not set in the 8190 * IBSS mode with fragmentation enabled. 
8191 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */ 8192 return 1; 8193 } 8194 8195 static void ipw_handle_mgmt_packet(struct ipw_priv *priv, 8196 struct ipw_rx_mem_buffer *rxb, 8197 struct libipw_rx_stats *stats) 8198 { 8199 struct sk_buff *skb = rxb->skb; 8200 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data; 8201 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *) 8202 (skb->data + IPW_RX_FRAME_SIZE); 8203 8204 libipw_rx_mgt(priv->ieee, header, stats); 8205 8206 if (priv->ieee->iw_mode == IW_MODE_ADHOC && 8207 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) == 8208 IEEE80211_STYPE_PROBE_RESP) || 8209 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) == 8210 IEEE80211_STYPE_BEACON))) { 8211 if (ether_addr_equal(header->addr3, priv->bssid)) 8212 ipw_add_station(priv, header->addr2); 8213 } 8214 8215 if (priv->config & CFG_NET_STATS) { 8216 IPW_DEBUG_HC("sending stat packet\n"); 8217 8218 /* Set the size of the skb to the size of the full 8219 * ipw header and 802.11 frame */ 8220 skb_put(skb, le16_to_cpu(pkt->u.frame.length) + 8221 IPW_RX_FRAME_SIZE); 8222 8223 /* Advance past the ipw packet header to the 802.11 frame */ 8224 skb_pull(skb, IPW_RX_FRAME_SIZE); 8225 8226 /* Push the libipw_rx_stats before the 802.11 frame */ 8227 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats)); 8228 8229 skb->dev = priv->ieee->dev; 8230 8231 /* Point raw at the libipw_stats */ 8232 skb_reset_mac_header(skb); 8233 8234 skb->pkt_type = PACKET_OTHERHOST; 8235 skb->protocol = cpu_to_be16(ETH_P_80211_STATS); 8236 memset(skb->cb, 0, sizeof(rxb->skb->cb)); 8237 netif_rx(skb); 8238 rxb->skb = NULL; 8239 } 8240 } 8241 8242 /* 8243 * Main entry function for receiving a packet with 802.11 headers. This 8244 * should be called whenever the FW has notified us that there is a new 8245 * skb in the receive queue. 8246 */ 8247 static void ipw_rx(struct ipw_priv *priv) 8248 { 8249 struct ipw_rx_mem_buffer *rxb; 8250 struct ipw_rx_packet *pkt; 8251 struct libipw_hdr_4addr *header; 8252 u32 r, w, i; 8253 u8 network_packet; 8254 u8 fill_rx = 0; 8255 8256 r = ipw_read32(priv, IPW_RX_READ_INDEX); 8257 w = ipw_read32(priv, IPW_RX_WRITE_INDEX); 8258 i = priv->rxq->read; 8259 8260 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2)) 8261 fill_rx = 1; 8262 8263 while (i != r) { 8264 rxb = priv->rxq->queue[i]; 8265 if (unlikely(rxb == NULL)) { 8266 printk(KERN_CRIT "Queue not allocated!\n"); 8267 break; 8268 } 8269 priv->rxq->queue[i] = NULL; 8270 8271 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 8272 IPW_RX_BUF_SIZE, 8273 PCI_DMA_FROMDEVICE); 8274 8275 pkt = (struct ipw_rx_packet *)rxb->skb->data; 8276 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n", 8277 pkt->header.message_type, 8278 pkt->header.rx_seq_num, pkt->header.control_bits); 8279 8280 switch (pkt->header.message_type) { 8281 case RX_FRAME_TYPE: /* 802.11 frame */ { 8282 struct libipw_rx_stats stats = { 8283 .rssi = pkt->u.frame.rssi_dbm - 8284 IPW_RSSI_TO_DBM, 8285 .signal = 8286 pkt->u.frame.rssi_dbm - 8287 IPW_RSSI_TO_DBM + 0x100, 8288 .noise = 8289 le16_to_cpu(pkt->u.frame.noise), 8290 .rate = pkt->u.frame.rate, 8291 .mac_time = jiffies, 8292 .received_channel = 8293 pkt->u.frame.received_channel, 8294 .freq = 8295 (pkt->u.frame. 8296 control & (1 << 0)) ?
8297 LIBIPW_24GHZ_BAND : 8298 LIBIPW_52GHZ_BAND, 8299 .len = le16_to_cpu(pkt->u.frame.length), 8300 }; 8301 8302 if (stats.rssi != 0) 8303 stats.mask |= LIBIPW_STATMASK_RSSI; 8304 if (stats.signal != 0) 8305 stats.mask |= LIBIPW_STATMASK_SIGNAL; 8306 if (stats.noise != 0) 8307 stats.mask |= LIBIPW_STATMASK_NOISE; 8308 if (stats.rate != 0) 8309 stats.mask |= LIBIPW_STATMASK_RATE; 8310 8311 priv->rx_packets++; 8312 8313 #ifdef CONFIG_IPW2200_PROMISCUOUS 8314 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) 8315 ipw_handle_promiscuous_rx(priv, rxb, &stats); 8316 #endif 8317 8318 #ifdef CONFIG_IPW2200_MONITOR 8319 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 8320 #ifdef CONFIG_IPW2200_RADIOTAP 8321 8322 ipw_handle_data_packet_monitor(priv, 8323 rxb, 8324 &stats); 8325 #else 8326 ipw_handle_data_packet(priv, rxb, 8327 &stats); 8328 #endif 8329 break; 8330 } 8331 #endif 8332 8333 header = 8334 (struct libipw_hdr_4addr *)(rxb->skb-> 8335 data + 8336 IPW_RX_FRAME_SIZE); 8337 /* TODO: Check Ad-Hoc dest/source and make sure 8338 * that we are actually parsing these packets 8339 * correctly -- we should probably use the 8340 * frame control of the packet and disregard 8341 * the current iw_mode */ 8342 8343 network_packet = 8344 is_network_packet(priv, header); 8345 if (network_packet && priv->assoc_network) { 8346 priv->assoc_network->stats.rssi = 8347 stats.rssi; 8348 priv->exp_avg_rssi = 8349 exponential_average(priv->exp_avg_rssi, 8350 stats.rssi, DEPTH_RSSI); 8351 } 8352 8353 IPW_DEBUG_RX("Frame: len=%u\n", 8354 le16_to_cpu(pkt->u.frame.length)); 8355 8356 if (le16_to_cpu(pkt->u.frame.length) < 8357 libipw_get_hdrlen(le16_to_cpu( 8358 header->frame_ctl))) { 8359 IPW_DEBUG_DROP 8360 ("Received packet is too small. " 8361 "Dropping.\n"); 8362 priv->net_dev->stats.rx_errors++; 8363 priv->wstats.discard.misc++; 8364 break; 8365 } 8366 8367 switch (WLAN_FC_GET_TYPE 8368 (le16_to_cpu(header->frame_ctl))) { 8369 8370 case IEEE80211_FTYPE_MGMT: 8371 ipw_handle_mgmt_packet(priv, rxb, 8372 &stats); 8373 break; 8374 8375 case IEEE80211_FTYPE_CTL: 8376 break; 8377 8378 case IEEE80211_FTYPE_DATA: 8379 if (unlikely(!network_packet || 8380 is_duplicate_packet(priv, 8381 header))) 8382 { 8383 IPW_DEBUG_DROP("Dropping: " 8384 "%pM, " 8385 "%pM, " 8386 "%pM\n", 8387 header->addr1, 8388 header->addr2, 8389 header->addr3); 8390 break; 8391 } 8392 8393 ipw_handle_data_packet(priv, rxb, 8394 &stats); 8395 8396 break; 8397 } 8398 break; 8399 } 8400 8401 case RX_HOST_NOTIFICATION_TYPE:{ 8402 IPW_DEBUG_RX 8403 ("Notification: subtype=%02X flags=%02X size=%d\n", 8404 pkt->u.notification.subtype, 8405 pkt->u.notification.flags, 8406 le16_to_cpu(pkt->u.notification.size)); 8407 ipw_rx_notification(priv, &pkt->u.notification); 8408 break; 8409 } 8410 8411 default: 8412 IPW_DEBUG_RX("Bad Rx packet of type %d\n", 8413 pkt->header.message_type); 8414 break; 8415 } 8416 8417 /* For now we just don't re-use anything. 
We can tweak this 8418 * later to try and re-use notification packets and SKBs that 8419 * fail to Rx correctly */ 8420 if (rxb->skb != NULL) { 8421 dev_kfree_skb_any(rxb->skb); 8422 rxb->skb = NULL; 8423 } 8424 8425 pci_unmap_single(priv->pci_dev, rxb->dma_addr, 8426 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 8427 list_add_tail(&rxb->list, &priv->rxq->rx_used); 8428 8429 i = (i + 1) % RX_QUEUE_SIZE; 8430 8431 /* If there are a lot of unused frames, restock the Rx queue 8432 * so the ucode won't assert */ 8433 if (fill_rx) { 8434 priv->rxq->read = i; 8435 ipw_rx_queue_replenish(priv); 8436 } 8437 } 8438 8439 /* Backtrack one entry */ 8440 priv->rxq->read = i; 8441 ipw_rx_queue_restock(priv); 8442 } 8443 8444 #define DEFAULT_RTS_THRESHOLD 2304U 8445 #define MIN_RTS_THRESHOLD 1U 8446 #define MAX_RTS_THRESHOLD 2304U 8447 #define DEFAULT_BEACON_INTERVAL 100U 8448 #define DEFAULT_SHORT_RETRY_LIMIT 7U 8449 #define DEFAULT_LONG_RETRY_LIMIT 4U 8450 8451 /** 8452 * ipw_sw_reset 8453 * @option: options to control different reset behaviour 8454 * 0 = reset everything except the 'disable' module_param 8455 * 1 = reset everything and print out driver info (for probe only) 8456 * 2 = reset everything 8457 */ 8458 static int ipw_sw_reset(struct ipw_priv *priv, int option) 8459 { 8460 int band, modulation; 8461 int old_mode = priv->ieee->iw_mode; 8462 8463 /* Initialize module parameter values here */ 8464 priv->config = 0; 8465 8466 /* We default to disabling the LED code as right now it causes 8467 * too many systems to lock up... */ 8468 if (!led_support) 8469 priv->config |= CFG_NO_LED; 8470 8471 if (associate) 8472 priv->config |= CFG_ASSOCIATE; 8473 else 8474 IPW_DEBUG_INFO("Auto associate disabled.\n"); 8475 8476 if (auto_create) 8477 priv->config |= CFG_ADHOC_CREATE; 8478 else 8479 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n"); 8480 8481 priv->config &= ~CFG_STATIC_ESSID; 8482 priv->essid_len = 0; 8483 memset(priv->essid, 0, IW_ESSID_MAX_SIZE); 8484 8485 if (disable && option) { 8486 priv->status |= STATUS_RF_KILL_SW; 8487 IPW_DEBUG_INFO("Radio disabled.\n"); 8488 } 8489 8490 if (default_channel != 0) { 8491 priv->config |= CFG_STATIC_CHANNEL; 8492 priv->channel = default_channel; 8493 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel); 8494 /* TODO: Validate that provided channel is in range */ 8495 } 8496 #ifdef CONFIG_IPW2200_QOS 8497 ipw_qos_init(priv, qos_enable, qos_burst_enable, 8498 burst_duration_CCK, burst_duration_OFDM); 8499 #endif /* CONFIG_IPW2200_QOS */ 8500 8501 switch (network_mode) { 8502 case 1: 8503 priv->ieee->iw_mode = IW_MODE_ADHOC; 8504 priv->net_dev->type = ARPHRD_ETHER; 8505 8506 break; 8507 #ifdef CONFIG_IPW2200_MONITOR 8508 case 2: 8509 priv->ieee->iw_mode = IW_MODE_MONITOR; 8510 #ifdef CONFIG_IPW2200_RADIOTAP 8511 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 8512 #else 8513 priv->net_dev->type = ARPHRD_IEEE80211; 8514 #endif 8515 break; 8516 #endif 8517 default: 8518 case 0: 8519 priv->net_dev->type = ARPHRD_ETHER; 8520 priv->ieee->iw_mode = IW_MODE_INFRA; 8521 break; 8522 } 8523 8524 if (hwcrypto) { 8525 priv->ieee->host_encrypt = 0; 8526 priv->ieee->host_encrypt_msdu = 0; 8527 priv->ieee->host_decrypt = 0; 8528 priv->ieee->host_mc_decrypt = 0; 8529 } 8530 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off"); 8531 8532 /* IPW2200/2915 is able to do hardware fragmentation.
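* Leave fragmentation to the firmware: host_open_frag is cleared just below so the host side does not fragment frames itself.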
*/ 8533 priv->ieee->host_open_frag = 0; 8534 8535 if ((priv->pci_dev->device == 0x4223) || 8536 (priv->pci_dev->device == 0x4224)) { 8537 if (option == 1) 8538 printk(KERN_INFO DRV_NAME 8539 ": Detected Intel PRO/Wireless 2915ABG Network " 8540 "Connection\n"); 8541 priv->ieee->abg_true = 1; 8542 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND; 8543 modulation = LIBIPW_OFDM_MODULATION | 8544 LIBIPW_CCK_MODULATION; 8545 priv->adapter = IPW_2915ABG; 8546 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B; 8547 } else { 8548 if (option == 1) 8549 printk(KERN_INFO DRV_NAME 8550 ": Detected Intel PRO/Wireless 2200BG Network " 8551 "Connection\n"); 8552 8553 priv->ieee->abg_true = 0; 8554 band = LIBIPW_24GHZ_BAND; 8555 modulation = LIBIPW_OFDM_MODULATION | 8556 LIBIPW_CCK_MODULATION; 8557 priv->adapter = IPW_2200BG; 8558 priv->ieee->mode = IEEE_G | IEEE_B; 8559 } 8560 8561 priv->ieee->freq_band = band; 8562 priv->ieee->modulation = modulation; 8563 8564 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK; 8565 8566 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; 8567 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; 8568 8569 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 8570 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT; 8571 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT; 8572 8573 /* If power management is turned on, default to AC mode */ 8574 priv->power_mode = IPW_POWER_AC; 8575 priv->tx_power = IPW_TX_POWER_DEFAULT; 8576 8577 return old_mode == priv->ieee->iw_mode; 8578 } 8579 8580 /* 8581 * This file defines the Wireless Extension handlers. It does not 8582 * define any methods of hardware manipulation and relies on the 8583 * functions defined in ipw_main to provide the HW interaction. 8584 * 8585 * The exception to this is the use of the ipw_get_ordinal() 8586 * function used to poll the hardware vs. making unnecessary calls. 
8587 * 8588 */ 8589 8590 static int ipw_set_channel(struct ipw_priv *priv, u8 channel) 8591 { 8592 if (channel == 0) { 8593 IPW_DEBUG_INFO("Setting channel to ANY (0)\n"); 8594 priv->config &= ~CFG_STATIC_CHANNEL; 8595 IPW_DEBUG_ASSOC("Attempting to associate with new " 8596 "parameters.\n"); 8597 ipw_associate(priv); 8598 return 0; 8599 } 8600 8601 priv->config |= CFG_STATIC_CHANNEL; 8602 8603 if (priv->channel == channel) { 8604 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n", 8605 channel); 8606 return 0; 8607 } 8608 8609 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel); 8610 priv->channel = channel; 8611 8612 #ifdef CONFIG_IPW2200_MONITOR 8613 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 8614 int i; 8615 if (priv->status & STATUS_SCANNING) { 8616 IPW_DEBUG_SCAN("Scan abort triggered due to " 8617 "channel change.\n"); 8618 ipw_abort_scan(priv); 8619 } 8620 8621 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--) 8622 udelay(10); 8623 8624 if (priv->status & STATUS_SCANNING) 8625 IPW_DEBUG_SCAN("Still scanning...\n"); 8626 else 8627 IPW_DEBUG_SCAN("Took %dms to abort current scan\n", 8628 1000 - i); 8629 8630 return 0; 8631 } 8632 #endif /* CONFIG_IPW2200_MONITOR */ 8633 8634 /* Network configuration changed -- force [re]association */ 8635 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n"); 8636 if (!ipw_disassociate(priv)) 8637 ipw_associate(priv); 8638 8639 return 0; 8640 } 8641 8642 static int ipw_wx_set_freq(struct net_device *dev, 8643 struct iw_request_info *info, 8644 union iwreq_data *wrqu, char *extra) 8645 { 8646 struct ipw_priv *priv = libipw_priv(dev); 8647 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 8648 struct iw_freq *fwrq = &wrqu->freq; 8649 int ret = 0, i; 8650 u8 channel, flags; 8651 int band; 8652 8653 if (fwrq->m == 0) { 8654 IPW_DEBUG_WX("SET Freq/Channel -> any\n"); 8655 mutex_lock(&priv->mutex); 8656 ret = ipw_set_channel(priv, 0); 8657 mutex_unlock(&priv->mutex); 8658 return ret; 8659 } 8660 /* if setting by freq convert to channel */ 8661 if (fwrq->e == 1) { 8662 channel = libipw_freq_to_channel(priv->ieee, fwrq->m); 8663 if (channel == 0) 8664 return -EINVAL; 8665 } else 8666 channel = fwrq->m; 8667 8668 if (!(band = libipw_is_valid_channel(priv->ieee, channel))) 8669 return -EINVAL; 8670 8671 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 8672 i = libipw_channel_to_index(priv->ieee, channel); 8673 if (i == -1) 8674 return -EINVAL; 8675 8676 flags = (band == LIBIPW_24GHZ_BAND) ? 
8677 geo->bg[i].flags : geo->a[i].flags; 8678 if (flags & LIBIPW_CH_PASSIVE_ONLY) { 8679 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n"); 8680 return -EINVAL; 8681 } 8682 } 8683 8684 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m); 8685 mutex_lock(&priv->mutex); 8686 ret = ipw_set_channel(priv, channel); 8687 mutex_unlock(&priv->mutex); 8688 return ret; 8689 } 8690 8691 static int ipw_wx_get_freq(struct net_device *dev, 8692 struct iw_request_info *info, 8693 union iwreq_data *wrqu, char *extra) 8694 { 8695 struct ipw_priv *priv = libipw_priv(dev); 8696 8697 wrqu->freq.e = 0; 8698 8699 /* If we are associated, trying to associate, or have a statically 8700 * configured CHANNEL then return that; otherwise return ANY */ 8701 mutex_lock(&priv->mutex); 8702 if (priv->config & CFG_STATIC_CHANNEL || 8703 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) { 8704 int i; 8705 8706 i = libipw_channel_to_index(priv->ieee, priv->channel); 8707 BUG_ON(i == -1); 8708 wrqu->freq.e = 1; 8709 8710 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 8711 case LIBIPW_52GHZ_BAND: 8712 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000; 8713 break; 8714 8715 case LIBIPW_24GHZ_BAND: 8716 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000; 8717 break; 8718 8719 default: 8720 BUG(); 8721 } 8722 } else 8723 wrqu->freq.m = 0; 8724 8725 mutex_unlock(&priv->mutex); 8726 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel); 8727 return 0; 8728 } 8729 8730 static int ipw_wx_set_mode(struct net_device *dev, 8731 struct iw_request_info *info, 8732 union iwreq_data *wrqu, char *extra) 8733 { 8734 struct ipw_priv *priv = libipw_priv(dev); 8735 int err = 0; 8736 8737 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode); 8738 8739 switch (wrqu->mode) { 8740 #ifdef CONFIG_IPW2200_MONITOR 8741 case IW_MODE_MONITOR: 8742 #endif 8743 case IW_MODE_ADHOC: 8744 case IW_MODE_INFRA: 8745 break; 8746 case IW_MODE_AUTO: 8747 wrqu->mode = IW_MODE_INFRA; 8748 break; 8749 default: 8750 return -EINVAL; 8751 } 8752 if (wrqu->mode == priv->ieee->iw_mode) 8753 return 0; 8754 8755 mutex_lock(&priv->mutex); 8756 8757 ipw_sw_reset(priv, 0); 8758 8759 #ifdef CONFIG_IPW2200_MONITOR 8760 if (priv->ieee->iw_mode == IW_MODE_MONITOR) 8761 priv->net_dev->type = ARPHRD_ETHER; 8762 8763 if (wrqu->mode == IW_MODE_MONITOR) 8764 #ifdef CONFIG_IPW2200_RADIOTAP 8765 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 8766 #else 8767 priv->net_dev->type = ARPHRD_IEEE80211; 8768 #endif 8769 #endif /* CONFIG_IPW2200_MONITOR */ 8770 8771 /* Free the existing firmware and reset the fw_loaded 8772 * flag so ipw_load() will bring in the new firmware */ 8773 free_firmware(); 8774 8775 priv->ieee->iw_mode = wrqu->mode; 8776 8777 schedule_work(&priv->adapter_restart); 8778 mutex_unlock(&priv->mutex); 8779 return err; 8780 } 8781 8782 static int ipw_wx_get_mode(struct net_device *dev, 8783 struct iw_request_info *info, 8784 union iwreq_data *wrqu, char *extra) 8785 { 8786 struct ipw_priv *priv = libipw_priv(dev); 8787 mutex_lock(&priv->mutex); 8788 wrqu->mode = priv->ieee->iw_mode; 8789 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode); 8790 mutex_unlock(&priv->mutex); 8791 return 0; 8792 } 8793 8794 /* Values are in microsecond */ 8795 static const s32 timeout_duration[] = { 8796 350000, 8797 250000, 8798 75000, 8799 37000, 8800 25000, 8801 }; 8802 8803 static const s32 period_duration[] = { 8804 400000, 8805 700000, 8806 1000000, 8807 1000000, 8808 1000000 8809 }; 8810 8811 static int ipw_wx_get_range(struct net_device *dev, 8812 struct iw_request_info *info, 
8813 union iwreq_data *wrqu, char *extra) 8814 { 8815 struct ipw_priv *priv = libipw_priv(dev); 8816 struct iw_range *range = (struct iw_range *)extra; 8817 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 8818 int i = 0, j; 8819 8820 wrqu->data.length = sizeof(*range); 8821 memset(range, 0, sizeof(*range)); 8822 8823 /* 54Mbs == ~27 Mb/s real (802.11g) */ 8824 range->throughput = 27 * 1000 * 1000; 8825 8826 range->max_qual.qual = 100; 8827 /* TODO: Find real max RSSI and stick here */ 8828 range->max_qual.level = 0; 8829 range->max_qual.noise = 0; 8830 range->max_qual.updated = 7; /* Updated all three */ 8831 8832 range->avg_qual.qual = 70; 8833 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ 8834 range->avg_qual.level = 0; /* FIXME to real average level */ 8835 range->avg_qual.noise = 0; 8836 range->avg_qual.updated = 7; /* Updated all three */ 8837 mutex_lock(&priv->mutex); 8838 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES); 8839 8840 for (i = 0; i < range->num_bitrates; i++) 8841 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) * 8842 500000; 8843 8844 range->max_rts = DEFAULT_RTS_THRESHOLD; 8845 range->min_frag = MIN_FRAG_THRESHOLD; 8846 range->max_frag = MAX_FRAG_THRESHOLD; 8847 8848 range->encoding_size[0] = 5; 8849 range->encoding_size[1] = 13; 8850 range->num_encoding_sizes = 2; 8851 range->max_encoding_tokens = WEP_KEYS; 8852 8853 /* Set the Wireless Extension versions */ 8854 range->we_version_compiled = WIRELESS_EXT; 8855 range->we_version_source = 18; 8856 8857 i = 0; 8858 if (priv->ieee->mode & (IEEE_B | IEEE_G)) { 8859 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) { 8860 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 8861 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY)) 8862 continue; 8863 8864 range->freq[i].i = geo->bg[j].channel; 8865 range->freq[i].m = geo->bg[j].freq * 100000; 8866 range->freq[i].e = 1; 8867 i++; 8868 } 8869 } 8870 8871 if (priv->ieee->mode & IEEE_A) { 8872 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) { 8873 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 8874 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY)) 8875 continue; 8876 8877 range->freq[i].i = geo->a[j].channel; 8878 range->freq[i].m = geo->a[j].freq * 100000; 8879 range->freq[i].e = 1; 8880 i++; 8881 } 8882 } 8883 8884 range->num_channels = i; 8885 range->num_frequency = i; 8886 8887 mutex_unlock(&priv->mutex); 8888 8889 /* Event capability (kernel + driver) */ 8890 range->event_capa[0] = (IW_EVENT_CAPA_K_0 | 8891 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | 8892 IW_EVENT_CAPA_MASK(SIOCGIWAP) | 8893 IW_EVENT_CAPA_MASK(SIOCGIWSCAN)); 8894 range->event_capa[1] = IW_EVENT_CAPA_K_1; 8895 8896 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | 8897 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; 8898 8899 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE; 8900 8901 IPW_DEBUG_WX("GET Range\n"); 8902 return 0; 8903 } 8904 8905 static int ipw_wx_set_wap(struct net_device *dev, 8906 struct iw_request_info *info, 8907 union iwreq_data *wrqu, char *extra) 8908 { 8909 struct ipw_priv *priv = libipw_priv(dev); 8910 8911 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 8912 return -EINVAL; 8913 mutex_lock(&priv->mutex); 8914 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) || 8915 is_zero_ether_addr(wrqu->ap_addr.sa_data)) { 8916 /* we disable mandatory BSSID association */ 8917 IPW_DEBUG_WX("Setting AP BSSID to ANY\n"); 8918 priv->config &= ~CFG_STATIC_BSSID; 8919 IPW_DEBUG_ASSOC("Attempting to associate with new " 
8920 "parameters.\n"); 8921 ipw_associate(priv); 8922 mutex_unlock(&priv->mutex); 8923 return 0; 8924 } 8925 8926 priv->config |= CFG_STATIC_BSSID; 8927 if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) { 8928 IPW_DEBUG_WX("BSSID set to current BSSID.\n"); 8929 mutex_unlock(&priv->mutex); 8930 return 0; 8931 } 8932 8933 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n", 8934 wrqu->ap_addr.sa_data); 8935 8936 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); 8937 8938 /* Network configuration changed -- force [re]association */ 8939 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n"); 8940 if (!ipw_disassociate(priv)) 8941 ipw_associate(priv); 8942 8943 mutex_unlock(&priv->mutex); 8944 return 0; 8945 } 8946 8947 static int ipw_wx_get_wap(struct net_device *dev, 8948 struct iw_request_info *info, 8949 union iwreq_data *wrqu, char *extra) 8950 { 8951 struct ipw_priv *priv = libipw_priv(dev); 8952 8953 /* If we are associated, trying to associate, or have a statically 8954 * configured BSSID then return that; otherwise return ANY */ 8955 mutex_lock(&priv->mutex); 8956 if (priv->config & CFG_STATIC_BSSID || 8957 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 8958 wrqu->ap_addr.sa_family = ARPHRD_ETHER; 8959 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); 8960 } else 8961 eth_zero_addr(wrqu->ap_addr.sa_data); 8962 8963 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", 8964 wrqu->ap_addr.sa_data); 8965 mutex_unlock(&priv->mutex); 8966 return 0; 8967 } 8968 8969 static int ipw_wx_set_essid(struct net_device *dev, 8970 struct iw_request_info *info, 8971 union iwreq_data *wrqu, char *extra) 8972 { 8973 struct ipw_priv *priv = libipw_priv(dev); 8974 int length; 8975 8976 mutex_lock(&priv->mutex); 8977 8978 if (!wrqu->essid.flags) 8979 { 8980 IPW_DEBUG_WX("Setting ESSID to ANY\n"); 8981 ipw_disassociate(priv); 8982 priv->config &= ~CFG_STATIC_ESSID; 8983 ipw_associate(priv); 8984 mutex_unlock(&priv->mutex); 8985 return 0; 8986 } 8987 8988 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE); 8989 8990 priv->config |= CFG_STATIC_ESSID; 8991 8992 if (priv->essid_len == length && !memcmp(priv->essid, extra, length) 8993 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) { 8994 IPW_DEBUG_WX("ESSID set to current ESSID.\n"); 8995 mutex_unlock(&priv->mutex); 8996 return 0; 8997 } 8998 8999 IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length); 9000 9001 priv->essid_len = length; 9002 memcpy(priv->essid, extra, priv->essid_len); 9003 9004 /* Network configuration changed -- force [re]association */ 9005 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n"); 9006 if (!ipw_disassociate(priv)) 9007 ipw_associate(priv); 9008 9009 mutex_unlock(&priv->mutex); 9010 return 0; 9011 } 9012 9013 static int ipw_wx_get_essid(struct net_device *dev, 9014 struct iw_request_info *info, 9015 union iwreq_data *wrqu, char *extra) 9016 { 9017 struct ipw_priv *priv = libipw_priv(dev); 9018 9019 /* If we are associated, trying to associate, or have a statically 9020 * configured ESSID then return that; otherwise return ANY */ 9021 mutex_lock(&priv->mutex); 9022 if (priv->config & CFG_STATIC_ESSID || 9023 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 9024 IPW_DEBUG_WX("Getting essid: '%*pE'\n", 9025 priv->essid_len, priv->essid); 9026 memcpy(extra, priv->essid, priv->essid_len); 9027 wrqu->essid.length = priv->essid_len; 9028 wrqu->essid.flags = 1; /* active */ 9029 } else { 9030 IPW_DEBUG_WX("Getting essid: ANY\n"); 9031 wrqu->essid.length 
= 0; 9032 wrqu->essid.flags = 0; /* active */ 9033 } 9034 mutex_unlock(&priv->mutex); 9035 return 0; 9036 } 9037 9038 static int ipw_wx_set_nick(struct net_device *dev, 9039 struct iw_request_info *info, 9040 union iwreq_data *wrqu, char *extra) 9041 { 9042 struct ipw_priv *priv = libipw_priv(dev); 9043 9044 IPW_DEBUG_WX("Setting nick to '%s'\n", extra); 9045 if (wrqu->data.length > IW_ESSID_MAX_SIZE) 9046 return -E2BIG; 9047 mutex_lock(&priv->mutex); 9048 wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick)); 9049 memset(priv->nick, 0, sizeof(priv->nick)); 9050 memcpy(priv->nick, extra, wrqu->data.length); 9051 IPW_DEBUG_TRACE("<<\n"); 9052 mutex_unlock(&priv->mutex); 9053 return 0; 9054 9055 } 9056 9057 static int ipw_wx_get_nick(struct net_device *dev, 9058 struct iw_request_info *info, 9059 union iwreq_data *wrqu, char *extra) 9060 { 9061 struct ipw_priv *priv = libipw_priv(dev); 9062 IPW_DEBUG_WX("Getting nick\n"); 9063 mutex_lock(&priv->mutex); 9064 wrqu->data.length = strlen(priv->nick); 9065 memcpy(extra, priv->nick, wrqu->data.length); 9066 wrqu->data.flags = 1; /* active */ 9067 mutex_unlock(&priv->mutex); 9068 return 0; 9069 } 9070 9071 static int ipw_wx_set_sens(struct net_device *dev, 9072 struct iw_request_info *info, 9073 union iwreq_data *wrqu, char *extra) 9074 { 9075 struct ipw_priv *priv = libipw_priv(dev); 9076 int err = 0; 9077 9078 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value); 9079 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value); 9080 mutex_lock(&priv->mutex); 9081 9082 if (wrqu->sens.fixed == 0) 9083 { 9084 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; 9085 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; 9086 goto out; 9087 } 9088 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) || 9089 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) { 9090 err = -EINVAL; 9091 goto out; 9092 } 9093 9094 priv->roaming_threshold = wrqu->sens.value; 9095 priv->disassociate_threshold = 3*wrqu->sens.value; 9096 out: 9097 mutex_unlock(&priv->mutex); 9098 return err; 9099 } 9100 9101 static int ipw_wx_get_sens(struct net_device *dev, 9102 struct iw_request_info *info, 9103 union iwreq_data *wrqu, char *extra) 9104 { 9105 struct ipw_priv *priv = libipw_priv(dev); 9106 mutex_lock(&priv->mutex); 9107 wrqu->sens.fixed = 1; 9108 wrqu->sens.value = priv->roaming_threshold; 9109 mutex_unlock(&priv->mutex); 9110 9111 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n", 9112 wrqu->power.disabled ? 
"OFF" : "ON", wrqu->power.value); 9113 9114 return 0; 9115 } 9116 9117 static int ipw_wx_set_rate(struct net_device *dev, 9118 struct iw_request_info *info, 9119 union iwreq_data *wrqu, char *extra) 9120 { 9121 /* TODO: We should use semaphores or locks for access to priv */ 9122 struct ipw_priv *priv = libipw_priv(dev); 9123 u32 target_rate = wrqu->bitrate.value; 9124 u32 fixed, mask; 9125 9126 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */ 9127 /* value = X, fixed = 1 means only rate X */ 9128 /* value = X, fixed = 0 means all rates lower equal X */ 9129 9130 if (target_rate == -1) { 9131 fixed = 0; 9132 mask = LIBIPW_DEFAULT_RATES_MASK; 9133 /* Now we should reassociate */ 9134 goto apply; 9135 } 9136 9137 mask = 0; 9138 fixed = wrqu->bitrate.fixed; 9139 9140 if (target_rate == 1000000 || !fixed) 9141 mask |= LIBIPW_CCK_RATE_1MB_MASK; 9142 if (target_rate == 1000000) 9143 goto apply; 9144 9145 if (target_rate == 2000000 || !fixed) 9146 mask |= LIBIPW_CCK_RATE_2MB_MASK; 9147 if (target_rate == 2000000) 9148 goto apply; 9149 9150 if (target_rate == 5500000 || !fixed) 9151 mask |= LIBIPW_CCK_RATE_5MB_MASK; 9152 if (target_rate == 5500000) 9153 goto apply; 9154 9155 if (target_rate == 6000000 || !fixed) 9156 mask |= LIBIPW_OFDM_RATE_6MB_MASK; 9157 if (target_rate == 6000000) 9158 goto apply; 9159 9160 if (target_rate == 9000000 || !fixed) 9161 mask |= LIBIPW_OFDM_RATE_9MB_MASK; 9162 if (target_rate == 9000000) 9163 goto apply; 9164 9165 if (target_rate == 11000000 || !fixed) 9166 mask |= LIBIPW_CCK_RATE_11MB_MASK; 9167 if (target_rate == 11000000) 9168 goto apply; 9169 9170 if (target_rate == 12000000 || !fixed) 9171 mask |= LIBIPW_OFDM_RATE_12MB_MASK; 9172 if (target_rate == 12000000) 9173 goto apply; 9174 9175 if (target_rate == 18000000 || !fixed) 9176 mask |= LIBIPW_OFDM_RATE_18MB_MASK; 9177 if (target_rate == 18000000) 9178 goto apply; 9179 9180 if (target_rate == 24000000 || !fixed) 9181 mask |= LIBIPW_OFDM_RATE_24MB_MASK; 9182 if (target_rate == 24000000) 9183 goto apply; 9184 9185 if (target_rate == 36000000 || !fixed) 9186 mask |= LIBIPW_OFDM_RATE_36MB_MASK; 9187 if (target_rate == 36000000) 9188 goto apply; 9189 9190 if (target_rate == 48000000 || !fixed) 9191 mask |= LIBIPW_OFDM_RATE_48MB_MASK; 9192 if (target_rate == 48000000) 9193 goto apply; 9194 9195 if (target_rate == 54000000 || !fixed) 9196 mask |= LIBIPW_OFDM_RATE_54MB_MASK; 9197 if (target_rate == 54000000) 9198 goto apply; 9199 9200 IPW_DEBUG_WX("invalid rate specified, returning error\n"); 9201 return -EINVAL; 9202 9203 apply: 9204 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n", 9205 mask, fixed ? 
"fixed" : "sub-rates"); 9206 mutex_lock(&priv->mutex); 9207 if (mask == LIBIPW_DEFAULT_RATES_MASK) { 9208 priv->config &= ~CFG_FIXED_RATE; 9209 ipw_set_fixed_rate(priv, priv->ieee->mode); 9210 } else 9211 priv->config |= CFG_FIXED_RATE; 9212 9213 if (priv->rates_mask == mask) { 9214 IPW_DEBUG_WX("Mask set to current mask.\n"); 9215 mutex_unlock(&priv->mutex); 9216 return 0; 9217 } 9218 9219 priv->rates_mask = mask; 9220 9221 /* Network configuration changed -- force [re]association */ 9222 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n"); 9223 if (!ipw_disassociate(priv)) 9224 ipw_associate(priv); 9225 9226 mutex_unlock(&priv->mutex); 9227 return 0; 9228 } 9229 9230 static int ipw_wx_get_rate(struct net_device *dev, 9231 struct iw_request_info *info, 9232 union iwreq_data *wrqu, char *extra) 9233 { 9234 struct ipw_priv *priv = libipw_priv(dev); 9235 mutex_lock(&priv->mutex); 9236 wrqu->bitrate.value = priv->last_rate; 9237 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0; 9238 mutex_unlock(&priv->mutex); 9239 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value); 9240 return 0; 9241 } 9242 9243 static int ipw_wx_set_rts(struct net_device *dev, 9244 struct iw_request_info *info, 9245 union iwreq_data *wrqu, char *extra) 9246 { 9247 struct ipw_priv *priv = libipw_priv(dev); 9248 mutex_lock(&priv->mutex); 9249 if (wrqu->rts.disabled || !wrqu->rts.fixed) 9250 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 9251 else { 9252 if (wrqu->rts.value < MIN_RTS_THRESHOLD || 9253 wrqu->rts.value > MAX_RTS_THRESHOLD) { 9254 mutex_unlock(&priv->mutex); 9255 return -EINVAL; 9256 } 9257 priv->rts_threshold = wrqu->rts.value; 9258 } 9259 9260 ipw_send_rts_threshold(priv, priv->rts_threshold); 9261 mutex_unlock(&priv->mutex); 9262 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold); 9263 return 0; 9264 } 9265 9266 static int ipw_wx_get_rts(struct net_device *dev, 9267 struct iw_request_info *info, 9268 union iwreq_data *wrqu, char *extra) 9269 { 9270 struct ipw_priv *priv = libipw_priv(dev); 9271 mutex_lock(&priv->mutex); 9272 wrqu->rts.value = priv->rts_threshold; 9273 wrqu->rts.fixed = 0; /* no auto select */ 9274 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); 9275 mutex_unlock(&priv->mutex); 9276 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value); 9277 return 0; 9278 } 9279 9280 static int ipw_wx_set_txpow(struct net_device *dev, 9281 struct iw_request_info *info, 9282 union iwreq_data *wrqu, char *extra) 9283 { 9284 struct ipw_priv *priv = libipw_priv(dev); 9285 int err = 0; 9286 9287 mutex_lock(&priv->mutex); 9288 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) { 9289 err = -EINPROGRESS; 9290 goto out; 9291 } 9292 9293 if (!wrqu->power.fixed) 9294 wrqu->power.value = IPW_TX_POWER_DEFAULT; 9295 9296 if (wrqu->power.flags != IW_TXPOW_DBM) { 9297 err = -EINVAL; 9298 goto out; 9299 } 9300 9301 if ((wrqu->power.value > IPW_TX_POWER_MAX) || 9302 (wrqu->power.value < IPW_TX_POWER_MIN)) { 9303 err = -EINVAL; 9304 goto out; 9305 } 9306 9307 priv->tx_power = wrqu->power.value; 9308 err = ipw_set_tx_power(priv); 9309 out: 9310 mutex_unlock(&priv->mutex); 9311 return err; 9312 } 9313 9314 static int ipw_wx_get_txpow(struct net_device *dev, 9315 struct iw_request_info *info, 9316 union iwreq_data *wrqu, char *extra) 9317 { 9318 struct ipw_priv *priv = libipw_priv(dev); 9319 mutex_lock(&priv->mutex); 9320 wrqu->power.value = priv->tx_power; 9321 wrqu->power.fixed = 1; 9322 wrqu->power.flags = IW_TXPOW_DBM; 9323 wrqu->power.disabled = (priv->status & 
STATUS_RF_KILL_MASK) ? 1 : 0; 9324 mutex_unlock(&priv->mutex); 9325 9326 IPW_DEBUG_WX("GET TX Power -> %s %d\n", 9327 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); 9328 9329 return 0; 9330 } 9331 9332 static int ipw_wx_set_frag(struct net_device *dev, 9333 struct iw_request_info *info, 9334 union iwreq_data *wrqu, char *extra) 9335 { 9336 struct ipw_priv *priv = libipw_priv(dev); 9337 mutex_lock(&priv->mutex); 9338 if (wrqu->frag.disabled || !wrqu->frag.fixed) 9339 priv->ieee->fts = DEFAULT_FTS; 9340 else { 9341 if (wrqu->frag.value < MIN_FRAG_THRESHOLD || 9342 wrqu->frag.value > MAX_FRAG_THRESHOLD) { 9343 mutex_unlock(&priv->mutex); 9344 return -EINVAL; 9345 } 9346 9347 priv->ieee->fts = wrqu->frag.value & ~0x1; 9348 } 9349 9350 ipw_send_frag_threshold(priv, wrqu->frag.value); 9351 mutex_unlock(&priv->mutex); 9352 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value); 9353 return 0; 9354 } 9355 9356 static int ipw_wx_get_frag(struct net_device *dev, 9357 struct iw_request_info *info, 9358 union iwreq_data *wrqu, char *extra) 9359 { 9360 struct ipw_priv *priv = libipw_priv(dev); 9361 mutex_lock(&priv->mutex); 9362 wrqu->frag.value = priv->ieee->fts; 9363 wrqu->frag.fixed = 0; /* no auto select */ 9364 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); 9365 mutex_unlock(&priv->mutex); 9366 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value); 9367 9368 return 0; 9369 } 9370 9371 static int ipw_wx_set_retry(struct net_device *dev, 9372 struct iw_request_info *info, 9373 union iwreq_data *wrqu, char *extra) 9374 { 9375 struct ipw_priv *priv = libipw_priv(dev); 9376 9377 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled) 9378 return -EINVAL; 9379 9380 if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) 9381 return 0; 9382 9383 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255) 9384 return -EINVAL; 9385 9386 mutex_lock(&priv->mutex); 9387 if (wrqu->retry.flags & IW_RETRY_SHORT) 9388 priv->short_retry_limit = (u8) wrqu->retry.value; 9389 else if (wrqu->retry.flags & IW_RETRY_LONG) 9390 priv->long_retry_limit = (u8) wrqu->retry.value; 9391 else { 9392 priv->short_retry_limit = (u8) wrqu->retry.value; 9393 priv->long_retry_limit = (u8) wrqu->retry.value; 9394 } 9395 9396 ipw_send_retry_limit(priv, priv->short_retry_limit, 9397 priv->long_retry_limit); 9398 mutex_unlock(&priv->mutex); 9399 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n", 9400 priv->short_retry_limit, priv->long_retry_limit); 9401 return 0; 9402 } 9403 9404 static int ipw_wx_get_retry(struct net_device *dev, 9405 struct iw_request_info *info, 9406 union iwreq_data *wrqu, char *extra) 9407 { 9408 struct ipw_priv *priv = libipw_priv(dev); 9409 9410 mutex_lock(&priv->mutex); 9411 wrqu->retry.disabled = 0; 9412 9413 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { 9414 mutex_unlock(&priv->mutex); 9415 return -EINVAL; 9416 } 9417 9418 if (wrqu->retry.flags & IW_RETRY_LONG) { 9419 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG; 9420 wrqu->retry.value = priv->long_retry_limit; 9421 } else if (wrqu->retry.flags & IW_RETRY_SHORT) { 9422 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT; 9423 wrqu->retry.value = priv->short_retry_limit; 9424 } else { 9425 wrqu->retry.flags = IW_RETRY_LIMIT; 9426 wrqu->retry.value = priv->short_retry_limit; 9427 } 9428 mutex_unlock(&priv->mutex); 9429 9430 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value); 9431 9432 return 0; 9433 } 9434 9435 static int ipw_wx_set_scan(struct net_device *dev, 9436 struct iw_request_info *info, 9437 union 
iwreq_data *wrqu, char *extra) 9438 { 9439 struct ipw_priv *priv = libipw_priv(dev); 9440 struct iw_scan_req *req = (struct iw_scan_req *)extra; 9441 struct delayed_work *work = NULL; 9442 9443 mutex_lock(&priv->mutex); 9444 9445 priv->user_requested_scan = 1; 9446 9447 if (wrqu->data.length == sizeof(struct iw_scan_req)) { 9448 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { 9449 int len = min((int)req->essid_len, 9450 (int)sizeof(priv->direct_scan_ssid)); 9451 memcpy(priv->direct_scan_ssid, req->essid, len); 9452 priv->direct_scan_ssid_len = len; 9453 work = &priv->request_direct_scan; 9454 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { 9455 work = &priv->request_passive_scan; 9456 } 9457 } else { 9458 /* Normal active broadcast scan */ 9459 work = &priv->request_scan; 9460 } 9461 9462 mutex_unlock(&priv->mutex); 9463 9464 IPW_DEBUG_WX("Start scan\n"); 9465 9466 schedule_delayed_work(work, 0); 9467 9468 return 0; 9469 } 9470 9471 static int ipw_wx_get_scan(struct net_device *dev, 9472 struct iw_request_info *info, 9473 union iwreq_data *wrqu, char *extra) 9474 { 9475 struct ipw_priv *priv = libipw_priv(dev); 9476 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra); 9477 } 9478 9479 static int ipw_wx_set_encode(struct net_device *dev, 9480 struct iw_request_info *info, 9481 union iwreq_data *wrqu, char *key) 9482 { 9483 struct ipw_priv *priv = libipw_priv(dev); 9484 int ret; 9485 u32 cap = priv->capability; 9486 9487 mutex_lock(&priv->mutex); 9488 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key); 9489 9490 /* In IBSS mode, we need to notify the firmware to update 9491 * the beacon info after we changed the capability. */ 9492 if (cap != priv->capability && 9493 priv->ieee->iw_mode == IW_MODE_ADHOC && 9494 priv->status & STATUS_ASSOCIATED) 9495 ipw_disassociate(priv); 9496 9497 mutex_unlock(&priv->mutex); 9498 return ret; 9499 } 9500 9501 static int ipw_wx_get_encode(struct net_device *dev, 9502 struct iw_request_info *info, 9503 union iwreq_data *wrqu, char *key) 9504 { 9505 struct ipw_priv *priv = libipw_priv(dev); 9506 return libipw_wx_get_encode(priv->ieee, info, wrqu, key); 9507 } 9508 9509 static int ipw_wx_set_power(struct net_device *dev, 9510 struct iw_request_info *info, 9511 union iwreq_data *wrqu, char *extra) 9512 { 9513 struct ipw_priv *priv = libipw_priv(dev); 9514 int err; 9515 mutex_lock(&priv->mutex); 9516 if (wrqu->power.disabled) { 9517 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); 9518 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM); 9519 if (err) { 9520 IPW_DEBUG_WX("failed setting power mode.\n"); 9521 mutex_unlock(&priv->mutex); 9522 return err; 9523 } 9524 IPW_DEBUG_WX("SET Power Management Mode -> off\n"); 9525 mutex_unlock(&priv->mutex); 9526 return 0; 9527 } 9528 9529 switch (wrqu->power.flags & IW_POWER_MODE) { 9530 case IW_POWER_ON: /* If not specified */ 9531 case IW_POWER_MODE: /* If set all mask */ 9532 case IW_POWER_ALL_R: /* If explicitly state all */ 9533 break; 9534 default: /* Otherwise we don't support it */ 9535 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", 9536 wrqu->power.flags); 9537 mutex_unlock(&priv->mutex); 9538 return -EOPNOTSUPP; 9539 } 9540 9541 /* If the user hasn't specified a power management mode yet, default 9542 * to BATTERY */ 9543 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC) 9544 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY; 9545 else 9546 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode; 9547 9548 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); 9549 if 
(err) { 9550 IPW_DEBUG_WX("failed setting power mode.\n"); 9551 mutex_unlock(&priv->mutex); 9552 return err; 9553 } 9554 9555 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); 9556 mutex_unlock(&priv->mutex); 9557 return 0; 9558 } 9559 9560 static int ipw_wx_get_power(struct net_device *dev, 9561 struct iw_request_info *info, 9562 union iwreq_data *wrqu, char *extra) 9563 { 9564 struct ipw_priv *priv = libipw_priv(dev); 9565 mutex_lock(&priv->mutex); 9566 if (!(priv->power_mode & IPW_POWER_ENABLED)) 9567 wrqu->power.disabled = 1; 9568 else 9569 wrqu->power.disabled = 0; 9570 9571 mutex_unlock(&priv->mutex); 9572 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); 9573 9574 return 0; 9575 } 9576 9577 static int ipw_wx_set_powermode(struct net_device *dev, 9578 struct iw_request_info *info, 9579 union iwreq_data *wrqu, char *extra) 9580 { 9581 struct ipw_priv *priv = libipw_priv(dev); 9582 int mode = *(int *)extra; 9583 int err; 9584 9585 mutex_lock(&priv->mutex); 9586 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) 9587 mode = IPW_POWER_AC; 9588 9589 if (IPW_POWER_LEVEL(priv->power_mode) != mode) { 9590 err = ipw_send_power_mode(priv, mode); 9591 if (err) { 9592 IPW_DEBUG_WX("failed setting power mode.\n"); 9593 mutex_unlock(&priv->mutex); 9594 return err; 9595 } 9596 priv->power_mode = IPW_POWER_ENABLED | mode; 9597 } 9598 mutex_unlock(&priv->mutex); 9599 return 0; 9600 } 9601 9602 #define MAX_WX_STRING 80 9603 static int ipw_wx_get_powermode(struct net_device *dev, 9604 struct iw_request_info *info, 9605 union iwreq_data *wrqu, char *extra) 9606 { 9607 struct ipw_priv *priv = libipw_priv(dev); 9608 int level = IPW_POWER_LEVEL(priv->power_mode); 9609 char *p = extra; 9610 9611 p += scnprintf(p, MAX_WX_STRING, "Power save level: %d ", level); 9612 9613 switch (level) { 9614 case IPW_POWER_AC: 9615 p += scnprintf(p, MAX_WX_STRING - (p - extra), "(AC)"); 9616 break; 9617 case IPW_POWER_BATTERY: 9618 p += scnprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)"); 9619 break; 9620 default: 9621 p += scnprintf(p, MAX_WX_STRING - (p - extra), 9622 "(Timeout %dms, Period %dms)", 9623 timeout_duration[level - 1] / 1000, 9624 period_duration[level - 1] / 1000); 9625 } 9626 9627 if (!(priv->power_mode & IPW_POWER_ENABLED)) 9628 p += scnprintf(p, MAX_WX_STRING - (p - extra), " OFF"); 9629 9630 wrqu->data.length = p - extra + 1; 9631 9632 return 0; 9633 } 9634 9635 static int ipw_wx_set_wireless_mode(struct net_device *dev, 9636 struct iw_request_info *info, 9637 union iwreq_data *wrqu, char *extra) 9638 { 9639 struct ipw_priv *priv = libipw_priv(dev); 9640 int mode = *(int *)extra; 9641 u8 band = 0, modulation = 0; 9642 9643 if (mode == 0 || mode & ~IEEE_MODE_MASK) { 9644 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode); 9645 return -EINVAL; 9646 } 9647 mutex_lock(&priv->mutex); 9648 if (priv->adapter == IPW_2915ABG) { 9649 priv->ieee->abg_true = 1; 9650 if (mode & IEEE_A) { 9651 band |= LIBIPW_52GHZ_BAND; 9652 modulation |= LIBIPW_OFDM_MODULATION; 9653 } else 9654 priv->ieee->abg_true = 0; 9655 } else { 9656 if (mode & IEEE_A) { 9657 IPW_WARNING("Attempt to set 2200BG into " 9658 "802.11a mode\n"); 9659 mutex_unlock(&priv->mutex); 9660 return -EINVAL; 9661 } 9662 9663 priv->ieee->abg_true = 0; 9664 } 9665 9666 if (mode & IEEE_B) { 9667 band |= LIBIPW_24GHZ_BAND; 9668 modulation |= LIBIPW_CCK_MODULATION; 9669 } else 9670 priv->ieee->abg_true = 0; 9671 9672 if (mode & IEEE_G) { 9673 band |= LIBIPW_24GHZ_BAND; 9674 modulation |= LIBIPW_OFDM_MODULATION; 
9675 } else 9676 priv->ieee->abg_true = 0; 9677 9678 priv->ieee->mode = mode; 9679 priv->ieee->freq_band = band; 9680 priv->ieee->modulation = modulation; 9681 init_supported_rates(priv, &priv->rates); 9682 9683 /* Network configuration changed -- force [re]association */ 9684 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n"); 9685 if (!ipw_disassociate(priv)) { 9686 ipw_send_supported_rates(priv, &priv->rates); 9687 ipw_associate(priv); 9688 } 9689 9690 /* Update the band LEDs */ 9691 ipw_led_band_on(priv); 9692 9693 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", 9694 mode & IEEE_A ? 'a' : '.', 9695 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.'); 9696 mutex_unlock(&priv->mutex); 9697 return 0; 9698 } 9699 9700 static int ipw_wx_get_wireless_mode(struct net_device *dev, 9701 struct iw_request_info *info, 9702 union iwreq_data *wrqu, char *extra) 9703 { 9704 struct ipw_priv *priv = libipw_priv(dev); 9705 mutex_lock(&priv->mutex); 9706 switch (priv->ieee->mode) { 9707 case IEEE_A: 9708 strncpy(extra, "802.11a (1)", MAX_WX_STRING); 9709 break; 9710 case IEEE_B: 9711 strncpy(extra, "802.11b (2)", MAX_WX_STRING); 9712 break; 9713 case IEEE_A | IEEE_B: 9714 strncpy(extra, "802.11ab (3)", MAX_WX_STRING); 9715 break; 9716 case IEEE_G: 9717 strncpy(extra, "802.11g (4)", MAX_WX_STRING); 9718 break; 9719 case IEEE_A | IEEE_G: 9720 strncpy(extra, "802.11ag (5)", MAX_WX_STRING); 9721 break; 9722 case IEEE_B | IEEE_G: 9723 strncpy(extra, "802.11bg (6)", MAX_WX_STRING); 9724 break; 9725 case IEEE_A | IEEE_B | IEEE_G: 9726 strncpy(extra, "802.11abg (7)", MAX_WX_STRING); 9727 break; 9728 default: 9729 strncpy(extra, "unknown", MAX_WX_STRING); 9730 break; 9731 } 9732 extra[MAX_WX_STRING - 1] = '\0'; 9733 9734 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); 9735 9736 wrqu->data.length = strlen(extra) + 1; 9737 mutex_unlock(&priv->mutex); 9738 9739 return 0; 9740 } 9741 9742 static int ipw_wx_set_preamble(struct net_device *dev, 9743 struct iw_request_info *info, 9744 union iwreq_data *wrqu, char *extra) 9745 { 9746 struct ipw_priv *priv = libipw_priv(dev); 9747 int mode = *(int *)extra; 9748 mutex_lock(&priv->mutex); 9749 /* Switching from SHORT -> LONG requires a disassociation */ 9750 if (mode == 1) { 9751 if (!(priv->config & CFG_PREAMBLE_LONG)) { 9752 priv->config |= CFG_PREAMBLE_LONG; 9753 9754 /* Network configuration changed -- force [re]association */ 9755 IPW_DEBUG_ASSOC 9756 ("[re]association triggered due to preamble change.\n"); 9757 if (!ipw_disassociate(priv)) 9758 ipw_associate(priv); 9759 } 9760 goto done; 9761 } 9762 9763 if (mode == 0) { 9764 priv->config &= ~CFG_PREAMBLE_LONG; 9765 goto done; 9766 } 9767 mutex_unlock(&priv->mutex); 9768 return -EINVAL; 9769 9770 done: 9771 mutex_unlock(&priv->mutex); 9772 return 0; 9773 } 9774 9775 static int ipw_wx_get_preamble(struct net_device *dev, 9776 struct iw_request_info *info, 9777 union iwreq_data *wrqu, char *extra) 9778 { 9779 struct ipw_priv *priv = libipw_priv(dev); 9780 mutex_lock(&priv->mutex); 9781 if (priv->config & CFG_PREAMBLE_LONG) 9782 snprintf(wrqu->name, IFNAMSIZ, "long (1)"); 9783 else 9784 snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); 9785 mutex_unlock(&priv->mutex); 9786 return 0; 9787 } 9788 9789 #ifdef CONFIG_IPW2200_MONITOR 9790 static int ipw_wx_set_monitor(struct net_device *dev, 9791 struct iw_request_info *info, 9792 union iwreq_data *wrqu, char *extra) 9793 { 9794 struct ipw_priv *priv = libipw_priv(dev); 9795 int *parms = (int *)extra; 9796 int enable = (parms[0] > 0); 9797 mutex_lock(&priv->mutex); 9798 
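/* parms[0]: non-zero enables monitor mode (the adapter is restarted if the interface type changes); parms[1]: channel to tune to via ipw_set_channel() below */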
IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); 9799 if (enable) { 9800 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 9801 #ifdef CONFIG_IPW2200_RADIOTAP 9802 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 9803 #else 9804 priv->net_dev->type = ARPHRD_IEEE80211; 9805 #endif 9806 schedule_work(&priv->adapter_restart); 9807 } 9808 9809 ipw_set_channel(priv, parms[1]); 9810 } else { 9811 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 9812 mutex_unlock(&priv->mutex); 9813 return 0; 9814 } 9815 priv->net_dev->type = ARPHRD_ETHER; 9816 schedule_work(&priv->adapter_restart); 9817 } 9818 mutex_unlock(&priv->mutex); 9819 return 0; 9820 } 9821 9822 #endif /* CONFIG_IPW2200_MONITOR */ 9823 9824 static int ipw_wx_reset(struct net_device *dev, 9825 struct iw_request_info *info, 9826 union iwreq_data *wrqu, char *extra) 9827 { 9828 struct ipw_priv *priv = libipw_priv(dev); 9829 IPW_DEBUG_WX("RESET\n"); 9830 schedule_work(&priv->adapter_restart); 9831 return 0; 9832 } 9833 9834 static int ipw_wx_sw_reset(struct net_device *dev, 9835 struct iw_request_info *info, 9836 union iwreq_data *wrqu, char *extra) 9837 { 9838 struct ipw_priv *priv = libipw_priv(dev); 9839 union iwreq_data wrqu_sec = { 9840 .encoding = { 9841 .flags = IW_ENCODE_DISABLED, 9842 }, 9843 }; 9844 int ret; 9845 9846 IPW_DEBUG_WX("SW_RESET\n"); 9847 9848 mutex_lock(&priv->mutex); 9849 9850 ret = ipw_sw_reset(priv, 2); 9851 if (!ret) { 9852 free_firmware(); 9853 ipw_adapter_restart(priv); 9854 } 9855 9856 /* The SW reset bit might have been toggled on by the 'disable' 9857 * module parameter, so take appropriate action */ 9858 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW); 9859 9860 mutex_unlock(&priv->mutex); 9861 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL); 9862 mutex_lock(&priv->mutex); 9863 9864 if (!(priv->status & STATUS_RF_KILL_MASK)) { 9865 /* Configuration likely changed -- force [re]association */ 9866 IPW_DEBUG_ASSOC("[re]association triggered due to sw " 9867 "reset.\n"); 9868 if (!ipw_disassociate(priv)) 9869 ipw_associate(priv); 9870 } 9871 9872 mutex_unlock(&priv->mutex); 9873 9874 return 0; 9875 } 9876 9877 /* Rebase the WE IOCTLs to zero for the handler array */ 9878 static iw_handler ipw_wx_handlers[] = { 9879 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname), 9880 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq), 9881 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq), 9882 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode), 9883 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode), 9884 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens), 9885 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens), 9886 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range), 9887 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap), 9888 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap), 9889 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan), 9890 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan), 9891 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid), 9892 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid), 9893 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick), 9894 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick), 9895 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate), 9896 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate), 9897 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts), 9898 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts), 9899 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag), 9900 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag), 9901 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow), 9902 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow), 9903 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry), 9904 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry), 9905 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode), 9906 
IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode), 9907 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power), 9908 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power), 9909 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy), 9910 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy), 9911 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy), 9912 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy), 9913 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie), 9914 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie), 9915 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme), 9916 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth), 9917 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth), 9918 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext), 9919 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext), 9920 }; 9921 9922 enum { 9923 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV, 9924 IPW_PRIV_GET_POWER, 9925 IPW_PRIV_SET_MODE, 9926 IPW_PRIV_GET_MODE, 9927 IPW_PRIV_SET_PREAMBLE, 9928 IPW_PRIV_GET_PREAMBLE, 9929 IPW_PRIV_RESET, 9930 IPW_PRIV_SW_RESET, 9931 #ifdef CONFIG_IPW2200_MONITOR 9932 IPW_PRIV_SET_MONITOR, 9933 #endif 9934 }; 9935 9936 static struct iw_priv_args ipw_priv_args[] = { 9937 { 9938 .cmd = IPW_PRIV_SET_POWER, 9939 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 9940 .name = "set_power"}, 9941 { 9942 .cmd = IPW_PRIV_GET_POWER, 9943 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 9944 .name = "get_power"}, 9945 { 9946 .cmd = IPW_PRIV_SET_MODE, 9947 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 9948 .name = "set_mode"}, 9949 { 9950 .cmd = IPW_PRIV_GET_MODE, 9951 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 9952 .name = "get_mode"}, 9953 { 9954 .cmd = IPW_PRIV_SET_PREAMBLE, 9955 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 9956 .name = "set_preamble"}, 9957 { 9958 .cmd = IPW_PRIV_GET_PREAMBLE, 9959 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, 9960 .name = "get_preamble"}, 9961 { 9962 IPW_PRIV_RESET, 9963 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"}, 9964 { 9965 IPW_PRIV_SW_RESET, 9966 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"}, 9967 #ifdef CONFIG_IPW2200_MONITOR 9968 { 9969 IPW_PRIV_SET_MONITOR, 9970 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"}, 9971 #endif /* CONFIG_IPW2200_MONITOR */ 9972 }; 9973 9974 static iw_handler ipw_priv_handler[] = { 9975 ipw_wx_set_powermode, 9976 ipw_wx_get_powermode, 9977 ipw_wx_set_wireless_mode, 9978 ipw_wx_get_wireless_mode, 9979 ipw_wx_set_preamble, 9980 ipw_wx_get_preamble, 9981 ipw_wx_reset, 9982 ipw_wx_sw_reset, 9983 #ifdef CONFIG_IPW2200_MONITOR 9984 ipw_wx_set_monitor, 9985 #endif 9986 }; 9987 9988 static const struct iw_handler_def ipw_wx_handler_def = { 9989 .standard = ipw_wx_handlers, 9990 .num_standard = ARRAY_SIZE(ipw_wx_handlers), 9991 .num_private = ARRAY_SIZE(ipw_priv_handler), 9992 .num_private_args = ARRAY_SIZE(ipw_priv_args), 9993 .private = ipw_priv_handler, 9994 .private_args = ipw_priv_args, 9995 .get_wireless_stats = ipw_get_wireless_stats, 9996 }; 9997 9998 /* 9999 * Get wireless statistics. 10000 * Called by /proc/net/wireless 10001 * Also called by SIOCGIWSTATS 10002 */ 10003 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev) 10004 { 10005 struct ipw_priv *priv = libipw_priv(dev); 10006 struct iw_statistics *wstats; 10007 10008 wstats = &priv->wstats; 10009 10010 /* if hw is disabled, then ipw_get_ordinal() can't be called. 10011 * netdev->get_wireless_stats seems to be called before fw is 10012 * initialized. 
 * STATUS_ASSOCIATED will only be set if the hw is up
 * and associated; if not associated, the values are all meaningless
 * anyway, so zero them all and mark them invalid */
	if (!(priv->status & STATUS_ASSOCIATED)) {
		wstats->miss.beacon = 0;
		wstats->discard.retries = 0;
		wstats->qual.qual = 0;
		wstats->qual.level = 0;
		wstats->qual.noise = 0;
		wstats->qual.updated = 7;
		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
		return wstats;
	}

	wstats->qual.qual = priv->quality;
	wstats->qual.level = priv->exp_avg_rssi;
	wstats->qual.noise = priv->exp_avg_noise;
	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;

	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
	wstats->discard.retries = priv->last_tx_failures;
	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;

/*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
		goto fail_get_ordinal;
	wstats->discard.retries += tx_retry; */

	return wstats;
}

/* net device stuff */

static void init_sys_config(struct ipw_sys_config *sys_config)
{
	memset(sys_config, 0, sizeof(struct ipw_sys_config));
	sys_config->bt_coexistence = 0;
	sys_config->answer_broadcast_ssid_probe = 0;
	sys_config->accept_all_data_frames = 0;
	sys_config->accept_non_directed_frames = 1;
	sys_config->exclude_unicast_unencrypted = 0;
	sys_config->disable_unicast_decryption = 1;
	sys_config->exclude_multicast_unencrypted = 0;
	sys_config->disable_multicast_decryption = 1;
	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
		antenna = CFG_SYS_ANTENNA_BOTH;
	sys_config->antenna_diversity = antenna;
	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
	sys_config->dot11g_auto_detection = 0;
	sys_config->enable_cts_to_self = 0;
	sys_config->bt_coexist_collision_thr = 0;
	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
	sys_config->silence_threshold = 0x1e;
}

static int ipw_net_open(struct net_device *dev)
{
	IPW_DEBUG_INFO("dev->open\n");
	netif_start_queue(dev);
	return 0;
}

static int ipw_net_stop(struct net_device *dev)
{
	IPW_DEBUG_INFO("dev->close\n");
	netif_stop_queue(dev);
	return 0;
}

/*
todo:

modify to send one tfd per fragment instead of using chunking.  otherwise
we need to heavily modify the libipw_skb_to_txb.
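
(as currently written, ipw_tx_skb() below packs at most NUM_TFD_CHUNKS - 2
fragments as chunks of a single TFD and, when more fragments remain,
coalesces the rest into one freshly allocated skb before handing the TFD
to the firmware)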
10087 */ 10088 10089 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, 10090 int pri) 10091 { 10092 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *) 10093 txb->fragments[0]->data; 10094 int i = 0; 10095 struct tfd_frame *tfd; 10096 #ifdef CONFIG_IPW2200_QOS 10097 int tx_id = ipw_get_tx_queue_number(priv, pri); 10098 struct clx2_tx_queue *txq = &priv->txq[tx_id]; 10099 #else 10100 struct clx2_tx_queue *txq = &priv->txq[0]; 10101 #endif 10102 struct clx2_queue *q = &txq->q; 10103 u8 id, hdr_len, unicast; 10104 int fc; 10105 10106 if (!(priv->status & STATUS_ASSOCIATED)) 10107 goto drop; 10108 10109 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 10110 switch (priv->ieee->iw_mode) { 10111 case IW_MODE_ADHOC: 10112 unicast = !is_multicast_ether_addr(hdr->addr1); 10113 id = ipw_find_station(priv, hdr->addr1); 10114 if (id == IPW_INVALID_STATION) { 10115 id = ipw_add_station(priv, hdr->addr1); 10116 if (id == IPW_INVALID_STATION) { 10117 IPW_WARNING("Attempt to send data to " 10118 "invalid cell: %pM\n", 10119 hdr->addr1); 10120 goto drop; 10121 } 10122 } 10123 break; 10124 10125 case IW_MODE_INFRA: 10126 default: 10127 unicast = !is_multicast_ether_addr(hdr->addr3); 10128 id = 0; 10129 break; 10130 } 10131 10132 tfd = &txq->bd[q->first_empty]; 10133 txq->txb[q->first_empty] = txb; 10134 memset(tfd, 0, sizeof(*tfd)); 10135 tfd->u.data.station_number = id; 10136 10137 tfd->control_flags.message_type = TX_FRAME_TYPE; 10138 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; 10139 10140 tfd->u.data.cmd_id = DINO_CMD_TX; 10141 tfd->u.data.len = cpu_to_le16(txb->payload_size); 10142 10143 if (priv->assoc_request.ieee_mode == IPW_B_MODE) 10144 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK; 10145 else 10146 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM; 10147 10148 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE) 10149 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE; 10150 10151 fc = le16_to_cpu(hdr->frame_ctl); 10152 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS); 10153 10154 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len); 10155 10156 if (likely(unicast)) 10157 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD; 10158 10159 if (txb->encrypted && !priv->ieee->host_encrypt) { 10160 switch (priv->ieee->sec.level) { 10161 case SEC_LEVEL_3: 10162 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10163 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10164 /* XXX: ACK flag must be set for CCMP even if it 10165 * is a multicast/broadcast packet, because CCMP 10166 * group communication encrypted by GTK is 10167 * actually done by the AP. 
*/ 10168 if (!unicast) 10169 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD; 10170 10171 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; 10172 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM; 10173 tfd->u.data.key_index = 0; 10174 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE; 10175 break; 10176 case SEC_LEVEL_2: 10177 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10178 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10179 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; 10180 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP; 10181 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE; 10182 break; 10183 case SEC_LEVEL_1: 10184 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10185 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10186 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx; 10187 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <= 10188 40) 10189 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit; 10190 else 10191 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit; 10192 break; 10193 case SEC_LEVEL_0: 10194 break; 10195 default: 10196 printk(KERN_ERR "Unknown security level %d\n", 10197 priv->ieee->sec.level); 10198 break; 10199 } 10200 } else 10201 /* No hardware encryption */ 10202 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP; 10203 10204 #ifdef CONFIG_IPW2200_QOS 10205 if (fc & IEEE80211_STYPE_QOS_DATA) 10206 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data)); 10207 #endif /* CONFIG_IPW2200_QOS */ 10208 10209 /* payload */ 10210 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2), 10211 txb->nr_frags)); 10212 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n", 10213 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks)); 10214 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) { 10215 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n", 10216 i, le32_to_cpu(tfd->u.data.num_chunks), 10217 txb->fragments[i]->len - hdr_len); 10218 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n", 10219 i, tfd->u.data.num_chunks, 10220 txb->fragments[i]->len - hdr_len); 10221 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len, 10222 txb->fragments[i]->len - hdr_len); 10223 10224 tfd->u.data.chunk_ptr[i] = 10225 cpu_to_le32(pci_map_single 10226 (priv->pci_dev, 10227 txb->fragments[i]->data + hdr_len, 10228 txb->fragments[i]->len - hdr_len, 10229 PCI_DMA_TODEVICE)); 10230 tfd->u.data.chunk_len[i] = 10231 cpu_to_le16(txb->fragments[i]->len - hdr_len); 10232 } 10233 10234 if (i != txb->nr_frags) { 10235 struct sk_buff *skb; 10236 u16 remaining_bytes = 0; 10237 int j; 10238 10239 for (j = i; j < txb->nr_frags; j++) 10240 remaining_bytes += txb->fragments[j]->len - hdr_len; 10241 10242 printk(KERN_INFO "Trying to reallocate for %d bytes\n", 10243 remaining_bytes); 10244 skb = alloc_skb(remaining_bytes, GFP_ATOMIC); 10245 if (skb != NULL) { 10246 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes); 10247 for (j = i; j < txb->nr_frags; j++) { 10248 int size = txb->fragments[j]->len - hdr_len; 10249 10250 printk(KERN_INFO "Adding frag %d %d...\n", 10251 j, size); 10252 skb_put_data(skb, 10253 txb->fragments[j]->data + hdr_len, 10254 size); 10255 } 10256 dev_kfree_skb_any(txb->fragments[i]); 10257 txb->fragments[i] = skb; 10258 tfd->u.data.chunk_ptr[i] = 10259 cpu_to_le32(pci_map_single 10260 (priv->pci_dev, skb->data, 10261 remaining_bytes, 10262 PCI_DMA_TODEVICE)); 10263 10264 le32_add_cpu(&tfd->u.data.num_chunks, 1); 10265 } 10266 } 10267 10268 /* kick DMA */ 10269 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); 10270 ipw_write32(priv, q->reg_w, q->first_empty); 
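	/* Once the advanced write index is in the hardware register, the TFD
	 * is owned by the firmware's DMA engine; the driver keeps the txb in
	 * txq->txb[] so the fragments can be unmapped and freed when the
	 * frame is reclaimed.  If the ring is running low on space, the net
	 * queue is stopped below until space is reclaimed. */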
10271 10272 if (ipw_tx_queue_space(q) < q->high_mark) 10273 netif_stop_queue(priv->net_dev); 10274 10275 return NETDEV_TX_OK; 10276 10277 drop: 10278 IPW_DEBUG_DROP("Silently dropping Tx packet.\n"); 10279 libipw_txb_free(txb); 10280 return NETDEV_TX_OK; 10281 } 10282 10283 static int ipw_net_is_queue_full(struct net_device *dev, int pri) 10284 { 10285 struct ipw_priv *priv = libipw_priv(dev); 10286 #ifdef CONFIG_IPW2200_QOS 10287 int tx_id = ipw_get_tx_queue_number(priv, pri); 10288 struct clx2_tx_queue *txq = &priv->txq[tx_id]; 10289 #else 10290 struct clx2_tx_queue *txq = &priv->txq[0]; 10291 #endif /* CONFIG_IPW2200_QOS */ 10292 10293 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark) 10294 return 1; 10295 10296 return 0; 10297 } 10298 10299 #ifdef CONFIG_IPW2200_PROMISCUOUS 10300 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, 10301 struct libipw_txb *txb) 10302 { 10303 struct libipw_rx_stats dummystats; 10304 struct ieee80211_hdr *hdr; 10305 u8 n; 10306 u16 filter = priv->prom_priv->filter; 10307 int hdr_only = 0; 10308 10309 if (filter & IPW_PROM_NO_TX) 10310 return; 10311 10312 memset(&dummystats, 0, sizeof(dummystats)); 10313 10314 /* Filtering of fragment chains is done against the first fragment */ 10315 hdr = (void *)txb->fragments[0]->data; 10316 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) { 10317 if (filter & IPW_PROM_NO_MGMT) 10318 return; 10319 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 10320 hdr_only = 1; 10321 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) { 10322 if (filter & IPW_PROM_NO_CTL) 10323 return; 10324 if (filter & IPW_PROM_CTL_HEADER_ONLY) 10325 hdr_only = 1; 10326 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) { 10327 if (filter & IPW_PROM_NO_DATA) 10328 return; 10329 if (filter & IPW_PROM_DATA_HEADER_ONLY) 10330 hdr_only = 1; 10331 } 10332 10333 for(n=0; n<txb->nr_frags; ++n) { 10334 struct sk_buff *src = txb->fragments[n]; 10335 struct sk_buff *dst; 10336 struct ieee80211_radiotap_header *rt_hdr; 10337 int len; 10338 10339 if (hdr_only) { 10340 hdr = (void *)src->data; 10341 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control)); 10342 } else 10343 len = src->len; 10344 10345 dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC); 10346 if (!dst) 10347 continue; 10348 10349 rt_hdr = skb_put(dst, sizeof(*rt_hdr)); 10350 10351 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION; 10352 rt_hdr->it_pad = 0; 10353 rt_hdr->it_present = 0; /* after all, it's just an idea */ 10354 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL); 10355 10356 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16( 10357 ieee80211chan2mhz(priv->channel)); 10358 if (priv->channel > 14) /* 802.11a */ 10359 *(__le16*)skb_put(dst, sizeof(u16)) = 10360 cpu_to_le16(IEEE80211_CHAN_OFDM | 10361 IEEE80211_CHAN_5GHZ); 10362 else if (priv->ieee->mode == IEEE_B) /* 802.11b */ 10363 *(__le16*)skb_put(dst, sizeof(u16)) = 10364 cpu_to_le16(IEEE80211_CHAN_CCK | 10365 IEEE80211_CHAN_2GHZ); 10366 else /* 802.11g */ 10367 *(__le16*)skb_put(dst, sizeof(u16)) = 10368 cpu_to_le16(IEEE80211_CHAN_OFDM | 10369 IEEE80211_CHAN_2GHZ); 10370 10371 rt_hdr->it_len = cpu_to_le16(dst->len); 10372 10373 skb_copy_from_linear_data(src, skb_put(dst, len), len); 10374 10375 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats)) 10376 dev_kfree_skb_any(dst); 10377 } 10378 } 10379 #endif 10380 10381 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb, 10382 struct net_device *dev, int pri) 10383 { 10384 struct ipw_priv *priv = 
libipw_priv(dev); 10385 unsigned long flags; 10386 netdev_tx_t ret; 10387 10388 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size); 10389 spin_lock_irqsave(&priv->lock, flags); 10390 10391 #ifdef CONFIG_IPW2200_PROMISCUOUS 10392 if (rtap_iface && netif_running(priv->prom_net_dev)) 10393 ipw_handle_promiscuous_tx(priv, txb); 10394 #endif 10395 10396 ret = ipw_tx_skb(priv, txb, pri); 10397 if (ret == NETDEV_TX_OK) 10398 __ipw_led_activity_on(priv); 10399 spin_unlock_irqrestore(&priv->lock, flags); 10400 10401 return ret; 10402 } 10403 10404 static void ipw_net_set_multicast_list(struct net_device *dev) 10405 { 10406 10407 } 10408 10409 static int ipw_net_set_mac_address(struct net_device *dev, void *p) 10410 { 10411 struct ipw_priv *priv = libipw_priv(dev); 10412 struct sockaddr *addr = p; 10413 10414 if (!is_valid_ether_addr(addr->sa_data)) 10415 return -EADDRNOTAVAIL; 10416 mutex_lock(&priv->mutex); 10417 priv->config |= CFG_CUSTOM_MAC; 10418 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); 10419 printk(KERN_INFO "%s: Setting MAC to %pM\n", 10420 priv->net_dev->name, priv->mac_addr); 10421 schedule_work(&priv->adapter_restart); 10422 mutex_unlock(&priv->mutex); 10423 return 0; 10424 } 10425 10426 static void ipw_ethtool_get_drvinfo(struct net_device *dev, 10427 struct ethtool_drvinfo *info) 10428 { 10429 struct ipw_priv *p = libipw_priv(dev); 10430 char vers[64]; 10431 char date[32]; 10432 u32 len; 10433 10434 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 10435 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 10436 10437 len = sizeof(vers); 10438 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); 10439 len = sizeof(date); 10440 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len); 10441 10442 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)", 10443 vers, date); 10444 strlcpy(info->bus_info, pci_name(p->pci_dev), 10445 sizeof(info->bus_info)); 10446 } 10447 10448 static u32 ipw_ethtool_get_link(struct net_device *dev) 10449 { 10450 struct ipw_priv *priv = libipw_priv(dev); 10451 return (priv->status & STATUS_ASSOCIATED) != 0; 10452 } 10453 10454 static int ipw_ethtool_get_eeprom_len(struct net_device *dev) 10455 { 10456 return IPW_EEPROM_IMAGE_SIZE; 10457 } 10458 10459 static int ipw_ethtool_get_eeprom(struct net_device *dev, 10460 struct ethtool_eeprom *eeprom, u8 * bytes) 10461 { 10462 struct ipw_priv *p = libipw_priv(dev); 10463 10464 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) 10465 return -EINVAL; 10466 mutex_lock(&p->mutex); 10467 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len); 10468 mutex_unlock(&p->mutex); 10469 return 0; 10470 } 10471 10472 static int ipw_ethtool_set_eeprom(struct net_device *dev, 10473 struct ethtool_eeprom *eeprom, u8 * bytes) 10474 { 10475 struct ipw_priv *p = libipw_priv(dev); 10476 int i; 10477 10478 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) 10479 return -EINVAL; 10480 mutex_lock(&p->mutex); 10481 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len); 10482 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++) 10483 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]); 10484 mutex_unlock(&p->mutex); 10485 return 0; 10486 } 10487 10488 static const struct ethtool_ops ipw_ethtool_ops = { 10489 .get_link = ipw_ethtool_get_link, 10490 .get_drvinfo = ipw_ethtool_get_drvinfo, 10491 .get_eeprom_len = ipw_ethtool_get_eeprom_len, 10492 .get_eeprom = ipw_ethtool_get_eeprom, 10493 .set_eeprom = ipw_ethtool_set_eeprom, 10494 }; 10495 10496 static irqreturn_t ipw_isr(int irq, void *data) 10497 { 10498 
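	/* Hard-IRQ top half: under irq_lock this only reads and acknowledges
	 * INTA, masks further interrupts and caches the value in
	 * priv->isr_inta; the actual event handling is deferred to the
	 * irq_tasklet scheduled at the end. */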
struct ipw_priv *priv = data; 10499 u32 inta, inta_mask; 10500 10501 if (!priv) 10502 return IRQ_NONE; 10503 10504 spin_lock(&priv->irq_lock); 10505 10506 if (!(priv->status & STATUS_INT_ENABLED)) { 10507 /* IRQ is disabled */ 10508 goto none; 10509 } 10510 10511 inta = ipw_read32(priv, IPW_INTA_RW); 10512 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); 10513 10514 if (inta == 0xFFFFFFFF) { 10515 /* Hardware disappeared */ 10516 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n"); 10517 goto none; 10518 } 10519 10520 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) { 10521 /* Shared interrupt */ 10522 goto none; 10523 } 10524 10525 /* tell the device to stop sending interrupts */ 10526 __ipw_disable_interrupts(priv); 10527 10528 /* ack current interrupts */ 10529 inta &= (IPW_INTA_MASK_ALL & inta_mask); 10530 ipw_write32(priv, IPW_INTA_RW, inta); 10531 10532 /* Cache INTA value for our tasklet */ 10533 priv->isr_inta = inta; 10534 10535 tasklet_schedule(&priv->irq_tasklet); 10536 10537 spin_unlock(&priv->irq_lock); 10538 10539 return IRQ_HANDLED; 10540 none: 10541 spin_unlock(&priv->irq_lock); 10542 return IRQ_NONE; 10543 } 10544 10545 static void ipw_rf_kill(void *adapter) 10546 { 10547 struct ipw_priv *priv = adapter; 10548 unsigned long flags; 10549 10550 spin_lock_irqsave(&priv->lock, flags); 10551 10552 if (rf_kill_active(priv)) { 10553 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); 10554 schedule_delayed_work(&priv->rf_kill, 2 * HZ); 10555 goto exit_unlock; 10556 } 10557 10558 /* RF Kill is now disabled, so bring the device back up */ 10559 10560 if (!(priv->status & STATUS_RF_KILL_MASK)) { 10561 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting " 10562 "device\n"); 10563 10564 /* we can not do an adapter restart while inside an irq lock */ 10565 schedule_work(&priv->adapter_restart); 10566 } else 10567 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " 10568 "enabled\n"); 10569 10570 exit_unlock: 10571 spin_unlock_irqrestore(&priv->lock, flags); 10572 } 10573 10574 static void ipw_bg_rf_kill(struct work_struct *work) 10575 { 10576 struct ipw_priv *priv = 10577 container_of(work, struct ipw_priv, rf_kill.work); 10578 mutex_lock(&priv->mutex); 10579 ipw_rf_kill(priv); 10580 mutex_unlock(&priv->mutex); 10581 } 10582 10583 static void ipw_link_up(struct ipw_priv *priv) 10584 { 10585 priv->last_seq_num = -1; 10586 priv->last_frag_num = -1; 10587 priv->last_packet_time = 0; 10588 10589 netif_carrier_on(priv->net_dev); 10590 10591 cancel_delayed_work(&priv->request_scan); 10592 cancel_delayed_work(&priv->request_direct_scan); 10593 cancel_delayed_work(&priv->request_passive_scan); 10594 cancel_delayed_work(&priv->scan_event); 10595 ipw_reset_stats(priv); 10596 /* Ensure the rate is updated immediately */ 10597 priv->last_rate = ipw_get_current_rate(priv); 10598 ipw_gather_stats(priv); 10599 ipw_led_link_up(priv); 10600 notify_wx_assoc_event(priv); 10601 10602 if (priv->config & CFG_BACKGROUND_SCAN) 10603 schedule_delayed_work(&priv->request_scan, HZ); 10604 } 10605 10606 static void ipw_bg_link_up(struct work_struct *work) 10607 { 10608 struct ipw_priv *priv = 10609 container_of(work, struct ipw_priv, link_up); 10610 mutex_lock(&priv->mutex); 10611 ipw_link_up(priv); 10612 mutex_unlock(&priv->mutex); 10613 } 10614 10615 static void ipw_link_down(struct ipw_priv *priv) 10616 { 10617 ipw_led_link_down(priv); 10618 netif_carrier_off(priv->net_dev); 10619 notify_wx_assoc_event(priv); 10620 10621 /* Cancel any queued work ... 
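that assumes a live association: pending scan requests, the ad-hoc check
and statistics gathering.  A fresh scan is queued below unless the driver
is exiting (STATUS_EXIT_PENDING).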
*/ 10622 cancel_delayed_work(&priv->request_scan); 10623 cancel_delayed_work(&priv->request_direct_scan); 10624 cancel_delayed_work(&priv->request_passive_scan); 10625 cancel_delayed_work(&priv->adhoc_check); 10626 cancel_delayed_work(&priv->gather_stats); 10627 10628 ipw_reset_stats(priv); 10629 10630 if (!(priv->status & STATUS_EXIT_PENDING)) { 10631 /* Queue up another scan... */ 10632 schedule_delayed_work(&priv->request_scan, 0); 10633 } else 10634 cancel_delayed_work(&priv->scan_event); 10635 } 10636 10637 static void ipw_bg_link_down(struct work_struct *work) 10638 { 10639 struct ipw_priv *priv = 10640 container_of(work, struct ipw_priv, link_down); 10641 mutex_lock(&priv->mutex); 10642 ipw_link_down(priv); 10643 mutex_unlock(&priv->mutex); 10644 } 10645 10646 static int ipw_setup_deferred_work(struct ipw_priv *priv) 10647 { 10648 int ret = 0; 10649 10650 init_waitqueue_head(&priv->wait_command_queue); 10651 init_waitqueue_head(&priv->wait_state); 10652 10653 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check); 10654 INIT_WORK(&priv->associate, ipw_bg_associate); 10655 INIT_WORK(&priv->disassociate, ipw_bg_disassociate); 10656 INIT_WORK(&priv->system_config, ipw_system_config); 10657 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish); 10658 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart); 10659 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill); 10660 INIT_WORK(&priv->up, ipw_bg_up); 10661 INIT_WORK(&priv->down, ipw_bg_down); 10662 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); 10663 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan); 10664 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan); 10665 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); 10666 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); 10667 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); 10668 INIT_WORK(&priv->roam, ipw_bg_roam); 10669 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check); 10670 INIT_WORK(&priv->link_up, ipw_bg_link_up); 10671 INIT_WORK(&priv->link_down, ipw_bg_link_down); 10672 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on); 10673 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off); 10674 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off); 10675 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network); 10676 10677 #ifdef CONFIG_IPW2200_QOS 10678 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate); 10679 #endif /* CONFIG_IPW2200_QOS */ 10680 10681 tasklet_init(&priv->irq_tasklet, 10682 ipw_irq_tasklet, (unsigned long)priv); 10683 10684 return ret; 10685 } 10686 10687 static void shim__set_security(struct net_device *dev, 10688 struct libipw_security *sec) 10689 { 10690 struct ipw_priv *priv = libipw_priv(dev); 10691 int i; 10692 for (i = 0; i < 4; i++) { 10693 if (sec->flags & (1 << i)) { 10694 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i]; 10695 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i]; 10696 if (sec->key_sizes[i] == 0) 10697 priv->ieee->sec.flags &= ~(1 << i); 10698 else { 10699 memcpy(priv->ieee->sec.keys[i], sec->keys[i], 10700 sec->key_sizes[i]); 10701 priv->ieee->sec.flags |= (1 << i); 10702 } 10703 priv->status |= STATUS_SECURITY_UPDATED; 10704 } else if (sec->level != SEC_LEVEL_1) 10705 priv->ieee->sec.flags &= ~(1 << i); 10706 } 10707 10708 if (sec->flags & SEC_ACTIVE_KEY) { 10709 priv->ieee->sec.active_key = sec->active_key; 10710 priv->ieee->sec.flags |= SEC_ACTIVE_KEY; 10711 priv->status |= STATUS_SECURITY_UPDATED; 10712 } else 10713 
priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; 10714 10715 if ((sec->flags & SEC_AUTH_MODE) && 10716 (priv->ieee->sec.auth_mode != sec->auth_mode)) { 10717 priv->ieee->sec.auth_mode = sec->auth_mode; 10718 priv->ieee->sec.flags |= SEC_AUTH_MODE; 10719 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY) 10720 priv->capability |= CAP_SHARED_KEY; 10721 else 10722 priv->capability &= ~CAP_SHARED_KEY; 10723 priv->status |= STATUS_SECURITY_UPDATED; 10724 } 10725 10726 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) { 10727 priv->ieee->sec.flags |= SEC_ENABLED; 10728 priv->ieee->sec.enabled = sec->enabled; 10729 priv->status |= STATUS_SECURITY_UPDATED; 10730 if (sec->enabled) 10731 priv->capability |= CAP_PRIVACY_ON; 10732 else 10733 priv->capability &= ~CAP_PRIVACY_ON; 10734 } 10735 10736 if (sec->flags & SEC_ENCRYPT) 10737 priv->ieee->sec.encrypt = sec->encrypt; 10738 10739 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) { 10740 priv->ieee->sec.level = sec->level; 10741 priv->ieee->sec.flags |= SEC_LEVEL; 10742 priv->status |= STATUS_SECURITY_UPDATED; 10743 } 10744 10745 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT)) 10746 ipw_set_hwcrypto_keys(priv); 10747 10748 /* To match current functionality of ipw2100 (which works well w/ 10749 * various supplicants, we don't force a disassociate if the 10750 * privacy capability changes ... */ 10751 #if 0 10752 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) && 10753 (((priv->assoc_request.capability & 10754 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) || 10755 (!(priv->assoc_request.capability & 10756 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) { 10757 IPW_DEBUG_ASSOC("Disassociating due to capability " 10758 "change.\n"); 10759 ipw_disassociate(priv); 10760 } 10761 #endif 10762 } 10763 10764 static int init_supported_rates(struct ipw_priv *priv, 10765 struct ipw_supported_rates *rates) 10766 { 10767 /* TODO: Mask out rates based on priv->rates_mask */ 10768 10769 memset(rates, 0, sizeof(*rates)); 10770 /* configure supported rates */ 10771 switch (priv->ieee->freq_band) { 10772 case LIBIPW_52GHZ_BAND: 10773 rates->ieee_mode = IPW_A_MODE; 10774 rates->purpose = IPW_RATE_CAPABILITIES; 10775 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION, 10776 LIBIPW_OFDM_DEFAULT_RATES_MASK); 10777 break; 10778 10779 default: /* Mixed or 2.4Ghz */ 10780 rates->ieee_mode = IPW_G_MODE; 10781 rates->purpose = IPW_RATE_CAPABILITIES; 10782 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION, 10783 LIBIPW_CCK_DEFAULT_RATES_MASK); 10784 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) { 10785 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION, 10786 LIBIPW_OFDM_DEFAULT_RATES_MASK); 10787 } 10788 break; 10789 } 10790 10791 return 0; 10792 } 10793 10794 static int ipw_config(struct ipw_priv *priv) 10795 { 10796 /* This is only called from ipw_up, which resets/reloads the firmware 10797 so, we don't need to first disable the card before we configure 10798 it */ 10799 if (ipw_set_tx_power(priv)) 10800 goto error; 10801 10802 /* initialize adapter address */ 10803 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr)) 10804 goto error; 10805 10806 /* set basic system config settings */ 10807 init_sys_config(&priv->sys_config); 10808 10809 /* Support Bluetooth if we have BT h/w on board, and user wants to. 
 * Does not support BT priority yet (don't abort or defer our Tx) */
	if (bt_coexist) {
		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];

		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_OOB;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;
	}
#endif

	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->sys_config.answer_broadcast_ssid_probe = 1;
	else
		priv->sys_config.answer_broadcast_ssid_probe = 0;

	if (ipw_send_system_config(priv))
		goto error;

	init_supported_rates(priv, &priv->rates);
	if (ipw_send_supported_rates(priv, &priv->rates))
		goto error;

	/* Set request-to-send threshold */
	if (priv->rts_threshold) {
		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
			goto error;
	}
#ifdef CONFIG_IPW2200_QOS
	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
	ipw_qos_activate(priv, NULL);
#endif	/* CONFIG_IPW2200_QOS */

	if (ipw_set_random_seed(priv))
		goto error;

	/* final state transition to the RUN state */
	if (ipw_send_host_complete(priv))
		goto error;

	priv->status |= STATUS_INIT;

	ipw_led_init(priv);
	ipw_led_radio_on(priv);
	priv->notif_missed_beacons = 0;

	/* Set hardware WEP key if it is configured. */
	if ((priv->capability & CAP_PRIVACY_ON) &&
	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
		ipw_set_hwcrypto_keys(priv);

	return 0;

error:
	return -EIO;
}

/*
 * NOTE:
 *
 * These tables have been tested in conjunction with the
 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
 *
 * Altering these values, using them on other hardware, or in geographies
 * not intended for resale of the above mentioned Intel adapters has
 * not been tested.
 *
 * Remember to update the table in README.ipw2200 when changing this
 * table.
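 *
 * ipw_set_geo() below matches the three character country code read from
 * the EEPROM (EEPROM_COUNTRY_CODE) against the name of each entry; an
 * unrecognized SKU falls back to entry 0, the restricted "---" set.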
10890 * 10891 */ 10892 static const struct libipw_geo ipw_geos[] = { 10893 { /* Restricted */ 10894 "---", 10895 .bg_channels = 11, 10896 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10897 {2427, 4}, {2432, 5}, {2437, 6}, 10898 {2442, 7}, {2447, 8}, {2452, 9}, 10899 {2457, 10}, {2462, 11}}, 10900 }, 10901 10902 { /* Custom US/Canada */ 10903 "ZZF", 10904 .bg_channels = 11, 10905 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10906 {2427, 4}, {2432, 5}, {2437, 6}, 10907 {2442, 7}, {2447, 8}, {2452, 9}, 10908 {2457, 10}, {2462, 11}}, 10909 .a_channels = 8, 10910 .a = {{5180, 36}, 10911 {5200, 40}, 10912 {5220, 44}, 10913 {5240, 48}, 10914 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 10915 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 10916 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 10917 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}}, 10918 }, 10919 10920 { /* Rest of World */ 10921 "ZZD", 10922 .bg_channels = 13, 10923 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10924 {2427, 4}, {2432, 5}, {2437, 6}, 10925 {2442, 7}, {2447, 8}, {2452, 9}, 10926 {2457, 10}, {2462, 11}, {2467, 12}, 10927 {2472, 13}}, 10928 }, 10929 10930 { /* Custom USA & Europe & High */ 10931 "ZZA", 10932 .bg_channels = 11, 10933 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10934 {2427, 4}, {2432, 5}, {2437, 6}, 10935 {2442, 7}, {2447, 8}, {2452, 9}, 10936 {2457, 10}, {2462, 11}}, 10937 .a_channels = 13, 10938 .a = {{5180, 36}, 10939 {5200, 40}, 10940 {5220, 44}, 10941 {5240, 48}, 10942 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 10943 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 10944 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 10945 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 10946 {5745, 149}, 10947 {5765, 153}, 10948 {5785, 157}, 10949 {5805, 161}, 10950 {5825, 165}}, 10951 }, 10952 10953 { /* Custom NA & Europe */ 10954 "ZZB", 10955 .bg_channels = 11, 10956 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10957 {2427, 4}, {2432, 5}, {2437, 6}, 10958 {2442, 7}, {2447, 8}, {2452, 9}, 10959 {2457, 10}, {2462, 11}}, 10960 .a_channels = 13, 10961 .a = {{5180, 36}, 10962 {5200, 40}, 10963 {5220, 44}, 10964 {5240, 48}, 10965 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 10966 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 10967 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 10968 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 10969 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 10970 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 10971 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 10972 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 10973 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 10974 }, 10975 10976 { /* Custom Japan */ 10977 "ZZC", 10978 .bg_channels = 11, 10979 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10980 {2427, 4}, {2432, 5}, {2437, 6}, 10981 {2442, 7}, {2447, 8}, {2452, 9}, 10982 {2457, 10}, {2462, 11}}, 10983 .a_channels = 4, 10984 .a = {{5170, 34}, {5190, 38}, 10985 {5210, 42}, {5230, 46}}, 10986 }, 10987 10988 { /* Custom */ 10989 "ZZM", 10990 .bg_channels = 11, 10991 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10992 {2427, 4}, {2432, 5}, {2437, 6}, 10993 {2442, 7}, {2447, 8}, {2452, 9}, 10994 {2457, 10}, {2462, 11}}, 10995 }, 10996 10997 { /* Europe */ 10998 "ZZE", 10999 .bg_channels = 13, 11000 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11001 {2427, 4}, {2432, 5}, {2437, 6}, 11002 {2442, 7}, {2447, 8}, {2452, 9}, 11003 {2457, 10}, {2462, 11}, {2467, 12}, 11004 {2472, 13}}, 11005 .a_channels = 19, 11006 .a = {{5180, 36}, 11007 {5200, 40}, 11008 {5220, 44}, 11009 {5240, 48}, 11010 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11011 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11012 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11013 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11014 {5500, 100, LIBIPW_CH_PASSIVE_ONLY}, 11015 {5520, 104, 
LIBIPW_CH_PASSIVE_ONLY}, 11016 {5540, 108, LIBIPW_CH_PASSIVE_ONLY}, 11017 {5560, 112, LIBIPW_CH_PASSIVE_ONLY}, 11018 {5580, 116, LIBIPW_CH_PASSIVE_ONLY}, 11019 {5600, 120, LIBIPW_CH_PASSIVE_ONLY}, 11020 {5620, 124, LIBIPW_CH_PASSIVE_ONLY}, 11021 {5640, 128, LIBIPW_CH_PASSIVE_ONLY}, 11022 {5660, 132, LIBIPW_CH_PASSIVE_ONLY}, 11023 {5680, 136, LIBIPW_CH_PASSIVE_ONLY}, 11024 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}}, 11025 }, 11026 11027 { /* Custom Japan */ 11028 "ZZJ", 11029 .bg_channels = 14, 11030 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11031 {2427, 4}, {2432, 5}, {2437, 6}, 11032 {2442, 7}, {2447, 8}, {2452, 9}, 11033 {2457, 10}, {2462, 11}, {2467, 12}, 11034 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}}, 11035 .a_channels = 4, 11036 .a = {{5170, 34}, {5190, 38}, 11037 {5210, 42}, {5230, 46}}, 11038 }, 11039 11040 { /* Rest of World */ 11041 "ZZR", 11042 .bg_channels = 14, 11043 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11044 {2427, 4}, {2432, 5}, {2437, 6}, 11045 {2442, 7}, {2447, 8}, {2452, 9}, 11046 {2457, 10}, {2462, 11}, {2467, 12}, 11047 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY | 11048 LIBIPW_CH_PASSIVE_ONLY}}, 11049 }, 11050 11051 { /* High Band */ 11052 "ZZH", 11053 .bg_channels = 13, 11054 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11055 {2427, 4}, {2432, 5}, {2437, 6}, 11056 {2442, 7}, {2447, 8}, {2452, 9}, 11057 {2457, 10}, {2462, 11}, 11058 {2467, 12, LIBIPW_CH_PASSIVE_ONLY}, 11059 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}}, 11060 .a_channels = 4, 11061 .a = {{5745, 149}, {5765, 153}, 11062 {5785, 157}, {5805, 161}}, 11063 }, 11064 11065 { /* Custom Europe */ 11066 "ZZG", 11067 .bg_channels = 13, 11068 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11069 {2427, 4}, {2432, 5}, {2437, 6}, 11070 {2442, 7}, {2447, 8}, {2452, 9}, 11071 {2457, 10}, {2462, 11}, 11072 {2467, 12}, {2472, 13}}, 11073 .a_channels = 4, 11074 .a = {{5180, 36}, {5200, 40}, 11075 {5220, 44}, {5240, 48}}, 11076 }, 11077 11078 { /* Europe */ 11079 "ZZK", 11080 .bg_channels = 13, 11081 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11082 {2427, 4}, {2432, 5}, {2437, 6}, 11083 {2442, 7}, {2447, 8}, {2452, 9}, 11084 {2457, 10}, {2462, 11}, 11085 {2467, 12, LIBIPW_CH_PASSIVE_ONLY}, 11086 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}}, 11087 .a_channels = 24, 11088 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY}, 11089 {5200, 40, LIBIPW_CH_PASSIVE_ONLY}, 11090 {5220, 44, LIBIPW_CH_PASSIVE_ONLY}, 11091 {5240, 48, LIBIPW_CH_PASSIVE_ONLY}, 11092 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11093 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11094 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11095 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11096 {5500, 100, LIBIPW_CH_PASSIVE_ONLY}, 11097 {5520, 104, LIBIPW_CH_PASSIVE_ONLY}, 11098 {5540, 108, LIBIPW_CH_PASSIVE_ONLY}, 11099 {5560, 112, LIBIPW_CH_PASSIVE_ONLY}, 11100 {5580, 116, LIBIPW_CH_PASSIVE_ONLY}, 11101 {5600, 120, LIBIPW_CH_PASSIVE_ONLY}, 11102 {5620, 124, LIBIPW_CH_PASSIVE_ONLY}, 11103 {5640, 128, LIBIPW_CH_PASSIVE_ONLY}, 11104 {5660, 132, LIBIPW_CH_PASSIVE_ONLY}, 11105 {5680, 136, LIBIPW_CH_PASSIVE_ONLY}, 11106 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}, 11107 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 11108 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 11109 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 11110 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 11111 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 11112 }, 11113 11114 { /* Europe */ 11115 "ZZL", 11116 .bg_channels = 11, 11117 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11118 {2427, 4}, {2432, 5}, {2437, 6}, 11119 {2442, 7}, {2447, 8}, {2452, 9}, 11120 {2457, 10}, {2462, 11}}, 11121 .a_channels = 13, 11122 .a = {{5180, 36, 
LIBIPW_CH_PASSIVE_ONLY}, 11123 {5200, 40, LIBIPW_CH_PASSIVE_ONLY}, 11124 {5220, 44, LIBIPW_CH_PASSIVE_ONLY}, 11125 {5240, 48, LIBIPW_CH_PASSIVE_ONLY}, 11126 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11127 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11128 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11129 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11130 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 11131 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 11132 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 11133 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 11134 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 11135 } 11136 }; 11137 11138 static void ipw_set_geo(struct ipw_priv *priv) 11139 { 11140 int j; 11141 11142 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) { 11143 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE], 11144 ipw_geos[j].name, 3)) 11145 break; 11146 } 11147 11148 if (j == ARRAY_SIZE(ipw_geos)) { 11149 IPW_WARNING("SKU [%c%c%c] not recognized.\n", 11150 priv->eeprom[EEPROM_COUNTRY_CODE + 0], 11151 priv->eeprom[EEPROM_COUNTRY_CODE + 1], 11152 priv->eeprom[EEPROM_COUNTRY_CODE + 2]); 11153 j = 0; 11154 } 11155 11156 libipw_set_geo(priv->ieee, &ipw_geos[j]); 11157 } 11158 11159 #define MAX_HW_RESTARTS 5 11160 static int ipw_up(struct ipw_priv *priv) 11161 { 11162 int rc, i; 11163 11164 /* Age scan list entries found before suspend */ 11165 if (priv->suspend_time) { 11166 libipw_networks_age(priv->ieee, priv->suspend_time); 11167 priv->suspend_time = 0; 11168 } 11169 11170 if (priv->status & STATUS_EXIT_PENDING) 11171 return -EIO; 11172 11173 if (cmdlog && !priv->cmdlog) { 11174 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog), 11175 GFP_KERNEL); 11176 if (priv->cmdlog == NULL) { 11177 IPW_ERROR("Error allocating %d command log entries.\n", 11178 cmdlog); 11179 return -ENOMEM; 11180 } else { 11181 priv->cmdlog_len = cmdlog; 11182 } 11183 } 11184 11185 for (i = 0; i < MAX_HW_RESTARTS; i++) { 11186 /* Load the microcode, firmware, and eeprom. 11187 * Also start the clocks. */ 11188 rc = ipw_load(priv); 11189 if (rc) { 11190 IPW_ERROR("Unable to load firmware: %d\n", rc); 11191 return rc; 11192 } 11193 11194 ipw_init_ordinals(priv); 11195 if (!(priv->config & CFG_CUSTOM_MAC)) 11196 eeprom_parse_mac(priv, priv->mac_addr); 11197 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); 11198 11199 ipw_set_geo(priv); 11200 11201 if (priv->status & STATUS_RF_KILL_SW) { 11202 IPW_WARNING("Radio disabled by module parameter.\n"); 11203 return 0; 11204 } else if (rf_kill_active(priv)) { 11205 IPW_WARNING("Radio Frequency Kill Switch is On:\n" 11206 "Kill switch must be turned off for " 11207 "wireless networking to work.\n"); 11208 schedule_delayed_work(&priv->rf_kill, 2 * HZ); 11209 return 0; 11210 } 11211 11212 rc = ipw_config(priv); 11213 if (!rc) { 11214 IPW_DEBUG_INFO("Configured device on count %i\n", i); 11215 11216 /* If configure to try and auto-associate, kick 11217 * off a scan. 
*/ 11218 schedule_delayed_work(&priv->request_scan, 0); 11219 11220 return 0; 11221 } 11222 11223 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc); 11224 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n", 11225 i, MAX_HW_RESTARTS); 11226 11227 /* We had an error bringing up the hardware, so take it 11228 * all the way back down so we can try again */ 11229 ipw_down(priv); 11230 } 11231 11232 /* tried to restart and config the device for as long as our 11233 * patience could withstand */ 11234 IPW_ERROR("Unable to initialize device after %d attempts.\n", i); 11235 11236 return -EIO; 11237 } 11238 11239 static void ipw_bg_up(struct work_struct *work) 11240 { 11241 struct ipw_priv *priv = 11242 container_of(work, struct ipw_priv, up); 11243 mutex_lock(&priv->mutex); 11244 ipw_up(priv); 11245 mutex_unlock(&priv->mutex); 11246 } 11247 11248 static void ipw_deinit(struct ipw_priv *priv) 11249 { 11250 int i; 11251 11252 if (priv->status & STATUS_SCANNING) { 11253 IPW_DEBUG_INFO("Aborting scan during shutdown.\n"); 11254 ipw_abort_scan(priv); 11255 } 11256 11257 if (priv->status & STATUS_ASSOCIATED) { 11258 IPW_DEBUG_INFO("Disassociating during shutdown.\n"); 11259 ipw_disassociate(priv); 11260 } 11261 11262 ipw_led_shutdown(priv); 11263 11264 /* Wait up to 1s for status to change to not scanning and not 11265 * associated (disassociation can take a while for a ful 802.11 11266 * exchange */ 11267 for (i = 1000; i && (priv->status & 11268 (STATUS_DISASSOCIATING | 11269 STATUS_ASSOCIATED | STATUS_SCANNING)); i--) 11270 udelay(10); 11271 11272 if (priv->status & (STATUS_DISASSOCIATING | 11273 STATUS_ASSOCIATED | STATUS_SCANNING)) 11274 IPW_DEBUG_INFO("Still associated or scanning...\n"); 11275 else 11276 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i); 11277 11278 /* Attempt to disable the card */ 11279 ipw_send_card_disable(priv, 0); 11280 11281 priv->status &= ~STATUS_INIT; 11282 } 11283 11284 static void ipw_down(struct ipw_priv *priv) 11285 { 11286 int exit_pending = priv->status & STATUS_EXIT_PENDING; 11287 11288 priv->status |= STATUS_EXIT_PENDING; 11289 11290 if (ipw_is_init(priv)) 11291 ipw_deinit(priv); 11292 11293 /* Wipe out the EXIT_PENDING status bit if we are not actually 11294 * exiting the module */ 11295 if (!exit_pending) 11296 priv->status &= ~STATUS_EXIT_PENDING; 11297 11298 /* tell the device to stop sending interrupts */ 11299 ipw_disable_interrupts(priv); 11300 11301 /* Clear all bits but the RF Kill */ 11302 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING; 11303 netif_carrier_off(priv->net_dev); 11304 11305 ipw_stop_nic(priv); 11306 11307 ipw_led_radio_off(priv); 11308 } 11309 11310 static void ipw_bg_down(struct work_struct *work) 11311 { 11312 struct ipw_priv *priv = 11313 container_of(work, struct ipw_priv, down); 11314 mutex_lock(&priv->mutex); 11315 ipw_down(priv); 11316 mutex_unlock(&priv->mutex); 11317 } 11318 11319 static int ipw_wdev_init(struct net_device *dev) 11320 { 11321 int i, rc = 0; 11322 struct ipw_priv *priv = libipw_priv(dev); 11323 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 11324 struct wireless_dev *wdev = &priv->ieee->wdev; 11325 11326 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); 11327 11328 /* fill-out priv->ieee->bg_band */ 11329 if (geo->bg_channels) { 11330 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band; 11331 11332 bg_band->band = NL80211_BAND_2GHZ; 11333 bg_band->n_channels = geo->bg_channels; 11334 bg_band->channels = kcalloc(geo->bg_channels, 11335 sizeof(struct 
ieee80211_channel), 11336 GFP_KERNEL); 11337 if (!bg_band->channels) { 11338 rc = -ENOMEM; 11339 goto out; 11340 } 11341 /* translate geo->bg to bg_band.channels */ 11342 for (i = 0; i < geo->bg_channels; i++) { 11343 bg_band->channels[i].band = NL80211_BAND_2GHZ; 11344 bg_band->channels[i].center_freq = geo->bg[i].freq; 11345 bg_band->channels[i].hw_value = geo->bg[i].channel; 11346 bg_band->channels[i].max_power = geo->bg[i].max_power; 11347 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) 11348 bg_band->channels[i].flags |= 11349 IEEE80211_CHAN_NO_IR; 11350 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS) 11351 bg_band->channels[i].flags |= 11352 IEEE80211_CHAN_NO_IR; 11353 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT) 11354 bg_band->channels[i].flags |= 11355 IEEE80211_CHAN_RADAR; 11356 /* No equivalent for LIBIPW_CH_80211H_RULES, 11357 LIBIPW_CH_UNIFORM_SPREADING, or 11358 LIBIPW_CH_B_ONLY... */ 11359 } 11360 /* point at bitrate info */ 11361 bg_band->bitrates = ipw2200_bg_rates; 11362 bg_band->n_bitrates = ipw2200_num_bg_rates; 11363 11364 wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band; 11365 } 11366 11367 /* fill-out priv->ieee->a_band */ 11368 if (geo->a_channels) { 11369 struct ieee80211_supported_band *a_band = &priv->ieee->a_band; 11370 11371 a_band->band = NL80211_BAND_5GHZ; 11372 a_band->n_channels = geo->a_channels; 11373 a_band->channels = kcalloc(geo->a_channels, 11374 sizeof(struct ieee80211_channel), 11375 GFP_KERNEL); 11376 if (!a_band->channels) { 11377 rc = -ENOMEM; 11378 goto out; 11379 } 11380 /* translate geo->a to a_band.channels */ 11381 for (i = 0; i < geo->a_channels; i++) { 11382 a_band->channels[i].band = NL80211_BAND_5GHZ; 11383 a_band->channels[i].center_freq = geo->a[i].freq; 11384 a_band->channels[i].hw_value = geo->a[i].channel; 11385 a_band->channels[i].max_power = geo->a[i].max_power; 11386 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) 11387 a_band->channels[i].flags |= 11388 IEEE80211_CHAN_NO_IR; 11389 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS) 11390 a_band->channels[i].flags |= 11391 IEEE80211_CHAN_NO_IR; 11392 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT) 11393 a_band->channels[i].flags |= 11394 IEEE80211_CHAN_RADAR; 11395 /* No equivalent for LIBIPW_CH_80211H_RULES, 11396 LIBIPW_CH_UNIFORM_SPREADING, or 11397 LIBIPW_CH_B_ONLY... */ 11398 } 11399 /* point at bitrate info */ 11400 a_band->bitrates = ipw2200_a_rates; 11401 a_band->n_bitrates = ipw2200_num_a_rates; 11402 11403 wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band; 11404 } 11405 11406 wdev->wiphy->cipher_suites = ipw_cipher_suites; 11407 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites); 11408 11409 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); 11410 11411 /* With that information in place, we can now register the wiphy... 
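wiphy_register() hands the wiphy to cfg80211, so the band, bitrate and
cipher suite information filled in above has to be complete before this
call.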
*/ 11412 if (wiphy_register(wdev->wiphy)) 11413 rc = -EIO; 11414 out: 11415 return rc; 11416 } 11417 11418 /* PCI driver stuff */ 11419 static const struct pci_device_id card_ids[] = { 11420 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0}, 11421 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0}, 11422 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0}, 11423 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0}, 11424 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0}, 11425 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0}, 11426 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0}, 11427 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0}, 11428 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0}, 11429 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0}, 11430 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0}, 11431 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0}, 11432 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0}, 11433 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0}, 11434 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0}, 11435 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0}, 11436 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0}, 11437 {PCI_VDEVICE(INTEL, 0x104f), 0}, 11438 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */ 11439 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */ 11440 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */ 11441 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */ 11442 11443 /* required last entry */ 11444 {0,} 11445 }; 11446 11447 MODULE_DEVICE_TABLE(pci, card_ids); 11448 11449 static struct attribute *ipw_sysfs_entries[] = { 11450 &dev_attr_rf_kill.attr, 11451 &dev_attr_direct_dword.attr, 11452 &dev_attr_indirect_byte.attr, 11453 &dev_attr_indirect_dword.attr, 11454 &dev_attr_mem_gpio_reg.attr, 11455 &dev_attr_command_event_reg.attr, 11456 &dev_attr_nic_type.attr, 11457 &dev_attr_status.attr, 11458 &dev_attr_cfg.attr, 11459 &dev_attr_error.attr, 11460 &dev_attr_event_log.attr, 11461 &dev_attr_cmd_log.attr, 11462 &dev_attr_eeprom_delay.attr, 11463 &dev_attr_ucode_version.attr, 11464 &dev_attr_rtc.attr, 11465 &dev_attr_scan_age.attr, 11466 &dev_attr_led.attr, 11467 &dev_attr_speed_scan.attr, 11468 &dev_attr_net_stats.attr, 11469 &dev_attr_channels.attr, 11470 #ifdef CONFIG_IPW2200_PROMISCUOUS 11471 &dev_attr_rtap_iface.attr, 11472 &dev_attr_rtap_filter.attr, 11473 #endif 11474 NULL 11475 }; 11476 11477 static const struct attribute_group ipw_attribute_group = { 11478 .name = NULL, /* put in device directory */ 11479 .attrs = ipw_sysfs_entries, 11480 }; 11481 11482 #ifdef CONFIG_IPW2200_PROMISCUOUS 11483 static int ipw_prom_open(struct net_device *dev) 11484 { 11485 struct ipw_prom_priv *prom_priv = libipw_priv(dev); 11486 struct ipw_priv *priv = prom_priv->priv; 11487 11488 IPW_DEBUG_INFO("prom dev->open\n"); 11489 netif_carrier_off(dev); 11490 11491 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 11492 priv->sys_config.accept_all_data_frames = 1; 11493 priv->sys_config.accept_non_directed_frames = 1; 11494 priv->sys_config.accept_all_mgmt_bcpr = 1; 11495 priv->sys_config.accept_all_mgmt_frames = 1; 11496 11497 ipw_send_system_config(priv); 11498 } 11499 11500 return 0; 11501 } 11502 11503 static int ipw_prom_stop(struct net_device *dev) 11504 { 11505 struct ipw_prom_priv *prom_priv = libipw_priv(dev); 11506 struct ipw_priv *priv = prom_priv->priv; 11507 11508 IPW_DEBUG_INFO("prom dev->stop\n"); 11509 11510 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 11511 priv->sys_config.accept_all_data_frames = 0; 
11512 priv->sys_config.accept_non_directed_frames = 0; 11513 priv->sys_config.accept_all_mgmt_bcpr = 0; 11514 priv->sys_config.accept_all_mgmt_frames = 0; 11515 11516 ipw_send_system_config(priv); 11517 } 11518 11519 return 0; 11520 } 11521 11522 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb, 11523 struct net_device *dev) 11524 { 11525 IPW_DEBUG_INFO("prom dev->xmit\n"); 11526 dev_kfree_skb(skb); 11527 return NETDEV_TX_OK; 11528 } 11529 11530 static const struct net_device_ops ipw_prom_netdev_ops = { 11531 .ndo_open = ipw_prom_open, 11532 .ndo_stop = ipw_prom_stop, 11533 .ndo_start_xmit = ipw_prom_hard_start_xmit, 11534 .ndo_set_mac_address = eth_mac_addr, 11535 .ndo_validate_addr = eth_validate_addr, 11536 }; 11537 11538 static int ipw_prom_alloc(struct ipw_priv *priv) 11539 { 11540 int rc = 0; 11541 11542 if (priv->prom_net_dev) 11543 return -EPERM; 11544 11545 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1); 11546 if (priv->prom_net_dev == NULL) 11547 return -ENOMEM; 11548 11549 priv->prom_priv = libipw_priv(priv->prom_net_dev); 11550 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev); 11551 priv->prom_priv->priv = priv; 11552 11553 strcpy(priv->prom_net_dev->name, "rtap%d"); 11554 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN); 11555 11556 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 11557 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops; 11558 11559 priv->prom_net_dev->min_mtu = 68; 11560 priv->prom_net_dev->max_mtu = LIBIPW_DATA_LEN; 11561 11562 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR; 11563 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev); 11564 11565 rc = register_netdev(priv->prom_net_dev); 11566 if (rc) { 11567 free_libipw(priv->prom_net_dev, 1); 11568 priv->prom_net_dev = NULL; 11569 return rc; 11570 } 11571 11572 return 0; 11573 } 11574 11575 static void ipw_prom_free(struct ipw_priv *priv) 11576 { 11577 if (!priv->prom_net_dev) 11578 return; 11579 11580 unregister_netdev(priv->prom_net_dev); 11581 free_libipw(priv->prom_net_dev, 1); 11582 11583 priv->prom_net_dev = NULL; 11584 } 11585 11586 #endif 11587 11588 static const struct net_device_ops ipw_netdev_ops = { 11589 .ndo_open = ipw_net_open, 11590 .ndo_stop = ipw_net_stop, 11591 .ndo_set_rx_mode = ipw_net_set_multicast_list, 11592 .ndo_set_mac_address = ipw_net_set_mac_address, 11593 .ndo_start_xmit = libipw_xmit, 11594 .ndo_validate_addr = eth_validate_addr, 11595 }; 11596 11597 static int ipw_pci_probe(struct pci_dev *pdev, 11598 const struct pci_device_id *ent) 11599 { 11600 int err = 0; 11601 struct net_device *net_dev; 11602 void __iomem *base; 11603 u32 length, val; 11604 struct ipw_priv *priv; 11605 int i; 11606 11607 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0); 11608 if (net_dev == NULL) { 11609 err = -ENOMEM; 11610 goto out; 11611 } 11612 11613 priv = libipw_priv(net_dev); 11614 priv->ieee = netdev_priv(net_dev); 11615 11616 priv->net_dev = net_dev; 11617 priv->pci_dev = pdev; 11618 ipw_debug_level = debug; 11619 spin_lock_init(&priv->irq_lock); 11620 spin_lock_init(&priv->lock); 11621 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) 11622 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); 11623 11624 mutex_init(&priv->mutex); 11625 if (pci_enable_device(pdev)) { 11626 err = -ENODEV; 11627 goto out_free_libipw; 11628 } 11629 11630 pci_set_master(pdev); 11631 11632 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 11633 if (!err) 11634 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 11635 if (err) { 11636 printk(KERN_WARNING 
DRV_NAME ": No suitable DMA available.\n"); 11637 goto out_pci_disable_device; 11638 } 11639 11640 pci_set_drvdata(pdev, priv); 11641 11642 err = pci_request_regions(pdev, DRV_NAME); 11643 if (err) 11644 goto out_pci_disable_device; 11645 11646 /* We disable the RETRY_TIMEOUT register (0x41) to keep 11647 * PCI Tx retries from interfering with C3 CPU state */ 11648 pci_read_config_dword(pdev, 0x40, &val); 11649 if ((val & 0x0000ff00) != 0) 11650 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 11651 11652 length = pci_resource_len(pdev, 0); 11653 priv->hw_len = length; 11654 11655 base = pci_ioremap_bar(pdev, 0); 11656 if (!base) { 11657 err = -ENODEV; 11658 goto out_pci_release_regions; 11659 } 11660 11661 priv->hw_base = base; 11662 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length); 11663 IPW_DEBUG_INFO("pci_resource_base = %p\n", base); 11664 11665 err = ipw_setup_deferred_work(priv); 11666 if (err) { 11667 IPW_ERROR("Unable to setup deferred work\n"); 11668 goto out_iounmap; 11669 } 11670 11671 ipw_sw_reset(priv, 1); 11672 11673 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv); 11674 if (err) { 11675 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); 11676 goto out_iounmap; 11677 } 11678 11679 SET_NETDEV_DEV(net_dev, &pdev->dev); 11680 11681 mutex_lock(&priv->mutex); 11682 11683 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit; 11684 priv->ieee->set_security = shim__set_security; 11685 priv->ieee->is_queue_full = ipw_net_is_queue_full; 11686 11687 #ifdef CONFIG_IPW2200_QOS 11688 priv->ieee->is_qos_active = ipw_is_qos_active; 11689 priv->ieee->handle_probe_response = ipw_handle_beacon; 11690 priv->ieee->handle_beacon = ipw_handle_probe_response; 11691 priv->ieee->handle_assoc_response = ipw_handle_assoc_response; 11692 #endif /* CONFIG_IPW2200_QOS */ 11693 11694 priv->ieee->perfect_rssi = -20; 11695 priv->ieee->worst_rssi = -85; 11696 11697 net_dev->netdev_ops = &ipw_netdev_ops; 11698 priv->wireless_data.spy_data = &priv->ieee->spy_data; 11699 net_dev->wireless_data = &priv->wireless_data; 11700 net_dev->wireless_handlers = &ipw_wx_handler_def; 11701 net_dev->ethtool_ops = &ipw_ethtool_ops; 11702 11703 net_dev->min_mtu = 68; 11704 net_dev->max_mtu = LIBIPW_DATA_LEN; 11705 11706 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); 11707 if (err) { 11708 IPW_ERROR("failed to create sysfs device attributes\n"); 11709 mutex_unlock(&priv->mutex); 11710 goto out_release_irq; 11711 } 11712 11713 if (ipw_up(priv)) { 11714 mutex_unlock(&priv->mutex); 11715 err = -EIO; 11716 goto out_remove_sysfs; 11717 } 11718 11719 mutex_unlock(&priv->mutex); 11720 11721 err = ipw_wdev_init(net_dev); 11722 if (err) { 11723 IPW_ERROR("failed to register wireless device\n"); 11724 goto out_remove_sysfs; 11725 } 11726 11727 err = register_netdev(net_dev); 11728 if (err) { 11729 IPW_ERROR("failed to register network device\n"); 11730 goto out_unregister_wiphy; 11731 } 11732 11733 #ifdef CONFIG_IPW2200_PROMISCUOUS 11734 if (rtap_iface) { 11735 err = ipw_prom_alloc(priv); 11736 if (err) { 11737 IPW_ERROR("Failed to register promiscuous network " 11738 "device (error %d).\n", err); 11739 unregister_netdev(priv->net_dev); 11740 goto out_unregister_wiphy; 11741 } 11742 } 11743 #endif 11744 11745 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg " 11746 "channels, %d 802.11a channels)\n", 11747 priv->ieee->geo.name, priv->ieee->geo.bg_channels, 11748 priv->ieee->geo.a_channels); 11749 11750 return 0; 11751 11752 out_unregister_wiphy: 11753 
out_unregister_wiphy:
	wiphy_unregister(priv->ieee->wdev.wiphy);
	kfree(priv->ieee->a_band.channels);
	kfree(priv->ieee->bg_band.channels);
out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
out_release_irq:
	free_irq(pdev->irq, priv);
out_iounmap:
	iounmap(priv->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_free_libipw:
	free_libipw(priv->net_dev, 0);
out:
	return err;
}

static void ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	if (!priv)
		return;

	mutex_lock(&priv->mutex);

	priv->status |= STATUS_EXIT_PENDING;
	ipw_down(priv);
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	mutex_unlock(&priv->mutex);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	if (priv->cmdlog) {
		kfree(priv->cmdlog);
		priv->cmdlog = NULL;
	}

	/* make sure all works are inactive */
	cancel_delayed_work_sync(&priv->adhoc_check);
	cancel_work_sync(&priv->associate);
	cancel_work_sync(&priv->disassociate);
	cancel_work_sync(&priv->system_config);
	cancel_work_sync(&priv->rx_replenish);
	cancel_work_sync(&priv->adapter_restart);
	cancel_delayed_work_sync(&priv->rf_kill);
	cancel_work_sync(&priv->up);
	cancel_work_sync(&priv->down);
	cancel_delayed_work_sync(&priv->request_scan);
	cancel_delayed_work_sync(&priv->request_direct_scan);
	cancel_delayed_work_sync(&priv->request_passive_scan);
	cancel_delayed_work_sync(&priv->scan_event);
	cancel_delayed_work_sync(&priv->gather_stats);
	cancel_work_sync(&priv->abort_scan);
	cancel_work_sync(&priv->roam);
	cancel_delayed_work_sync(&priv->scan_check);
	cancel_work_sync(&priv->link_up);
	cancel_work_sync(&priv->link_down);
	cancel_delayed_work_sync(&priv->led_link_on);
	cancel_delayed_work_sync(&priv->led_link_off);
	cancel_delayed_work_sync(&priv->led_act_off);
	cancel_work_sync(&priv->merge_networks);

	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ipw_ibss_seq, list));
		}
	}

	kfree(priv->error);
	priv->error = NULL;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	ipw_prom_free(priv);
#endif

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* wiphy_unregister needs to be here, before free_libipw */
	wiphy_unregister(priv->ieee->wdev.wiphy);
	kfree(priv->ieee->a_band.channels);
	kfree(priv->ieee->bg_band.channels);
	free_libipw(priv->net_dev, 0);
	free_firmware();
}

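/*
 * Legacy PCI power-management entry points, wired up through the
 * .suspend/.resume members of struct pci_driver further below (this
 * driver predates the dev_pm_ops interface).
 */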
#ifdef CONFIG_PM
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	priv->suspend_at = ktime_get_boottime_seconds();

	return 0;
}

static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	int err;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state() won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;

	/* Bring the device back up */
	schedule_work(&priv->up);

	return 0;
}
#endif

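/*
 * Shutdown hook: called on reboot, power-off, or kexec to quiesce the
 * adapter. It only powers the hardware down and disables the PCI device,
 * rather than doing the full teardown performed by ipw_pci_remove().
 */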
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	pci_disable_device(pdev);
}

/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = ipw_pci_remove,
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
	.shutdown = ipw_pci_shutdown,
};

static int __init ipw_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_register_driver(&ipw_driver);
	if (ret) {
		IPW_ERROR("Unable to initialize PCI module\n");
		return ret;
	}

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
	if (ret) {
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
		return ret;
	}

	return ret;
}

static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}

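/*
 * Module parameters. All of them use permission 0444, so they are
 * visible read-only under /sys/module/ipw2200/parameters/ and are
 * normally set on the modprobe command line, for example
 * (values purely illustrative):
 *
 *	modprobe ipw2200 debug=0x43fff led=1 associate=0 hwcrypto=0
 */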
module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param_named(led, led_support, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 1 [on])");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param_named(channel, default_channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

#ifdef CONFIG_IPW2200_PROMISCUOUS
module_param(rtap_iface, int, 0444);
MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
#endif

#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");

module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");

module_param(qos_no_ack_mask, int, 0444);
MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");

module_param(burst_duration_CCK, int, 0444);
MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");

module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
#endif	/* CONFIG_IPW2200_QOS */

#ifdef CONFIG_IPW2200_MONITOR
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_param(bt_coexist, int, 0444);
MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");

module_param(hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");

module_param(cmdlog, int, 0444);
MODULE_PARM_DESC(cmdlog,
		 "allocate a ring buffer for logging firmware commands");

module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");

module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna,
		 "select antenna: 0=both (default), 1=Main, 2=slow_diversity (choose the antenna with lower background noise), 3=Aux");

module_exit(ipw_exit);
module_init(ipw_init);