1 /******************************************************************************
2 
3   Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4 
5   802.11 status code portion of this file from ethereal-0.10.6:
6     Copyright 2000, Axis Communications AB
7     Ethereal - Network traffic analyzer
8     By Gerald Combs <gerald@ethereal.com>
9     Copyright 1998 Gerald Combs
10 
11   This program is free software; you can redistribute it and/or modify it
12   under the terms of version 2 of the GNU General Public License as
13   published by the Free Software Foundation.
14 
15   This program is distributed in the hope that it will be useful, but WITHOUT
16   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18   more details.
19 
20   You should have received a copy of the GNU General Public License along with
21   this program; if not, write to the Free Software Foundation, Inc., 59
22   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
23 
24   The full GNU General Public License is included in this distribution in the
25   file called LICENSE.
26 
27   Contact Information:
28   Intel Linux Wireless <ilw@linux.intel.com>
29   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 
31 ******************************************************************************/
32 
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <net/cfg80211-wext.h>
36 #include "ipw2200.h"
37 #include "ipw.h"
38 
39 
40 #ifndef KBUILD_EXTMOD
41 #define VK "k"
42 #else
43 #define VK
44 #endif
45 
46 #ifdef CONFIG_IPW2200_DEBUG
47 #define VD "d"
48 #else
49 #define VD
50 #endif
51 
52 #ifdef CONFIG_IPW2200_MONITOR
53 #define VM "m"
54 #else
55 #define VM
56 #endif
57 
58 #ifdef CONFIG_IPW2200_PROMISCUOUS
59 #define VP "p"
60 #else
61 #define VP
62 #endif
63 
64 #ifdef CONFIG_IPW2200_RADIOTAP
65 #define VR "r"
66 #else
67 #define VR
68 #endif
69 
70 #ifdef CONFIG_IPW2200_QOS
71 #define VQ "q"
72 #else
73 #define VQ
74 #endif
75 
76 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
77 #define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
78 #define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
79 #define DRV_VERSION     IPW2200_VERSION
80 
81 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
82 
83 MODULE_DESCRIPTION(DRV_DESCRIPTION);
84 MODULE_VERSION(DRV_VERSION);
85 MODULE_AUTHOR(DRV_COPYRIGHT);
86 MODULE_LICENSE("GPL");
87 MODULE_FIRMWARE("ipw2200-ibss.fw");
88 #ifdef CONFIG_IPW2200_MONITOR
89 MODULE_FIRMWARE("ipw2200-sniffer.fw");
90 #endif
91 MODULE_FIRMWARE("ipw2200-bss.fw");
92 
93 static int cmdlog = 0;
94 static int debug = 0;
95 static int default_channel = 0;
96 static int network_mode = 0;
97 
98 static u32 ipw_debug_level;
99 static int associate;
100 static int auto_create = 1;
101 static int led_support = 1;
102 static int disable = 0;
103 static int bt_coexist = 0;
104 static int hwcrypto = 0;
105 static int roaming = 1;
106 static const char ipw_modes[] = {
107 	'a', 'b', 'g', '?'
108 };
109 static int antenna = CFG_SYS_ANTENNA_BOTH;
110 
111 #ifdef CONFIG_IPW2200_PROMISCUOUS
112 static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
113 #endif
114 
115 static struct ieee80211_rate ipw2200_rates[] = {
116 	{ .bitrate = 10 },
117 	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118 	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
119 	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
120 	{ .bitrate = 60 },
121 	{ .bitrate = 90 },
122 	{ .bitrate = 120 },
123 	{ .bitrate = 180 },
124 	{ .bitrate = 240 },
125 	{ .bitrate = 360 },
126 	{ .bitrate = 480 },
127 	{ .bitrate = 540 }
128 };
129 
130 #define ipw2200_a_rates		(ipw2200_rates + 4)
131 #define ipw2200_num_a_rates	8
132 #define ipw2200_bg_rates	(ipw2200_rates + 0)
133 #define ipw2200_num_bg_rates	12
134 
135 /* Ugly macro to convert literal channel numbers into their mhz equivalents
136  * There are certianly some conditions that will break this (like feeding it '30')
137  * but they shouldn't arise since nothing talks on channel 30. */
138 #define ieee80211chan2mhz(x) \
139 	(((x) <= 14) ? \
140 	(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
141 	((x) + 1000) * 5)
142 
143 #ifdef CONFIG_IPW2200_QOS
144 static int qos_enable = 0;
145 static int qos_burst_enable = 0;
146 static int qos_no_ack_mask = 0;
147 static int burst_duration_CCK = 0;
148 static int burst_duration_OFDM = 0;
149 
150 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
151 	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
152 	 QOS_TX3_CW_MIN_OFDM},
153 	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
154 	 QOS_TX3_CW_MAX_OFDM},
155 	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
156 	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
157 	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
158 	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
159 };
160 
161 static struct libipw_qos_parameters def_qos_parameters_CCK = {
162 	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
163 	 QOS_TX3_CW_MIN_CCK},
164 	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
165 	 QOS_TX3_CW_MAX_CCK},
166 	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
167 	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
168 	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
169 	 QOS_TX3_TXOP_LIMIT_CCK}
170 };
171 
172 static struct libipw_qos_parameters def_parameters_OFDM = {
173 	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
174 	 DEF_TX3_CW_MIN_OFDM},
175 	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
176 	 DEF_TX3_CW_MAX_OFDM},
177 	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
178 	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
179 	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
180 	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
181 };
182 
183 static struct libipw_qos_parameters def_parameters_CCK = {
184 	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
185 	 DEF_TX3_CW_MIN_CCK},
186 	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
187 	 DEF_TX3_CW_MAX_CCK},
188 	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
189 	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
190 	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
191 	 DEF_TX3_TXOP_LIMIT_CCK}
192 };
193 
194 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
195 
196 static int from_priority_to_tx_queue[] = {
197 	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
198 	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
199 };
200 
201 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
202 
203 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
204 				       *qos_param);
205 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
206 				     *qos_param);
207 #endif				/* CONFIG_IPW2200_QOS */
208 
209 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
210 static void ipw_remove_current_network(struct ipw_priv *priv);
211 static void ipw_rx(struct ipw_priv *priv);
212 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
213 				struct clx2_tx_queue *txq, int qindex);
214 static int ipw_queue_reset(struct ipw_priv *priv);
215 
216 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
217 			     int len, int sync);
218 
219 static void ipw_tx_queue_free(struct ipw_priv *);
220 
221 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
222 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
223 static void ipw_rx_queue_replenish(void *);
224 static int ipw_up(struct ipw_priv *);
225 static void ipw_bg_up(struct work_struct *work);
226 static void ipw_down(struct ipw_priv *);
227 static void ipw_bg_down(struct work_struct *work);
228 static int ipw_config(struct ipw_priv *);
229 static int init_supported_rates(struct ipw_priv *priv,
230 				struct ipw_supported_rates *prates);
231 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
232 static void ipw_send_wep_keys(struct ipw_priv *, int);
233 
234 static int snprint_line(char *buf, size_t count,
235 			const u8 * data, u32 len, u32 ofs)
236 {
237 	int out, i, j, l;
238 	char c;
239 
240 	out = snprintf(buf, count, "%08X", ofs);
241 
242 	for (l = 0, i = 0; i < 2; i++) {
243 		out += snprintf(buf + out, count - out, " ");
244 		for (j = 0; j < 8 && l < len; j++, l++)
245 			out += snprintf(buf + out, count - out, "%02X ",
246 					data[(i * 8 + j)]);
247 		for (; j < 8; j++)
248 			out += snprintf(buf + out, count - out, "   ");
249 	}
250 
251 	out += snprintf(buf + out, count - out, " ");
252 	for (l = 0, i = 0; i < 2; i++) {
253 		out += snprintf(buf + out, count - out, " ");
254 		for (j = 0; j < 8 && l < len; j++, l++) {
255 			c = data[(i * 8 + j)];
256 			if (!isascii(c) || !isprint(c))
257 				c = '.';
258 
259 			out += snprintf(buf + out, count - out, "%c", c);
260 		}
261 
262 		for (; j < 8; j++)
263 			out += snprintf(buf + out, count - out, " ");
264 	}
265 
266 	return out;
267 }
268 
269 static void printk_buf(int level, const u8 * data, u32 len)
270 {
271 	char line[81];
272 	u32 ofs = 0;
273 	if (!(ipw_debug_level & level))
274 		return;
275 
276 	while (len) {
277 		snprint_line(line, sizeof(line), &data[ofs],
278 			     min(len, 16U), ofs);
279 		printk(KERN_DEBUG "%s\n", line);
280 		ofs += 16;
281 		len -= min(len, 16U);
282 	}
283 }
284 
285 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
286 {
287 	size_t out = size;
288 	u32 ofs = 0;
289 	int total = 0;
290 
291 	while (size && len) {
292 		out = snprint_line(output, size, &data[ofs],
293 				   min_t(size_t, len, 16U), ofs);
294 
295 		ofs += 16;
296 		output += out;
297 		size -= out;
298 		len -= min_t(size_t, len, 16U);
299 		total += out;
300 	}
301 	return total;
302 }
303 
304 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
305 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
306 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
307 
308 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
309 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
310 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
311 
312 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
313 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
314 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
315 {
316 	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
317 		     __LINE__, (u32) (b), (u32) (c));
318 	_ipw_write_reg8(a, b, c);
319 }
320 
321 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
322 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
323 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
324 {
325 	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
326 		     __LINE__, (u32) (b), (u32) (c));
327 	_ipw_write_reg16(a, b, c);
328 }
329 
330 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
331 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
332 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
333 {
334 	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
335 		     __LINE__, (u32) (b), (u32) (c));
336 	_ipw_write_reg32(a, b, c);
337 }
338 
339 /* 8-bit direct write (low 4K) */
340 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
341 		u8 val)
342 {
343 	writeb(val, ipw->hw_base + ofs);
344 }
345 
346 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
347 #define ipw_write8(ipw, ofs, val) do { \
348 	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
349 			__LINE__, (u32)(ofs), (u32)(val)); \
350 	_ipw_write8(ipw, ofs, val); \
351 } while (0)
352 
353 /* 16-bit direct write (low 4K) */
354 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
355 		u16 val)
356 {
357 	writew(val, ipw->hw_base + ofs);
358 }
359 
360 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
361 #define ipw_write16(ipw, ofs, val) do { \
362 	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
363 			__LINE__, (u32)(ofs), (u32)(val)); \
364 	_ipw_write16(ipw, ofs, val); \
365 } while (0)
366 
367 /* 32-bit direct write (low 4K) */
368 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
369 		u32 val)
370 {
371 	writel(val, ipw->hw_base + ofs);
372 }
373 
374 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
375 #define ipw_write32(ipw, ofs, val) do { \
376 	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
377 			__LINE__, (u32)(ofs), (u32)(val)); \
378 	_ipw_write32(ipw, ofs, val); \
379 } while (0)
380 
381 /* 8-bit direct read (low 4K) */
382 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
383 {
384 	return readb(ipw->hw_base + ofs);
385 }
386 
387 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
388 #define ipw_read8(ipw, ofs) ({ \
389 	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
390 			(u32)(ofs)); \
391 	_ipw_read8(ipw, ofs); \
392 })
393 
394 /* 16-bit direct read (low 4K) */
395 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
396 {
397 	return readw(ipw->hw_base + ofs);
398 }
399 
400 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
401 #define ipw_read16(ipw, ofs) ({ \
402 	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
403 			(u32)(ofs)); \
404 	_ipw_read16(ipw, ofs); \
405 })
406 
407 /* 32-bit direct read (low 4K) */
408 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
409 {
410 	return readl(ipw->hw_base + ofs);
411 }
412 
413 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
414 #define ipw_read32(ipw, ofs) ({ \
415 	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
416 			(u32)(ofs)); \
417 	_ipw_read32(ipw, ofs); \
418 })
419 
420 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
421 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
422 #define ipw_read_indirect(a, b, c, d) ({ \
423 	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
424 			__LINE__, (u32)(b), (u32)(d)); \
425 	_ipw_read_indirect(a, b, c, d); \
426 })
427 
428 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
429 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
430 				int num);
431 #define ipw_write_indirect(a, b, c, d) do { \
432 	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
433 			__LINE__, (u32)(b), (u32)(d)); \
434 	_ipw_write_indirect(a, b, c, d); \
435 } while (0)
436 
437 /* 32-bit indirect write (above 4K) */
438 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
439 {
440 	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
441 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
442 	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
443 }
444 
445 /* 8-bit indirect write (above 4K) */
446 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
447 {
448 	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
449 	u32 dif_len = reg - aligned_addr;
450 
451 	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
452 	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
453 	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
454 }
455 
456 /* 16-bit indirect write (above 4K) */
457 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
458 {
459 	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
460 	u32 dif_len = (reg - aligned_addr) & (~0x1ul);
461 
462 	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
463 	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
464 	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
465 }
466 
467 /* 8-bit indirect read (above 4K) */
468 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
469 {
470 	u32 word;
471 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
472 	IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
473 	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
474 	return (word >> ((reg & 0x3) * 8)) & 0xff;
475 }
476 
477 /* 32-bit indirect read (above 4K) */
478 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
479 {
480 	u32 value;
481 
482 	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
483 
484 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
485 	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
486 	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
487 	return value;
488 }
489 
490 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
491 /*    for area above 1st 4K of SRAM/reg space */
492 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
493 			       int num)
494 {
495 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
496 	u32 dif_len = addr - aligned_addr;
497 	u32 i;
498 
499 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
500 
501 	if (num <= 0) {
502 		return;
503 	}
504 
505 	/* Read the first dword (or portion) byte by byte */
506 	if (unlikely(dif_len)) {
507 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508 		/* Start reading at aligned_addr + dif_len */
509 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
510 			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
511 		aligned_addr += 4;
512 	}
513 
514 	/* Read all of the middle dwords as dwords, with auto-increment */
515 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
516 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
517 		*(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
518 
519 	/* Read the last dword (or portion) byte by byte */
520 	if (unlikely(num)) {
521 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
522 		for (i = 0; num > 0; i++, num--)
523 			*buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
524 	}
525 }
526 
527 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
528 /*    for area above 1st 4K of SRAM/reg space */
529 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
530 				int num)
531 {
532 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
533 	u32 dif_len = addr - aligned_addr;
534 	u32 i;
535 
536 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
537 
538 	if (num <= 0) {
539 		return;
540 	}
541 
542 	/* Write the first dword (or portion) byte by byte */
543 	if (unlikely(dif_len)) {
544 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545 		/* Start writing at aligned_addr + dif_len */
546 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
547 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
548 		aligned_addr += 4;
549 	}
550 
551 	/* Write all of the middle dwords as dwords, with auto-increment */
552 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
553 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
554 		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
555 
556 	/* Write the last dword (or portion) byte by byte */
557 	if (unlikely(num)) {
558 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
559 		for (i = 0; num > 0; i++, num--, buf++)
560 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
561 	}
562 }
563 
564 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
565 /*    for 1st 4K of SRAM/regs space */
566 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
567 			     int num)
568 {
569 	memcpy_toio((priv->hw_base + addr), buf, num);
570 }
571 
572 /* Set bit(s) in low 4K of SRAM/regs */
573 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
574 {
575 	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
576 }
577 
578 /* Clear bit(s) in low 4K of SRAM/regs */
579 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
580 {
581 	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
582 }
583 
584 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
585 {
586 	if (priv->status & STATUS_INT_ENABLED)
587 		return;
588 	priv->status |= STATUS_INT_ENABLED;
589 	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
590 }
591 
592 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
593 {
594 	if (!(priv->status & STATUS_INT_ENABLED))
595 		return;
596 	priv->status &= ~STATUS_INT_ENABLED;
597 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
598 }
599 
600 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
601 {
602 	unsigned long flags;
603 
604 	spin_lock_irqsave(&priv->irq_lock, flags);
605 	__ipw_enable_interrupts(priv);
606 	spin_unlock_irqrestore(&priv->irq_lock, flags);
607 }
608 
609 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
610 {
611 	unsigned long flags;
612 
613 	spin_lock_irqsave(&priv->irq_lock, flags);
614 	__ipw_disable_interrupts(priv);
615 	spin_unlock_irqrestore(&priv->irq_lock, flags);
616 }
617 
618 static char *ipw_error_desc(u32 val)
619 {
620 	switch (val) {
621 	case IPW_FW_ERROR_OK:
622 		return "ERROR_OK";
623 	case IPW_FW_ERROR_FAIL:
624 		return "ERROR_FAIL";
625 	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
626 		return "MEMORY_UNDERFLOW";
627 	case IPW_FW_ERROR_MEMORY_OVERFLOW:
628 		return "MEMORY_OVERFLOW";
629 	case IPW_FW_ERROR_BAD_PARAM:
630 		return "BAD_PARAM";
631 	case IPW_FW_ERROR_BAD_CHECKSUM:
632 		return "BAD_CHECKSUM";
633 	case IPW_FW_ERROR_NMI_INTERRUPT:
634 		return "NMI_INTERRUPT";
635 	case IPW_FW_ERROR_BAD_DATABASE:
636 		return "BAD_DATABASE";
637 	case IPW_FW_ERROR_ALLOC_FAIL:
638 		return "ALLOC_FAIL";
639 	case IPW_FW_ERROR_DMA_UNDERRUN:
640 		return "DMA_UNDERRUN";
641 	case IPW_FW_ERROR_DMA_STATUS:
642 		return "DMA_STATUS";
643 	case IPW_FW_ERROR_DINO_ERROR:
644 		return "DINO_ERROR";
645 	case IPW_FW_ERROR_EEPROM_ERROR:
646 		return "EEPROM_ERROR";
647 	case IPW_FW_ERROR_SYSASSERT:
648 		return "SYSASSERT";
649 	case IPW_FW_ERROR_FATAL_ERROR:
650 		return "FATAL_ERROR";
651 	default:
652 		return "UNKNOWN_ERROR";
653 	}
654 }
655 
656 static void ipw_dump_error_log(struct ipw_priv *priv,
657 			       struct ipw_fw_error *error)
658 {
659 	u32 i;
660 
661 	if (!error) {
662 		IPW_ERROR("Error allocating and capturing error log.  "
663 			  "Nothing to dump.\n");
664 		return;
665 	}
666 
667 	IPW_ERROR("Start IPW Error Log Dump:\n");
668 	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
669 		  error->status, error->config);
670 
671 	for (i = 0; i < error->elem_len; i++)
672 		IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
673 			  ipw_error_desc(error->elem[i].desc),
674 			  error->elem[i].time,
675 			  error->elem[i].blink1,
676 			  error->elem[i].blink2,
677 			  error->elem[i].link1,
678 			  error->elem[i].link2, error->elem[i].data);
679 	for (i = 0; i < error->log_len; i++)
680 		IPW_ERROR("%i\t0x%08x\t%i\n",
681 			  error->log[i].time,
682 			  error->log[i].data, error->log[i].event);
683 }
684 
685 static inline int ipw_is_init(struct ipw_priv *priv)
686 {
687 	return (priv->status & STATUS_INIT) ? 1 : 0;
688 }
689 
690 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
691 {
692 	u32 addr, field_info, field_len, field_count, total_len;
693 
694 	IPW_DEBUG_ORD("ordinal = %i\n", ord);
695 
696 	if (!priv || !val || !len) {
697 		IPW_DEBUG_ORD("Invalid argument\n");
698 		return -EINVAL;
699 	}
700 
701 	/* verify device ordinal tables have been initialized */
702 	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
703 		IPW_DEBUG_ORD("Access ordinals before initialization\n");
704 		return -EINVAL;
705 	}
706 
707 	switch (IPW_ORD_TABLE_ID_MASK & ord) {
708 	case IPW_ORD_TABLE_0_MASK:
709 		/*
710 		 * TABLE 0: Direct access to a table of 32 bit values
711 		 *
712 		 * This is a very simple table with the data directly
713 		 * read from the table
714 		 */
715 
716 		/* remove the table id from the ordinal */
717 		ord &= IPW_ORD_TABLE_VALUE_MASK;
718 
719 		/* boundary check */
720 		if (ord > priv->table0_len) {
721 			IPW_DEBUG_ORD("ordinal value (%i) longer then "
722 				      "max (%i)\n", ord, priv->table0_len);
723 			return -EINVAL;
724 		}
725 
726 		/* verify we have enough room to store the value */
727 		if (*len < sizeof(u32)) {
728 			IPW_DEBUG_ORD("ordinal buffer length too small, "
729 				      "need %zd\n", sizeof(u32));
730 			return -EINVAL;
731 		}
732 
733 		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
734 			      ord, priv->table0_addr + (ord << 2));
735 
736 		*len = sizeof(u32);
737 		ord <<= 2;
738 		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
739 		break;
740 
741 	case IPW_ORD_TABLE_1_MASK:
742 		/*
743 		 * TABLE 1: Indirect access to a table of 32 bit values
744 		 *
745 		 * This is a fairly large table of u32 values each
746 		 * representing starting addr for the data (which is
747 		 * also a u32)
748 		 */
749 
750 		/* remove the table id from the ordinal */
751 		ord &= IPW_ORD_TABLE_VALUE_MASK;
752 
753 		/* boundary check */
754 		if (ord > priv->table1_len) {
755 			IPW_DEBUG_ORD("ordinal value too long\n");
756 			return -EINVAL;
757 		}
758 
759 		/* verify we have enough room to store the value */
760 		if (*len < sizeof(u32)) {
761 			IPW_DEBUG_ORD("ordinal buffer length too small, "
762 				      "need %zd\n", sizeof(u32));
763 			return -EINVAL;
764 		}
765 
766 		*((u32 *) val) =
767 		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
768 		*len = sizeof(u32);
769 		break;
770 
771 	case IPW_ORD_TABLE_2_MASK:
772 		/*
773 		 * TABLE 2: Indirect access to a table of variable sized values
774 		 *
775 		 * This table consist of six values, each containing
776 		 *     - dword containing the starting offset of the data
777 		 *     - dword containing the lengh in the first 16bits
778 		 *       and the count in the second 16bits
779 		 */
780 
781 		/* remove the table id from the ordinal */
782 		ord &= IPW_ORD_TABLE_VALUE_MASK;
783 
784 		/* boundary check */
785 		if (ord > priv->table2_len) {
786 			IPW_DEBUG_ORD("ordinal value too long\n");
787 			return -EINVAL;
788 		}
789 
790 		/* get the address of statistic */
791 		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
792 
793 		/* get the second DW of statistics ;
794 		 * two 16-bit words - first is length, second is count */
795 		field_info =
796 		    ipw_read_reg32(priv,
797 				   priv->table2_addr + (ord << 3) +
798 				   sizeof(u32));
799 
800 		/* get each entry length */
801 		field_len = *((u16 *) & field_info);
802 
803 		/* get number of entries */
804 		field_count = *(((u16 *) & field_info) + 1);
805 
806 		/* abort if not enough memory */
807 		total_len = field_len * field_count;
808 		if (total_len > *len) {
809 			*len = total_len;
810 			return -EINVAL;
811 		}
812 
813 		*len = total_len;
814 		if (!total_len)
815 			return 0;
816 
817 		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
818 			      "field_info = 0x%08x\n",
819 			      addr, total_len, field_info);
820 		ipw_read_indirect(priv, addr, val, total_len);
821 		break;
822 
823 	default:
824 		IPW_DEBUG_ORD("Invalid ordinal!\n");
825 		return -EINVAL;
826 
827 	}
828 
829 	return 0;
830 }
831 
832 static void ipw_init_ordinals(struct ipw_priv *priv)
833 {
834 	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
835 	priv->table0_len = ipw_read32(priv, priv->table0_addr);
836 
837 	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
838 		      priv->table0_addr, priv->table0_len);
839 
840 	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
841 	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
842 
843 	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
844 		      priv->table1_addr, priv->table1_len);
845 
846 	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
847 	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
848 	priv->table2_len &= 0x0000ffff;	/* use first two bytes */
849 
850 	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
851 		      priv->table2_addr, priv->table2_len);
852 
853 }
854 
855 static u32 ipw_register_toggle(u32 reg)
856 {
857 	reg &= ~IPW_START_STANDBY;
858 	if (reg & IPW_GATE_ODMA)
859 		reg &= ~IPW_GATE_ODMA;
860 	if (reg & IPW_GATE_IDMA)
861 		reg &= ~IPW_GATE_IDMA;
862 	if (reg & IPW_GATE_ADMA)
863 		reg &= ~IPW_GATE_ADMA;
864 	return reg;
865 }
866 
867 /*
868  * LED behavior:
869  * - On radio ON, turn on any LEDs that require to be on during start
870  * - On initialization, start unassociated blink
871  * - On association, disable unassociated blink
872  * - On disassociation, start unassociated blink
873  * - On radio OFF, turn off any LEDs started during radio on
874  *
875  */
876 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
877 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
878 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
879 
880 static void ipw_led_link_on(struct ipw_priv *priv)
881 {
882 	unsigned long flags;
883 	u32 led;
884 
885 	/* If configured to not use LEDs, or nic_type is 1,
886 	 * then we don't toggle a LINK led */
887 	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
888 		return;
889 
890 	spin_lock_irqsave(&priv->lock, flags);
891 
892 	if (!(priv->status & STATUS_RF_KILL_MASK) &&
893 	    !(priv->status & STATUS_LED_LINK_ON)) {
894 		IPW_DEBUG_LED("Link LED On\n");
895 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
896 		led |= priv->led_association_on;
897 
898 		led = ipw_register_toggle(led);
899 
900 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
901 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
902 
903 		priv->status |= STATUS_LED_LINK_ON;
904 
905 		/* If we aren't associated, schedule turning the LED off */
906 		if (!(priv->status & STATUS_ASSOCIATED))
907 			schedule_delayed_work(&priv->led_link_off,
908 					      LD_TIME_LINK_ON);
909 	}
910 
911 	spin_unlock_irqrestore(&priv->lock, flags);
912 }
913 
914 static void ipw_bg_led_link_on(struct work_struct *work)
915 {
916 	struct ipw_priv *priv =
917 		container_of(work, struct ipw_priv, led_link_on.work);
918 	mutex_lock(&priv->mutex);
919 	ipw_led_link_on(priv);
920 	mutex_unlock(&priv->mutex);
921 }
922 
923 static void ipw_led_link_off(struct ipw_priv *priv)
924 {
925 	unsigned long flags;
926 	u32 led;
927 
928 	/* If configured not to use LEDs, or nic type is 1,
929 	 * then we don't goggle the LINK led. */
930 	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
931 		return;
932 
933 	spin_lock_irqsave(&priv->lock, flags);
934 
935 	if (priv->status & STATUS_LED_LINK_ON) {
936 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
937 		led &= priv->led_association_off;
938 		led = ipw_register_toggle(led);
939 
940 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
941 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
942 
943 		IPW_DEBUG_LED("Link LED Off\n");
944 
945 		priv->status &= ~STATUS_LED_LINK_ON;
946 
947 		/* If we aren't associated and the radio is on, schedule
948 		 * turning the LED on (blink while unassociated) */
949 		if (!(priv->status & STATUS_RF_KILL_MASK) &&
950 		    !(priv->status & STATUS_ASSOCIATED))
951 			schedule_delayed_work(&priv->led_link_on,
952 					      LD_TIME_LINK_OFF);
953 
954 	}
955 
956 	spin_unlock_irqrestore(&priv->lock, flags);
957 }
958 
959 static void ipw_bg_led_link_off(struct work_struct *work)
960 {
961 	struct ipw_priv *priv =
962 		container_of(work, struct ipw_priv, led_link_off.work);
963 	mutex_lock(&priv->mutex);
964 	ipw_led_link_off(priv);
965 	mutex_unlock(&priv->mutex);
966 }
967 
968 static void __ipw_led_activity_on(struct ipw_priv *priv)
969 {
970 	u32 led;
971 
972 	if (priv->config & CFG_NO_LED)
973 		return;
974 
975 	if (priv->status & STATUS_RF_KILL_MASK)
976 		return;
977 
978 	if (!(priv->status & STATUS_LED_ACT_ON)) {
979 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
980 		led |= priv->led_activity_on;
981 
982 		led = ipw_register_toggle(led);
983 
984 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
985 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
986 
987 		IPW_DEBUG_LED("Activity LED On\n");
988 
989 		priv->status |= STATUS_LED_ACT_ON;
990 
991 		cancel_delayed_work(&priv->led_act_off);
992 		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
993 	} else {
994 		/* Reschedule LED off for full time period */
995 		cancel_delayed_work(&priv->led_act_off);
996 		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
997 	}
998 }
999 
1000 #if 0
1001 void ipw_led_activity_on(struct ipw_priv *priv)
1002 {
1003 	unsigned long flags;
1004 	spin_lock_irqsave(&priv->lock, flags);
1005 	__ipw_led_activity_on(priv);
1006 	spin_unlock_irqrestore(&priv->lock, flags);
1007 }
1008 #endif  /*  0  */
1009 
1010 static void ipw_led_activity_off(struct ipw_priv *priv)
1011 {
1012 	unsigned long flags;
1013 	u32 led;
1014 
1015 	if (priv->config & CFG_NO_LED)
1016 		return;
1017 
1018 	spin_lock_irqsave(&priv->lock, flags);
1019 
1020 	if (priv->status & STATUS_LED_ACT_ON) {
1021 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
1022 		led &= priv->led_activity_off;
1023 
1024 		led = ipw_register_toggle(led);
1025 
1026 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1027 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
1028 
1029 		IPW_DEBUG_LED("Activity LED Off\n");
1030 
1031 		priv->status &= ~STATUS_LED_ACT_ON;
1032 	}
1033 
1034 	spin_unlock_irqrestore(&priv->lock, flags);
1035 }
1036 
1037 static void ipw_bg_led_activity_off(struct work_struct *work)
1038 {
1039 	struct ipw_priv *priv =
1040 		container_of(work, struct ipw_priv, led_act_off.work);
1041 	mutex_lock(&priv->mutex);
1042 	ipw_led_activity_off(priv);
1043 	mutex_unlock(&priv->mutex);
1044 }
1045 
1046 static void ipw_led_band_on(struct ipw_priv *priv)
1047 {
1048 	unsigned long flags;
1049 	u32 led;
1050 
1051 	/* Only nic type 1 supports mode LEDs */
1052 	if (priv->config & CFG_NO_LED ||
1053 	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1054 		return;
1055 
1056 	spin_lock_irqsave(&priv->lock, flags);
1057 
1058 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1059 	if (priv->assoc_network->mode == IEEE_A) {
1060 		led |= priv->led_ofdm_on;
1061 		led &= priv->led_association_off;
1062 		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1063 	} else if (priv->assoc_network->mode == IEEE_G) {
1064 		led |= priv->led_ofdm_on;
1065 		led |= priv->led_association_on;
1066 		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1067 	} else {
1068 		led &= priv->led_ofdm_off;
1069 		led |= priv->led_association_on;
1070 		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1071 	}
1072 
1073 	led = ipw_register_toggle(led);
1074 
1075 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1076 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1077 
1078 	spin_unlock_irqrestore(&priv->lock, flags);
1079 }
1080 
1081 static void ipw_led_band_off(struct ipw_priv *priv)
1082 {
1083 	unsigned long flags;
1084 	u32 led;
1085 
1086 	/* Only nic type 1 supports mode LEDs */
1087 	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1088 		return;
1089 
1090 	spin_lock_irqsave(&priv->lock, flags);
1091 
1092 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1093 	led &= priv->led_ofdm_off;
1094 	led &= priv->led_association_off;
1095 
1096 	led = ipw_register_toggle(led);
1097 
1098 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1099 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1100 
1101 	spin_unlock_irqrestore(&priv->lock, flags);
1102 }
1103 
1104 static void ipw_led_radio_on(struct ipw_priv *priv)
1105 {
1106 	ipw_led_link_on(priv);
1107 }
1108 
1109 static void ipw_led_radio_off(struct ipw_priv *priv)
1110 {
1111 	ipw_led_activity_off(priv);
1112 	ipw_led_link_off(priv);
1113 }
1114 
1115 static void ipw_led_link_up(struct ipw_priv *priv)
1116 {
1117 	/* Set the Link Led on for all nic types */
1118 	ipw_led_link_on(priv);
1119 }
1120 
1121 static void ipw_led_link_down(struct ipw_priv *priv)
1122 {
1123 	ipw_led_activity_off(priv);
1124 	ipw_led_link_off(priv);
1125 
1126 	if (priv->status & STATUS_RF_KILL_MASK)
1127 		ipw_led_radio_off(priv);
1128 }
1129 
1130 static void ipw_led_init(struct ipw_priv *priv)
1131 {
1132 	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1133 
1134 	/* Set the default PINs for the link and activity leds */
1135 	priv->led_activity_on = IPW_ACTIVITY_LED;
1136 	priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1137 
1138 	priv->led_association_on = IPW_ASSOCIATED_LED;
1139 	priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1140 
1141 	/* Set the default PINs for the OFDM leds */
1142 	priv->led_ofdm_on = IPW_OFDM_LED;
1143 	priv->led_ofdm_off = ~(IPW_OFDM_LED);
1144 
1145 	switch (priv->nic_type) {
1146 	case EEPROM_NIC_TYPE_1:
1147 		/* In this NIC type, the LEDs are reversed.... */
1148 		priv->led_activity_on = IPW_ASSOCIATED_LED;
1149 		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1150 		priv->led_association_on = IPW_ACTIVITY_LED;
1151 		priv->led_association_off = ~(IPW_ACTIVITY_LED);
1152 
1153 		if (!(priv->config & CFG_NO_LED))
1154 			ipw_led_band_on(priv);
1155 
1156 		/* And we don't blink link LEDs for this nic, so
1157 		 * just return here */
1158 		return;
1159 
1160 	case EEPROM_NIC_TYPE_3:
1161 	case EEPROM_NIC_TYPE_2:
1162 	case EEPROM_NIC_TYPE_4:
1163 	case EEPROM_NIC_TYPE_0:
1164 		break;
1165 
1166 	default:
1167 		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1168 			       priv->nic_type);
1169 		priv->nic_type = EEPROM_NIC_TYPE_0;
1170 		break;
1171 	}
1172 
1173 	if (!(priv->config & CFG_NO_LED)) {
1174 		if (priv->status & STATUS_ASSOCIATED)
1175 			ipw_led_link_on(priv);
1176 		else
1177 			ipw_led_link_off(priv);
1178 	}
1179 }
1180 
1181 static void ipw_led_shutdown(struct ipw_priv *priv)
1182 {
1183 	ipw_led_activity_off(priv);
1184 	ipw_led_link_off(priv);
1185 	ipw_led_band_off(priv);
1186 	cancel_delayed_work(&priv->led_link_on);
1187 	cancel_delayed_work(&priv->led_link_off);
1188 	cancel_delayed_work(&priv->led_act_off);
1189 }
1190 
1191 /*
1192  * The following adds a new attribute to the sysfs representation
1193  * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1194  * used for controlling the debug level.
1195  *
1196  * See the level definitions in ipw for details.
1197  */
1198 static ssize_t debug_level_show(struct device_driver *d, char *buf)
1199 {
1200 	return sprintf(buf, "0x%08X\n", ipw_debug_level);
1201 }
1202 
1203 static ssize_t debug_level_store(struct device_driver *d, const char *buf,
1204 				 size_t count)
1205 {
1206 	char *p = (char *)buf;
1207 	u32 val;
1208 
1209 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1210 		p++;
1211 		if (p[0] == 'x' || p[0] == 'X')
1212 			p++;
1213 		val = simple_strtoul(p, &p, 16);
1214 	} else
1215 		val = simple_strtoul(p, &p, 10);
1216 	if (p == buf)
1217 		printk(KERN_INFO DRV_NAME
1218 		       ": %s is not in hex or decimal form.\n", buf);
1219 	else
1220 		ipw_debug_level = val;
1221 
1222 	return strnlen(buf, count);
1223 }
1224 static DRIVER_ATTR_RW(debug_level);
1225 
1226 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1227 {
1228 	/* length = 1st dword in log */
1229 	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1230 }
1231 
1232 static void ipw_capture_event_log(struct ipw_priv *priv,
1233 				  u32 log_len, struct ipw_event *log)
1234 {
1235 	u32 base;
1236 
1237 	if (log_len) {
1238 		base = ipw_read32(priv, IPW_EVENT_LOG);
1239 		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1240 				  (u8 *) log, sizeof(*log) * log_len);
1241 	}
1242 }
1243 
1244 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1245 {
1246 	struct ipw_fw_error *error;
1247 	u32 log_len = ipw_get_event_log_len(priv);
1248 	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1249 	u32 elem_len = ipw_read_reg32(priv, base);
1250 
1251 	error = kmalloc(sizeof(*error) +
1252 			sizeof(*error->elem) * elem_len +
1253 			sizeof(*error->log) * log_len, GFP_ATOMIC);
1254 	if (!error) {
1255 		IPW_ERROR("Memory allocation for firmware error log "
1256 			  "failed.\n");
1257 		return NULL;
1258 	}
1259 	error->jiffies = jiffies;
1260 	error->status = priv->status;
1261 	error->config = priv->config;
1262 	error->elem_len = elem_len;
1263 	error->log_len = log_len;
1264 	error->elem = (struct ipw_error_elem *)error->payload;
1265 	error->log = (struct ipw_event *)(error->elem + elem_len);
1266 
1267 	ipw_capture_event_log(priv, log_len, error->log);
1268 
1269 	if (elem_len)
1270 		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1271 				  sizeof(*error->elem) * elem_len);
1272 
1273 	return error;
1274 }
1275 
1276 static ssize_t show_event_log(struct device *d,
1277 			      struct device_attribute *attr, char *buf)
1278 {
1279 	struct ipw_priv *priv = dev_get_drvdata(d);
1280 	u32 log_len = ipw_get_event_log_len(priv);
1281 	u32 log_size;
1282 	struct ipw_event *log;
1283 	u32 len = 0, i;
1284 
1285 	/* not using min() because of its strict type checking */
1286 	log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1287 			sizeof(*log) * log_len : PAGE_SIZE;
1288 	log = kzalloc(log_size, GFP_KERNEL);
1289 	if (!log) {
1290 		IPW_ERROR("Unable to allocate memory for log\n");
1291 		return 0;
1292 	}
1293 	log_len = log_size / sizeof(*log);
1294 	ipw_capture_event_log(priv, log_len, log);
1295 
1296 	len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1297 	for (i = 0; i < log_len; i++)
1298 		len += snprintf(buf + len, PAGE_SIZE - len,
1299 				"\n%08X%08X%08X",
1300 				log[i].time, log[i].event, log[i].data);
1301 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1302 	kfree(log);
1303 	return len;
1304 }
1305 
1306 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1307 
1308 static ssize_t show_error(struct device *d,
1309 			  struct device_attribute *attr, char *buf)
1310 {
1311 	struct ipw_priv *priv = dev_get_drvdata(d);
1312 	u32 len = 0, i;
1313 	if (!priv->error)
1314 		return 0;
1315 	len += snprintf(buf + len, PAGE_SIZE - len,
1316 			"%08lX%08X%08X%08X",
1317 			priv->error->jiffies,
1318 			priv->error->status,
1319 			priv->error->config, priv->error->elem_len);
1320 	for (i = 0; i < priv->error->elem_len; i++)
1321 		len += snprintf(buf + len, PAGE_SIZE - len,
1322 				"\n%08X%08X%08X%08X%08X%08X%08X",
1323 				priv->error->elem[i].time,
1324 				priv->error->elem[i].desc,
1325 				priv->error->elem[i].blink1,
1326 				priv->error->elem[i].blink2,
1327 				priv->error->elem[i].link1,
1328 				priv->error->elem[i].link2,
1329 				priv->error->elem[i].data);
1330 
1331 	len += snprintf(buf + len, PAGE_SIZE - len,
1332 			"\n%08X", priv->error->log_len);
1333 	for (i = 0; i < priv->error->log_len; i++)
1334 		len += snprintf(buf + len, PAGE_SIZE - len,
1335 				"\n%08X%08X%08X",
1336 				priv->error->log[i].time,
1337 				priv->error->log[i].event,
1338 				priv->error->log[i].data);
1339 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1340 	return len;
1341 }
1342 
1343 static ssize_t clear_error(struct device *d,
1344 			   struct device_attribute *attr,
1345 			   const char *buf, size_t count)
1346 {
1347 	struct ipw_priv *priv = dev_get_drvdata(d);
1348 
1349 	kfree(priv->error);
1350 	priv->error = NULL;
1351 	return count;
1352 }
1353 
1354 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1355 
1356 static ssize_t show_cmd_log(struct device *d,
1357 			    struct device_attribute *attr, char *buf)
1358 {
1359 	struct ipw_priv *priv = dev_get_drvdata(d);
1360 	u32 len = 0, i;
1361 	if (!priv->cmdlog)
1362 		return 0;
1363 	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1364 	     (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
1365 	     i = (i + 1) % priv->cmdlog_len) {
1366 		len +=
1367 		    snprintf(buf + len, PAGE_SIZE - len,
1368 			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1369 			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1370 			     priv->cmdlog[i].cmd.len);
1371 		len +=
1372 		    snprintk_buf(buf + len, PAGE_SIZE - len,
1373 				 (u8 *) priv->cmdlog[i].cmd.param,
1374 				 priv->cmdlog[i].cmd.len);
1375 		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1376 	}
1377 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1378 	return len;
1379 }
1380 
1381 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1382 
1383 #ifdef CONFIG_IPW2200_PROMISCUOUS
1384 static void ipw_prom_free(struct ipw_priv *priv);
1385 static int ipw_prom_alloc(struct ipw_priv *priv);
1386 static ssize_t store_rtap_iface(struct device *d,
1387 			 struct device_attribute *attr,
1388 			 const char *buf, size_t count)
1389 {
1390 	struct ipw_priv *priv = dev_get_drvdata(d);
1391 	int rc = 0;
1392 
1393 	if (count < 1)
1394 		return -EINVAL;
1395 
1396 	switch (buf[0]) {
1397 	case '0':
1398 		if (!rtap_iface)
1399 			return count;
1400 
1401 		if (netif_running(priv->prom_net_dev)) {
1402 			IPW_WARNING("Interface is up.  Cannot unregister.\n");
1403 			return count;
1404 		}
1405 
1406 		ipw_prom_free(priv);
1407 		rtap_iface = 0;
1408 		break;
1409 
1410 	case '1':
1411 		if (rtap_iface)
1412 			return count;
1413 
1414 		rc = ipw_prom_alloc(priv);
1415 		if (!rc)
1416 			rtap_iface = 1;
1417 		break;
1418 
1419 	default:
1420 		return -EINVAL;
1421 	}
1422 
1423 	if (rc) {
1424 		IPW_ERROR("Failed to register promiscuous network "
1425 			  "device (error %d).\n", rc);
1426 	}
1427 
1428 	return count;
1429 }
1430 
1431 static ssize_t show_rtap_iface(struct device *d,
1432 			struct device_attribute *attr,
1433 			char *buf)
1434 {
1435 	struct ipw_priv *priv = dev_get_drvdata(d);
1436 	if (rtap_iface)
1437 		return sprintf(buf, "%s", priv->prom_net_dev->name);
1438 	else {
1439 		buf[0] = '-';
1440 		buf[1] = '1';
1441 		buf[2] = '\0';
1442 		return 3;
1443 	}
1444 }
1445 
1446 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1447 		   store_rtap_iface);
1448 
1449 static ssize_t store_rtap_filter(struct device *d,
1450 			 struct device_attribute *attr,
1451 			 const char *buf, size_t count)
1452 {
1453 	struct ipw_priv *priv = dev_get_drvdata(d);
1454 
1455 	if (!priv->prom_priv) {
1456 		IPW_ERROR("Attempting to set filter without "
1457 			  "rtap_iface enabled.\n");
1458 		return -EPERM;
1459 	}
1460 
1461 	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1462 
1463 	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1464 		       BIT_ARG16(priv->prom_priv->filter));
1465 
1466 	return count;
1467 }
1468 
1469 static ssize_t show_rtap_filter(struct device *d,
1470 			struct device_attribute *attr,
1471 			char *buf)
1472 {
1473 	struct ipw_priv *priv = dev_get_drvdata(d);
1474 	return sprintf(buf, "0x%04X",
1475 		       priv->prom_priv ? priv->prom_priv->filter : 0);
1476 }
1477 
1478 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1479 		   store_rtap_filter);
1480 #endif
1481 
1482 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1483 			     char *buf)
1484 {
1485 	struct ipw_priv *priv = dev_get_drvdata(d);
1486 	return sprintf(buf, "%d\n", priv->ieee->scan_age);
1487 }
1488 
1489 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1490 			      const char *buf, size_t count)
1491 {
1492 	struct ipw_priv *priv = dev_get_drvdata(d);
1493 	struct net_device *dev = priv->net_dev;
1494 	char buffer[] = "00000000";
1495 	unsigned long len =
1496 	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1497 	unsigned long val;
1498 	char *p = buffer;
1499 
1500 	IPW_DEBUG_INFO("enter\n");
1501 
1502 	strncpy(buffer, buf, len);
1503 	buffer[len] = 0;
1504 
1505 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1506 		p++;
1507 		if (p[0] == 'x' || p[0] == 'X')
1508 			p++;
1509 		val = simple_strtoul(p, &p, 16);
1510 	} else
1511 		val = simple_strtoul(p, &p, 10);
1512 	if (p == buffer) {
1513 		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1514 	} else {
1515 		priv->ieee->scan_age = val;
1516 		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1517 	}
1518 
1519 	IPW_DEBUG_INFO("exit\n");
1520 	return len;
1521 }
1522 
1523 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1524 
1525 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1526 			char *buf)
1527 {
1528 	struct ipw_priv *priv = dev_get_drvdata(d);
1529 	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1530 }
1531 
1532 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1533 			 const char *buf, size_t count)
1534 {
1535 	struct ipw_priv *priv = dev_get_drvdata(d);
1536 
1537 	IPW_DEBUG_INFO("enter\n");
1538 
1539 	if (count == 0)
1540 		return 0;
1541 
1542 	if (*buf == 0) {
1543 		IPW_DEBUG_LED("Disabling LED control.\n");
1544 		priv->config |= CFG_NO_LED;
1545 		ipw_led_shutdown(priv);
1546 	} else {
1547 		IPW_DEBUG_LED("Enabling LED control.\n");
1548 		priv->config &= ~CFG_NO_LED;
1549 		ipw_led_init(priv);
1550 	}
1551 
1552 	IPW_DEBUG_INFO("exit\n");
1553 	return count;
1554 }
1555 
1556 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1557 
1558 static ssize_t show_status(struct device *d,
1559 			   struct device_attribute *attr, char *buf)
1560 {
1561 	struct ipw_priv *p = dev_get_drvdata(d);
1562 	return sprintf(buf, "0x%08x\n", (int)p->status);
1563 }
1564 
1565 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1566 
1567 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1568 			char *buf)
1569 {
1570 	struct ipw_priv *p = dev_get_drvdata(d);
1571 	return sprintf(buf, "0x%08x\n", (int)p->config);
1572 }
1573 
1574 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1575 
1576 static ssize_t show_nic_type(struct device *d,
1577 			     struct device_attribute *attr, char *buf)
1578 {
1579 	struct ipw_priv *priv = dev_get_drvdata(d);
1580 	return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1581 }
1582 
1583 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1584 
1585 static ssize_t show_ucode_version(struct device *d,
1586 				  struct device_attribute *attr, char *buf)
1587 {
1588 	u32 len = sizeof(u32), tmp = 0;
1589 	struct ipw_priv *p = dev_get_drvdata(d);
1590 
1591 	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1592 		return 0;
1593 
1594 	return sprintf(buf, "0x%08x\n", tmp);
1595 }
1596 
1597 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1598 
1599 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1600 			char *buf)
1601 {
1602 	u32 len = sizeof(u32), tmp = 0;
1603 	struct ipw_priv *p = dev_get_drvdata(d);
1604 
1605 	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1606 		return 0;
1607 
1608 	return sprintf(buf, "0x%08x\n", tmp);
1609 }
1610 
1611 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1612 
1613 /*
1614  * Add a device attribute to view/control the delay between eeprom
1615  * operations.
1616  */
1617 static ssize_t show_eeprom_delay(struct device *d,
1618 				 struct device_attribute *attr, char *buf)
1619 {
1620 	struct ipw_priv *p = dev_get_drvdata(d);
1621 	int n = p->eeprom_delay;
1622 	return sprintf(buf, "%i\n", n);
1623 }
1624 static ssize_t store_eeprom_delay(struct device *d,
1625 				  struct device_attribute *attr,
1626 				  const char *buf, size_t count)
1627 {
1628 	struct ipw_priv *p = dev_get_drvdata(d);
1629 	sscanf(buf, "%i", &p->eeprom_delay);
1630 	return strnlen(buf, count);
1631 }
1632 
1633 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1634 		   show_eeprom_delay, store_eeprom_delay);
1635 
1636 static ssize_t show_command_event_reg(struct device *d,
1637 				      struct device_attribute *attr, char *buf)
1638 {
1639 	u32 reg = 0;
1640 	struct ipw_priv *p = dev_get_drvdata(d);
1641 
1642 	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1643 	return sprintf(buf, "0x%08x\n", reg);
1644 }
1645 static ssize_t store_command_event_reg(struct device *d,
1646 				       struct device_attribute *attr,
1647 				       const char *buf, size_t count)
1648 {
1649 	u32 reg;
1650 	struct ipw_priv *p = dev_get_drvdata(d);
1651 
1652 	sscanf(buf, "%x", &reg);
1653 	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1654 	return strnlen(buf, count);
1655 }
1656 
1657 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1658 		   show_command_event_reg, store_command_event_reg);
1659 
1660 static ssize_t show_mem_gpio_reg(struct device *d,
1661 				 struct device_attribute *attr, char *buf)
1662 {
1663 	u32 reg = 0;
1664 	struct ipw_priv *p = dev_get_drvdata(d);
1665 
1666 	reg = ipw_read_reg32(p, 0x301100);
1667 	return sprintf(buf, "0x%08x\n", reg);
1668 }
1669 static ssize_t store_mem_gpio_reg(struct device *d,
1670 				  struct device_attribute *attr,
1671 				  const char *buf, size_t count)
1672 {
1673 	u32 reg;
1674 	struct ipw_priv *p = dev_get_drvdata(d);
1675 
1676 	sscanf(buf, "%x", &reg);
1677 	ipw_write_reg32(p, 0x301100, reg);
1678 	return strnlen(buf, count);
1679 }
1680 
1681 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1682 		   show_mem_gpio_reg, store_mem_gpio_reg);
1683 
1684 static ssize_t show_indirect_dword(struct device *d,
1685 				   struct device_attribute *attr, char *buf)
1686 {
1687 	u32 reg = 0;
1688 	struct ipw_priv *priv = dev_get_drvdata(d);
1689 
1690 	if (priv->status & STATUS_INDIRECT_DWORD)
1691 		reg = ipw_read_reg32(priv, priv->indirect_dword);
1692 	else
1693 		reg = 0;
1694 
1695 	return sprintf(buf, "0x%08x\n", reg);
1696 }
1697 static ssize_t store_indirect_dword(struct device *d,
1698 				    struct device_attribute *attr,
1699 				    const char *buf, size_t count)
1700 {
1701 	struct ipw_priv *priv = dev_get_drvdata(d);
1702 
1703 	sscanf(buf, "%x", &priv->indirect_dword);
1704 	priv->status |= STATUS_INDIRECT_DWORD;
1705 	return strnlen(buf, count);
1706 }
1707 
1708 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1709 		   show_indirect_dword, store_indirect_dword);
1710 
1711 static ssize_t show_indirect_byte(struct device *d,
1712 				  struct device_attribute *attr, char *buf)
1713 {
1714 	u8 reg = 0;
1715 	struct ipw_priv *priv = dev_get_drvdata(d);
1716 
1717 	if (priv->status & STATUS_INDIRECT_BYTE)
1718 		reg = ipw_read_reg8(priv, priv->indirect_byte);
1719 	else
1720 		reg = 0;
1721 
1722 	return sprintf(buf, "0x%02x\n", reg);
1723 }
1724 static ssize_t store_indirect_byte(struct device *d,
1725 				   struct device_attribute *attr,
1726 				   const char *buf, size_t count)
1727 {
1728 	struct ipw_priv *priv = dev_get_drvdata(d);
1729 
1730 	sscanf(buf, "%x", &priv->indirect_byte);
1731 	priv->status |= STATUS_INDIRECT_BYTE;
1732 	return strnlen(buf, count);
1733 }
1734 
1735 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1736 		   show_indirect_byte, store_indirect_byte);
1737 
1738 static ssize_t show_direct_dword(struct device *d,
1739 				 struct device_attribute *attr, char *buf)
1740 {
1741 	u32 reg = 0;
1742 	struct ipw_priv *priv = dev_get_drvdata(d);
1743 
1744 	if (priv->status & STATUS_DIRECT_DWORD)
1745 		reg = ipw_read32(priv, priv->direct_dword);
1746 	else
1747 		reg = 0;
1748 
1749 	return sprintf(buf, "0x%08x\n", reg);
1750 }
1751 static ssize_t store_direct_dword(struct device *d,
1752 				  struct device_attribute *attr,
1753 				  const char *buf, size_t count)
1754 {
1755 	struct ipw_priv *priv = dev_get_drvdata(d);
1756 
1757 	sscanf(buf, "%x", &priv->direct_dword);
1758 	priv->status |= STATUS_DIRECT_DWORD;
1759 	return strnlen(buf, count);
1760 }
1761 
1762 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1763 		   show_direct_dword, store_direct_dword);
1764 
1765 static int rf_kill_active(struct ipw_priv *priv)
1766 {
1767 	if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1768 		priv->status |= STATUS_RF_KILL_HW;
1769 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1770 	} else {
1771 		priv->status &= ~STATUS_RF_KILL_HW;
1772 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1773 	}
1774 
1775 	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1776 }
1777 
1778 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1779 			    char *buf)
1780 {
1781 	/* 0 - RF kill not enabled
1782 	   1 - SW based RF kill active (sysfs)
1783 	   2 - HW based RF kill active
1784 	   3 - Both HW and SW baed RF kill active */
1785 	struct ipw_priv *priv = dev_get_drvdata(d);
1786 	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1787 	    (rf_kill_active(priv) ? 0x2 : 0x0);
1788 	return sprintf(buf, "%i\n", val);
1789 }
1790 
1791 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1792 {
1793 	if ((disable_radio ? 1 : 0) ==
1794 	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1795 		return 0;
1796 
1797 	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
1798 			  disable_radio ? "OFF" : "ON");
1799 
1800 	if (disable_radio) {
1801 		priv->status |= STATUS_RF_KILL_SW;
1802 
1803 		cancel_delayed_work(&priv->request_scan);
1804 		cancel_delayed_work(&priv->request_direct_scan);
1805 		cancel_delayed_work(&priv->request_passive_scan);
1806 		cancel_delayed_work(&priv->scan_event);
1807 		schedule_work(&priv->down);
1808 	} else {
1809 		priv->status &= ~STATUS_RF_KILL_SW;
1810 		if (rf_kill_active(priv)) {
1811 			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1812 					  "disabled by HW switch\n");
1813 			/* Make sure the RF_KILL check timer is running */
1814 			cancel_delayed_work(&priv->rf_kill);
1815 			schedule_delayed_work(&priv->rf_kill,
1816 					      round_jiffies_relative(2 * HZ));
1817 		} else
1818 			schedule_work(&priv->up);
1819 	}
1820 
1821 	return 1;
1822 }
1823 
1824 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1825 			     const char *buf, size_t count)
1826 {
1827 	struct ipw_priv *priv = dev_get_drvdata(d);
1828 
1829 	ipw_radio_kill_sw(priv, buf[0] == '1');
1830 
1831 	return count;
1832 }
1833 
1834 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1835 
1836 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1837 			       char *buf)
1838 {
1839 	struct ipw_priv *priv = dev_get_drvdata(d);
1840 	int pos = 0, len = 0;
1841 	if (priv->config & CFG_SPEED_SCAN) {
1842 		while (priv->speed_scan[pos] != 0)
1843 			len += sprintf(&buf[len], "%d ",
1844 				       priv->speed_scan[pos++]);
1845 		return len + sprintf(&buf[len], "\n");
1846 	}
1847 
1848 	return sprintf(buf, "0\n");
1849 }
1850 
1851 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1852 				const char *buf, size_t count)
1853 {
1854 	struct ipw_priv *priv = dev_get_drvdata(d);
1855 	int channel, pos = 0;
1856 	const char *p = buf;
1857 
1858 	/* list of space separated channels to scan, optionally ending with 0 */
1859 	while ((channel = simple_strtol(p, NULL, 0))) {
1860 		if (pos == MAX_SPEED_SCAN - 1) {
1861 			priv->speed_scan[pos] = 0;
1862 			break;
1863 		}
1864 
1865 		if (libipw_is_valid_channel(priv->ieee, channel))
1866 			priv->speed_scan[pos++] = channel;
1867 		else
1868 			IPW_WARNING("Skipping invalid channel request: %d\n",
1869 				    channel);
1870 		p = strchr(p, ' ');
1871 		if (!p)
1872 			break;
1873 		while (*p == ' ' || *p == '\t')
1874 			p++;
1875 	}
1876 
1877 	if (pos == 0)
1878 		priv->config &= ~CFG_SPEED_SCAN;
1879 	else {
1880 		priv->speed_scan_pos = 0;
1881 		priv->config |= CFG_SPEED_SCAN;
1882 	}
1883 
1884 	return count;
1885 }
1886 
1887 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1888 		   store_speed_scan);
1889 
1890 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1891 			      char *buf)
1892 {
1893 	struct ipw_priv *priv = dev_get_drvdata(d);
1894 	return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1895 }
1896 
1897 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1898 			       const char *buf, size_t count)
1899 {
1900 	struct ipw_priv *priv = dev_get_drvdata(d);
1901 	if (buf[0] == '1')
1902 		priv->config |= CFG_NET_STATS;
1903 	else
1904 		priv->config &= ~CFG_NET_STATS;
1905 
1906 	return count;
1907 }
1908 
1909 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1910 		   show_net_stats, store_net_stats);
1911 
1912 static ssize_t show_channels(struct device *d,
1913 			     struct device_attribute *attr,
1914 			     char *buf)
1915 {
1916 	struct ipw_priv *priv = dev_get_drvdata(d);
1917 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1918 	int len = 0, i;
1919 
1920 	len = sprintf(&buf[len],
1921 		      "Displaying %d channels in 2.4Ghz band "
1922 		      "(802.11bg):\n", geo->bg_channels);
1923 
1924 	for (i = 0; i < geo->bg_channels; i++) {
1925 		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1926 			       geo->bg[i].channel,
1927 			       geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1928 			       " (radar spectrum)" : "",
1929 			       ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1930 				(geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1931 			       ? "" : ", IBSS",
1932 			       geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1933 			       "passive only" : "active/passive",
1934 			       geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1935 			       "B" : "B/G");
1936 	}
1937 
1938 	len += sprintf(&buf[len],
1939 		       "Displaying %d channels in 5.2Ghz band "
1940 		       "(802.11a):\n", geo->a_channels);
1941 	for (i = 0; i < geo->a_channels; i++) {
1942 		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1943 			       geo->a[i].channel,
1944 			       geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1945 			       " (radar spectrum)" : "",
1946 			       ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1947 				(geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1948 			       ? "" : ", IBSS",
1949 			       geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1950 			       "passive only" : "active/passive");
1951 	}
1952 
1953 	return len;
1954 }
1955 
1956 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1957 
1958 static void notify_wx_assoc_event(struct ipw_priv *priv)
1959 {
1960 	union iwreq_data wrqu;
1961 	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1962 	if (priv->status & STATUS_ASSOCIATED)
1963 		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1964 	else
1965 		eth_zero_addr(wrqu.ap_addr.sa_data);
1966 	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1967 }
1968 
1969 static void ipw_irq_tasklet(struct ipw_priv *priv)
1970 {
1971 	u32 inta, inta_mask, handled = 0;
1972 	unsigned long flags;
1973 	int rc = 0;
1974 
1975 	spin_lock_irqsave(&priv->irq_lock, flags);
1976 
1977 	inta = ipw_read32(priv, IPW_INTA_RW);
1978 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1979 
1980 	if (inta == 0xFFFFFFFF) {
1981 		/* Hardware disappeared */
1982 		IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1983 		/* Only handle the cached INTA values */
1984 		inta = 0;
1985 	}
1986 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
1987 
1988 	/* Add any cached INTA values that need to be handled */
1989 	inta |= priv->isr_inta;
1990 
1991 	spin_unlock_irqrestore(&priv->irq_lock, flags);
1992 
1993 	spin_lock_irqsave(&priv->lock, flags);
1994 
1995 	/* handle all the justifications for the interrupt */
1996 	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1997 		ipw_rx(priv);
1998 		handled |= IPW_INTA_BIT_RX_TRANSFER;
1999 	}
2000 
2001 	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
2002 		IPW_DEBUG_HC("Command completed.\n");
2003 		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
2004 		priv->status &= ~STATUS_HCMD_ACTIVE;
2005 		wake_up_interruptible(&priv->wait_command_queue);
2006 		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
2007 	}
2008 
2009 	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2010 		IPW_DEBUG_TX("TX_QUEUE_1\n");
2011 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2012 		handled |= IPW_INTA_BIT_TX_QUEUE_1;
2013 	}
2014 
2015 	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2016 		IPW_DEBUG_TX("TX_QUEUE_2\n");
2017 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2018 		handled |= IPW_INTA_BIT_TX_QUEUE_2;
2019 	}
2020 
2021 	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2022 		IPW_DEBUG_TX("TX_QUEUE_3\n");
2023 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2024 		handled |= IPW_INTA_BIT_TX_QUEUE_3;
2025 	}
2026 
2027 	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2028 		IPW_DEBUG_TX("TX_QUEUE_4\n");
2029 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2030 		handled |= IPW_INTA_BIT_TX_QUEUE_4;
2031 	}
2032 
2033 	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2034 		IPW_WARNING("STATUS_CHANGE\n");
2035 		handled |= IPW_INTA_BIT_STATUS_CHANGE;
2036 	}
2037 
2038 	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2039 		IPW_WARNING("TX_PERIOD_EXPIRED\n");
2040 		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2041 	}
2042 
2043 	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2044 		IPW_WARNING("HOST_CMD_DONE\n");
2045 		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2046 	}
2047 
2048 	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2049 		IPW_WARNING("FW_INITIALIZATION_DONE\n");
2050 		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2051 	}
2052 
2053 	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2054 		IPW_WARNING("PHY_OFF_DONE\n");
2055 		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2056 	}
2057 
2058 	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2059 		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2060 		priv->status |= STATUS_RF_KILL_HW;
2061 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2062 		wake_up_interruptible(&priv->wait_command_queue);
2063 		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2064 		cancel_delayed_work(&priv->request_scan);
2065 		cancel_delayed_work(&priv->request_direct_scan);
2066 		cancel_delayed_work(&priv->request_passive_scan);
2067 		cancel_delayed_work(&priv->scan_event);
2068 		schedule_work(&priv->link_down);
2069 		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2070 		handled |= IPW_INTA_BIT_RF_KILL_DONE;
2071 	}
2072 
2073 	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2074 		IPW_WARNING("Firmware error detected.  Restarting.\n");
2075 		if (priv->error) {
2076 			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2077 			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2078 				struct ipw_fw_error *error =
2079 				    ipw_alloc_error_log(priv);
2080 				ipw_dump_error_log(priv, error);
2081 				kfree(error);
2082 			}
2083 		} else {
2084 			priv->error = ipw_alloc_error_log(priv);
2085 			if (priv->error)
2086 				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2087 			else
2088 				IPW_DEBUG_FW("Error allocating sysfs 'error' "
2089 					     "log.\n");
2090 			if (ipw_debug_level & IPW_DL_FW_ERRORS)
2091 				ipw_dump_error_log(priv, priv->error);
2092 		}
2093 
2094 		/* XXX: If hardware encryption is for WPA/WPA2,
2095 		 * we have to notify the supplicant. */
2096 		if (priv->ieee->sec.encrypt) {
2097 			priv->status &= ~STATUS_ASSOCIATED;
2098 			notify_wx_assoc_event(priv);
2099 		}
2100 
2101 		/* Keep the restart process from trying to send host
2102 		 * commands by clearing the INIT status bit */
2103 		priv->status &= ~STATUS_INIT;
2104 
2105 		/* Cancel currently queued command. */
2106 		priv->status &= ~STATUS_HCMD_ACTIVE;
2107 		wake_up_interruptible(&priv->wait_command_queue);
2108 
2109 		schedule_work(&priv->adapter_restart);
2110 		handled |= IPW_INTA_BIT_FATAL_ERROR;
2111 	}
2112 
2113 	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2114 		IPW_ERROR("Parity error\n");
2115 		handled |= IPW_INTA_BIT_PARITY_ERROR;
2116 	}
2117 
2118 	if (handled != inta) {
2119 		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2120 	}
2121 
2122 	spin_unlock_irqrestore(&priv->lock, flags);
2123 
2124 	/* enable all interrupts */
2125 	ipw_enable_interrupts(priv);
2126 }
2127 
2128 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2129 static char *get_cmd_string(u8 cmd)
2130 {
2131 	switch (cmd) {
2132 		IPW_CMD(HOST_COMPLETE);
2133 		IPW_CMD(POWER_DOWN);
2134 		IPW_CMD(SYSTEM_CONFIG);
2135 		IPW_CMD(MULTICAST_ADDRESS);
2136 		IPW_CMD(SSID);
2137 		IPW_CMD(ADAPTER_ADDRESS);
2138 		IPW_CMD(PORT_TYPE);
2139 		IPW_CMD(RTS_THRESHOLD);
2140 		IPW_CMD(FRAG_THRESHOLD);
2141 		IPW_CMD(POWER_MODE);
2142 		IPW_CMD(WEP_KEY);
2143 		IPW_CMD(TGI_TX_KEY);
2144 		IPW_CMD(SCAN_REQUEST);
2145 		IPW_CMD(SCAN_REQUEST_EXT);
2146 		IPW_CMD(ASSOCIATE);
2147 		IPW_CMD(SUPPORTED_RATES);
2148 		IPW_CMD(SCAN_ABORT);
2149 		IPW_CMD(TX_FLUSH);
2150 		IPW_CMD(QOS_PARAMETERS);
2151 		IPW_CMD(DINO_CONFIG);
2152 		IPW_CMD(RSN_CAPABILITIES);
2153 		IPW_CMD(RX_KEY);
2154 		IPW_CMD(CARD_DISABLE);
2155 		IPW_CMD(SEED_NUMBER);
2156 		IPW_CMD(TX_POWER);
2157 		IPW_CMD(COUNTRY_INFO);
2158 		IPW_CMD(AIRONET_INFO);
2159 		IPW_CMD(AP_TX_POWER);
2160 		IPW_CMD(CCKM_INFO);
2161 		IPW_CMD(CCX_VER_INFO);
2162 		IPW_CMD(SET_CALIBRATION);
2163 		IPW_CMD(SENSITIVITY_CALIB);
2164 		IPW_CMD(RETRY_LIMIT);
2165 		IPW_CMD(IPW_PRE_POWER_DOWN);
2166 		IPW_CMD(VAP_BEACON_TEMPLATE);
2167 		IPW_CMD(VAP_DTIM_PERIOD);
2168 		IPW_CMD(EXT_SUPPORTED_RATES);
2169 		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2170 		IPW_CMD(VAP_QUIET_INTERVALS);
2171 		IPW_CMD(VAP_CHANNEL_SWITCH);
2172 		IPW_CMD(VAP_MANDATORY_CHANNELS);
2173 		IPW_CMD(VAP_CELL_PWR_LIMIT);
2174 		IPW_CMD(VAP_CF_PARAM_SET);
2175 		IPW_CMD(VAP_SET_BEACONING_STATE);
2176 		IPW_CMD(MEASUREMENT);
2177 		IPW_CMD(POWER_CAPABILITY);
2178 		IPW_CMD(SUPPORTED_CHANNELS);
2179 		IPW_CMD(TPC_REPORT);
2180 		IPW_CMD(WME_INFO);
2181 		IPW_CMD(PRODUCTION_COMMAND);
2182 	default:
2183 		return "UNKNOWN";
2184 	}
2185 }
2186 
2187 #define HOST_COMPLETE_TIMEOUT HZ
2188 
2189 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2190 {
2191 	int rc = 0;
2192 	unsigned long flags;
2193 	unsigned long now, end;
2194 
2195 	spin_lock_irqsave(&priv->lock, flags);
2196 	if (priv->status & STATUS_HCMD_ACTIVE) {
2197 		IPW_ERROR("Failed to send %s: Already sending a command.\n",
2198 			  get_cmd_string(cmd->cmd));
2199 		spin_unlock_irqrestore(&priv->lock, flags);
2200 		return -EAGAIN;
2201 	}
2202 
2203 	priv->status |= STATUS_HCMD_ACTIVE;
2204 
2205 	if (priv->cmdlog) {
2206 		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2207 		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2208 		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2209 		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2210 		       cmd->len);
2211 		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2212 	}
2213 
2214 	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2215 		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2216 		     priv->status);
2217 
2218 #ifndef DEBUG_CMD_WEP_KEY
2219 	if (cmd->cmd == IPW_CMD_WEP_KEY)
2220 		IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2221 	else
2222 #endif
2223 		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2224 
2225 	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2226 	if (rc) {
2227 		priv->status &= ~STATUS_HCMD_ACTIVE;
2228 		IPW_ERROR("Failed to send %s: Reason %d\n",
2229 			  get_cmd_string(cmd->cmd), rc);
2230 		spin_unlock_irqrestore(&priv->lock, flags);
2231 		goto exit;
2232 	}
2233 	spin_unlock_irqrestore(&priv->lock, flags);
2234 
2235 	now = jiffies;
2236 	end = now + HOST_COMPLETE_TIMEOUT;
2237 again:
2238 	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2239 					      !(priv->
2240 						status & STATUS_HCMD_ACTIVE),
2241 					      end - now);
2242 	if (rc < 0) {
2243 		now = jiffies;
2244 		if (time_before(now, end))
2245 			goto again;
2246 		rc = 0;
2247 	}
2248 
2249 	if (rc == 0) {
2250 		spin_lock_irqsave(&priv->lock, flags);
2251 		if (priv->status & STATUS_HCMD_ACTIVE) {
2252 			IPW_ERROR("Failed to send %s: Command timed out.\n",
2253 				  get_cmd_string(cmd->cmd));
2254 			priv->status &= ~STATUS_HCMD_ACTIVE;
2255 			spin_unlock_irqrestore(&priv->lock, flags);
2256 			rc = -EIO;
2257 			goto exit;
2258 		}
2259 		spin_unlock_irqrestore(&priv->lock, flags);
2260 	} else
2261 		rc = 0;
2262 
2263 	if (priv->status & STATUS_RF_KILL_HW) {
2264 		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2265 			  get_cmd_string(cmd->cmd));
2266 		rc = -EIO;
2267 		goto exit;
2268 	}
2269 
2270       exit:
2271 	if (priv->cmdlog) {
2272 		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2273 		priv->cmdlog_pos %= priv->cmdlog_len;
2274 	}
2275 	return rc;
2276 }
2277 
2278 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2279 {
2280 	struct host_cmd cmd = {
2281 		.cmd = command,
2282 	};
2283 
2284 	return __ipw_send_cmd(priv, &cmd);
2285 }
2286 
2287 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2288 			    void *data)
2289 {
2290 	struct host_cmd cmd = {
2291 		.cmd = command,
2292 		.len = len,
2293 		.param = data,
2294 	};
2295 
2296 	return __ipw_send_cmd(priv, &cmd);
2297 }
2298 
2299 static int ipw_send_host_complete(struct ipw_priv *priv)
2300 {
2301 	if (!priv) {
2302 		IPW_ERROR("Invalid args\n");
2303 		return -1;
2304 	}
2305 
2306 	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2307 }
2308 
2309 static int ipw_send_system_config(struct ipw_priv *priv)
2310 {
2311 	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2312 				sizeof(priv->sys_config),
2313 				&priv->sys_config);
2314 }
2315 
2316 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2317 {
2318 	if (!priv || !ssid) {
2319 		IPW_ERROR("Invalid args\n");
2320 		return -1;
2321 	}
2322 
2323 	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2324 				ssid);
2325 }
2326 
2327 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2328 {
2329 	if (!priv || !mac) {
2330 		IPW_ERROR("Invalid args\n");
2331 		return -1;
2332 	}
2333 
2334 	IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2335 		       priv->net_dev->name, mac);
2336 
2337 	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2338 }
2339 
2340 static void ipw_adapter_restart(void *adapter)
2341 {
2342 	struct ipw_priv *priv = adapter;
2343 
2344 	if (priv->status & STATUS_RF_KILL_MASK)
2345 		return;
2346 
2347 	ipw_down(priv);
2348 
2349 	if (priv->assoc_network &&
2350 	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2351 		ipw_remove_current_network(priv);
2352 
2353 	if (ipw_up(priv)) {
2354 		IPW_ERROR("Failed to up device\n");
2355 		return;
2356 	}
2357 }
2358 
2359 static void ipw_bg_adapter_restart(struct work_struct *work)
2360 {
2361 	struct ipw_priv *priv =
2362 		container_of(work, struct ipw_priv, adapter_restart);
2363 	mutex_lock(&priv->mutex);
2364 	ipw_adapter_restart(priv);
2365 	mutex_unlock(&priv->mutex);
2366 }
2367 
2368 static void ipw_abort_scan(struct ipw_priv *priv);
2369 
2370 #define IPW_SCAN_CHECK_WATCHDOG	(5 * HZ)
2371 
2372 static void ipw_scan_check(void *data)
2373 {
2374 	struct ipw_priv *priv = data;
2375 
2376 	if (priv->status & STATUS_SCAN_ABORTING) {
2377 		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2378 			       "adapter after (%dms).\n",
2379 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2380 		schedule_work(&priv->adapter_restart);
2381 	} else if (priv->status & STATUS_SCANNING) {
2382 		IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2383 			       "after (%dms).\n",
2384 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2385 		ipw_abort_scan(priv);
2386 		schedule_delayed_work(&priv->scan_check, HZ);
2387 	}
2388 }
2389 
2390 static void ipw_bg_scan_check(struct work_struct *work)
2391 {
2392 	struct ipw_priv *priv =
2393 		container_of(work, struct ipw_priv, scan_check.work);
2394 	mutex_lock(&priv->mutex);
2395 	ipw_scan_check(priv);
2396 	mutex_unlock(&priv->mutex);
2397 }
2398 
2399 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2400 				     struct ipw_scan_request_ext *request)
2401 {
2402 	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2403 				sizeof(*request), request);
2404 }
2405 
2406 static int ipw_send_scan_abort(struct ipw_priv *priv)
2407 {
2408 	if (!priv) {
2409 		IPW_ERROR("Invalid args\n");
2410 		return -1;
2411 	}
2412 
2413 	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2414 }
2415 
2416 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2417 {
2418 	struct ipw_sensitivity_calib calib = {
2419 		.beacon_rssi_raw = cpu_to_le16(sens),
2420 	};
2421 
2422 	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2423 				&calib);
2424 }
2425 
2426 static int ipw_send_associate(struct ipw_priv *priv,
2427 			      struct ipw_associate *associate)
2428 {
2429 	if (!priv || !associate) {
2430 		IPW_ERROR("Invalid args\n");
2431 		return -1;
2432 	}
2433 
2434 	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2435 				associate);
2436 }
2437 
2438 static int ipw_send_supported_rates(struct ipw_priv *priv,
2439 				    struct ipw_supported_rates *rates)
2440 {
2441 	if (!priv || !rates) {
2442 		IPW_ERROR("Invalid args\n");
2443 		return -1;
2444 	}
2445 
2446 	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2447 				rates);
2448 }
2449 
2450 static int ipw_set_random_seed(struct ipw_priv *priv)
2451 {
2452 	u32 val;
2453 
2454 	if (!priv) {
2455 		IPW_ERROR("Invalid args\n");
2456 		return -1;
2457 	}
2458 
2459 	get_random_bytes(&val, sizeof(val));
2460 
2461 	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2462 }
2463 
2464 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2465 {
2466 	__le32 v = cpu_to_le32(phy_off);
2467 	if (!priv) {
2468 		IPW_ERROR("Invalid args\n");
2469 		return -1;
2470 	}
2471 
2472 	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2473 }
2474 
2475 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2476 {
2477 	if (!priv || !power) {
2478 		IPW_ERROR("Invalid args\n");
2479 		return -1;
2480 	}
2481 
2482 	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2483 }
2484 
2485 static int ipw_set_tx_power(struct ipw_priv *priv)
2486 {
2487 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2488 	struct ipw_tx_power tx_power;
2489 	s8 max_power;
2490 	int i;
2491 
2492 	memset(&tx_power, 0, sizeof(tx_power));
2493 
2494 	/* configure device for 'G' band */
2495 	tx_power.ieee_mode = IPW_G_MODE;
2496 	tx_power.num_channels = geo->bg_channels;
2497 	for (i = 0; i < geo->bg_channels; i++) {
2498 		max_power = geo->bg[i].max_power;
2499 		tx_power.channels_tx_power[i].channel_number =
2500 		    geo->bg[i].channel;
2501 		tx_power.channels_tx_power[i].tx_power = max_power ?
2502 		    min(max_power, priv->tx_power) : priv->tx_power;
2503 	}
2504 	if (ipw_send_tx_power(priv, &tx_power))
2505 		return -EIO;
2506 
2507 	/* configure device to also handle 'B' band */
2508 	tx_power.ieee_mode = IPW_B_MODE;
2509 	if (ipw_send_tx_power(priv, &tx_power))
2510 		return -EIO;
2511 
2512 	/* configure device to also handle 'A' band */
2513 	if (priv->ieee->abg_true) {
2514 		tx_power.ieee_mode = IPW_A_MODE;
2515 		tx_power.num_channels = geo->a_channels;
2516 		for (i = 0; i < tx_power.num_channels; i++) {
2517 			max_power = geo->a[i].max_power;
2518 			tx_power.channels_tx_power[i].channel_number =
2519 			    geo->a[i].channel;
2520 			tx_power.channels_tx_power[i].tx_power = max_power ?
2521 			    min(max_power, priv->tx_power) : priv->tx_power;
2522 		}
2523 		if (ipw_send_tx_power(priv, &tx_power))
2524 			return -EIO;
2525 	}
2526 	return 0;
2527 }
2528 
2529 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2530 {
2531 	struct ipw_rts_threshold rts_threshold = {
2532 		.rts_threshold = cpu_to_le16(rts),
2533 	};
2534 
2535 	if (!priv) {
2536 		IPW_ERROR("Invalid args\n");
2537 		return -1;
2538 	}
2539 
2540 	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2541 				sizeof(rts_threshold), &rts_threshold);
2542 }
2543 
2544 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2545 {
2546 	struct ipw_frag_threshold frag_threshold = {
2547 		.frag_threshold = cpu_to_le16(frag),
2548 	};
2549 
2550 	if (!priv) {
2551 		IPW_ERROR("Invalid args\n");
2552 		return -1;
2553 	}
2554 
2555 	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2556 				sizeof(frag_threshold), &frag_threshold);
2557 }
2558 
2559 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2560 {
2561 	__le32 param;
2562 
2563 	if (!priv) {
2564 		IPW_ERROR("Invalid args\n");
2565 		return -1;
2566 	}
2567 
2568 	/* If on battery, set to 3, if AC set to CAM, else user
2569 	 * level */
2570 	switch (mode) {
2571 	case IPW_POWER_BATTERY:
2572 		param = cpu_to_le32(IPW_POWER_INDEX_3);
2573 		break;
2574 	case IPW_POWER_AC:
2575 		param = cpu_to_le32(IPW_POWER_MODE_CAM);
2576 		break;
2577 	default:
2578 		param = cpu_to_le32(mode);
2579 		break;
2580 	}
2581 
2582 	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2583 				&param);
2584 }
2585 
2586 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2587 {
2588 	struct ipw_retry_limit retry_limit = {
2589 		.short_retry_limit = slimit,
2590 		.long_retry_limit = llimit
2591 	};
2592 
2593 	if (!priv) {
2594 		IPW_ERROR("Invalid args\n");
2595 		return -1;
2596 	}
2597 
2598 	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2599 				&retry_limit);
2600 }
2601 
2602 /*
2603  * The IPW device contains a Microwire compatible EEPROM that stores
2604  * various data like the MAC address.  Usually the firmware has exclusive
2605  * access to the eeprom, but during device initialization (before the
2606  * device driver has sent the HostComplete command to the firmware) the
2607  * device driver has read access to the EEPROM by way of indirect addressing
2608  * through a couple of memory mapped registers.
2609  *
2610  * The following is a simplified implementation for pulling data out of the
2611  * the eeprom, along with some helper functions to find information in
2612  * the per device private data's copy of the eeprom.
2613  *
2614  * NOTE: To better understand how these functions work (i.e what is a chip
2615  *       select and why do have to keep driving the eeprom clock?), read
2616  *       just about any data sheet for a Microwire compatible EEPROM.
2617  */
2618 
2619 /* write a 32 bit value into the indirect accessor register */
2620 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2621 {
2622 	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2623 
2624 	/* the eeprom requires some time to complete the operation */
2625 	udelay(p->eeprom_delay);
2626 }
2627 
2628 /* perform a chip select operation */
2629 static void eeprom_cs(struct ipw_priv *priv)
2630 {
2631 	eeprom_write_reg(priv, 0);
2632 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2633 	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2634 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2635 }
2636 
2637 /* perform a chip select operation */
2638 static void eeprom_disable_cs(struct ipw_priv *priv)
2639 {
2640 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2641 	eeprom_write_reg(priv, 0);
2642 	eeprom_write_reg(priv, EEPROM_BIT_SK);
2643 }
2644 
2645 /* push a single bit down to the eeprom */
2646 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2647 {
2648 	int d = (bit ? EEPROM_BIT_DI : 0);
2649 	eeprom_write_reg(p, EEPROM_BIT_CS | d);
2650 	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2651 }
2652 
2653 /* push an opcode followed by an address down to the eeprom */
2654 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2655 {
2656 	int i;
2657 
2658 	eeprom_cs(priv);
2659 	eeprom_write_bit(priv, 1);
2660 	eeprom_write_bit(priv, op & 2);
2661 	eeprom_write_bit(priv, op & 1);
2662 	for (i = 7; i >= 0; i--) {
2663 		eeprom_write_bit(priv, addr & (1 << i));
2664 	}
2665 }
2666 
2667 /* pull 16 bits off the eeprom, one bit at a time */
2668 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2669 {
2670 	int i;
2671 	u16 r = 0;
2672 
2673 	/* Send READ Opcode */
2674 	eeprom_op(priv, EEPROM_CMD_READ, addr);
2675 
2676 	/* Send dummy bit */
2677 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2678 
2679 	/* Read the byte off the eeprom one bit at a time */
2680 	for (i = 0; i < 16; i++) {
2681 		u32 data = 0;
2682 		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2683 		eeprom_write_reg(priv, EEPROM_BIT_CS);
2684 		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2685 		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2686 	}
2687 
2688 	/* Send another dummy bit */
2689 	eeprom_write_reg(priv, 0);
2690 	eeprom_disable_cs(priv);
2691 
2692 	return r;
2693 }
2694 
2695 /* helper function for pulling the mac address out of the private */
2696 /* data's copy of the eeprom data                                 */
2697 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2698 {
2699 	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
2700 }
2701 
2702 static void ipw_read_eeprom(struct ipw_priv *priv)
2703 {
2704 	int i;
2705 	__le16 *eeprom = (__le16 *) priv->eeprom;
2706 
2707 	IPW_DEBUG_TRACE(">>\n");
2708 
2709 	/* read entire contents of eeprom into private buffer */
2710 	for (i = 0; i < 128; i++)
2711 		eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2712 
2713 	IPW_DEBUG_TRACE("<<\n");
2714 }
2715 
2716 /*
2717  * Either the device driver (i.e. the host) or the firmware can
2718  * load eeprom data into the designated region in SRAM.  If neither
2719  * happens then the FW will shutdown with a fatal error.
2720  *
2721  * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2722  * bit needs region of shared SRAM needs to be non-zero.
2723  */
2724 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2725 {
2726 	int i;
2727 
2728 	IPW_DEBUG_TRACE(">>\n");
2729 
2730 	/*
2731 	   If the data looks correct, then copy it to our private
2732 	   copy.  Otherwise let the firmware know to perform the operation
2733 	   on its own.
2734 	 */
2735 	if (priv->eeprom[EEPROM_VERSION] != 0) {
2736 		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2737 
2738 		/* write the eeprom data to sram */
2739 		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2740 			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2741 
2742 		/* Do not load eeprom data on fatal error or suspend */
2743 		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2744 	} else {
2745 		IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2746 
2747 		/* Load eeprom data on fatal error or suspend */
2748 		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2749 	}
2750 
2751 	IPW_DEBUG_TRACE("<<\n");
2752 }
2753 
2754 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2755 {
2756 	count >>= 2;
2757 	if (!count)
2758 		return;
2759 	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2760 	while (count--)
2761 		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2762 }
2763 
2764 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2765 {
2766 	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2767 			CB_NUMBER_OF_ELEMENTS_SMALL *
2768 			sizeof(struct command_block));
2769 }
2770 
2771 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2772 {				/* start dma engine but no transfers yet */
2773 
2774 	IPW_DEBUG_FW(">> :\n");
2775 
2776 	/* Start the dma */
2777 	ipw_fw_dma_reset_command_blocks(priv);
2778 
2779 	/* Write CB base address */
2780 	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2781 
2782 	IPW_DEBUG_FW("<< :\n");
2783 	return 0;
2784 }
2785 
2786 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2787 {
2788 	u32 control = 0;
2789 
2790 	IPW_DEBUG_FW(">> :\n");
2791 
2792 	/* set the Stop and Abort bit */
2793 	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2794 	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2795 	priv->sram_desc.last_cb_index = 0;
2796 
2797 	IPW_DEBUG_FW("<<\n");
2798 }
2799 
2800 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2801 					  struct command_block *cb)
2802 {
2803 	u32 address =
2804 	    IPW_SHARED_SRAM_DMA_CONTROL +
2805 	    (sizeof(struct command_block) * index);
2806 	IPW_DEBUG_FW(">> :\n");
2807 
2808 	ipw_write_indirect(priv, address, (u8 *) cb,
2809 			   (int)sizeof(struct command_block));
2810 
2811 	IPW_DEBUG_FW("<< :\n");
2812 	return 0;
2813 
2814 }
2815 
2816 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2817 {
2818 	u32 control = 0;
2819 	u32 index = 0;
2820 
2821 	IPW_DEBUG_FW(">> :\n");
2822 
2823 	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2824 		ipw_fw_dma_write_command_block(priv, index,
2825 					       &priv->sram_desc.cb_list[index]);
2826 
2827 	/* Enable the DMA in the CSR register */
2828 	ipw_clear_bit(priv, IPW_RESET_REG,
2829 		      IPW_RESET_REG_MASTER_DISABLED |
2830 		      IPW_RESET_REG_STOP_MASTER);
2831 
2832 	/* Set the Start bit. */
2833 	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2834 	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2835 
2836 	IPW_DEBUG_FW("<< :\n");
2837 	return 0;
2838 }
2839 
2840 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2841 {
2842 	u32 address;
2843 	u32 register_value = 0;
2844 	u32 cb_fields_address = 0;
2845 
2846 	IPW_DEBUG_FW(">> :\n");
2847 	address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2848 	IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2849 
2850 	/* Read the DMA Controlor register */
2851 	register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2852 	IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2853 
2854 	/* Print the CB values */
2855 	cb_fields_address = address;
2856 	register_value = ipw_read_reg32(priv, cb_fields_address);
2857 	IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2858 
2859 	cb_fields_address += sizeof(u32);
2860 	register_value = ipw_read_reg32(priv, cb_fields_address);
2861 	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2862 
2863 	cb_fields_address += sizeof(u32);
2864 	register_value = ipw_read_reg32(priv, cb_fields_address);
2865 	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2866 			  register_value);
2867 
2868 	cb_fields_address += sizeof(u32);
2869 	register_value = ipw_read_reg32(priv, cb_fields_address);
2870 	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2871 
2872 	IPW_DEBUG_FW(">> :\n");
2873 }
2874 
2875 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2876 {
2877 	u32 current_cb_address = 0;
2878 	u32 current_cb_index = 0;
2879 
2880 	IPW_DEBUG_FW("<< :\n");
2881 	current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2882 
2883 	current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2884 	    sizeof(struct command_block);
2885 
2886 	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2887 			  current_cb_index, current_cb_address);
2888 
2889 	IPW_DEBUG_FW(">> :\n");
2890 	return current_cb_index;
2891 
2892 }
2893 
2894 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2895 					u32 src_address,
2896 					u32 dest_address,
2897 					u32 length,
2898 					int interrupt_enabled, int is_last)
2899 {
2900 
2901 	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2902 	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2903 	    CB_DEST_SIZE_LONG;
2904 	struct command_block *cb;
2905 	u32 last_cb_element = 0;
2906 
2907 	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2908 			  src_address, dest_address, length);
2909 
2910 	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2911 		return -1;
2912 
2913 	last_cb_element = priv->sram_desc.last_cb_index;
2914 	cb = &priv->sram_desc.cb_list[last_cb_element];
2915 	priv->sram_desc.last_cb_index++;
2916 
2917 	/* Calculate the new CB control word */
2918 	if (interrupt_enabled)
2919 		control |= CB_INT_ENABLED;
2920 
2921 	if (is_last)
2922 		control |= CB_LAST_VALID;
2923 
2924 	control |= length;
2925 
2926 	/* Calculate the CB Element's checksum value */
2927 	cb->status = control ^ src_address ^ dest_address;
2928 
2929 	/* Copy the Source and Destination addresses */
2930 	cb->dest_addr = dest_address;
2931 	cb->source_addr = src_address;
2932 
2933 	/* Copy the Control Word last */
2934 	cb->control = control;
2935 
2936 	return 0;
2937 }
2938 
2939 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2940 				 int nr, u32 dest_address, u32 len)
2941 {
2942 	int ret, i;
2943 	u32 size;
2944 
2945 	IPW_DEBUG_FW(">>\n");
2946 	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2947 			  nr, dest_address, len);
2948 
2949 	for (i = 0; i < nr; i++) {
2950 		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2951 		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2952 						   dest_address +
2953 						   i * CB_MAX_LENGTH, size,
2954 						   0, 0);
2955 		if (ret) {
2956 			IPW_DEBUG_FW_INFO(": Failed\n");
2957 			return -1;
2958 		} else
2959 			IPW_DEBUG_FW_INFO(": Added new cb\n");
2960 	}
2961 
2962 	IPW_DEBUG_FW("<<\n");
2963 	return 0;
2964 }
2965 
2966 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2967 {
2968 	u32 current_index = 0, previous_index;
2969 	u32 watchdog = 0;
2970 
2971 	IPW_DEBUG_FW(">> :\n");
2972 
2973 	current_index = ipw_fw_dma_command_block_index(priv);
2974 	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2975 			  (int)priv->sram_desc.last_cb_index);
2976 
2977 	while (current_index < priv->sram_desc.last_cb_index) {
2978 		udelay(50);
2979 		previous_index = current_index;
2980 		current_index = ipw_fw_dma_command_block_index(priv);
2981 
2982 		if (previous_index < current_index) {
2983 			watchdog = 0;
2984 			continue;
2985 		}
2986 		if (++watchdog > 400) {
2987 			IPW_DEBUG_FW_INFO("Timeout\n");
2988 			ipw_fw_dma_dump_command_block(priv);
2989 			ipw_fw_dma_abort(priv);
2990 			return -1;
2991 		}
2992 	}
2993 
2994 	ipw_fw_dma_abort(priv);
2995 
2996 	/*Disable the DMA in the CSR register */
2997 	ipw_set_bit(priv, IPW_RESET_REG,
2998 		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2999 
3000 	IPW_DEBUG_FW("<< dmaWaitSync\n");
3001 	return 0;
3002 }
3003 
3004 static void ipw_remove_current_network(struct ipw_priv *priv)
3005 {
3006 	struct list_head *element, *safe;
3007 	struct libipw_network *network = NULL;
3008 	unsigned long flags;
3009 
3010 	spin_lock_irqsave(&priv->ieee->lock, flags);
3011 	list_for_each_safe(element, safe, &priv->ieee->network_list) {
3012 		network = list_entry(element, struct libipw_network, list);
3013 		if (ether_addr_equal(network->bssid, priv->bssid)) {
3014 			list_del(element);
3015 			list_add_tail(&network->list,
3016 				      &priv->ieee->network_free_list);
3017 		}
3018 	}
3019 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
3020 }
3021 
3022 /**
3023  * Check that card is still alive.
3024  * Reads debug register from domain0.
3025  * If card is present, pre-defined value should
3026  * be found there.
3027  *
3028  * @param priv
3029  * @return 1 if card is present, 0 otherwise
3030  */
3031 static inline int ipw_alive(struct ipw_priv *priv)
3032 {
3033 	return ipw_read32(priv, 0x90) == 0xd55555d5;
3034 }
3035 
3036 /* timeout in msec, attempted in 10-msec quanta */
3037 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3038 			       int timeout)
3039 {
3040 	int i = 0;
3041 
3042 	do {
3043 		if ((ipw_read32(priv, addr) & mask) == mask)
3044 			return i;
3045 		mdelay(10);
3046 		i += 10;
3047 	} while (i < timeout);
3048 
3049 	return -ETIME;
3050 }
3051 
3052 /* These functions load the firmware and micro code for the operation of
3053  * the ipw hardware.  It assumes the buffer has all the bits for the
3054  * image and the caller is handling the memory allocation and clean up.
3055  */
3056 
3057 static int ipw_stop_master(struct ipw_priv *priv)
3058 {
3059 	int rc;
3060 
3061 	IPW_DEBUG_TRACE(">>\n");
3062 	/* stop master. typical delay - 0 */
3063 	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3064 
3065 	/* timeout is in msec, polled in 10-msec quanta */
3066 	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3067 			  IPW_RESET_REG_MASTER_DISABLED, 100);
3068 	if (rc < 0) {
3069 		IPW_ERROR("wait for stop master failed after 100ms\n");
3070 		return -1;
3071 	}
3072 
3073 	IPW_DEBUG_INFO("stop master %dms\n", rc);
3074 
3075 	return rc;
3076 }
3077 
3078 static void ipw_arc_release(struct ipw_priv *priv)
3079 {
3080 	IPW_DEBUG_TRACE(">>\n");
3081 	mdelay(5);
3082 
3083 	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3084 
3085 	/* no one knows timing, for safety add some delay */
3086 	mdelay(5);
3087 }
3088 
3089 struct fw_chunk {
3090 	__le32 address;
3091 	__le32 length;
3092 };
3093 
3094 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3095 {
3096 	int rc = 0, i, addr;
3097 	u8 cr = 0;
3098 	__le16 *image;
3099 
3100 	image = (__le16 *) data;
3101 
3102 	IPW_DEBUG_TRACE(">>\n");
3103 
3104 	rc = ipw_stop_master(priv);
3105 
3106 	if (rc < 0)
3107 		return rc;
3108 
3109 	for (addr = IPW_SHARED_LOWER_BOUND;
3110 	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3111 		ipw_write32(priv, addr, 0);
3112 	}
3113 
3114 	/* no ucode (yet) */
3115 	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3116 	/* destroy DMA queues */
3117 	/* reset sequence */
3118 
3119 	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3120 	ipw_arc_release(priv);
3121 	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3122 	mdelay(1);
3123 
3124 	/* reset PHY */
3125 	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3126 	mdelay(1);
3127 
3128 	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3129 	mdelay(1);
3130 
3131 	/* enable ucode store */
3132 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3133 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3134 	mdelay(1);
3135 
3136 	/* write ucode */
3137 	/**
3138 	 * @bug
3139 	 * Do NOT set indirect address register once and then
3140 	 * store data to indirect data register in the loop.
3141 	 * It seems very reasonable, but in this case DINO do not
3142 	 * accept ucode. It is essential to set address each time.
3143 	 */
3144 	/* load new ipw uCode */
3145 	for (i = 0; i < len / 2; i++)
3146 		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3147 				le16_to_cpu(image[i]));
3148 
3149 	/* enable DINO */
3150 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3151 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3152 
3153 	/* this is where the igx / win driver deveates from the VAP driver. */
3154 
3155 	/* wait for alive response */
3156 	for (i = 0; i < 100; i++) {
3157 		/* poll for incoming data */
3158 		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3159 		if (cr & DINO_RXFIFO_DATA)
3160 			break;
3161 		mdelay(1);
3162 	}
3163 
3164 	if (cr & DINO_RXFIFO_DATA) {
3165 		/* alive_command_responce size is NOT multiple of 4 */
3166 		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3167 
3168 		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3169 			response_buffer[i] =
3170 			    cpu_to_le32(ipw_read_reg32(priv,
3171 						       IPW_BASEBAND_RX_FIFO_READ));
3172 		memcpy(&priv->dino_alive, response_buffer,
3173 		       sizeof(priv->dino_alive));
3174 		if (priv->dino_alive.alive_command == 1
3175 		    && priv->dino_alive.ucode_valid == 1) {
3176 			rc = 0;
3177 			IPW_DEBUG_INFO
3178 			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3179 			     "of %02d/%02d/%02d %02d:%02d\n",
3180 			     priv->dino_alive.software_revision,
3181 			     priv->dino_alive.software_revision,
3182 			     priv->dino_alive.device_identifier,
3183 			     priv->dino_alive.device_identifier,
3184 			     priv->dino_alive.time_stamp[0],
3185 			     priv->dino_alive.time_stamp[1],
3186 			     priv->dino_alive.time_stamp[2],
3187 			     priv->dino_alive.time_stamp[3],
3188 			     priv->dino_alive.time_stamp[4]);
3189 		} else {
3190 			IPW_DEBUG_INFO("Microcode is not alive\n");
3191 			rc = -EINVAL;
3192 		}
3193 	} else {
3194 		IPW_DEBUG_INFO("No alive response from DINO\n");
3195 		rc = -ETIME;
3196 	}
3197 
3198 	/* disable DINO, otherwise for some reason
3199 	   firmware have problem getting alive resp. */
3200 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3201 
3202 	return rc;
3203 }
3204 
3205 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3206 {
3207 	int ret = -1;
3208 	int offset = 0;
3209 	struct fw_chunk *chunk;
3210 	int total_nr = 0;
3211 	int i;
3212 	struct pci_pool *pool;
3213 	void **virts;
3214 	dma_addr_t *phys;
3215 
3216 	IPW_DEBUG_TRACE("<< :\n");
3217 
3218 	virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3219 			GFP_KERNEL);
3220 	if (!virts)
3221 		return -ENOMEM;
3222 
3223 	phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3224 			GFP_KERNEL);
3225 	if (!phys) {
3226 		kfree(virts);
3227 		return -ENOMEM;
3228 	}
3229 	pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3230 	if (!pool) {
3231 		IPW_ERROR("pci_pool_create failed\n");
3232 		kfree(phys);
3233 		kfree(virts);
3234 		return -ENOMEM;
3235 	}
3236 
3237 	/* Start the Dma */
3238 	ret = ipw_fw_dma_enable(priv);
3239 
3240 	/* the DMA is already ready this would be a bug. */
3241 	BUG_ON(priv->sram_desc.last_cb_index > 0);
3242 
3243 	do {
3244 		u32 chunk_len;
3245 		u8 *start;
3246 		int size;
3247 		int nr = 0;
3248 
3249 		chunk = (struct fw_chunk *)(data + offset);
3250 		offset += sizeof(struct fw_chunk);
3251 		chunk_len = le32_to_cpu(chunk->length);
3252 		start = data + offset;
3253 
3254 		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3255 		for (i = 0; i < nr; i++) {
3256 			virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3257 							 &phys[total_nr]);
3258 			if (!virts[total_nr]) {
3259 				ret = -ENOMEM;
3260 				goto out;
3261 			}
3262 			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3263 				     CB_MAX_LENGTH);
3264 			memcpy(virts[total_nr], start, size);
3265 			start += size;
3266 			total_nr++;
3267 			/* We don't support fw chunk larger than 64*8K */
3268 			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3269 		}
3270 
3271 		/* build DMA packet and queue up for sending */
3272 		/* dma to chunk->address, the chunk->length bytes from data +
3273 		 * offeset*/
3274 		/* Dma loading */
3275 		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3276 					    nr, le32_to_cpu(chunk->address),
3277 					    chunk_len);
3278 		if (ret) {
3279 			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3280 			goto out;
3281 		}
3282 
3283 		offset += chunk_len;
3284 	} while (offset < len);
3285 
3286 	/* Run the DMA and wait for the answer */
3287 	ret = ipw_fw_dma_kick(priv);
3288 	if (ret) {
3289 		IPW_ERROR("dmaKick Failed\n");
3290 		goto out;
3291 	}
3292 
3293 	ret = ipw_fw_dma_wait(priv);
3294 	if (ret) {
3295 		IPW_ERROR("dmaWaitSync Failed\n");
3296 		goto out;
3297 	}
3298  out:
3299 	for (i = 0; i < total_nr; i++)
3300 		pci_pool_free(pool, virts[i], phys[i]);
3301 
3302 	pci_pool_destroy(pool);
3303 	kfree(phys);
3304 	kfree(virts);
3305 
3306 	return ret;
3307 }
3308 
3309 /* stop nic */
3310 static int ipw_stop_nic(struct ipw_priv *priv)
3311 {
3312 	int rc = 0;
3313 
3314 	/* stop */
3315 	ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3316 
3317 	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3318 			  IPW_RESET_REG_MASTER_DISABLED, 500);
3319 	if (rc < 0) {
3320 		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3321 		return rc;
3322 	}
3323 
3324 	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3325 
3326 	return rc;
3327 }
3328 
3329 static void ipw_start_nic(struct ipw_priv *priv)
3330 {
3331 	IPW_DEBUG_TRACE(">>\n");
3332 
3333 	/* prvHwStartNic  release ARC */
3334 	ipw_clear_bit(priv, IPW_RESET_REG,
3335 		      IPW_RESET_REG_MASTER_DISABLED |
3336 		      IPW_RESET_REG_STOP_MASTER |
3337 		      CBD_RESET_REG_PRINCETON_RESET);
3338 
3339 	/* enable power management */
3340 	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3341 		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3342 
3343 	IPW_DEBUG_TRACE("<<\n");
3344 }
3345 
3346 static int ipw_init_nic(struct ipw_priv *priv)
3347 {
3348 	int rc;
3349 
3350 	IPW_DEBUG_TRACE(">>\n");
3351 	/* reset */
3352 	/*prvHwInitNic */
3353 	/* set "initialization complete" bit to move adapter to D0 state */
3354 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3355 
3356 	/* low-level PLL activation */
3357 	ipw_write32(priv, IPW_READ_INT_REGISTER,
3358 		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3359 
3360 	/* wait for clock stabilization */
3361 	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3362 			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3363 	if (rc < 0)
3364 		IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3365 
3366 	/* assert SW reset */
3367 	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3368 
3369 	udelay(10);
3370 
3371 	/* set "initialization complete" bit to move adapter to D0 state */
3372 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3373 
3374 	IPW_DEBUG_TRACE(">>\n");
3375 	return 0;
3376 }
3377 
3378 /* Call this function from process context, it will sleep in request_firmware.
3379  * Probe is an ok place to call this from.
3380  */
3381 static int ipw_reset_nic(struct ipw_priv *priv)
3382 {
3383 	int rc = 0;
3384 	unsigned long flags;
3385 
3386 	IPW_DEBUG_TRACE(">>\n");
3387 
3388 	rc = ipw_init_nic(priv);
3389 
3390 	spin_lock_irqsave(&priv->lock, flags);
3391 	/* Clear the 'host command active' bit... */
3392 	priv->status &= ~STATUS_HCMD_ACTIVE;
3393 	wake_up_interruptible(&priv->wait_command_queue);
3394 	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3395 	wake_up_interruptible(&priv->wait_state);
3396 	spin_unlock_irqrestore(&priv->lock, flags);
3397 
3398 	IPW_DEBUG_TRACE("<<\n");
3399 	return rc;
3400 }
3401 
3402 
3403 struct ipw_fw {
3404 	__le32 ver;
3405 	__le32 boot_size;
3406 	__le32 ucode_size;
3407 	__le32 fw_size;
3408 	u8 data[0];
3409 };
3410 
3411 static int ipw_get_fw(struct ipw_priv *priv,
3412 		      const struct firmware **raw, const char *name)
3413 {
3414 	struct ipw_fw *fw;
3415 	int rc;
3416 
3417 	/* ask firmware_class module to get the boot firmware off disk */
3418 	rc = request_firmware(raw, name, &priv->pci_dev->dev);
3419 	if (rc < 0) {
3420 		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3421 		return rc;
3422 	}
3423 
3424 	if ((*raw)->size < sizeof(*fw)) {
3425 		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3426 		return -EINVAL;
3427 	}
3428 
3429 	fw = (void *)(*raw)->data;
3430 
3431 	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3432 	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3433 		IPW_ERROR("%s is too small or corrupt (%zd)\n",
3434 			  name, (*raw)->size);
3435 		return -EINVAL;
3436 	}
3437 
3438 	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3439 		       name,
3440 		       le32_to_cpu(fw->ver) >> 16,
3441 		       le32_to_cpu(fw->ver) & 0xff,
3442 		       (*raw)->size - sizeof(*fw));
3443 	return 0;
3444 }
3445 
3446 #define IPW_RX_BUF_SIZE (3000)
3447 
3448 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3449 				      struct ipw_rx_queue *rxq)
3450 {
3451 	unsigned long flags;
3452 	int i;
3453 
3454 	spin_lock_irqsave(&rxq->lock, flags);
3455 
3456 	INIT_LIST_HEAD(&rxq->rx_free);
3457 	INIT_LIST_HEAD(&rxq->rx_used);
3458 
3459 	/* Fill the rx_used queue with _all_ of the Rx buffers */
3460 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3461 		/* In the reset function, these buffers may have been allocated
3462 		 * to an SKB, so we need to unmap and free potential storage */
3463 		if (rxq->pool[i].skb != NULL) {
3464 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3465 					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3466 			dev_kfree_skb(rxq->pool[i].skb);
3467 			rxq->pool[i].skb = NULL;
3468 		}
3469 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3470 	}
3471 
3472 	/* Set us so that we have processed and used all buffers, but have
3473 	 * not restocked the Rx queue with fresh buffers */
3474 	rxq->read = rxq->write = 0;
3475 	rxq->free_count = 0;
3476 	spin_unlock_irqrestore(&rxq->lock, flags);
3477 }
3478 
3479 #ifdef CONFIG_PM
3480 static int fw_loaded = 0;
3481 static const struct firmware *raw = NULL;
3482 
3483 static void free_firmware(void)
3484 {
3485 	if (fw_loaded) {
3486 		release_firmware(raw);
3487 		raw = NULL;
3488 		fw_loaded = 0;
3489 	}
3490 }
3491 #else
3492 #define free_firmware() do {} while (0)
3493 #endif
3494 
3495 static int ipw_load(struct ipw_priv *priv)
3496 {
3497 #ifndef CONFIG_PM
3498 	const struct firmware *raw = NULL;
3499 #endif
3500 	struct ipw_fw *fw;
3501 	u8 *boot_img, *ucode_img, *fw_img;
3502 	u8 *name = NULL;
3503 	int rc = 0, retries = 3;
3504 
3505 	switch (priv->ieee->iw_mode) {
3506 	case IW_MODE_ADHOC:
3507 		name = "ipw2200-ibss.fw";
3508 		break;
3509 #ifdef CONFIG_IPW2200_MONITOR
3510 	case IW_MODE_MONITOR:
3511 		name = "ipw2200-sniffer.fw";
3512 		break;
3513 #endif
3514 	case IW_MODE_INFRA:
3515 		name = "ipw2200-bss.fw";
3516 		break;
3517 	}
3518 
3519 	if (!name) {
3520 		rc = -EINVAL;
3521 		goto error;
3522 	}
3523 
3524 #ifdef CONFIG_PM
3525 	if (!fw_loaded) {
3526 #endif
3527 		rc = ipw_get_fw(priv, &raw, name);
3528 		if (rc < 0)
3529 			goto error;
3530 #ifdef CONFIG_PM
3531 	}
3532 #endif
3533 
3534 	fw = (void *)raw->data;
3535 	boot_img = &fw->data[0];
3536 	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3537 	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3538 			   le32_to_cpu(fw->ucode_size)];
3539 
3540 	if (!priv->rxq)
3541 		priv->rxq = ipw_rx_queue_alloc(priv);
3542 	else
3543 		ipw_rx_queue_reset(priv, priv->rxq);
3544 	if (!priv->rxq) {
3545 		IPW_ERROR("Unable to initialize Rx queue\n");
3546 		rc = -ENOMEM;
3547 		goto error;
3548 	}
3549 
3550       retry:
3551 	/* Ensure interrupts are disabled */
3552 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3553 	priv->status &= ~STATUS_INT_ENABLED;
3554 
3555 	/* ack pending interrupts */
3556 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3557 
3558 	ipw_stop_nic(priv);
3559 
3560 	rc = ipw_reset_nic(priv);
3561 	if (rc < 0) {
3562 		IPW_ERROR("Unable to reset NIC\n");
3563 		goto error;
3564 	}
3565 
3566 	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3567 			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3568 
3569 	/* DMA the initial boot firmware into the device */
3570 	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3571 	if (rc < 0) {
3572 		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3573 		goto error;
3574 	}
3575 
3576 	/* kick start the device */
3577 	ipw_start_nic(priv);
3578 
3579 	/* wait for the device to finish its initial startup sequence */
3580 	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3581 			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3582 	if (rc < 0) {
3583 		IPW_ERROR("device failed to boot initial fw image\n");
3584 		goto error;
3585 	}
3586 	IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3587 
3588 	/* ack fw init done interrupt */
3589 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3590 
3591 	/* DMA the ucode into the device */
3592 	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3593 	if (rc < 0) {
3594 		IPW_ERROR("Unable to load ucode: %d\n", rc);
3595 		goto error;
3596 	}
3597 
3598 	/* stop nic */
3599 	ipw_stop_nic(priv);
3600 
3601 	/* DMA bss firmware into the device */
3602 	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3603 	if (rc < 0) {
3604 		IPW_ERROR("Unable to load firmware: %d\n", rc);
3605 		goto error;
3606 	}
3607 #ifdef CONFIG_PM
3608 	fw_loaded = 1;
3609 #endif
3610 
3611 	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3612 
3613 	rc = ipw_queue_reset(priv);
3614 	if (rc < 0) {
3615 		IPW_ERROR("Unable to initialize queues\n");
3616 		goto error;
3617 	}
3618 
3619 	/* Ensure interrupts are disabled */
3620 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3621 	/* ack pending interrupts */
3622 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3623 
3624 	/* kick start the device */
3625 	ipw_start_nic(priv);
3626 
3627 	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3628 		if (retries > 0) {
3629 			IPW_WARNING("Parity error.  Retrying init.\n");
3630 			retries--;
3631 			goto retry;
3632 		}
3633 
3634 		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3635 		rc = -EIO;
3636 		goto error;
3637 	}
3638 
3639 	/* wait for the device */
3640 	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3641 			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3642 	if (rc < 0) {
3643 		IPW_ERROR("device failed to start within 500ms\n");
3644 		goto error;
3645 	}
3646 	IPW_DEBUG_INFO("device response after %dms\n", rc);
3647 
3648 	/* ack fw init done interrupt */
3649 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3650 
3651 	/* read eeprom data */
3652 	priv->eeprom_delay = 1;
3653 	ipw_read_eeprom(priv);
3654 	/* initialize the eeprom region of sram */
3655 	ipw_eeprom_init_sram(priv);
3656 
3657 	/* enable interrupts */
3658 	ipw_enable_interrupts(priv);
3659 
3660 	/* Ensure our queue has valid packets */
3661 	ipw_rx_queue_replenish(priv);
3662 
3663 	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3664 
3665 	/* ack pending interrupts */
3666 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3667 
3668 #ifndef CONFIG_PM
3669 	release_firmware(raw);
3670 #endif
3671 	return 0;
3672 
3673       error:
3674 	if (priv->rxq) {
3675 		ipw_rx_queue_free(priv, priv->rxq);
3676 		priv->rxq = NULL;
3677 	}
3678 	ipw_tx_queue_free(priv);
3679 	release_firmware(raw);
3680 #ifdef CONFIG_PM
3681 	fw_loaded = 0;
3682 	raw = NULL;
3683 #endif
3684 
3685 	return rc;
3686 }
3687 
3688 /**
3689  * DMA services
3690  *
3691  * Theory of operation
3692  *
3693  * A queue is a circular buffers with 'Read' and 'Write' pointers.
3694  * 2 empty entries always kept in the buffer to protect from overflow.
3695  *
3696  * For Tx queue, there are low mark and high mark limits. If, after queuing
3697  * the packet for Tx, free space become < low mark, Tx queue stopped. When
3698  * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
3699  * Tx queue resumed.
3700  *
3701  * The IPW operates with six queues, one receive queue in the device's
3702  * sram, one transmit queue for sending commands to the device firmware,
3703  * and four transmit queues for data.
3704  *
3705  * The four transmit queues allow for performing quality of service (qos)
3706  * transmissions as per the 802.11 protocol.  Currently Linux does not
3707  * provide a mechanism to the user for utilizing prioritized queues, so
3708  * we only utilize the first data transmit queue (queue1).
3709  */
3710 
3711 /**
3712  * Driver allocates buffers of this size for Rx
3713  */
3714 
3715 /**
3716  * ipw_rx_queue_space - Return number of free slots available in queue.
3717  */
3718 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3719 {
3720 	int s = q->read - q->write;
3721 	if (s <= 0)
3722 		s += RX_QUEUE_SIZE;
3723 	/* keep some buffer to not confuse full and empty queue */
3724 	s -= 2;
3725 	if (s < 0)
3726 		s = 0;
3727 	return s;
3728 }
3729 
3730 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3731 {
3732 	int s = q->last_used - q->first_empty;
3733 	if (s <= 0)
3734 		s += q->n_bd;
3735 	s -= 2;			/* keep some reserve to not confuse empty and full situations */
3736 	if (s < 0)
3737 		s = 0;
3738 	return s;
3739 }
3740 
3741 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3742 {
3743 	return (++index == n_bd) ? 0 : index;
3744 }
3745 
3746 /**
3747  * Initialize common DMA queue structure
3748  *
3749  * @param q                queue to init
3750  * @param count            Number of BD's to allocate. Should be power of 2
3751  * @param read_register    Address for 'read' register
3752  *                         (not offset within BAR, full address)
3753  * @param write_register   Address for 'write' register
3754  *                         (not offset within BAR, full address)
3755  * @param base_register    Address for 'base' register
3756  *                         (not offset within BAR, full address)
3757  * @param size             Address for 'size' register
3758  *                         (not offset within BAR, full address)
3759  */
3760 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3761 			   int count, u32 read, u32 write, u32 base, u32 size)
3762 {
3763 	q->n_bd = count;
3764 
3765 	q->low_mark = q->n_bd / 4;
3766 	if (q->low_mark < 4)
3767 		q->low_mark = 4;
3768 
3769 	q->high_mark = q->n_bd / 8;
3770 	if (q->high_mark < 2)
3771 		q->high_mark = 2;
3772 
3773 	q->first_empty = q->last_used = 0;
3774 	q->reg_r = read;
3775 	q->reg_w = write;
3776 
3777 	ipw_write32(priv, base, q->dma_addr);
3778 	ipw_write32(priv, size, count);
3779 	ipw_write32(priv, read, 0);
3780 	ipw_write32(priv, write, 0);
3781 
3782 	_ipw_read32(priv, 0x90);
3783 }
3784 
3785 static int ipw_queue_tx_init(struct ipw_priv *priv,
3786 			     struct clx2_tx_queue *q,
3787 			     int count, u32 read, u32 write, u32 base, u32 size)
3788 {
3789 	struct pci_dev *dev = priv->pci_dev;
3790 
3791 	q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3792 	if (!q->txb) {
3793 		IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
3794 		return -ENOMEM;
3795 	}
3796 
3797 	q->bd =
3798 	    pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3799 	if (!q->bd) {
3800 		IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3801 			  sizeof(q->bd[0]) * count);
3802 		kfree(q->txb);
3803 		q->txb = NULL;
3804 		return -ENOMEM;
3805 	}
3806 
3807 	ipw_queue_init(priv, &q->q, count, read, write, base, size);
3808 	return 0;
3809 }
3810 
3811 /**
3812  * Free one TFD, those at index [txq->q.last_used].
3813  * Do NOT advance any indexes
3814  *
3815  * @param dev
3816  * @param txq
3817  */
3818 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3819 				  struct clx2_tx_queue *txq)
3820 {
3821 	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3822 	struct pci_dev *dev = priv->pci_dev;
3823 	int i;
3824 
3825 	/* classify bd */
3826 	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3827 		/* nothing to cleanup after for host commands */
3828 		return;
3829 
3830 	/* sanity check */
3831 	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3832 		IPW_ERROR("Too many chunks: %i\n",
3833 			  le32_to_cpu(bd->u.data.num_chunks));
3834 		/** @todo issue fatal error, it is quite serious situation */
3835 		return;
3836 	}
3837 
3838 	/* unmap chunks if any */
3839 	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3840 		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3841 				 le16_to_cpu(bd->u.data.chunk_len[i]),
3842 				 PCI_DMA_TODEVICE);
3843 		if (txq->txb[txq->q.last_used]) {
3844 			libipw_txb_free(txq->txb[txq->q.last_used]);
3845 			txq->txb[txq->q.last_used] = NULL;
3846 		}
3847 	}
3848 }
3849 
3850 /**
3851  * Deallocate DMA queue.
3852  *
3853  * Empty queue by removing and destroying all BD's.
3854  * Free all buffers.
3855  *
3856  * @param dev
3857  * @param q
3858  */
3859 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3860 {
3861 	struct clx2_queue *q = &txq->q;
3862 	struct pci_dev *dev = priv->pci_dev;
3863 
3864 	if (q->n_bd == 0)
3865 		return;
3866 
3867 	/* first, empty all BD's */
3868 	for (; q->first_empty != q->last_used;
3869 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3870 		ipw_queue_tx_free_tfd(priv, txq);
3871 	}
3872 
3873 	/* free buffers belonging to queue itself */
3874 	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3875 			    q->dma_addr);
3876 	kfree(txq->txb);
3877 
3878 	/* 0 fill whole structure */
3879 	memset(txq, 0, sizeof(*txq));
3880 }
3881 
3882 /**
3883  * Destroy all DMA queues and structures
3884  *
3885  * @param priv
3886  */
3887 static void ipw_tx_queue_free(struct ipw_priv *priv)
3888 {
3889 	/* Tx CMD queue */
3890 	ipw_queue_tx_free(priv, &priv->txq_cmd);
3891 
3892 	/* Tx queues */
3893 	ipw_queue_tx_free(priv, &priv->txq[0]);
3894 	ipw_queue_tx_free(priv, &priv->txq[1]);
3895 	ipw_queue_tx_free(priv, &priv->txq[2]);
3896 	ipw_queue_tx_free(priv, &priv->txq[3]);
3897 }
3898 
3899 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3900 {
3901 	/* First 3 bytes are manufacturer */
3902 	bssid[0] = priv->mac_addr[0];
3903 	bssid[1] = priv->mac_addr[1];
3904 	bssid[2] = priv->mac_addr[2];
3905 
3906 	/* Last bytes are random */
3907 	get_random_bytes(&bssid[3], ETH_ALEN - 3);
3908 
3909 	bssid[0] &= 0xfe;	/* clear multicast bit */
3910 	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
3911 }
3912 
3913 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3914 {
3915 	struct ipw_station_entry entry;
3916 	int i;
3917 
3918 	for (i = 0; i < priv->num_stations; i++) {
3919 		if (ether_addr_equal(priv->stations[i], bssid)) {
3920 			/* Another node is active in network */
3921 			priv->missed_adhoc_beacons = 0;
3922 			if (!(priv->config & CFG_STATIC_CHANNEL))
3923 				/* when other nodes drop out, we drop out */
3924 				priv->config &= ~CFG_ADHOC_PERSIST;
3925 
3926 			return i;
3927 		}
3928 	}
3929 
3930 	if (i == MAX_STATIONS)
3931 		return IPW_INVALID_STATION;
3932 
3933 	IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3934 
3935 	entry.reserved = 0;
3936 	entry.support_mode = 0;
3937 	memcpy(entry.mac_addr, bssid, ETH_ALEN);
3938 	memcpy(priv->stations[i], bssid, ETH_ALEN);
3939 	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3940 			 &entry, sizeof(entry));
3941 	priv->num_stations++;
3942 
3943 	return i;
3944 }
3945 
3946 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3947 {
3948 	int i;
3949 
3950 	for (i = 0; i < priv->num_stations; i++)
3951 		if (ether_addr_equal(priv->stations[i], bssid))
3952 			return i;
3953 
3954 	return IPW_INVALID_STATION;
3955 }
3956 
3957 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3958 {
3959 	int err;
3960 
3961 	if (priv->status & STATUS_ASSOCIATING) {
3962 		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3963 		schedule_work(&priv->disassociate);
3964 		return;
3965 	}
3966 
3967 	if (!(priv->status & STATUS_ASSOCIATED)) {
3968 		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3969 		return;
3970 	}
3971 
3972 	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3973 			"on channel %d.\n",
3974 			priv->assoc_request.bssid,
3975 			priv->assoc_request.channel);
3976 
3977 	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3978 	priv->status |= STATUS_DISASSOCIATING;
3979 
3980 	if (quiet)
3981 		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3982 	else
3983 		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3984 
3985 	err = ipw_send_associate(priv, &priv->assoc_request);
3986 	if (err) {
3987 		IPW_DEBUG_HC("Attempt to send [dis]associate command "
3988 			     "failed.\n");
3989 		return;
3990 	}
3991 
3992 }
3993 
3994 static int ipw_disassociate(void *data)
3995 {
3996 	struct ipw_priv *priv = data;
3997 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3998 		return 0;
3999 	ipw_send_disassociate(data, 0);
4000 	netif_carrier_off(priv->net_dev);
4001 	return 1;
4002 }
4003 
4004 static void ipw_bg_disassociate(struct work_struct *work)
4005 {
4006 	struct ipw_priv *priv =
4007 		container_of(work, struct ipw_priv, disassociate);
4008 	mutex_lock(&priv->mutex);
4009 	ipw_disassociate(priv);
4010 	mutex_unlock(&priv->mutex);
4011 }
4012 
4013 static void ipw_system_config(struct work_struct *work)
4014 {
4015 	struct ipw_priv *priv =
4016 		container_of(work, struct ipw_priv, system_config);
4017 
4018 #ifdef CONFIG_IPW2200_PROMISCUOUS
4019 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
4020 		priv->sys_config.accept_all_data_frames = 1;
4021 		priv->sys_config.accept_non_directed_frames = 1;
4022 		priv->sys_config.accept_all_mgmt_bcpr = 1;
4023 		priv->sys_config.accept_all_mgmt_frames = 1;
4024 	}
4025 #endif
4026 
4027 	ipw_send_system_config(priv);
4028 }
4029 
4030 struct ipw_status_code {
4031 	u16 status;
4032 	const char *reason;
4033 };
4034 
4035 static const struct ipw_status_code ipw_status_codes[] = {
4036 	{0x00, "Successful"},
4037 	{0x01, "Unspecified failure"},
4038 	{0x0A, "Cannot support all requested capabilities in the "
4039 	 "Capability information field"},
4040 	{0x0B, "Reassociation denied due to inability to confirm that "
4041 	 "association exists"},
4042 	{0x0C, "Association denied due to reason outside the scope of this "
4043 	 "standard"},
4044 	{0x0D,
4045 	 "Responding station does not support the specified authentication "
4046 	 "algorithm"},
4047 	{0x0E,
4048 	 "Received an Authentication frame with authentication sequence "
4049 	 "transaction sequence number out of expected sequence"},
4050 	{0x0F, "Authentication rejected because of challenge failure"},
4051 	{0x10, "Authentication rejected due to timeout waiting for next "
4052 	 "frame in sequence"},
4053 	{0x11, "Association denied because AP is unable to handle additional "
4054 	 "associated stations"},
4055 	{0x12,
4056 	 "Association denied due to requesting station not supporting all "
4057 	 "of the datarates in the BSSBasicServiceSet Parameter"},
4058 	{0x13,
4059 	 "Association denied due to requesting station not supporting "
4060 	 "short preamble operation"},
4061 	{0x14,
4062 	 "Association denied due to requesting station not supporting "
4063 	 "PBCC encoding"},
4064 	{0x15,
4065 	 "Association denied due to requesting station not supporting "
4066 	 "channel agility"},
4067 	{0x19,
4068 	 "Association denied due to requesting station not supporting "
4069 	 "short slot operation"},
4070 	{0x1A,
4071 	 "Association denied due to requesting station not supporting "
4072 	 "DSSS-OFDM operation"},
4073 	{0x28, "Invalid Information Element"},
4074 	{0x29, "Group Cipher is not valid"},
4075 	{0x2A, "Pairwise Cipher is not valid"},
4076 	{0x2B, "AKMP is not valid"},
4077 	{0x2C, "Unsupported RSN IE version"},
4078 	{0x2D, "Invalid RSN IE Capabilities"},
4079 	{0x2E, "Cipher suite is rejected per security policy"},
4080 };
4081 
4082 static const char *ipw_get_status_code(u16 status)
4083 {
4084 	int i;
4085 	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4086 		if (ipw_status_codes[i].status == (status & 0xff))
4087 			return ipw_status_codes[i].reason;
4088 	return "Unknown status value.";
4089 }
4090 
4091 static inline void average_init(struct average *avg)
4092 {
4093 	memset(avg, 0, sizeof(*avg));
4094 }
4095 
4096 #define DEPTH_RSSI 8
4097 #define DEPTH_NOISE 16
4098 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4099 {
4100 	return ((depth-1)*prev_avg +  val)/depth;
4101 }
4102 
4103 static void average_add(struct average *avg, s16 val)
4104 {
4105 	avg->sum -= avg->entries[avg->pos];
4106 	avg->sum += val;
4107 	avg->entries[avg->pos++] = val;
4108 	if (unlikely(avg->pos == AVG_ENTRIES)) {
4109 		avg->init = 1;
4110 		avg->pos = 0;
4111 	}
4112 }
4113 
4114 static s16 average_value(struct average *avg)
4115 {
4116 	if (!unlikely(avg->init)) {
4117 		if (avg->pos)
4118 			return avg->sum / avg->pos;
4119 		return 0;
4120 	}
4121 
4122 	return avg->sum / AVG_ENTRIES;
4123 }
4124 
4125 static void ipw_reset_stats(struct ipw_priv *priv)
4126 {
4127 	u32 len = sizeof(u32);
4128 
4129 	priv->quality = 0;
4130 
4131 	average_init(&priv->average_missed_beacons);
4132 	priv->exp_avg_rssi = -60;
4133 	priv->exp_avg_noise = -85 + 0x100;
4134 
4135 	priv->last_rate = 0;
4136 	priv->last_missed_beacons = 0;
4137 	priv->last_rx_packets = 0;
4138 	priv->last_tx_packets = 0;
4139 	priv->last_tx_failures = 0;
4140 
4141 	/* Firmware managed, reset only when NIC is restarted, so we have to
4142 	 * normalize on the current value */
4143 	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4144 			&priv->last_rx_err, &len);
4145 	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4146 			&priv->last_tx_failures, &len);
4147 
4148 	/* Driver managed, reset with each association */
4149 	priv->missed_adhoc_beacons = 0;
4150 	priv->missed_beacons = 0;
4151 	priv->tx_packets = 0;
4152 	priv->rx_packets = 0;
4153 
4154 }
4155 
4156 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4157 {
4158 	u32 i = 0x80000000;
4159 	u32 mask = priv->rates_mask;
4160 	/* If currently associated in B mode, restrict the maximum
4161 	 * rate match to B rates */
4162 	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4163 		mask &= LIBIPW_CCK_RATES_MASK;
4164 
4165 	/* TODO: Verify that the rate is supported by the current rates
4166 	 * list. */
4167 
4168 	while (i && !(mask & i))
4169 		i >>= 1;
4170 	switch (i) {
4171 	case LIBIPW_CCK_RATE_1MB_MASK:
4172 		return 1000000;
4173 	case LIBIPW_CCK_RATE_2MB_MASK:
4174 		return 2000000;
4175 	case LIBIPW_CCK_RATE_5MB_MASK:
4176 		return 5500000;
4177 	case LIBIPW_OFDM_RATE_6MB_MASK:
4178 		return 6000000;
4179 	case LIBIPW_OFDM_RATE_9MB_MASK:
4180 		return 9000000;
4181 	case LIBIPW_CCK_RATE_11MB_MASK:
4182 		return 11000000;
4183 	case LIBIPW_OFDM_RATE_12MB_MASK:
4184 		return 12000000;
4185 	case LIBIPW_OFDM_RATE_18MB_MASK:
4186 		return 18000000;
4187 	case LIBIPW_OFDM_RATE_24MB_MASK:
4188 		return 24000000;
4189 	case LIBIPW_OFDM_RATE_36MB_MASK:
4190 		return 36000000;
4191 	case LIBIPW_OFDM_RATE_48MB_MASK:
4192 		return 48000000;
4193 	case LIBIPW_OFDM_RATE_54MB_MASK:
4194 		return 54000000;
4195 	}
4196 
4197 	if (priv->ieee->mode == IEEE_B)
4198 		return 11000000;
4199 	else
4200 		return 54000000;
4201 }
4202 
4203 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4204 {
4205 	u32 rate, len = sizeof(rate);
4206 	int err;
4207 
4208 	if (!(priv->status & STATUS_ASSOCIATED))
4209 		return 0;
4210 
4211 	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4212 		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4213 				      &len);
4214 		if (err) {
4215 			IPW_DEBUG_INFO("failed querying ordinals.\n");
4216 			return 0;
4217 		}
4218 	} else
4219 		return ipw_get_max_rate(priv);
4220 
4221 	switch (rate) {
4222 	case IPW_TX_RATE_1MB:
4223 		return 1000000;
4224 	case IPW_TX_RATE_2MB:
4225 		return 2000000;
4226 	case IPW_TX_RATE_5MB:
4227 		return 5500000;
4228 	case IPW_TX_RATE_6MB:
4229 		return 6000000;
4230 	case IPW_TX_RATE_9MB:
4231 		return 9000000;
4232 	case IPW_TX_RATE_11MB:
4233 		return 11000000;
4234 	case IPW_TX_RATE_12MB:
4235 		return 12000000;
4236 	case IPW_TX_RATE_18MB:
4237 		return 18000000;
4238 	case IPW_TX_RATE_24MB:
4239 		return 24000000;
4240 	case IPW_TX_RATE_36MB:
4241 		return 36000000;
4242 	case IPW_TX_RATE_48MB:
4243 		return 48000000;
4244 	case IPW_TX_RATE_54MB:
4245 		return 54000000;
4246 	}
4247 
4248 	return 0;
4249 }
4250 
4251 #define IPW_STATS_INTERVAL (2 * HZ)
4252 static void ipw_gather_stats(struct ipw_priv *priv)
4253 {
4254 	u32 rx_err, rx_err_delta, rx_packets_delta;
4255 	u32 tx_failures, tx_failures_delta, tx_packets_delta;
4256 	u32 missed_beacons_percent, missed_beacons_delta;
4257 	u32 quality = 0;
4258 	u32 len = sizeof(u32);
4259 	s16 rssi;
4260 	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4261 	    rate_quality;
4262 	u32 max_rate;
4263 
4264 	if (!(priv->status & STATUS_ASSOCIATED)) {
4265 		priv->quality = 0;
4266 		return;
4267 	}
4268 
4269 	/* Update the statistics */
4270 	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4271 			&priv->missed_beacons, &len);
4272 	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4273 	priv->last_missed_beacons = priv->missed_beacons;
4274 	if (priv->assoc_request.beacon_interval) {
4275 		missed_beacons_percent = missed_beacons_delta *
4276 		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4277 		    (IPW_STATS_INTERVAL * 10);
4278 	} else {
4279 		missed_beacons_percent = 0;
4280 	}
4281 	average_add(&priv->average_missed_beacons, missed_beacons_percent);
4282 
4283 	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4284 	rx_err_delta = rx_err - priv->last_rx_err;
4285 	priv->last_rx_err = rx_err;
4286 
4287 	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4288 	tx_failures_delta = tx_failures - priv->last_tx_failures;
4289 	priv->last_tx_failures = tx_failures;
4290 
4291 	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4292 	priv->last_rx_packets = priv->rx_packets;
4293 
4294 	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4295 	priv->last_tx_packets = priv->tx_packets;
4296 
4297 	/* Calculate quality based on the following:
4298 	 *
4299 	 * Missed beacon: 100% = 0, 0% = 70% missed
4300 	 * Rate: 60% = 1Mbs, 100% = Max
4301 	 * Rx and Tx errors represent a straight % of total Rx/Tx
4302 	 * RSSI: 100% = > -50,  0% = < -80
4303 	 * Rx errors: 100% = 0, 0% = 50% missed
4304 	 *
4305 	 * The lowest computed quality is used.
4306 	 *
4307 	 */
4308 #define BEACON_THRESHOLD 5
4309 	beacon_quality = 100 - missed_beacons_percent;
4310 	if (beacon_quality < BEACON_THRESHOLD)
4311 		beacon_quality = 0;
4312 	else
4313 		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4314 		    (100 - BEACON_THRESHOLD);
4315 	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4316 			beacon_quality, missed_beacons_percent);
4317 
4318 	priv->last_rate = ipw_get_current_rate(priv);
4319 	max_rate = ipw_get_max_rate(priv);
4320 	rate_quality = priv->last_rate * 40 / max_rate + 60;
4321 	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4322 			rate_quality, priv->last_rate / 1000000);
4323 
4324 	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4325 		rx_quality = 100 - (rx_err_delta * 100) /
4326 		    (rx_packets_delta + rx_err_delta);
4327 	else
4328 		rx_quality = 100;
4329 	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
4330 			rx_quality, rx_err_delta, rx_packets_delta);
4331 
4332 	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4333 		tx_quality = 100 - (tx_failures_delta * 100) /
4334 		    (tx_packets_delta + tx_failures_delta);
4335 	else
4336 		tx_quality = 100;
4337 	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
4338 			tx_quality, tx_failures_delta, tx_packets_delta);
4339 
4340 	rssi = priv->exp_avg_rssi;
4341 	signal_quality =
4342 	    (100 *
4343 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4344 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4345 	     (priv->ieee->perfect_rssi - rssi) *
4346 	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4347 	      62 * (priv->ieee->perfect_rssi - rssi))) /
4348 	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4349 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4350 	if (signal_quality > 100)
4351 		signal_quality = 100;
4352 	else if (signal_quality < 1)
4353 		signal_quality = 0;
4354 
4355 	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4356 			signal_quality, rssi);
4357 
4358 	quality = min(rx_quality, signal_quality);
4359 	quality = min(tx_quality, quality);
4360 	quality = min(rate_quality, quality);
4361 	quality = min(beacon_quality, quality);
4362 	if (quality == beacon_quality)
4363 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4364 				quality);
4365 	if (quality == rate_quality)
4366 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4367 				quality);
4368 	if (quality == tx_quality)
4369 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4370 				quality);
4371 	if (quality == rx_quality)
4372 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4373 				quality);
4374 	if (quality == signal_quality)
4375 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4376 				quality);
4377 
4378 	priv->quality = quality;
4379 
4380 	schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4381 }
4382 
4383 static void ipw_bg_gather_stats(struct work_struct *work)
4384 {
4385 	struct ipw_priv *priv =
4386 		container_of(work, struct ipw_priv, gather_stats.work);
4387 	mutex_lock(&priv->mutex);
4388 	ipw_gather_stats(priv);
4389 	mutex_unlock(&priv->mutex);
4390 }
4391 
4392 /* Missed beacon behavior:
4393  * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4394  * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4395  * Above disassociate threshold, give up and stop scanning.
4396  * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
4397 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4398 					    int missed_count)
4399 {
4400 	priv->notif_missed_beacons = missed_count;
4401 
4402 	if (missed_count > priv->disassociate_threshold &&
4403 	    priv->status & STATUS_ASSOCIATED) {
4404 		/* If associated and we've hit the missed
4405 		 * beacon threshold, disassociate, turn
4406 		 * off roaming, and abort any active scans */
4407 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4408 			  IPW_DL_STATE | IPW_DL_ASSOC,
4409 			  "Missed beacon: %d - disassociate\n", missed_count);
4410 		priv->status &= ~STATUS_ROAMING;
4411 		if (priv->status & STATUS_SCANNING) {
4412 			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4413 				  IPW_DL_STATE,
4414 				  "Aborting scan with missed beacon.\n");
4415 			schedule_work(&priv->abort_scan);
4416 		}
4417 
4418 		schedule_work(&priv->disassociate);
4419 		return;
4420 	}
4421 
4422 	if (priv->status & STATUS_ROAMING) {
4423 		/* If we are currently roaming, then just
4424 		 * print a debug statement... */
4425 		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4426 			  "Missed beacon: %d - roam in progress\n",
4427 			  missed_count);
4428 		return;
4429 	}
4430 
4431 	if (roaming &&
4432 	    (missed_count > priv->roaming_threshold &&
4433 	     missed_count <= priv->disassociate_threshold)) {
4434 		/* If we are not already roaming, set the ROAM
4435 		 * bit in the status and kick off a scan.
4436 		 * This can happen several times before we reach
4437 		 * disassociate_threshold. */
4438 		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4439 			  "Missed beacon: %d - initiate "
4440 			  "roaming\n", missed_count);
4441 		if (!(priv->status & STATUS_ROAMING)) {
4442 			priv->status |= STATUS_ROAMING;
4443 			if (!(priv->status & STATUS_SCANNING))
4444 				schedule_delayed_work(&priv->request_scan, 0);
4445 		}
4446 		return;
4447 	}
4448 
4449 	if (priv->status & STATUS_SCANNING &&
4450 	    missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4451 		/* Stop scan to keep fw from getting
4452 		 * stuck (only if we aren't roaming --
4453 		 * otherwise we'll never scan more than 2 or 3
4454 		 * channels..) */
4455 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4456 			  "Aborting scan with missed beacon.\n");
4457 		schedule_work(&priv->abort_scan);
4458 	}
4459 
4460 	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4461 }
4462 
4463 static void ipw_scan_event(struct work_struct *work)
4464 {
4465 	union iwreq_data wrqu;
4466 
4467 	struct ipw_priv *priv =
4468 		container_of(work, struct ipw_priv, scan_event.work);
4469 
4470 	wrqu.data.length = 0;
4471 	wrqu.data.flags = 0;
4472 	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4473 }
4474 
4475 static void handle_scan_event(struct ipw_priv *priv)
4476 {
4477 	/* Only userspace-requested scan completion events go out immediately */
4478 	if (!priv->user_requested_scan) {
4479 		schedule_delayed_work(&priv->scan_event,
4480 				      round_jiffies_relative(msecs_to_jiffies(4000)));
4481 	} else {
4482 		priv->user_requested_scan = 0;
4483 		mod_delayed_work(system_wq, &priv->scan_event, 0);
4484 	}
4485 }
4486 
4487 /**
4488  * Handle host notification packet.
4489  * Called from interrupt routine
4490  */
4491 static void ipw_rx_notification(struct ipw_priv *priv,
4492 				       struct ipw_rx_notification *notif)
4493 {
4494 	u16 size = le16_to_cpu(notif->size);
4495 
4496 	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4497 
4498 	switch (notif->subtype) {
4499 	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4500 			struct notif_association *assoc = &notif->u.assoc;
4501 
4502 			switch (assoc->state) {
4503 			case CMAS_ASSOCIATED:{
4504 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4505 						  IPW_DL_ASSOC,
4506 						  "associated: '%*pE' %pM\n",
4507 						  priv->essid_len, priv->essid,
4508 						  priv->bssid);
4509 
4510 					switch (priv->ieee->iw_mode) {
4511 					case IW_MODE_INFRA:
4512 						memcpy(priv->ieee->bssid,
4513 						       priv->bssid, ETH_ALEN);
4514 						break;
4515 
4516 					case IW_MODE_ADHOC:
4517 						memcpy(priv->ieee->bssid,
4518 						       priv->bssid, ETH_ALEN);
4519 
4520 						/* clear out the station table */
4521 						priv->num_stations = 0;
4522 
4523 						IPW_DEBUG_ASSOC
4524 						    ("queueing adhoc check\n");
4525 						schedule_delayed_work(
4526 							&priv->adhoc_check,
4527 							le16_to_cpu(priv->
4528 							assoc_request.
4529 							beacon_interval));
4530 						break;
4531 					}
4532 
4533 					priv->status &= ~STATUS_ASSOCIATING;
4534 					priv->status |= STATUS_ASSOCIATED;
4535 					schedule_work(&priv->system_config);
4536 
4537 #ifdef CONFIG_IPW2200_QOS
4538 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4539 			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4540 					if ((priv->status & STATUS_AUTH) &&
4541 					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
4542 					     == IEEE80211_STYPE_ASSOC_RESP)) {
4543 						if ((sizeof
4544 						     (struct
4545 						      libipw_assoc_response)
4546 						     <= size)
4547 						    && (size <= 2314)) {
4548 							struct
4549 							libipw_rx_stats
4550 							    stats = {
4551 								.len = size - 1,
4552 							};
4553 
4554 							IPW_DEBUG_QOS
4555 							    ("QoS Associate "
4556 							     "size %d\n", size);
4557 							libipw_rx_mgt(priv->
4558 									 ieee,
4559 									 (struct
4560 									  libipw_hdr_4addr
4561 									  *)
4562 									 &notif->u.raw, &stats);
4563 						}
4564 					}
4565 #endif
4566 
4567 					schedule_work(&priv->link_up);
4568 
4569 					break;
4570 				}
4571 
4572 			case CMAS_AUTHENTICATED:{
4573 					if (priv->
4574 					    status & (STATUS_ASSOCIATED |
4575 						      STATUS_AUTH)) {
4576 						struct notif_authenticate *auth
4577 						    = &notif->u.auth;
4578 						IPW_DEBUG(IPW_DL_NOTIF |
4579 							  IPW_DL_STATE |
4580 							  IPW_DL_ASSOC,
4581 							  "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n",
4582 							  priv->essid_len,
4583 							  priv->essid,
4584 							  priv->bssid,
4585 							  le16_to_cpu(auth->status),
4586 							  ipw_get_status_code
4587 							  (le16_to_cpu
4588 							   (auth->status)));
4589 
4590 						priv->status &=
4591 						    ~(STATUS_ASSOCIATING |
4592 						      STATUS_AUTH |
4593 						      STATUS_ASSOCIATED);
4594 
4595 						schedule_work(&priv->link_down);
4596 						break;
4597 					}
4598 
4599 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4600 						  IPW_DL_ASSOC,
4601 						  "authenticated: '%*pE' %pM\n",
4602 						  priv->essid_len, priv->essid,
4603 						  priv->bssid);
4604 					break;
4605 				}
4606 
4607 			case CMAS_INIT:{
4608 					if (priv->status & STATUS_AUTH) {
4609 						struct
4610 						    libipw_assoc_response
4611 						*resp;
4612 						resp =
4613 						    (struct
4614 						     libipw_assoc_response
4615 						     *)&notif->u.raw;
4616 						IPW_DEBUG(IPW_DL_NOTIF |
4617 							  IPW_DL_STATE |
4618 							  IPW_DL_ASSOC,
4619 							  "association failed (0x%04X): %s\n",
4620 							  le16_to_cpu(resp->status),
4621 							  ipw_get_status_code
4622 							  (le16_to_cpu
4623 							   (resp->status)));
4624 					}
4625 
4626 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4627 						  IPW_DL_ASSOC,
4628 						  "disassociated: '%*pE' %pM\n",
4629 						  priv->essid_len, priv->essid,
4630 						  priv->bssid);
4631 
4632 					priv->status &=
4633 					    ~(STATUS_DISASSOCIATING |
4634 					      STATUS_ASSOCIATING |
4635 					      STATUS_ASSOCIATED | STATUS_AUTH);
4636 					if (priv->assoc_network
4637 					    && (priv->assoc_network->
4638 						capability &
4639 						WLAN_CAPABILITY_IBSS))
4640 						ipw_remove_current_network
4641 						    (priv);
4642 
4643 					schedule_work(&priv->link_down);
4644 
4645 					break;
4646 				}
4647 
4648 			case CMAS_RX_ASSOC_RESP:
4649 				break;
4650 
4651 			default:
4652 				IPW_ERROR("assoc: unknown (%d)\n",
4653 					  assoc->state);
4654 				break;
4655 			}
4656 
4657 			break;
4658 		}
4659 
4660 	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4661 			struct notif_authenticate *auth = &notif->u.auth;
4662 			switch (auth->state) {
4663 			case CMAS_AUTHENTICATED:
4664 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4665 					  "authenticated: '%*pE' %pM\n",
4666 					  priv->essid_len, priv->essid,
4667 					  priv->bssid);
4668 				priv->status |= STATUS_AUTH;
4669 				break;
4670 
4671 			case CMAS_INIT:
4672 				if (priv->status & STATUS_AUTH) {
4673 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4674 						  IPW_DL_ASSOC,
4675 						  "authentication failed (0x%04X): %s\n",
4676 						  le16_to_cpu(auth->status),
4677 						  ipw_get_status_code(le16_to_cpu
4678 								      (auth->
4679 								       status)));
4680 				}
4681 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4682 					  IPW_DL_ASSOC,
4683 					  "deauthenticated: '%*pE' %pM\n",
4684 					  priv->essid_len, priv->essid,
4685 					  priv->bssid);
4686 
4687 				priv->status &= ~(STATUS_ASSOCIATING |
4688 						  STATUS_AUTH |
4689 						  STATUS_ASSOCIATED);
4690 
4691 				schedule_work(&priv->link_down);
4692 				break;
4693 
4694 			case CMAS_TX_AUTH_SEQ_1:
4695 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4696 					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4697 				break;
4698 			case CMAS_RX_AUTH_SEQ_2:
4699 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4700 					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4701 				break;
4702 			case CMAS_AUTH_SEQ_1_PASS:
4703 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4704 					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4705 				break;
4706 			case CMAS_AUTH_SEQ_1_FAIL:
4707 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4708 					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4709 				break;
4710 			case CMAS_TX_AUTH_SEQ_3:
4711 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4712 					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4713 				break;
4714 			case CMAS_RX_AUTH_SEQ_4:
4715 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4716 					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4717 				break;
4718 			case CMAS_AUTH_SEQ_2_PASS:
4719 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4720 					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4721 				break;
4722 			case CMAS_AUTH_SEQ_2_FAIL:
4723 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4724 					  IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4725 				break;
4726 			case CMAS_TX_ASSOC:
4727 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4728 					  IPW_DL_ASSOC, "TX_ASSOC\n");
4729 				break;
4730 			case CMAS_RX_ASSOC_RESP:
4731 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4732 					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4733 
4734 				break;
4735 			case CMAS_ASSOCIATED:
4736 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4737 					  IPW_DL_ASSOC, "ASSOCIATED\n");
4738 				break;
4739 			default:
4740 				IPW_DEBUG_NOTIF("auth: failure - %d\n",
4741 						auth->state);
4742 				break;
4743 			}
4744 			break;
4745 		}
4746 
4747 	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4748 			struct notif_channel_result *x =
4749 			    &notif->u.channel_result;
4750 
4751 			if (size == sizeof(*x)) {
4752 				IPW_DEBUG_SCAN("Scan result for channel %d\n",
4753 					       x->channel_num);
4754 			} else {
4755 				IPW_DEBUG_SCAN("Scan result of wrong size %d "
4756 					       "(should be %zd)\n",
4757 					       size, sizeof(*x));
4758 			}
4759 			break;
4760 		}
4761 
4762 	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4763 			struct notif_scan_complete *x = &notif->u.scan_complete;
4764 			if (size == sizeof(*x)) {
4765 				IPW_DEBUG_SCAN
4766 				    ("Scan completed: type %d, %d channels, "
4767 				     "%d status\n", x->scan_type,
4768 				     x->num_channels, x->status);
4769 			} else {
4770 				IPW_ERROR("Scan completed of wrong size %d "
4771 					  "(should be %zd)\n",
4772 					  size, sizeof(*x));
4773 			}
4774 
4775 			priv->status &=
4776 			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4777 
4778 			wake_up_interruptible(&priv->wait_state);
4779 			cancel_delayed_work(&priv->scan_check);
4780 
4781 			if (priv->status & STATUS_EXIT_PENDING)
4782 				break;
4783 
4784 			priv->ieee->scans++;
4785 
4786 #ifdef CONFIG_IPW2200_MONITOR
4787 			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4788 				priv->status |= STATUS_SCAN_FORCED;
4789 				schedule_delayed_work(&priv->request_scan, 0);
4790 				break;
4791 			}
4792 			priv->status &= ~STATUS_SCAN_FORCED;
4793 #endif				/* CONFIG_IPW2200_MONITOR */
4794 
4795 			/* Do queued direct scans first */
4796 			if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4797 				schedule_delayed_work(&priv->request_direct_scan, 0);
4798 
4799 			if (!(priv->status & (STATUS_ASSOCIATED |
4800 					      STATUS_ASSOCIATING |
4801 					      STATUS_ROAMING |
4802 					      STATUS_DISASSOCIATING)))
4803 				schedule_work(&priv->associate);
4804 			else if (priv->status & STATUS_ROAMING) {
4805 				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4806 					/* If a scan completed and we are in roam mode, then
4807 					 * the scan that completed was the one requested as a
4808 					 * result of entering roam... so, schedule the
4809 					 * roam work */
4810 					schedule_work(&priv->roam);
4811 				else
4812 					/* Don't schedule if we aborted the scan */
4813 					priv->status &= ~STATUS_ROAMING;
4814 			} else if (priv->status & STATUS_SCAN_PENDING)
4815 				schedule_delayed_work(&priv->request_scan, 0);
4816 			else if (priv->config & CFG_BACKGROUND_SCAN
4817 				 && priv->status & STATUS_ASSOCIATED)
4818 				schedule_delayed_work(&priv->request_scan,
4819 						      round_jiffies_relative(HZ));
4820 
4821 			/* Send an empty event to user space.
4822 			 * We don't send the received data on the event because
4823 			 * it would require us to do complex transcoding, and
4824 			 * we want to minimise the work done in the irq handler
4825 			 * Use a request to extract the data.
4826 			 * Also, we generate this even for any scan, regardless
4827 			 * on how the scan was initiated. User space can just
4828 			 * sync on periodic scan to get fresh data...
4829 			 * Jean II */
4830 			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4831 				handle_scan_event(priv);
4832 			break;
4833 		}
4834 
4835 	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4836 			struct notif_frag_length *x = &notif->u.frag_len;
4837 
4838 			if (size == sizeof(*x))
4839 				IPW_ERROR("Frag length: %d\n",
4840 					  le16_to_cpu(x->frag_length));
4841 			else
4842 				IPW_ERROR("Frag length of wrong size %d "
4843 					  "(should be %zd)\n",
4844 					  size, sizeof(*x));
4845 			break;
4846 		}
4847 
4848 	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4849 			struct notif_link_deterioration *x =
4850 			    &notif->u.link_deterioration;
4851 
4852 			if (size == sizeof(*x)) {
4853 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4854 					"link deterioration: type %d, cnt %d\n",
4855 					x->silence_notification_type,
4856 					x->silence_count);
4857 				memcpy(&priv->last_link_deterioration, x,
4858 				       sizeof(*x));
4859 			} else {
4860 				IPW_ERROR("Link Deterioration of wrong size %d "
4861 					  "(should be %zd)\n",
4862 					  size, sizeof(*x));
4863 			}
4864 			break;
4865 		}
4866 
4867 	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4868 			IPW_ERROR("Dino config\n");
4869 			if (priv->hcmd
4870 			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4871 				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4872 
4873 			break;
4874 		}
4875 
4876 	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4877 			struct notif_beacon_state *x = &notif->u.beacon_state;
4878 			if (size != sizeof(*x)) {
4879 				IPW_ERROR
4880 				    ("Beacon state of wrong size %d (should "
4881 				     "be %zd)\n", size, sizeof(*x));
4882 				break;
4883 			}
4884 
4885 			if (le32_to_cpu(x->state) ==
4886 			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4887 				ipw_handle_missed_beacon(priv,
4888 							 le32_to_cpu(x->
4889 								     number));
4890 
4891 			break;
4892 		}
4893 
4894 	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4895 			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4896 			if (size == sizeof(*x)) {
4897 				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4898 					  "0x%02x station %d\n",
4899 					  x->key_state, x->security_type,
4900 					  x->station_index);
4901 				break;
4902 			}
4903 
4904 			IPW_ERROR
4905 			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
4906 			     size, sizeof(*x));
4907 			break;
4908 		}
4909 
4910 	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4911 			struct notif_calibration *x = &notif->u.calibration;
4912 
4913 			if (size == sizeof(*x)) {
4914 				memcpy(&priv->calib, x, sizeof(*x));
4915 				IPW_DEBUG_INFO("TODO: Calibration\n");
4916 				break;
4917 			}
4918 
4919 			IPW_ERROR
4920 			    ("Calibration of wrong size %d (should be %zd)\n",
4921 			     size, sizeof(*x));
4922 			break;
4923 		}
4924 
4925 	case HOST_NOTIFICATION_NOISE_STATS:{
4926 			if (size == sizeof(u32)) {
4927 				priv->exp_avg_noise =
4928 				    exponential_average(priv->exp_avg_noise,
4929 				    (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4930 				    DEPTH_NOISE);
4931 				break;
4932 			}
4933 
4934 			IPW_ERROR
4935 			    ("Noise stat is wrong size %d (should be %zd)\n",
4936 			     size, sizeof(u32));
4937 			break;
4938 		}
4939 
4940 	default:
4941 		IPW_DEBUG_NOTIF("Unknown notification: "
4942 				"subtype=%d,flags=0x%2x,size=%d\n",
4943 				notif->subtype, notif->flags, size);
4944 	}
4945 }
4946 
4947 /**
4948  * Destroys all DMA structures and initialise them again
4949  *
4950  * @param priv
4951  * @return error code
4952  */
4953 static int ipw_queue_reset(struct ipw_priv *priv)
4954 {
4955 	int rc = 0;
4956 	/** @todo customize queue sizes */
4957 	int nTx = 64, nTxCmd = 8;
4958 	ipw_tx_queue_free(priv);
4959 	/* Tx CMD queue */
4960 	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4961 			       IPW_TX_CMD_QUEUE_READ_INDEX,
4962 			       IPW_TX_CMD_QUEUE_WRITE_INDEX,
4963 			       IPW_TX_CMD_QUEUE_BD_BASE,
4964 			       IPW_TX_CMD_QUEUE_BD_SIZE);
4965 	if (rc) {
4966 		IPW_ERROR("Tx Cmd queue init failed\n");
4967 		goto error;
4968 	}
4969 	/* Tx queue(s) */
4970 	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4971 			       IPW_TX_QUEUE_0_READ_INDEX,
4972 			       IPW_TX_QUEUE_0_WRITE_INDEX,
4973 			       IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4974 	if (rc) {
4975 		IPW_ERROR("Tx 0 queue init failed\n");
4976 		goto error;
4977 	}
4978 	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4979 			       IPW_TX_QUEUE_1_READ_INDEX,
4980 			       IPW_TX_QUEUE_1_WRITE_INDEX,
4981 			       IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4982 	if (rc) {
4983 		IPW_ERROR("Tx 1 queue init failed\n");
4984 		goto error;
4985 	}
4986 	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4987 			       IPW_TX_QUEUE_2_READ_INDEX,
4988 			       IPW_TX_QUEUE_2_WRITE_INDEX,
4989 			       IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4990 	if (rc) {
4991 		IPW_ERROR("Tx 2 queue init failed\n");
4992 		goto error;
4993 	}
4994 	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4995 			       IPW_TX_QUEUE_3_READ_INDEX,
4996 			       IPW_TX_QUEUE_3_WRITE_INDEX,
4997 			       IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4998 	if (rc) {
4999 		IPW_ERROR("Tx 3 queue init failed\n");
5000 		goto error;
5001 	}
5002 	/* statistics */
5003 	priv->rx_bufs_min = 0;
5004 	priv->rx_pend_max = 0;
5005 	return rc;
5006 
5007       error:
5008 	ipw_tx_queue_free(priv);
5009 	return rc;
5010 }
5011 
5012 /**
5013  * Reclaim Tx queue entries no more used by NIC.
5014  *
5015  * When FW advances 'R' index, all entries between old and
5016  * new 'R' index need to be reclaimed. As result, some free space
5017  * forms. If there is enough free space (> low mark), wake Tx queue.
5018  *
5019  * @note Need to protect against garbage in 'R' index
5020  * @param priv
5021  * @param txq
5022  * @param qindex
5023  * @return Number of used entries remains in the queue
5024  */
5025 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5026 				struct clx2_tx_queue *txq, int qindex)
5027 {
5028 	u32 hw_tail;
5029 	int used;
5030 	struct clx2_queue *q = &txq->q;
5031 
5032 	hw_tail = ipw_read32(priv, q->reg_r);
5033 	if (hw_tail >= q->n_bd) {
5034 		IPW_ERROR
5035 		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5036 		     hw_tail, q->n_bd);
5037 		goto done;
5038 	}
5039 	for (; q->last_used != hw_tail;
5040 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5041 		ipw_queue_tx_free_tfd(priv, txq);
5042 		priv->tx_packets++;
5043 	}
5044       done:
5045 	if ((ipw_tx_queue_space(q) > q->low_mark) &&
5046 	    (qindex >= 0))
5047 		netif_wake_queue(priv->net_dev);
5048 	used = q->first_empty - q->last_used;
5049 	if (used < 0)
5050 		used += q->n_bd;
5051 
5052 	return used;
5053 }
5054 
5055 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5056 			     int len, int sync)
5057 {
5058 	struct clx2_tx_queue *txq = &priv->txq_cmd;
5059 	struct clx2_queue *q = &txq->q;
5060 	struct tfd_frame *tfd;
5061 
5062 	if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5063 		IPW_ERROR("No space for Tx\n");
5064 		return -EBUSY;
5065 	}
5066 
5067 	tfd = &txq->bd[q->first_empty];
5068 	txq->txb[q->first_empty] = NULL;
5069 
5070 	memset(tfd, 0, sizeof(*tfd));
5071 	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5072 	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5073 	priv->hcmd_seq++;
5074 	tfd->u.cmd.index = hcmd;
5075 	tfd->u.cmd.length = len;
5076 	memcpy(tfd->u.cmd.payload, buf, len);
5077 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5078 	ipw_write32(priv, q->reg_w, q->first_empty);
5079 	_ipw_read32(priv, 0x90);
5080 
5081 	return 0;
5082 }
5083 
5084 /*
5085  * Rx theory of operation
5086  *
5087  * The host allocates 32 DMA target addresses and passes the host address
5088  * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5089  * 0 to 31
5090  *
5091  * Rx Queue Indexes
5092  * The host/firmware share two index registers for managing the Rx buffers.
5093  *
5094  * The READ index maps to the first position that the firmware may be writing
5095  * to -- the driver can read up to (but not including) this position and get
5096  * good data.
5097  * The READ index is managed by the firmware once the card is enabled.
5098  *
5099  * The WRITE index maps to the last position the driver has read from -- the
5100  * position preceding WRITE is the last slot the firmware can place a packet.
5101  *
5102  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5103  * WRITE = READ.
5104  *
5105  * During initialization the host sets up the READ queue position to the first
5106  * INDEX position, and WRITE to the last (READ - 1 wrapped)
5107  *
5108  * When the firmware places a packet in a buffer it will advance the READ index
5109  * and fire the RX interrupt.  The driver can then query the READ index and
5110  * process as many packets as possible, moving the WRITE index forward as it
5111  * resets the Rx queue buffers with new memory.
5112  *
5113  * The management in the driver is as follows:
5114  * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
5115  *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5116  *   to replensish the ipw->rxq->rx_free.
5117  * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5118  *   ipw->rxq is replenished and the READ INDEX is updated (updating the
5119  *   'processed' and 'read' driver indexes as well)
5120  * + A received packet is processed and handed to the kernel network stack,
5121  *   detached from the ipw->rxq.  The driver 'processed' index is updated.
5122  * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5123  *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5124  *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
5125  *   were enough free buffers and RX_STALLED is set it is cleared.
5126  *
5127  *
5128  * Driver sequence:
5129  *
5130  * ipw_rx_queue_alloc()       Allocates rx_free
5131  * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
5132  *                            ipw_rx_queue_restock
5133  * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
5134  *                            queue, updates firmware pointers, and updates
5135  *                            the WRITE index.  If insufficient rx_free buffers
5136  *                            are available, schedules ipw_rx_queue_replenish
5137  *
5138  * -- enable interrupts --
5139  * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
5140  *                            READ INDEX, detaching the SKB from the pool.
5141  *                            Moves the packet buffer from queue to rx_used.
5142  *                            Calls ipw_rx_queue_restock to refill any empty
5143  *                            slots.
5144  * ...
5145  *
5146  */
5147 
5148 /*
5149  * If there are slots in the RX queue that  need to be restocked,
5150  * and we have free pre-allocated buffers, fill the ranks as much
5151  * as we can pulling from rx_free.
5152  *
5153  * This moves the 'write' index forward to catch up with 'processed', and
5154  * also updates the memory address in the firmware to reference the new
5155  * target buffer.
5156  */
5157 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5158 {
5159 	struct ipw_rx_queue *rxq = priv->rxq;
5160 	struct list_head *element;
5161 	struct ipw_rx_mem_buffer *rxb;
5162 	unsigned long flags;
5163 	int write;
5164 
5165 	spin_lock_irqsave(&rxq->lock, flags);
5166 	write = rxq->write;
5167 	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5168 		element = rxq->rx_free.next;
5169 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5170 		list_del(element);
5171 
5172 		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5173 			    rxb->dma_addr);
5174 		rxq->queue[rxq->write] = rxb;
5175 		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5176 		rxq->free_count--;
5177 	}
5178 	spin_unlock_irqrestore(&rxq->lock, flags);
5179 
5180 	/* If the pre-allocated buffer pool is dropping low, schedule to
5181 	 * refill it */
5182 	if (rxq->free_count <= RX_LOW_WATERMARK)
5183 		schedule_work(&priv->rx_replenish);
5184 
5185 	/* If we've added more space for the firmware to place data, tell it */
5186 	if (write != rxq->write)
5187 		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5188 }
5189 
5190 /*
5191  * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5192  * Also restock the Rx queue via ipw_rx_queue_restock.
5193  *
5194  * This is called as a scheduled work item (except for during initialization)
5195  */
5196 static void ipw_rx_queue_replenish(void *data)
5197 {
5198 	struct ipw_priv *priv = data;
5199 	struct ipw_rx_queue *rxq = priv->rxq;
5200 	struct list_head *element;
5201 	struct ipw_rx_mem_buffer *rxb;
5202 	unsigned long flags;
5203 
5204 	spin_lock_irqsave(&rxq->lock, flags);
5205 	while (!list_empty(&rxq->rx_used)) {
5206 		element = rxq->rx_used.next;
5207 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5208 		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5209 		if (!rxb->skb) {
5210 			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5211 			       priv->net_dev->name);
5212 			/* We don't reschedule replenish work here -- we will
5213 			 * call the restock method and if it still needs
5214 			 * more buffers it will schedule replenish */
5215 			break;
5216 		}
5217 		list_del(element);
5218 
5219 		rxb->dma_addr =
5220 		    pci_map_single(priv->pci_dev, rxb->skb->data,
5221 				   IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5222 
5223 		list_add_tail(&rxb->list, &rxq->rx_free);
5224 		rxq->free_count++;
5225 	}
5226 	spin_unlock_irqrestore(&rxq->lock, flags);
5227 
5228 	ipw_rx_queue_restock(priv);
5229 }
5230 
5231 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5232 {
5233 	struct ipw_priv *priv =
5234 		container_of(work, struct ipw_priv, rx_replenish);
5235 	mutex_lock(&priv->mutex);
5236 	ipw_rx_queue_replenish(priv);
5237 	mutex_unlock(&priv->mutex);
5238 }
5239 
5240 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5241  * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5242  * This free routine walks the list of POOL entries and if SKB is set to
5243  * non NULL it is unmapped and freed
5244  */
5245 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5246 {
5247 	int i;
5248 
5249 	if (!rxq)
5250 		return;
5251 
5252 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5253 		if (rxq->pool[i].skb != NULL) {
5254 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5255 					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5256 			dev_kfree_skb(rxq->pool[i].skb);
5257 		}
5258 	}
5259 
5260 	kfree(rxq);
5261 }
5262 
5263 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5264 {
5265 	struct ipw_rx_queue *rxq;
5266 	int i;
5267 
5268 	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5269 	if (unlikely(!rxq)) {
5270 		IPW_ERROR("memory allocation failed\n");
5271 		return NULL;
5272 	}
5273 	spin_lock_init(&rxq->lock);
5274 	INIT_LIST_HEAD(&rxq->rx_free);
5275 	INIT_LIST_HEAD(&rxq->rx_used);
5276 
5277 	/* Fill the rx_used queue with _all_ of the Rx buffers */
5278 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5279 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5280 
5281 	/* Set us so that we have processed and used all buffers, but have
5282 	 * not restocked the Rx queue with fresh buffers */
5283 	rxq->read = rxq->write = 0;
5284 	rxq->free_count = 0;
5285 
5286 	return rxq;
5287 }
5288 
5289 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5290 {
5291 	rate &= ~LIBIPW_BASIC_RATE_MASK;
5292 	if (ieee_mode == IEEE_A) {
5293 		switch (rate) {
5294 		case LIBIPW_OFDM_RATE_6MB:
5295 			return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5296 			    1 : 0;
5297 		case LIBIPW_OFDM_RATE_9MB:
5298 			return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5299 			    1 : 0;
5300 		case LIBIPW_OFDM_RATE_12MB:
5301 			return priv->
5302 			    rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5303 		case LIBIPW_OFDM_RATE_18MB:
5304 			return priv->
5305 			    rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5306 		case LIBIPW_OFDM_RATE_24MB:
5307 			return priv->
5308 			    rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5309 		case LIBIPW_OFDM_RATE_36MB:
5310 			return priv->
5311 			    rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5312 		case LIBIPW_OFDM_RATE_48MB:
5313 			return priv->
5314 			    rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5315 		case LIBIPW_OFDM_RATE_54MB:
5316 			return priv->
5317 			    rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5318 		default:
5319 			return 0;
5320 		}
5321 	}
5322 
5323 	/* B and G mixed */
5324 	switch (rate) {
5325 	case LIBIPW_CCK_RATE_1MB:
5326 		return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5327 	case LIBIPW_CCK_RATE_2MB:
5328 		return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5329 	case LIBIPW_CCK_RATE_5MB:
5330 		return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5331 	case LIBIPW_CCK_RATE_11MB:
5332 		return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5333 	}
5334 
5335 	/* If we are limited to B modulations, bail at this point */
5336 	if (ieee_mode == IEEE_B)
5337 		return 0;
5338 
5339 	/* G */
5340 	switch (rate) {
5341 	case LIBIPW_OFDM_RATE_6MB:
5342 		return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5343 	case LIBIPW_OFDM_RATE_9MB:
5344 		return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5345 	case LIBIPW_OFDM_RATE_12MB:
5346 		return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5347 	case LIBIPW_OFDM_RATE_18MB:
5348 		return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5349 	case LIBIPW_OFDM_RATE_24MB:
5350 		return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5351 	case LIBIPW_OFDM_RATE_36MB:
5352 		return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5353 	case LIBIPW_OFDM_RATE_48MB:
5354 		return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5355 	case LIBIPW_OFDM_RATE_54MB:
5356 		return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5357 	}
5358 
5359 	return 0;
5360 }
5361 
5362 static int ipw_compatible_rates(struct ipw_priv *priv,
5363 				const struct libipw_network *network,
5364 				struct ipw_supported_rates *rates)
5365 {
5366 	int num_rates, i;
5367 
5368 	memset(rates, 0, sizeof(*rates));
5369 	num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5370 	rates->num_rates = 0;
5371 	for (i = 0; i < num_rates; i++) {
5372 		if (!ipw_is_rate_in_mask(priv, network->mode,
5373 					 network->rates[i])) {
5374 
5375 			if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5376 				IPW_DEBUG_SCAN("Adding masked mandatory "
5377 					       "rate %02X\n",
5378 					       network->rates[i]);
5379 				rates->supported_rates[rates->num_rates++] =
5380 				    network->rates[i];
5381 				continue;
5382 			}
5383 
5384 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5385 				       network->rates[i], priv->rates_mask);
5386 			continue;
5387 		}
5388 
5389 		rates->supported_rates[rates->num_rates++] = network->rates[i];
5390 	}
5391 
5392 	num_rates = min(network->rates_ex_len,
5393 			(u8) (IPW_MAX_RATES - num_rates));
5394 	for (i = 0; i < num_rates; i++) {
5395 		if (!ipw_is_rate_in_mask(priv, network->mode,
5396 					 network->rates_ex[i])) {
5397 			if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5398 				IPW_DEBUG_SCAN("Adding masked mandatory "
5399 					       "rate %02X\n",
5400 					       network->rates_ex[i]);
5401 				rates->supported_rates[rates->num_rates++] =
5402 				    network->rates[i];
5403 				continue;
5404 			}
5405 
5406 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5407 				       network->rates_ex[i], priv->rates_mask);
5408 			continue;
5409 		}
5410 
5411 		rates->supported_rates[rates->num_rates++] =
5412 		    network->rates_ex[i];
5413 	}
5414 
5415 	return 1;
5416 }
5417 
5418 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5419 				  const struct ipw_supported_rates *src)
5420 {
5421 	u8 i;
5422 	for (i = 0; i < src->num_rates; i++)
5423 		dest->supported_rates[i] = src->supported_rates[i];
5424 	dest->num_rates = src->num_rates;
5425 }
5426 
5427 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5428  * mask should ever be used -- right now all callers to add the scan rates are
5429  * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5430 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5431 				   u8 modulation, u32 rate_mask)
5432 {
5433 	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5434 	    LIBIPW_BASIC_RATE_MASK : 0;
5435 
5436 	if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5437 		rates->supported_rates[rates->num_rates++] =
5438 		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5439 
5440 	if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5441 		rates->supported_rates[rates->num_rates++] =
5442 		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5443 
5444 	if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5445 		rates->supported_rates[rates->num_rates++] = basic_mask |
5446 		    LIBIPW_CCK_RATE_5MB;
5447 
5448 	if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5449 		rates->supported_rates[rates->num_rates++] = basic_mask |
5450 		    LIBIPW_CCK_RATE_11MB;
5451 }
5452 
5453 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5454 				    u8 modulation, u32 rate_mask)
5455 {
5456 	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5457 	    LIBIPW_BASIC_RATE_MASK : 0;
5458 
5459 	if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5460 		rates->supported_rates[rates->num_rates++] = basic_mask |
5461 		    LIBIPW_OFDM_RATE_6MB;
5462 
5463 	if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5464 		rates->supported_rates[rates->num_rates++] =
5465 		    LIBIPW_OFDM_RATE_9MB;
5466 
5467 	if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5468 		rates->supported_rates[rates->num_rates++] = basic_mask |
5469 		    LIBIPW_OFDM_RATE_12MB;
5470 
5471 	if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5472 		rates->supported_rates[rates->num_rates++] =
5473 		    LIBIPW_OFDM_RATE_18MB;
5474 
5475 	if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5476 		rates->supported_rates[rates->num_rates++] = basic_mask |
5477 		    LIBIPW_OFDM_RATE_24MB;
5478 
5479 	if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5480 		rates->supported_rates[rates->num_rates++] =
5481 		    LIBIPW_OFDM_RATE_36MB;
5482 
5483 	if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5484 		rates->supported_rates[rates->num_rates++] =
5485 		    LIBIPW_OFDM_RATE_48MB;
5486 
5487 	if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5488 		rates->supported_rates[rates->num_rates++] =
5489 		    LIBIPW_OFDM_RATE_54MB;
5490 }
5491 
5492 struct ipw_network_match {
5493 	struct libipw_network *network;
5494 	struct ipw_supported_rates rates;
5495 };
5496 
5497 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5498 				  struct ipw_network_match *match,
5499 				  struct libipw_network *network,
5500 				  int roaming)
5501 {
5502 	struct ipw_supported_rates rates;
5503 
5504 	/* Verify that this network's capability is compatible with the
5505 	 * current mode (AdHoc or Infrastructure) */
5506 	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5507 	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5508 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5509 				network->ssid_len, network->ssid,
5510 				network->bssid);
5511 		return 0;
5512 	}
5513 
5514 	if (unlikely(roaming)) {
5515 		/* If we are roaming, then ensure check if this is a valid
5516 		 * network to try and roam to */
5517 		if ((network->ssid_len != match->network->ssid_len) ||
5518 		    memcmp(network->ssid, match->network->ssid,
5519 			   network->ssid_len)) {
5520 			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5521 					network->ssid_len, network->ssid,
5522 					network->bssid);
5523 			return 0;
5524 		}
5525 	} else {
5526 		/* If an ESSID has been configured then compare the broadcast
5527 		 * ESSID to ours */
5528 		if ((priv->config & CFG_STATIC_ESSID) &&
5529 		    ((network->ssid_len != priv->essid_len) ||
5530 		     memcmp(network->ssid, priv->essid,
5531 			    min(network->ssid_len, priv->essid_len)))) {
5532 			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5533 					network->ssid_len, network->ssid,
5534 					network->bssid, priv->essid_len,
5535 					priv->essid);
5536 			return 0;
5537 		}
5538 	}
5539 
5540 	/* If the old network rate is better than this one, don't bother
5541 	 * testing everything else. */
5542 
5543 	if (network->time_stamp[0] < match->network->time_stamp[0]) {
5544 		IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
5545 				match->network->ssid_len, match->network->ssid);
5546 		return 0;
5547 	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5548 		IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
5549 				match->network->ssid_len, match->network->ssid);
5550 		return 0;
5551 	}
5552 
5553 	/* Now go through and see if the requested network is valid... */
5554 	if (priv->ieee->scan_age != 0 &&
5555 	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5556 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5557 				network->ssid_len, network->ssid,
5558 				network->bssid,
5559 				jiffies_to_msecs(jiffies -
5560 						 network->last_scanned));
5561 		return 0;
5562 	}
5563 
5564 	if ((priv->config & CFG_STATIC_CHANNEL) &&
5565 	    (network->channel != priv->channel)) {
5566 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5567 				network->ssid_len, network->ssid,
5568 				network->bssid,
5569 				network->channel, priv->channel);
5570 		return 0;
5571 	}
5572 
5573 	/* Verify privacy compatibility */
5574 	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5575 	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5576 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5577 				network->ssid_len, network->ssid,
5578 				network->bssid,
5579 				priv->
5580 				capability & CAP_PRIVACY_ON ? "on" : "off",
5581 				network->
5582 				capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5583 				"off");
5584 		return 0;
5585 	}
5586 
5587 	if (ether_addr_equal(network->bssid, priv->bssid)) {
5588 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n",
5589 				network->ssid_len, network->ssid,
5590 				network->bssid, priv->bssid);
5591 		return 0;
5592 	}
5593 
5594 	/* Filter out any incompatible freq / mode combinations */
5595 	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5596 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5597 				network->ssid_len, network->ssid,
5598 				network->bssid);
5599 		return 0;
5600 	}
5601 
5602 	/* Ensure that the rates supported by the driver are compatible with
5603 	 * this AP, including verification of basic rates (mandatory) */
5604 	if (!ipw_compatible_rates(priv, network, &rates)) {
5605 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5606 				network->ssid_len, network->ssid,
5607 				network->bssid);
5608 		return 0;
5609 	}
5610 
5611 	if (rates.num_rates == 0) {
5612 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5613 				network->ssid_len, network->ssid,
5614 				network->bssid);
5615 		return 0;
5616 	}
5617 
5618 	/* TODO: Perform any further minimal comparititive tests.  We do not
5619 	 * want to put too much policy logic here; intelligent scan selection
5620 	 * should occur within a generic IEEE 802.11 user space tool.  */
5621 
5622 	/* Set up 'new' AP to this network */
5623 	ipw_copy_rates(&match->rates, &rates);
5624 	match->network = network;
5625 	IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n",
5626 			network->ssid_len, network->ssid, network->bssid);
5627 
5628 	return 1;
5629 }
5630 
5631 static void ipw_merge_adhoc_network(struct work_struct *work)
5632 {
5633 	struct ipw_priv *priv =
5634 		container_of(work, struct ipw_priv, merge_networks);
5635 	struct libipw_network *network = NULL;
5636 	struct ipw_network_match match = {
5637 		.network = priv->assoc_network
5638 	};
5639 
5640 	if ((priv->status & STATUS_ASSOCIATED) &&
5641 	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5642 		/* First pass through ROAM process -- look for a better
5643 		 * network */
5644 		unsigned long flags;
5645 
5646 		spin_lock_irqsave(&priv->ieee->lock, flags);
5647 		list_for_each_entry(network, &priv->ieee->network_list, list) {
5648 			if (network != priv->assoc_network)
5649 				ipw_find_adhoc_network(priv, &match, network,
5650 						       1);
5651 		}
5652 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
5653 
5654 		if (match.network == priv->assoc_network) {
5655 			IPW_DEBUG_MERGE("No better ADHOC in this network to "
5656 					"merge to.\n");
5657 			return;
5658 		}
5659 
5660 		mutex_lock(&priv->mutex);
5661 		if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5662 			IPW_DEBUG_MERGE("remove network %*pE\n",
5663 					priv->essid_len, priv->essid);
5664 			ipw_remove_current_network(priv);
5665 		}
5666 
5667 		ipw_disassociate(priv);
5668 		priv->assoc_network = match.network;
5669 		mutex_unlock(&priv->mutex);
5670 		return;
5671 	}
5672 }
5673 
5674 static int ipw_best_network(struct ipw_priv *priv,
5675 			    struct ipw_network_match *match,
5676 			    struct libipw_network *network, int roaming)
5677 {
5678 	struct ipw_supported_rates rates;
5679 
5680 	/* Verify that this network's capability is compatible with the
5681 	 * current mode (AdHoc or Infrastructure) */
5682 	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5683 	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
5684 	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5685 	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5686 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5687 				network->ssid_len, network->ssid,
5688 				network->bssid);
5689 		return 0;
5690 	}
5691 
5692 	if (unlikely(roaming)) {
5693 		/* If we are roaming, then ensure check if this is a valid
5694 		 * network to try and roam to */
5695 		if ((network->ssid_len != match->network->ssid_len) ||
5696 		    memcmp(network->ssid, match->network->ssid,
5697 			   network->ssid_len)) {
5698 			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5699 					network->ssid_len, network->ssid,
5700 					network->bssid);
5701 			return 0;
5702 		}
5703 	} else {
5704 		/* If an ESSID has been configured then compare the broadcast
5705 		 * ESSID to ours */
5706 		if ((priv->config & CFG_STATIC_ESSID) &&
5707 		    ((network->ssid_len != priv->essid_len) ||
5708 		     memcmp(network->ssid, priv->essid,
5709 			    min(network->ssid_len, priv->essid_len)))) {
5710 			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5711 					network->ssid_len, network->ssid,
5712 					network->bssid, priv->essid_len,
5713 					priv->essid);
5714 			return 0;
5715 		}
5716 	}
5717 
5718 	/* If the old network rate is better than this one, don't bother
5719 	 * testing everything else. */
5720 	if (match->network && match->network->stats.rssi > network->stats.rssi) {
5721 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n",
5722 				network->ssid_len, network->ssid,
5723 				network->bssid, match->network->ssid_len,
5724 				match->network->ssid, match->network->bssid);
5725 		return 0;
5726 	}
5727 
5728 	/* If this network has already had an association attempt within the
5729 	 * last 3 seconds, do not try and associate again... */
5730 	if (network->last_associate &&
5731 	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5732 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n",
5733 				network->ssid_len, network->ssid,
5734 				network->bssid,
5735 				jiffies_to_msecs(jiffies -
5736 						 network->last_associate));
5737 		return 0;
5738 	}
5739 
5740 	/* Now go through and see if the requested network is valid... */
5741 	if (priv->ieee->scan_age != 0 &&
5742 	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5743 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5744 				network->ssid_len, network->ssid,
5745 				network->bssid,
5746 				jiffies_to_msecs(jiffies -
5747 						 network->last_scanned));
5748 		return 0;
5749 	}
5750 
5751 	if ((priv->config & CFG_STATIC_CHANNEL) &&
5752 	    (network->channel != priv->channel)) {
5753 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5754 				network->ssid_len, network->ssid,
5755 				network->bssid,
5756 				network->channel, priv->channel);
5757 		return 0;
5758 	}
5759 
5760 	/* Verify privacy compatibility */
5761 	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5762 	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5763 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5764 				network->ssid_len, network->ssid,
5765 				network->bssid,
5766 				priv->capability & CAP_PRIVACY_ON ? "on" :
5767 				"off",
5768 				network->capability &
5769 				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5770 		return 0;
5771 	}
5772 
5773 	if ((priv->config & CFG_STATIC_BSSID) &&
5774 	    !ether_addr_equal(network->bssid, priv->bssid)) {
5775 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n",
5776 				network->ssid_len, network->ssid,
5777 				network->bssid, priv->bssid);
5778 		return 0;
5779 	}
5780 
5781 	/* Filter out any incompatible freq / mode combinations */
5782 	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5783 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5784 				network->ssid_len, network->ssid,
5785 				network->bssid);
5786 		return 0;
5787 	}
5788 
5789 	/* Filter out invalid channel in current GEO */
5790 	if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5791 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n",
5792 				network->ssid_len, network->ssid,
5793 				network->bssid);
5794 		return 0;
5795 	}
5796 
5797 	/* Ensure that the rates supported by the driver are compatible with
5798 	 * this AP, including verification of basic rates (mandatory) */
5799 	if (!ipw_compatible_rates(priv, network, &rates)) {
5800 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5801 				network->ssid_len, network->ssid,
5802 				network->bssid);
5803 		return 0;
5804 	}
5805 
5806 	if (rates.num_rates == 0) {
5807 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5808 				network->ssid_len, network->ssid,
5809 				network->bssid);
5810 		return 0;
5811 	}
5812 
5813 	/* TODO: Perform any further minimal comparititive tests.  We do not
5814 	 * want to put too much policy logic here; intelligent scan selection
5815 	 * should occur within a generic IEEE 802.11 user space tool.  */
5816 
5817 	/* Set up 'new' AP to this network */
5818 	ipw_copy_rates(&match->rates, &rates);
5819 	match->network = network;
5820 
5821 	IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n",
5822 			network->ssid_len, network->ssid, network->bssid);
5823 
5824 	return 1;
5825 }
5826 
5827 static void ipw_adhoc_create(struct ipw_priv *priv,
5828 			     struct libipw_network *network)
5829 {
5830 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5831 	int i;
5832 
5833 	/*
5834 	 * For the purposes of scanning, we can set our wireless mode
5835 	 * to trigger scans across combinations of bands, but when it
5836 	 * comes to creating a new ad-hoc network, we have tell the FW
5837 	 * exactly which band to use.
5838 	 *
5839 	 * We also have the possibility of an invalid channel for the
5840 	 * chossen band.  Attempting to create a new ad-hoc network
5841 	 * with an invalid channel for wireless mode will trigger a
5842 	 * FW fatal error.
5843 	 *
5844 	 */
5845 	switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5846 	case LIBIPW_52GHZ_BAND:
5847 		network->mode = IEEE_A;
5848 		i = libipw_channel_to_index(priv->ieee, priv->channel);
5849 		BUG_ON(i == -1);
5850 		if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5851 			IPW_WARNING("Overriding invalid channel\n");
5852 			priv->channel = geo->a[0].channel;
5853 		}
5854 		break;
5855 
5856 	case LIBIPW_24GHZ_BAND:
5857 		if (priv->ieee->mode & IEEE_G)
5858 			network->mode = IEEE_G;
5859 		else
5860 			network->mode = IEEE_B;
5861 		i = libipw_channel_to_index(priv->ieee, priv->channel);
5862 		BUG_ON(i == -1);
5863 		if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5864 			IPW_WARNING("Overriding invalid channel\n");
5865 			priv->channel = geo->bg[0].channel;
5866 		}
5867 		break;
5868 
5869 	default:
5870 		IPW_WARNING("Overriding invalid channel\n");
5871 		if (priv->ieee->mode & IEEE_A) {
5872 			network->mode = IEEE_A;
5873 			priv->channel = geo->a[0].channel;
5874 		} else if (priv->ieee->mode & IEEE_G) {
5875 			network->mode = IEEE_G;
5876 			priv->channel = geo->bg[0].channel;
5877 		} else {
5878 			network->mode = IEEE_B;
5879 			priv->channel = geo->bg[0].channel;
5880 		}
5881 		break;
5882 	}
5883 
5884 	network->channel = priv->channel;
5885 	priv->config |= CFG_ADHOC_PERSIST;
5886 	ipw_create_bssid(priv, network->bssid);
5887 	network->ssid_len = priv->essid_len;
5888 	memcpy(network->ssid, priv->essid, priv->essid_len);
5889 	memset(&network->stats, 0, sizeof(network->stats));
5890 	network->capability = WLAN_CAPABILITY_IBSS;
5891 	if (!(priv->config & CFG_PREAMBLE_LONG))
5892 		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5893 	if (priv->capability & CAP_PRIVACY_ON)
5894 		network->capability |= WLAN_CAPABILITY_PRIVACY;
5895 	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5896 	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5897 	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5898 	memcpy(network->rates_ex,
5899 	       &priv->rates.supported_rates[network->rates_len],
5900 	       network->rates_ex_len);
5901 	network->last_scanned = 0;
5902 	network->flags = 0;
5903 	network->last_associate = 0;
5904 	network->time_stamp[0] = 0;
5905 	network->time_stamp[1] = 0;
5906 	network->beacon_interval = 100;	/* Default */
5907 	network->listen_interval = 10;	/* Default */
5908 	network->atim_window = 0;	/* Default */
5909 	network->wpa_ie_len = 0;
5910 	network->rsn_ie_len = 0;
5911 }
5912 
5913 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5914 {
5915 	struct ipw_tgi_tx_key key;
5916 
5917 	if (!(priv->ieee->sec.flags & (1 << index)))
5918 		return;
5919 
5920 	key.key_id = index;
5921 	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5922 	key.security_type = type;
5923 	key.station_index = 0;	/* always 0 for BSS */
5924 	key.flags = 0;
5925 	/* 0 for new key; previous value of counter (after fatal error) */
5926 	key.tx_counter[0] = cpu_to_le32(0);
5927 	key.tx_counter[1] = cpu_to_le32(0);
5928 
5929 	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5930 }
5931 
5932 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5933 {
5934 	struct ipw_wep_key key;
5935 	int i;
5936 
5937 	key.cmd_id = DINO_CMD_WEP_KEY;
5938 	key.seq_num = 0;
5939 
5940 	/* Note: AES keys cannot be set for multiple times.
5941 	 * Only set it at the first time. */
5942 	for (i = 0; i < 4; i++) {
5943 		key.key_index = i | type;
5944 		if (!(priv->ieee->sec.flags & (1 << i))) {
5945 			key.key_size = 0;
5946 			continue;
5947 		}
5948 
5949 		key.key_size = priv->ieee->sec.key_sizes[i];
5950 		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5951 
5952 		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5953 	}
5954 }
5955 
5956 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5957 {
5958 	if (priv->ieee->host_encrypt)
5959 		return;
5960 
5961 	switch (level) {
5962 	case SEC_LEVEL_3:
5963 		priv->sys_config.disable_unicast_decryption = 0;
5964 		priv->ieee->host_decrypt = 0;
5965 		break;
5966 	case SEC_LEVEL_2:
5967 		priv->sys_config.disable_unicast_decryption = 1;
5968 		priv->ieee->host_decrypt = 1;
5969 		break;
5970 	case SEC_LEVEL_1:
5971 		priv->sys_config.disable_unicast_decryption = 0;
5972 		priv->ieee->host_decrypt = 0;
5973 		break;
5974 	case SEC_LEVEL_0:
5975 		priv->sys_config.disable_unicast_decryption = 1;
5976 		break;
5977 	default:
5978 		break;
5979 	}
5980 }
5981 
5982 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5983 {
5984 	if (priv->ieee->host_encrypt)
5985 		return;
5986 
5987 	switch (level) {
5988 	case SEC_LEVEL_3:
5989 		priv->sys_config.disable_multicast_decryption = 0;
5990 		break;
5991 	case SEC_LEVEL_2:
5992 		priv->sys_config.disable_multicast_decryption = 1;
5993 		break;
5994 	case SEC_LEVEL_1:
5995 		priv->sys_config.disable_multicast_decryption = 0;
5996 		break;
5997 	case SEC_LEVEL_0:
5998 		priv->sys_config.disable_multicast_decryption = 1;
5999 		break;
6000 	default:
6001 		break;
6002 	}
6003 }
6004 
6005 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6006 {
6007 	switch (priv->ieee->sec.level) {
6008 	case SEC_LEVEL_3:
6009 		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6010 			ipw_send_tgi_tx_key(priv,
6011 					    DCT_FLAG_EXT_SECURITY_CCM,
6012 					    priv->ieee->sec.active_key);
6013 
6014 		if (!priv->ieee->host_mc_decrypt)
6015 			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6016 		break;
6017 	case SEC_LEVEL_2:
6018 		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6019 			ipw_send_tgi_tx_key(priv,
6020 					    DCT_FLAG_EXT_SECURITY_TKIP,
6021 					    priv->ieee->sec.active_key);
6022 		break;
6023 	case SEC_LEVEL_1:
6024 		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6025 		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6026 		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6027 		break;
6028 	case SEC_LEVEL_0:
6029 	default:
6030 		break;
6031 	}
6032 }
6033 
6034 static void ipw_adhoc_check(void *data)
6035 {
6036 	struct ipw_priv *priv = data;
6037 
6038 	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6039 	    !(priv->config & CFG_ADHOC_PERSIST)) {
6040 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6041 			  IPW_DL_STATE | IPW_DL_ASSOC,
6042 			  "Missed beacon: %d - disassociate\n",
6043 			  priv->missed_adhoc_beacons);
6044 		ipw_remove_current_network(priv);
6045 		ipw_disassociate(priv);
6046 		return;
6047 	}
6048 
6049 	schedule_delayed_work(&priv->adhoc_check,
6050 			      le16_to_cpu(priv->assoc_request.beacon_interval));
6051 }
6052 
6053 static void ipw_bg_adhoc_check(struct work_struct *work)
6054 {
6055 	struct ipw_priv *priv =
6056 		container_of(work, struct ipw_priv, adhoc_check.work);
6057 	mutex_lock(&priv->mutex);
6058 	ipw_adhoc_check(priv);
6059 	mutex_unlock(&priv->mutex);
6060 }
6061 
6062 static void ipw_debug_config(struct ipw_priv *priv)
6063 {
6064 	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6065 		       "[CFG 0x%08X]\n", priv->config);
6066 	if (priv->config & CFG_STATIC_CHANNEL)
6067 		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6068 	else
6069 		IPW_DEBUG_INFO("Channel unlocked.\n");
6070 	if (priv->config & CFG_STATIC_ESSID)
6071 		IPW_DEBUG_INFO("ESSID locked to '%*pE'\n",
6072 			       priv->essid_len, priv->essid);
6073 	else
6074 		IPW_DEBUG_INFO("ESSID unlocked.\n");
6075 	if (priv->config & CFG_STATIC_BSSID)
6076 		IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6077 	else
6078 		IPW_DEBUG_INFO("BSSID unlocked.\n");
6079 	if (priv->capability & CAP_PRIVACY_ON)
6080 		IPW_DEBUG_INFO("PRIVACY on\n");
6081 	else
6082 		IPW_DEBUG_INFO("PRIVACY off\n");
6083 	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6084 }
6085 
6086 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6087 {
6088 	/* TODO: Verify that this works... */
6089 	struct ipw_fixed_rate fr;
6090 	u32 reg;
6091 	u16 mask = 0;
6092 	u16 new_tx_rates = priv->rates_mask;
6093 
6094 	/* Identify 'current FW band' and match it with the fixed
6095 	 * Tx rates */
6096 
6097 	switch (priv->ieee->freq_band) {
6098 	case LIBIPW_52GHZ_BAND:	/* A only */
6099 		/* IEEE_A */
6100 		if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6101 			/* Invalid fixed rate mask */
6102 			IPW_DEBUG_WX
6103 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6104 			new_tx_rates = 0;
6105 			break;
6106 		}
6107 
6108 		new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6109 		break;
6110 
6111 	default:		/* 2.4Ghz or Mixed */
6112 		/* IEEE_B */
6113 		if (mode == IEEE_B) {
6114 			if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6115 				/* Invalid fixed rate mask */
6116 				IPW_DEBUG_WX
6117 				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6118 				new_tx_rates = 0;
6119 			}
6120 			break;
6121 		}
6122 
6123 		/* IEEE_G */
6124 		if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6125 				    LIBIPW_OFDM_RATES_MASK)) {
6126 			/* Invalid fixed rate mask */
6127 			IPW_DEBUG_WX
6128 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6129 			new_tx_rates = 0;
6130 			break;
6131 		}
6132 
6133 		if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6134 			mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6135 			new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6136 		}
6137 
6138 		if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6139 			mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6140 			new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6141 		}
6142 
6143 		if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6144 			mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6145 			new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6146 		}
6147 
6148 		new_tx_rates |= mask;
6149 		break;
6150 	}
6151 
6152 	fr.tx_rates = cpu_to_le16(new_tx_rates);
6153 
6154 	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6155 	ipw_write_reg32(priv, reg, *(u32 *) & fr);
6156 }
6157 
6158 static void ipw_abort_scan(struct ipw_priv *priv)
6159 {
6160 	int err;
6161 
6162 	if (priv->status & STATUS_SCAN_ABORTING) {
6163 		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6164 		return;
6165 	}
6166 	priv->status |= STATUS_SCAN_ABORTING;
6167 
6168 	err = ipw_send_scan_abort(priv);
6169 	if (err)
6170 		IPW_DEBUG_HC("Request to abort scan failed.\n");
6171 }
6172 
6173 static void ipw_add_scan_channels(struct ipw_priv *priv,
6174 				  struct ipw_scan_request_ext *scan,
6175 				  int scan_type)
6176 {
6177 	int channel_index = 0;
6178 	const struct libipw_geo *geo;
6179 	int i;
6180 
6181 	geo = libipw_get_geo(priv->ieee);
6182 
6183 	if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6184 		int start = channel_index;
6185 		for (i = 0; i < geo->a_channels; i++) {
6186 			if ((priv->status & STATUS_ASSOCIATED) &&
6187 			    geo->a[i].channel == priv->channel)
6188 				continue;
6189 			channel_index++;
6190 			scan->channels_list[channel_index] = geo->a[i].channel;
6191 			ipw_set_scan_type(scan, channel_index,
6192 					  geo->a[i].
6193 					  flags & LIBIPW_CH_PASSIVE_ONLY ?
6194 					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6195 					  scan_type);
6196 		}
6197 
6198 		if (start != channel_index) {
6199 			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6200 			    (channel_index - start);
6201 			channel_index++;
6202 		}
6203 	}
6204 
6205 	if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6206 		int start = channel_index;
6207 		if (priv->config & CFG_SPEED_SCAN) {
6208 			int index;
6209 			u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6210 				/* nop out the list */
6211 				[0] = 0
6212 			};
6213 
6214 			u8 channel;
6215 			while (channel_index < IPW_SCAN_CHANNELS - 1) {
6216 				channel =
6217 				    priv->speed_scan[priv->speed_scan_pos];
6218 				if (channel == 0) {
6219 					priv->speed_scan_pos = 0;
6220 					channel = priv->speed_scan[0];
6221 				}
6222 				if ((priv->status & STATUS_ASSOCIATED) &&
6223 				    channel == priv->channel) {
6224 					priv->speed_scan_pos++;
6225 					continue;
6226 				}
6227 
6228 				/* If this channel has already been
6229 				 * added in scan, break from loop
6230 				 * and this will be the first channel
6231 				 * in the next scan.
6232 				 */
6233 				if (channels[channel - 1] != 0)
6234 					break;
6235 
6236 				channels[channel - 1] = 1;
6237 				priv->speed_scan_pos++;
6238 				channel_index++;
6239 				scan->channels_list[channel_index] = channel;
6240 				index =
6241 				    libipw_channel_to_index(priv->ieee, channel);
6242 				ipw_set_scan_type(scan, channel_index,
6243 						  geo->bg[index].
6244 						  flags &
6245 						  LIBIPW_CH_PASSIVE_ONLY ?
6246 						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6247 						  : scan_type);
6248 			}
6249 		} else {
6250 			for (i = 0; i < geo->bg_channels; i++) {
6251 				if ((priv->status & STATUS_ASSOCIATED) &&
6252 				    geo->bg[i].channel == priv->channel)
6253 					continue;
6254 				channel_index++;
6255 				scan->channels_list[channel_index] =
6256 				    geo->bg[i].channel;
6257 				ipw_set_scan_type(scan, channel_index,
6258 						  geo->bg[i].
6259 						  flags &
6260 						  LIBIPW_CH_PASSIVE_ONLY ?
6261 						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6262 						  : scan_type);
6263 			}
6264 		}
6265 
6266 		if (start != channel_index) {
6267 			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6268 			    (channel_index - start);
6269 		}
6270 	}
6271 }
6272 
6273 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6274 {
6275 	/* staying on passive channels longer than the DTIM interval during a
6276 	 * scan, while associated, causes the firmware to cancel the scan
6277 	 * without notification. Hence, don't stay on passive channels longer
6278 	 * than the beacon interval.
6279 	 */
6280 	if (priv->status & STATUS_ASSOCIATED
6281 	    && priv->assoc_network->beacon_interval > 10)
6282 		return priv->assoc_network->beacon_interval - 10;
6283 	else
6284 		return 120;
6285 }
6286 
6287 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6288 {
6289 	struct ipw_scan_request_ext scan;
6290 	int err = 0, scan_type;
6291 
6292 	if (!(priv->status & STATUS_INIT) ||
6293 	    (priv->status & STATUS_EXIT_PENDING))
6294 		return 0;
6295 
6296 	mutex_lock(&priv->mutex);
6297 
6298 	if (direct && (priv->direct_scan_ssid_len == 0)) {
6299 		IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6300 		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6301 		goto done;
6302 	}
6303 
6304 	if (priv->status & STATUS_SCANNING) {
6305 		IPW_DEBUG_HC("Concurrent scan requested.  Queuing.\n");
6306 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6307 					STATUS_SCAN_PENDING;
6308 		goto done;
6309 	}
6310 
6311 	if (!(priv->status & STATUS_SCAN_FORCED) &&
6312 	    priv->status & STATUS_SCAN_ABORTING) {
6313 		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
6314 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6315 					STATUS_SCAN_PENDING;
6316 		goto done;
6317 	}
6318 
6319 	if (priv->status & STATUS_RF_KILL_MASK) {
6320 		IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6321 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6322 					STATUS_SCAN_PENDING;
6323 		goto done;
6324 	}
6325 
6326 	memset(&scan, 0, sizeof(scan));
6327 	scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6328 
6329 	if (type == IW_SCAN_TYPE_PASSIVE) {
6330 		IPW_DEBUG_WX("use passive scanning\n");
6331 		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6332 		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6333 			cpu_to_le16(ipw_passive_dwell_time(priv));
6334 		ipw_add_scan_channels(priv, &scan, scan_type);
6335 		goto send_request;
6336 	}
6337 
6338 	/* Use active scan by default. */
6339 	if (priv->config & CFG_SPEED_SCAN)
6340 		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6341 			cpu_to_le16(30);
6342 	else
6343 		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6344 			cpu_to_le16(20);
6345 
6346 	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6347 		cpu_to_le16(20);
6348 
6349 	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6350 		cpu_to_le16(ipw_passive_dwell_time(priv));
6351 	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6352 
6353 #ifdef CONFIG_IPW2200_MONITOR
6354 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6355 		u8 channel;
6356 		u8 band = 0;
6357 
6358 		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6359 		case LIBIPW_52GHZ_BAND:
6360 			band = (u8) (IPW_A_MODE << 6) | 1;
6361 			channel = priv->channel;
6362 			break;
6363 
6364 		case LIBIPW_24GHZ_BAND:
6365 			band = (u8) (IPW_B_MODE << 6) | 1;
6366 			channel = priv->channel;
6367 			break;
6368 
6369 		default:
6370 			band = (u8) (IPW_B_MODE << 6) | 1;
6371 			channel = 9;
6372 			break;
6373 		}
6374 
6375 		scan.channels_list[0] = band;
6376 		scan.channels_list[1] = channel;
6377 		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6378 
6379 		/* NOTE:  The card will sit on this channel for this time
6380 		 * period.  Scan aborts are timing sensitive and frequently
6381 		 * result in firmware restarts.  As such, it is best to
6382 		 * set a small dwell_time here and just keep re-issuing
6383 		 * scans.  Otherwise fast channel hopping will not actually
6384 		 * hop channels.
6385 		 *
6386 		 * TODO: Move SPEED SCAN support to all modes and bands */
6387 		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6388 			cpu_to_le16(2000);
6389 	} else {
6390 #endif				/* CONFIG_IPW2200_MONITOR */
6391 		/* Honor direct scans first, otherwise if we are roaming make
6392 		 * this a direct scan for the current network.  Finally,
6393 		 * ensure that every other scan is a fast channel hop scan */
6394 		if (direct) {
6395 			err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6396 			                    priv->direct_scan_ssid_len);
6397 			if (err) {
6398 				IPW_DEBUG_HC("Attempt to send SSID command  "
6399 					     "failed\n");
6400 				goto done;
6401 			}
6402 
6403 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6404 		} else if ((priv->status & STATUS_ROAMING)
6405 			   || (!(priv->status & STATUS_ASSOCIATED)
6406 			       && (priv->config & CFG_STATIC_ESSID)
6407 			       && (le32_to_cpu(scan.full_scan_index) % 2))) {
6408 			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6409 			if (err) {
6410 				IPW_DEBUG_HC("Attempt to send SSID command "
6411 					     "failed.\n");
6412 				goto done;
6413 			}
6414 
6415 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6416 		} else
6417 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6418 
6419 		ipw_add_scan_channels(priv, &scan, scan_type);
6420 #ifdef CONFIG_IPW2200_MONITOR
6421 	}
6422 #endif
6423 
6424 send_request:
6425 	err = ipw_send_scan_request_ext(priv, &scan);
6426 	if (err) {
6427 		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6428 		goto done;
6429 	}
6430 
6431 	priv->status |= STATUS_SCANNING;
6432 	if (direct) {
6433 		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6434 		priv->direct_scan_ssid_len = 0;
6435 	} else
6436 		priv->status &= ~STATUS_SCAN_PENDING;
6437 
6438 	schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6439 done:
6440 	mutex_unlock(&priv->mutex);
6441 	return err;
6442 }
6443 
6444 static void ipw_request_passive_scan(struct work_struct *work)
6445 {
6446 	struct ipw_priv *priv =
6447 		container_of(work, struct ipw_priv, request_passive_scan.work);
6448 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6449 }
6450 
6451 static void ipw_request_scan(struct work_struct *work)
6452 {
6453 	struct ipw_priv *priv =
6454 		container_of(work, struct ipw_priv, request_scan.work);
6455 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6456 }
6457 
6458 static void ipw_request_direct_scan(struct work_struct *work)
6459 {
6460 	struct ipw_priv *priv =
6461 		container_of(work, struct ipw_priv, request_direct_scan.work);
6462 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6463 }
6464 
6465 static void ipw_bg_abort_scan(struct work_struct *work)
6466 {
6467 	struct ipw_priv *priv =
6468 		container_of(work, struct ipw_priv, abort_scan);
6469 	mutex_lock(&priv->mutex);
6470 	ipw_abort_scan(priv);
6471 	mutex_unlock(&priv->mutex);
6472 }
6473 
6474 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6475 {
6476 	/* This is called when wpa_supplicant loads and closes the driver
6477 	 * interface. */
6478 	priv->ieee->wpa_enabled = value;
6479 	return 0;
6480 }
6481 
6482 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6483 {
6484 	struct libipw_device *ieee = priv->ieee;
6485 	struct libipw_security sec = {
6486 		.flags = SEC_AUTH_MODE,
6487 	};
6488 	int ret = 0;
6489 
6490 	if (value & IW_AUTH_ALG_SHARED_KEY) {
6491 		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6492 		ieee->open_wep = 0;
6493 	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6494 		sec.auth_mode = WLAN_AUTH_OPEN;
6495 		ieee->open_wep = 1;
6496 	} else if (value & IW_AUTH_ALG_LEAP) {
6497 		sec.auth_mode = WLAN_AUTH_LEAP;
6498 		ieee->open_wep = 1;
6499 	} else
6500 		return -EINVAL;
6501 
6502 	if (ieee->set_security)
6503 		ieee->set_security(ieee->dev, &sec);
6504 	else
6505 		ret = -EOPNOTSUPP;
6506 
6507 	return ret;
6508 }
6509 
6510 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6511 				int wpa_ie_len)
6512 {
6513 	/* make sure WPA is enabled */
6514 	ipw_wpa_enable(priv, 1);
6515 }
6516 
6517 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6518 			    char *capabilities, int length)
6519 {
6520 	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6521 
6522 	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6523 				capabilities);
6524 }
6525 
6526 /*
6527  * WE-18 support
6528  */
6529 
6530 /* SIOCSIWGENIE */
6531 static int ipw_wx_set_genie(struct net_device *dev,
6532 			    struct iw_request_info *info,
6533 			    union iwreq_data *wrqu, char *extra)
6534 {
6535 	struct ipw_priv *priv = libipw_priv(dev);
6536 	struct libipw_device *ieee = priv->ieee;
6537 	u8 *buf;
6538 	int err = 0;
6539 
6540 	if (wrqu->data.length > MAX_WPA_IE_LEN ||
6541 	    (wrqu->data.length && extra == NULL))
6542 		return -EINVAL;
6543 
6544 	if (wrqu->data.length) {
6545 		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6546 		if (buf == NULL) {
6547 			err = -ENOMEM;
6548 			goto out;
6549 		}
6550 
6551 		kfree(ieee->wpa_ie);
6552 		ieee->wpa_ie = buf;
6553 		ieee->wpa_ie_len = wrqu->data.length;
6554 	} else {
6555 		kfree(ieee->wpa_ie);
6556 		ieee->wpa_ie = NULL;
6557 		ieee->wpa_ie_len = 0;
6558 	}
6559 
6560 	ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6561       out:
6562 	return err;
6563 }
6564 
6565 /* SIOCGIWGENIE */
6566 static int ipw_wx_get_genie(struct net_device *dev,
6567 			    struct iw_request_info *info,
6568 			    union iwreq_data *wrqu, char *extra)
6569 {
6570 	struct ipw_priv *priv = libipw_priv(dev);
6571 	struct libipw_device *ieee = priv->ieee;
6572 	int err = 0;
6573 
6574 	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6575 		wrqu->data.length = 0;
6576 		goto out;
6577 	}
6578 
6579 	if (wrqu->data.length < ieee->wpa_ie_len) {
6580 		err = -E2BIG;
6581 		goto out;
6582 	}
6583 
6584 	wrqu->data.length = ieee->wpa_ie_len;
6585 	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6586 
6587       out:
6588 	return err;
6589 }
6590 
6591 static int wext_cipher2level(int cipher)
6592 {
6593 	switch (cipher) {
6594 	case IW_AUTH_CIPHER_NONE:
6595 		return SEC_LEVEL_0;
6596 	case IW_AUTH_CIPHER_WEP40:
6597 	case IW_AUTH_CIPHER_WEP104:
6598 		return SEC_LEVEL_1;
6599 	case IW_AUTH_CIPHER_TKIP:
6600 		return SEC_LEVEL_2;
6601 	case IW_AUTH_CIPHER_CCMP:
6602 		return SEC_LEVEL_3;
6603 	default:
6604 		return -1;
6605 	}
6606 }
6607 
6608 /* SIOCSIWAUTH */
6609 static int ipw_wx_set_auth(struct net_device *dev,
6610 			   struct iw_request_info *info,
6611 			   union iwreq_data *wrqu, char *extra)
6612 {
6613 	struct ipw_priv *priv = libipw_priv(dev);
6614 	struct libipw_device *ieee = priv->ieee;
6615 	struct iw_param *param = &wrqu->param;
6616 	struct lib80211_crypt_data *crypt;
6617 	unsigned long flags;
6618 	int ret = 0;
6619 
6620 	switch (param->flags & IW_AUTH_INDEX) {
6621 	case IW_AUTH_WPA_VERSION:
6622 		break;
6623 	case IW_AUTH_CIPHER_PAIRWISE:
6624 		ipw_set_hw_decrypt_unicast(priv,
6625 					   wext_cipher2level(param->value));
6626 		break;
6627 	case IW_AUTH_CIPHER_GROUP:
6628 		ipw_set_hw_decrypt_multicast(priv,
6629 					     wext_cipher2level(param->value));
6630 		break;
6631 	case IW_AUTH_KEY_MGMT:
6632 		/*
6633 		 * ipw2200 does not use these parameters
6634 		 */
6635 		break;
6636 
6637 	case IW_AUTH_TKIP_COUNTERMEASURES:
6638 		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6639 		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6640 			break;
6641 
6642 		flags = crypt->ops->get_flags(crypt->priv);
6643 
6644 		if (param->value)
6645 			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6646 		else
6647 			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6648 
6649 		crypt->ops->set_flags(flags, crypt->priv);
6650 
6651 		break;
6652 
6653 	case IW_AUTH_DROP_UNENCRYPTED:{
6654 			/* HACK:
6655 			 *
6656 			 * wpa_supplicant calls set_wpa_enabled when the driver
6657 			 * is loaded and unloaded, regardless of if WPA is being
6658 			 * used.  No other calls are made which can be used to
6659 			 * determine if encryption will be used or not prior to
6660 			 * association being expected.  If encryption is not being
6661 			 * used, drop_unencrypted is set to false, else true -- we
6662 			 * can use this to determine if the CAP_PRIVACY_ON bit should
6663 			 * be set.
6664 			 */
6665 			struct libipw_security sec = {
6666 				.flags = SEC_ENABLED,
6667 				.enabled = param->value,
6668 			};
6669 			priv->ieee->drop_unencrypted = param->value;
6670 			/* We only change SEC_LEVEL for open mode. Others
6671 			 * are set by ipw_wpa_set_encryption.
6672 			 */
6673 			if (!param->value) {
6674 				sec.flags |= SEC_LEVEL;
6675 				sec.level = SEC_LEVEL_0;
6676 			} else {
6677 				sec.flags |= SEC_LEVEL;
6678 				sec.level = SEC_LEVEL_1;
6679 			}
6680 			if (priv->ieee->set_security)
6681 				priv->ieee->set_security(priv->ieee->dev, &sec);
6682 			break;
6683 		}
6684 
6685 	case IW_AUTH_80211_AUTH_ALG:
6686 		ret = ipw_wpa_set_auth_algs(priv, param->value);
6687 		break;
6688 
6689 	case IW_AUTH_WPA_ENABLED:
6690 		ret = ipw_wpa_enable(priv, param->value);
6691 		ipw_disassociate(priv);
6692 		break;
6693 
6694 	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6695 		ieee->ieee802_1x = param->value;
6696 		break;
6697 
6698 	case IW_AUTH_PRIVACY_INVOKED:
6699 		ieee->privacy_invoked = param->value;
6700 		break;
6701 
6702 	default:
6703 		return -EOPNOTSUPP;
6704 	}
6705 	return ret;
6706 }
6707 
6708 /* SIOCGIWAUTH */
6709 static int ipw_wx_get_auth(struct net_device *dev,
6710 			   struct iw_request_info *info,
6711 			   union iwreq_data *wrqu, char *extra)
6712 {
6713 	struct ipw_priv *priv = libipw_priv(dev);
6714 	struct libipw_device *ieee = priv->ieee;
6715 	struct lib80211_crypt_data *crypt;
6716 	struct iw_param *param = &wrqu->param;
6717 
6718 	switch (param->flags & IW_AUTH_INDEX) {
6719 	case IW_AUTH_WPA_VERSION:
6720 	case IW_AUTH_CIPHER_PAIRWISE:
6721 	case IW_AUTH_CIPHER_GROUP:
6722 	case IW_AUTH_KEY_MGMT:
6723 		/*
6724 		 * wpa_supplicant will control these internally
6725 		 */
6726 		return -EOPNOTSUPP;
6727 
6728 	case IW_AUTH_TKIP_COUNTERMEASURES:
6729 		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6730 		if (!crypt || !crypt->ops->get_flags)
6731 			break;
6732 
6733 		param->value = (crypt->ops->get_flags(crypt->priv) &
6734 				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6735 
6736 		break;
6737 
6738 	case IW_AUTH_DROP_UNENCRYPTED:
6739 		param->value = ieee->drop_unencrypted;
6740 		break;
6741 
6742 	case IW_AUTH_80211_AUTH_ALG:
6743 		param->value = ieee->sec.auth_mode;
6744 		break;
6745 
6746 	case IW_AUTH_WPA_ENABLED:
6747 		param->value = ieee->wpa_enabled;
6748 		break;
6749 
6750 	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6751 		param->value = ieee->ieee802_1x;
6752 		break;
6753 
6754 	case IW_AUTH_ROAMING_CONTROL:
6755 	case IW_AUTH_PRIVACY_INVOKED:
6756 		param->value = ieee->privacy_invoked;
6757 		break;
6758 
6759 	default:
6760 		return -EOPNOTSUPP;
6761 	}
6762 	return 0;
6763 }
6764 
6765 /* SIOCSIWENCODEEXT */
6766 static int ipw_wx_set_encodeext(struct net_device *dev,
6767 				struct iw_request_info *info,
6768 				union iwreq_data *wrqu, char *extra)
6769 {
6770 	struct ipw_priv *priv = libipw_priv(dev);
6771 	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6772 
6773 	if (hwcrypto) {
6774 		if (ext->alg == IW_ENCODE_ALG_TKIP) {
6775 			/* IPW HW can't build TKIP MIC,
6776 			   host decryption still needed */
6777 			if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6778 				priv->ieee->host_mc_decrypt = 1;
6779 			else {
6780 				priv->ieee->host_encrypt = 0;
6781 				priv->ieee->host_encrypt_msdu = 1;
6782 				priv->ieee->host_decrypt = 1;
6783 			}
6784 		} else {
6785 			priv->ieee->host_encrypt = 0;
6786 			priv->ieee->host_encrypt_msdu = 0;
6787 			priv->ieee->host_decrypt = 0;
6788 			priv->ieee->host_mc_decrypt = 0;
6789 		}
6790 	}
6791 
6792 	return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6793 }
6794 
6795 /* SIOCGIWENCODEEXT */
6796 static int ipw_wx_get_encodeext(struct net_device *dev,
6797 				struct iw_request_info *info,
6798 				union iwreq_data *wrqu, char *extra)
6799 {
6800 	struct ipw_priv *priv = libipw_priv(dev);
6801 	return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6802 }
6803 
6804 /* SIOCSIWMLME */
6805 static int ipw_wx_set_mlme(struct net_device *dev,
6806 			   struct iw_request_info *info,
6807 			   union iwreq_data *wrqu, char *extra)
6808 {
6809 	struct ipw_priv *priv = libipw_priv(dev);
6810 	struct iw_mlme *mlme = (struct iw_mlme *)extra;
6811 	__le16 reason;
6812 
6813 	reason = cpu_to_le16(mlme->reason_code);
6814 
6815 	switch (mlme->cmd) {
6816 	case IW_MLME_DEAUTH:
6817 		/* silently ignore */
6818 		break;
6819 
6820 	case IW_MLME_DISASSOC:
6821 		ipw_disassociate(priv);
6822 		break;
6823 
6824 	default:
6825 		return -EOPNOTSUPP;
6826 	}
6827 	return 0;
6828 }
6829 
6830 #ifdef CONFIG_IPW2200_QOS
6831 
6832 /* QoS */
6833 /*
6834 * get the modulation type of the current network or
6835 * the card current mode
6836 */
6837 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6838 {
6839 	u8 mode = 0;
6840 
6841 	if (priv->status & STATUS_ASSOCIATED) {
6842 		unsigned long flags;
6843 
6844 		spin_lock_irqsave(&priv->ieee->lock, flags);
6845 		mode = priv->assoc_network->mode;
6846 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6847 	} else {
6848 		mode = priv->ieee->mode;
6849 	}
6850 	IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6851 	return mode;
6852 }
6853 
6854 /*
6855 * Handle management frame beacon and probe response
6856 */
6857 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6858 					 int active_network,
6859 					 struct libipw_network *network)
6860 {
6861 	u32 size = sizeof(struct libipw_qos_parameters);
6862 
6863 	if (network->capability & WLAN_CAPABILITY_IBSS)
6864 		network->qos_data.active = network->qos_data.supported;
6865 
6866 	if (network->flags & NETWORK_HAS_QOS_MASK) {
6867 		if (active_network &&
6868 		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6869 			network->qos_data.active = network->qos_data.supported;
6870 
6871 		if ((network->qos_data.active == 1) && (active_network == 1) &&
6872 		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6873 		    (network->qos_data.old_param_count !=
6874 		     network->qos_data.param_count)) {
6875 			network->qos_data.old_param_count =
6876 			    network->qos_data.param_count;
6877 			schedule_work(&priv->qos_activate);
6878 			IPW_DEBUG_QOS("QoS parameters change call "
6879 				      "qos_activate\n");
6880 		}
6881 	} else {
6882 		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6883 			memcpy(&network->qos_data.parameters,
6884 			       &def_parameters_CCK, size);
6885 		else
6886 			memcpy(&network->qos_data.parameters,
6887 			       &def_parameters_OFDM, size);
6888 
6889 		if ((network->qos_data.active == 1) && (active_network == 1)) {
6890 			IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6891 			schedule_work(&priv->qos_activate);
6892 		}
6893 
6894 		network->qos_data.active = 0;
6895 		network->qos_data.supported = 0;
6896 	}
6897 	if ((priv->status & STATUS_ASSOCIATED) &&
6898 	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6899 		if (!ether_addr_equal(network->bssid, priv->bssid))
6900 			if (network->capability & WLAN_CAPABILITY_IBSS)
6901 				if ((network->ssid_len ==
6902 				     priv->assoc_network->ssid_len) &&
6903 				    !memcmp(network->ssid,
6904 					    priv->assoc_network->ssid,
6905 					    network->ssid_len)) {
6906 					schedule_work(&priv->merge_networks);
6907 				}
6908 	}
6909 
6910 	return 0;
6911 }
6912 
6913 /*
6914 * This function set up the firmware to support QoS. It sends
6915 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6916 */
6917 static int ipw_qos_activate(struct ipw_priv *priv,
6918 			    struct libipw_qos_data *qos_network_data)
6919 {
6920 	int err;
6921 	struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
6922 	struct libipw_qos_parameters *active_one = NULL;
6923 	u32 size = sizeof(struct libipw_qos_parameters);
6924 	u32 burst_duration;
6925 	int i;
6926 	u8 type;
6927 
6928 	type = ipw_qos_current_mode(priv);
6929 
6930 	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6931 	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6932 	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6933 	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6934 
6935 	if (qos_network_data == NULL) {
6936 		if (type == IEEE_B) {
6937 			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6938 			active_one = &def_parameters_CCK;
6939 		} else
6940 			active_one = &def_parameters_OFDM;
6941 
6942 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6943 		burst_duration = ipw_qos_get_burst_duration(priv);
6944 		for (i = 0; i < QOS_QUEUE_NUM; i++)
6945 			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6946 			    cpu_to_le16(burst_duration);
6947 	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6948 		if (type == IEEE_B) {
6949 			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6950 				      type);
6951 			if (priv->qos_data.qos_enable == 0)
6952 				active_one = &def_parameters_CCK;
6953 			else
6954 				active_one = priv->qos_data.def_qos_parm_CCK;
6955 		} else {
6956 			if (priv->qos_data.qos_enable == 0)
6957 				active_one = &def_parameters_OFDM;
6958 			else
6959 				active_one = priv->qos_data.def_qos_parm_OFDM;
6960 		}
6961 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6962 	} else {
6963 		unsigned long flags;
6964 		int active;
6965 
6966 		spin_lock_irqsave(&priv->ieee->lock, flags);
6967 		active_one = &(qos_network_data->parameters);
6968 		qos_network_data->old_param_count =
6969 		    qos_network_data->param_count;
6970 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6971 		active = qos_network_data->supported;
6972 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6973 
6974 		if (active == 0) {
6975 			burst_duration = ipw_qos_get_burst_duration(priv);
6976 			for (i = 0; i < QOS_QUEUE_NUM; i++)
6977 				qos_parameters[QOS_PARAM_SET_ACTIVE].
6978 				    tx_op_limit[i] = cpu_to_le16(burst_duration);
6979 		}
6980 	}
6981 
6982 	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6983 	err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
6984 	if (err)
6985 		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6986 
6987 	return err;
6988 }
6989 
6990 /*
6991 * send IPW_CMD_WME_INFO to the firmware
6992 */
6993 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6994 {
6995 	int ret = 0;
6996 	struct libipw_qos_information_element qos_info;
6997 
6998 	if (priv == NULL)
6999 		return -1;
7000 
7001 	qos_info.elementID = QOS_ELEMENT_ID;
7002 	qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7003 
7004 	qos_info.version = QOS_VERSION_1;
7005 	qos_info.ac_info = 0;
7006 
7007 	memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7008 	qos_info.qui_type = QOS_OUI_TYPE;
7009 	qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7010 
7011 	ret = ipw_send_qos_info_command(priv, &qos_info);
7012 	if (ret != 0) {
7013 		IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7014 	}
7015 	return ret;
7016 }
7017 
7018 /*
7019 * Set the QoS parameter with the association request structure
7020 */
7021 static int ipw_qos_association(struct ipw_priv *priv,
7022 			       struct libipw_network *network)
7023 {
7024 	int err = 0;
7025 	struct libipw_qos_data *qos_data = NULL;
7026 	struct libipw_qos_data ibss_data = {
7027 		.supported = 1,
7028 		.active = 1,
7029 	};
7030 
7031 	switch (priv->ieee->iw_mode) {
7032 	case IW_MODE_ADHOC:
7033 		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7034 
7035 		qos_data = &ibss_data;
7036 		break;
7037 
7038 	case IW_MODE_INFRA:
7039 		qos_data = &network->qos_data;
7040 		break;
7041 
7042 	default:
7043 		BUG();
7044 		break;
7045 	}
7046 
7047 	err = ipw_qos_activate(priv, qos_data);
7048 	if (err) {
7049 		priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7050 		return err;
7051 	}
7052 
7053 	if (priv->qos_data.qos_enable && qos_data->supported) {
7054 		IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7055 		priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7056 		return ipw_qos_set_info_element(priv);
7057 	}
7058 
7059 	return 0;
7060 }
7061 
7062 /*
7063 * handling the beaconing responses. if we get different QoS setting
7064 * off the network from the associated setting, adjust the QoS
7065 * setting
7066 */
7067 static int ipw_qos_association_resp(struct ipw_priv *priv,
7068 				    struct libipw_network *network)
7069 {
7070 	int ret = 0;
7071 	unsigned long flags;
7072 	u32 size = sizeof(struct libipw_qos_parameters);
7073 	int set_qos_param = 0;
7074 
7075 	if ((priv == NULL) || (network == NULL) ||
7076 	    (priv->assoc_network == NULL))
7077 		return ret;
7078 
7079 	if (!(priv->status & STATUS_ASSOCIATED))
7080 		return ret;
7081 
7082 	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7083 		return ret;
7084 
7085 	spin_lock_irqsave(&priv->ieee->lock, flags);
7086 	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7087 		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7088 		       sizeof(struct libipw_qos_data));
7089 		priv->assoc_network->qos_data.active = 1;
7090 		if ((network->qos_data.old_param_count !=
7091 		     network->qos_data.param_count)) {
7092 			set_qos_param = 1;
7093 			network->qos_data.old_param_count =
7094 			    network->qos_data.param_count;
7095 		}
7096 
7097 	} else {
7098 		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7099 			memcpy(&priv->assoc_network->qos_data.parameters,
7100 			       &def_parameters_CCK, size);
7101 		else
7102 			memcpy(&priv->assoc_network->qos_data.parameters,
7103 			       &def_parameters_OFDM, size);
7104 		priv->assoc_network->qos_data.active = 0;
7105 		priv->assoc_network->qos_data.supported = 0;
7106 		set_qos_param = 1;
7107 	}
7108 
7109 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7110 
7111 	if (set_qos_param == 1)
7112 		schedule_work(&priv->qos_activate);
7113 
7114 	return ret;
7115 }
7116 
7117 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7118 {
7119 	u32 ret = 0;
7120 
7121 	if ((priv == NULL))
7122 		return 0;
7123 
7124 	if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7125 		ret = priv->qos_data.burst_duration_CCK;
7126 	else
7127 		ret = priv->qos_data.burst_duration_OFDM;
7128 
7129 	return ret;
7130 }
7131 
7132 /*
7133 * Initialize the setting of QoS global
7134 */
7135 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7136 			 int burst_enable, u32 burst_duration_CCK,
7137 			 u32 burst_duration_OFDM)
7138 {
7139 	priv->qos_data.qos_enable = enable;
7140 
7141 	if (priv->qos_data.qos_enable) {
7142 		priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7143 		priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7144 		IPW_DEBUG_QOS("QoS is enabled\n");
7145 	} else {
7146 		priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7147 		priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7148 		IPW_DEBUG_QOS("QoS is not enabled\n");
7149 	}
7150 
7151 	priv->qos_data.burst_enable = burst_enable;
7152 
7153 	if (burst_enable) {
7154 		priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7155 		priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7156 	} else {
7157 		priv->qos_data.burst_duration_CCK = 0;
7158 		priv->qos_data.burst_duration_OFDM = 0;
7159 	}
7160 }
7161 
7162 /*
7163 * map the packet priority to the right TX Queue
7164 */
7165 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7166 {
7167 	if (priority > 7 || !priv->qos_data.qos_enable)
7168 		priority = 0;
7169 
7170 	return from_priority_to_tx_queue[priority] - 1;
7171 }
7172 
7173 static int ipw_is_qos_active(struct net_device *dev,
7174 			     struct sk_buff *skb)
7175 {
7176 	struct ipw_priv *priv = libipw_priv(dev);
7177 	struct libipw_qos_data *qos_data = NULL;
7178 	int active, supported;
7179 	u8 *daddr = skb->data + ETH_ALEN;
7180 	int unicast = !is_multicast_ether_addr(daddr);
7181 
7182 	if (!(priv->status & STATUS_ASSOCIATED))
7183 		return 0;
7184 
7185 	qos_data = &priv->assoc_network->qos_data;
7186 
7187 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7188 		if (unicast == 0)
7189 			qos_data->active = 0;
7190 		else
7191 			qos_data->active = qos_data->supported;
7192 	}
7193 	active = qos_data->active;
7194 	supported = qos_data->supported;
7195 	IPW_DEBUG_QOS("QoS  %d network is QoS active %d  supported %d  "
7196 		      "unicast %d\n",
7197 		      priv->qos_data.qos_enable, active, supported, unicast);
7198 	if (active && priv->qos_data.qos_enable)
7199 		return 1;
7200 
7201 	return 0;
7202 
7203 }
7204 /*
7205 * add QoS parameter to the TX command
7206 */
7207 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7208 					u16 priority,
7209 					struct tfd_data *tfd)
7210 {
7211 	int tx_queue_id = 0;
7212 
7213 
7214 	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7215 	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7216 
7217 	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7218 		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7219 		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7220 	}
7221 	return 0;
7222 }
7223 
7224 /*
7225 * background support to run QoS activate functionality
7226 */
7227 static void ipw_bg_qos_activate(struct work_struct *work)
7228 {
7229 	struct ipw_priv *priv =
7230 		container_of(work, struct ipw_priv, qos_activate);
7231 
7232 	mutex_lock(&priv->mutex);
7233 
7234 	if (priv->status & STATUS_ASSOCIATED)
7235 		ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7236 
7237 	mutex_unlock(&priv->mutex);
7238 }
7239 
7240 static int ipw_handle_probe_response(struct net_device *dev,
7241 				     struct libipw_probe_response *resp,
7242 				     struct libipw_network *network)
7243 {
7244 	struct ipw_priv *priv = libipw_priv(dev);
7245 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7246 			      (network == priv->assoc_network));
7247 
7248 	ipw_qos_handle_probe_response(priv, active_network, network);
7249 
7250 	return 0;
7251 }
7252 
7253 static int ipw_handle_beacon(struct net_device *dev,
7254 			     struct libipw_beacon *resp,
7255 			     struct libipw_network *network)
7256 {
7257 	struct ipw_priv *priv = libipw_priv(dev);
7258 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7259 			      (network == priv->assoc_network));
7260 
7261 	ipw_qos_handle_probe_response(priv, active_network, network);
7262 
7263 	return 0;
7264 }
7265 
7266 static int ipw_handle_assoc_response(struct net_device *dev,
7267 				     struct libipw_assoc_response *resp,
7268 				     struct libipw_network *network)
7269 {
7270 	struct ipw_priv *priv = libipw_priv(dev);
7271 	ipw_qos_association_resp(priv, network);
7272 	return 0;
7273 }
7274 
7275 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7276 				       *qos_param)
7277 {
7278 	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7279 				sizeof(*qos_param) * 3, qos_param);
7280 }
7281 
7282 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7283 				     *qos_param)
7284 {
7285 	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7286 				qos_param);
7287 }
7288 
7289 #endif				/* CONFIG_IPW2200_QOS */
7290 
7291 static int ipw_associate_network(struct ipw_priv *priv,
7292 				 struct libipw_network *network,
7293 				 struct ipw_supported_rates *rates, int roaming)
7294 {
7295 	int err;
7296 
7297 	if (priv->config & CFG_FIXED_RATE)
7298 		ipw_set_fixed_rate(priv, network->mode);
7299 
7300 	if (!(priv->config & CFG_STATIC_ESSID)) {
7301 		priv->essid_len = min(network->ssid_len,
7302 				      (u8) IW_ESSID_MAX_SIZE);
7303 		memcpy(priv->essid, network->ssid, priv->essid_len);
7304 	}
7305 
7306 	network->last_associate = jiffies;
7307 
7308 	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7309 	priv->assoc_request.channel = network->channel;
7310 	priv->assoc_request.auth_key = 0;
7311 
7312 	if ((priv->capability & CAP_PRIVACY_ON) &&
7313 	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7314 		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7315 		priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7316 
7317 		if (priv->ieee->sec.level == SEC_LEVEL_1)
7318 			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7319 
7320 	} else if ((priv->capability & CAP_PRIVACY_ON) &&
7321 		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7322 		priv->assoc_request.auth_type = AUTH_LEAP;
7323 	else
7324 		priv->assoc_request.auth_type = AUTH_OPEN;
7325 
7326 	if (priv->ieee->wpa_ie_len) {
7327 		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
7328 		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7329 				 priv->ieee->wpa_ie_len);
7330 	}
7331 
7332 	/*
7333 	 * It is valid for our ieee device to support multiple modes, but
7334 	 * when it comes to associating to a given network we have to choose
7335 	 * just one mode.
7336 	 */
7337 	if (network->mode & priv->ieee->mode & IEEE_A)
7338 		priv->assoc_request.ieee_mode = IPW_A_MODE;
7339 	else if (network->mode & priv->ieee->mode & IEEE_G)
7340 		priv->assoc_request.ieee_mode = IPW_G_MODE;
7341 	else if (network->mode & priv->ieee->mode & IEEE_B)
7342 		priv->assoc_request.ieee_mode = IPW_B_MODE;
7343 
7344 	priv->assoc_request.capability = cpu_to_le16(network->capability);
7345 	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7346 	    && !(priv->config & CFG_PREAMBLE_LONG)) {
7347 		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7348 	} else {
7349 		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7350 
7351 		/* Clear the short preamble if we won't be supporting it */
7352 		priv->assoc_request.capability &=
7353 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7354 	}
7355 
7356 	/* Clear capability bits that aren't used in Ad Hoc */
7357 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7358 		priv->assoc_request.capability &=
7359 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7360 
7361 	IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7362 			roaming ? "Rea" : "A",
7363 			priv->essid_len, priv->essid,
7364 			network->channel,
7365 			ipw_modes[priv->assoc_request.ieee_mode],
7366 			rates->num_rates,
7367 			(priv->assoc_request.preamble_length ==
7368 			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7369 			network->capability &
7370 			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7371 			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7372 			priv->capability & CAP_PRIVACY_ON ?
7373 			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
7374 			 "(open)") : "",
7375 			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7376 			priv->capability & CAP_PRIVACY_ON ?
7377 			'1' + priv->ieee->sec.active_key : '.',
7378 			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7379 
7380 	priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7381 	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7382 	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7383 		priv->assoc_request.assoc_type = HC_IBSS_START;
7384 		priv->assoc_request.assoc_tsf_msw = 0;
7385 		priv->assoc_request.assoc_tsf_lsw = 0;
7386 	} else {
7387 		if (unlikely(roaming))
7388 			priv->assoc_request.assoc_type = HC_REASSOCIATE;
7389 		else
7390 			priv->assoc_request.assoc_type = HC_ASSOCIATE;
7391 		priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7392 		priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7393 	}
7394 
7395 	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7396 
7397 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7398 		eth_broadcast_addr(priv->assoc_request.dest);
7399 		priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7400 	} else {
7401 		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7402 		priv->assoc_request.atim_window = 0;
7403 	}
7404 
7405 	priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7406 
7407 	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7408 	if (err) {
7409 		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7410 		return err;
7411 	}
7412 
7413 	rates->ieee_mode = priv->assoc_request.ieee_mode;
7414 	rates->purpose = IPW_RATE_CONNECT;
7415 	ipw_send_supported_rates(priv, rates);
7416 
7417 	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7418 		priv->sys_config.dot11g_auto_detection = 1;
7419 	else
7420 		priv->sys_config.dot11g_auto_detection = 0;
7421 
7422 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7423 		priv->sys_config.answer_broadcast_ssid_probe = 1;
7424 	else
7425 		priv->sys_config.answer_broadcast_ssid_probe = 0;
7426 
7427 	err = ipw_send_system_config(priv);
7428 	if (err) {
7429 		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7430 		return err;
7431 	}
7432 
7433 	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7434 	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7435 	if (err) {
7436 		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7437 		return err;
7438 	}
7439 
7440 	/*
7441 	 * If preemption is enabled, it is possible for the association
7442 	 * to complete before we return from ipw_send_associate.  Therefore
7443 	 * we have to be sure and update our priviate data first.
7444 	 */
7445 	priv->channel = network->channel;
7446 	memcpy(priv->bssid, network->bssid, ETH_ALEN);
7447 	priv->status |= STATUS_ASSOCIATING;
7448 	priv->status &= ~STATUS_SECURITY_UPDATED;
7449 
7450 	priv->assoc_network = network;
7451 
7452 #ifdef CONFIG_IPW2200_QOS
7453 	ipw_qos_association(priv, network);
7454 #endif
7455 
7456 	err = ipw_send_associate(priv, &priv->assoc_request);
7457 	if (err) {
7458 		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7459 		return err;
7460 	}
7461 
7462 	IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n",
7463 		  priv->essid_len, priv->essid, priv->bssid);
7464 
7465 	return 0;
7466 }
7467 
7468 static void ipw_roam(void *data)
7469 {
7470 	struct ipw_priv *priv = data;
7471 	struct libipw_network *network = NULL;
7472 	struct ipw_network_match match = {
7473 		.network = priv->assoc_network
7474 	};
7475 
7476 	/* The roaming process is as follows:
7477 	 *
7478 	 * 1.  Missed beacon threshold triggers the roaming process by
7479 	 *     setting the status ROAM bit and requesting a scan.
7480 	 * 2.  When the scan completes, it schedules the ROAM work
7481 	 * 3.  The ROAM work looks at all of the known networks for one that
7482 	 *     is a better network than the currently associated.  If none
7483 	 *     found, the ROAM process is over (ROAM bit cleared)
7484 	 * 4.  If a better network is found, a disassociation request is
7485 	 *     sent.
7486 	 * 5.  When the disassociation completes, the roam work is again
7487 	 *     scheduled.  The second time through, the driver is no longer
7488 	 *     associated, and the newly selected network is sent an
7489 	 *     association request.
7490 	 * 6.  At this point ,the roaming process is complete and the ROAM
7491 	 *     status bit is cleared.
7492 	 */
7493 
7494 	/* If we are no longer associated, and the roaming bit is no longer
7495 	 * set, then we are not actively roaming, so just return */
7496 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7497 		return;
7498 
7499 	if (priv->status & STATUS_ASSOCIATED) {
7500 		/* First pass through ROAM process -- look for a better
7501 		 * network */
7502 		unsigned long flags;
7503 		u8 rssi = priv->assoc_network->stats.rssi;
7504 		priv->assoc_network->stats.rssi = -128;
7505 		spin_lock_irqsave(&priv->ieee->lock, flags);
7506 		list_for_each_entry(network, &priv->ieee->network_list, list) {
7507 			if (network != priv->assoc_network)
7508 				ipw_best_network(priv, &match, network, 1);
7509 		}
7510 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
7511 		priv->assoc_network->stats.rssi = rssi;
7512 
7513 		if (match.network == priv->assoc_network) {
7514 			IPW_DEBUG_ASSOC("No better APs in this network to "
7515 					"roam to.\n");
7516 			priv->status &= ~STATUS_ROAMING;
7517 			ipw_debug_config(priv);
7518 			return;
7519 		}
7520 
7521 		ipw_send_disassociate(priv, 1);
7522 		priv->assoc_network = match.network;
7523 
7524 		return;
7525 	}
7526 
7527 	/* Second pass through ROAM process -- request association */
7528 	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7529 	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7530 	priv->status &= ~STATUS_ROAMING;
7531 }
7532 
7533 static void ipw_bg_roam(struct work_struct *work)
7534 {
7535 	struct ipw_priv *priv =
7536 		container_of(work, struct ipw_priv, roam);
7537 	mutex_lock(&priv->mutex);
7538 	ipw_roam(priv);
7539 	mutex_unlock(&priv->mutex);
7540 }
7541 
7542 static int ipw_associate(void *data)
7543 {
7544 	struct ipw_priv *priv = data;
7545 
7546 	struct libipw_network *network = NULL;
7547 	struct ipw_network_match match = {
7548 		.network = NULL
7549 	};
7550 	struct ipw_supported_rates *rates;
7551 	struct list_head *element;
7552 	unsigned long flags;
7553 
7554 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7555 		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7556 		return 0;
7557 	}
7558 
7559 	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7560 		IPW_DEBUG_ASSOC("Not attempting association (already in "
7561 				"progress)\n");
7562 		return 0;
7563 	}
7564 
7565 	if (priv->status & STATUS_DISASSOCIATING) {
7566 		IPW_DEBUG_ASSOC("Not attempting association (in "
7567 				"disassociating)\n ");
7568 		schedule_work(&priv->associate);
7569 		return 0;
7570 	}
7571 
7572 	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7573 		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7574 				"initialized)\n");
7575 		return 0;
7576 	}
7577 
7578 	if (!(priv->config & CFG_ASSOCIATE) &&
7579 	    !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7580 		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7581 		return 0;
7582 	}
7583 
7584 	/* Protect our use of the network_list */
7585 	spin_lock_irqsave(&priv->ieee->lock, flags);
7586 	list_for_each_entry(network, &priv->ieee->network_list, list)
7587 	    ipw_best_network(priv, &match, network, 0);
7588 
7589 	network = match.network;
7590 	rates = &match.rates;
7591 
7592 	if (network == NULL &&
7593 	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
7594 	    priv->config & CFG_ADHOC_CREATE &&
7595 	    priv->config & CFG_STATIC_ESSID &&
7596 	    priv->config & CFG_STATIC_CHANNEL) {
7597 		/* Use oldest network if the free list is empty */
7598 		if (list_empty(&priv->ieee->network_free_list)) {
7599 			struct libipw_network *oldest = NULL;
7600 			struct libipw_network *target;
7601 
7602 			list_for_each_entry(target, &priv->ieee->network_list, list) {
7603 				if ((oldest == NULL) ||
7604 				    (target->last_scanned < oldest->last_scanned))
7605 					oldest = target;
7606 			}
7607 
7608 			/* If there are no more slots, expire the oldest */
7609 			list_del(&oldest->list);
7610 			target = oldest;
7611 			IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n",
7612 					target->ssid_len, target->ssid,
7613 					target->bssid);
7614 			list_add_tail(&target->list,
7615 				      &priv->ieee->network_free_list);
7616 		}
7617 
7618 		element = priv->ieee->network_free_list.next;
7619 		network = list_entry(element, struct libipw_network, list);
7620 		ipw_adhoc_create(priv, network);
7621 		rates = &priv->rates;
7622 		list_del(element);
7623 		list_add_tail(&network->list, &priv->ieee->network_list);
7624 	}
7625 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7626 
7627 	/* If we reached the end of the list, then we don't have any valid
7628 	 * matching APs */
7629 	if (!network) {
7630 		ipw_debug_config(priv);
7631 
7632 		if (!(priv->status & STATUS_SCANNING)) {
7633 			if (!(priv->config & CFG_SPEED_SCAN))
7634 				schedule_delayed_work(&priv->request_scan,
7635 						      SCAN_INTERVAL);
7636 			else
7637 				schedule_delayed_work(&priv->request_scan, 0);
7638 		}
7639 
7640 		return 0;
7641 	}
7642 
7643 	ipw_associate_network(priv, network, rates, 0);
7644 
7645 	return 1;
7646 }
7647 
7648 static void ipw_bg_associate(struct work_struct *work)
7649 {
7650 	struct ipw_priv *priv =
7651 		container_of(work, struct ipw_priv, associate);
7652 	mutex_lock(&priv->mutex);
7653 	ipw_associate(priv);
7654 	mutex_unlock(&priv->mutex);
7655 }
7656 
7657 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7658 				      struct sk_buff *skb)
7659 {
7660 	struct ieee80211_hdr *hdr;
7661 	u16 fc;
7662 
7663 	hdr = (struct ieee80211_hdr *)skb->data;
7664 	fc = le16_to_cpu(hdr->frame_control);
7665 	if (!(fc & IEEE80211_FCTL_PROTECTED))
7666 		return;
7667 
7668 	fc &= ~IEEE80211_FCTL_PROTECTED;
7669 	hdr->frame_control = cpu_to_le16(fc);
7670 	switch (priv->ieee->sec.level) {
7671 	case SEC_LEVEL_3:
7672 		/* Remove CCMP HDR */
7673 		memmove(skb->data + LIBIPW_3ADDR_LEN,
7674 			skb->data + LIBIPW_3ADDR_LEN + 8,
7675 			skb->len - LIBIPW_3ADDR_LEN - 8);
7676 		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
7677 		break;
7678 	case SEC_LEVEL_2:
7679 		break;
7680 	case SEC_LEVEL_1:
7681 		/* Remove IV */
7682 		memmove(skb->data + LIBIPW_3ADDR_LEN,
7683 			skb->data + LIBIPW_3ADDR_LEN + 4,
7684 			skb->len - LIBIPW_3ADDR_LEN - 4);
7685 		skb_trim(skb, skb->len - 8);	/* IV + ICV */
7686 		break;
7687 	case SEC_LEVEL_0:
7688 		break;
7689 	default:
7690 		printk(KERN_ERR "Unknown security level %d\n",
7691 		       priv->ieee->sec.level);
7692 		break;
7693 	}
7694 }
7695 
7696 static void ipw_handle_data_packet(struct ipw_priv *priv,
7697 				   struct ipw_rx_mem_buffer *rxb,
7698 				   struct libipw_rx_stats *stats)
7699 {
7700 	struct net_device *dev = priv->net_dev;
7701 	struct libipw_hdr_4addr *hdr;
7702 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7703 
7704 	/* We received data from the HW, so stop the watchdog */
7705 	netif_trans_update(dev);
7706 
7707 	/* We only process data packets if the
7708 	 * interface is open */
7709 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7710 		     skb_tailroom(rxb->skb))) {
7711 		dev->stats.rx_errors++;
7712 		priv->wstats.discard.misc++;
7713 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7714 		return;
7715 	} else if (unlikely(!netif_running(priv->net_dev))) {
7716 		dev->stats.rx_dropped++;
7717 		priv->wstats.discard.misc++;
7718 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7719 		return;
7720 	}
7721 
7722 	/* Advance skb->data to the start of the actual payload */
7723 	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7724 
7725 	/* Set the size of the skb to the size of the frame */
7726 	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7727 
7728 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7729 
7730 	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7731 	hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7732 	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7733 	    (is_multicast_ether_addr(hdr->addr1) ?
7734 	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7735 		ipw_rebuild_decrypted_skb(priv, rxb->skb);
7736 
7737 	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7738 		dev->stats.rx_errors++;
7739 	else {			/* libipw_rx succeeded, so it now owns the SKB */
7740 		rxb->skb = NULL;
7741 		__ipw_led_activity_on(priv);
7742 	}
7743 }
7744 
7745 #ifdef CONFIG_IPW2200_RADIOTAP
7746 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7747 					   struct ipw_rx_mem_buffer *rxb,
7748 					   struct libipw_rx_stats *stats)
7749 {
7750 	struct net_device *dev = priv->net_dev;
7751 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7752 	struct ipw_rx_frame *frame = &pkt->u.frame;
7753 
7754 	/* initial pull of some data */
7755 	u16 received_channel = frame->received_channel;
7756 	u8 antennaAndPhy = frame->antennaAndPhy;
7757 	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
7758 	u16 pktrate = frame->rate;
7759 
7760 	/* Magic struct that slots into the radiotap header -- no reason
7761 	 * to build this manually element by element, we can write it much
7762 	 * more efficiently than we can parse it. ORDER MATTERS HERE */
7763 	struct ipw_rt_hdr *ipw_rt;
7764 
7765 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
7766 
7767 	/* We received data from the HW, so stop the watchdog */
7768 	netif_trans_update(dev);
7769 
7770 	/* We only process data packets if the
7771 	 * interface is open */
7772 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7773 		     skb_tailroom(rxb->skb))) {
7774 		dev->stats.rx_errors++;
7775 		priv->wstats.discard.misc++;
7776 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7777 		return;
7778 	} else if (unlikely(!netif_running(priv->net_dev))) {
7779 		dev->stats.rx_dropped++;
7780 		priv->wstats.discard.misc++;
7781 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7782 		return;
7783 	}
7784 
7785 	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7786 	 * that now */
7787 	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7788 		/* FIXME: Should alloc bigger skb instead */
7789 		dev->stats.rx_dropped++;
7790 		priv->wstats.discard.misc++;
7791 		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7792 		return;
7793 	}
7794 
7795 	/* copy the frame itself */
7796 	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7797 		rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7798 
7799 	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7800 
7801 	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7802 	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7803 	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* total header+data */
7804 
7805 	/* Big bitfield of all the fields we provide in radiotap */
7806 	ipw_rt->rt_hdr.it_present = cpu_to_le32(
7807 	     (1 << IEEE80211_RADIOTAP_TSFT) |
7808 	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7809 	     (1 << IEEE80211_RADIOTAP_RATE) |
7810 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7811 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7812 	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7813 	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7814 
7815 	/* Zero the flags, we'll add to them as we go */
7816 	ipw_rt->rt_flags = 0;
7817 	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7818 			       frame->parent_tsf[2] << 16 |
7819 			       frame->parent_tsf[1] << 8  |
7820 			       frame->parent_tsf[0]);
7821 
7822 	/* Convert signal to DBM */
7823 	ipw_rt->rt_dbmsignal = antsignal;
7824 	ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7825 
7826 	/* Convert the channel data and set the flags */
7827 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7828 	if (received_channel > 14) {	/* 802.11a */
7829 		ipw_rt->rt_chbitmask =
7830 		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7831 	} else if (antennaAndPhy & 32) {	/* 802.11b */
7832 		ipw_rt->rt_chbitmask =
7833 		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7834 	} else {		/* 802.11g */
7835 		ipw_rt->rt_chbitmask =
7836 		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7837 	}
7838 
7839 	/* set the rate in multiples of 500k/s */
7840 	switch (pktrate) {
7841 	case IPW_TX_RATE_1MB:
7842 		ipw_rt->rt_rate = 2;
7843 		break;
7844 	case IPW_TX_RATE_2MB:
7845 		ipw_rt->rt_rate = 4;
7846 		break;
7847 	case IPW_TX_RATE_5MB:
7848 		ipw_rt->rt_rate = 10;
7849 		break;
7850 	case IPW_TX_RATE_6MB:
7851 		ipw_rt->rt_rate = 12;
7852 		break;
7853 	case IPW_TX_RATE_9MB:
7854 		ipw_rt->rt_rate = 18;
7855 		break;
7856 	case IPW_TX_RATE_11MB:
7857 		ipw_rt->rt_rate = 22;
7858 		break;
7859 	case IPW_TX_RATE_12MB:
7860 		ipw_rt->rt_rate = 24;
7861 		break;
7862 	case IPW_TX_RATE_18MB:
7863 		ipw_rt->rt_rate = 36;
7864 		break;
7865 	case IPW_TX_RATE_24MB:
7866 		ipw_rt->rt_rate = 48;
7867 		break;
7868 	case IPW_TX_RATE_36MB:
7869 		ipw_rt->rt_rate = 72;
7870 		break;
7871 	case IPW_TX_RATE_48MB:
7872 		ipw_rt->rt_rate = 96;
7873 		break;
7874 	case IPW_TX_RATE_54MB:
7875 		ipw_rt->rt_rate = 108;
7876 		break;
7877 	default:
7878 		ipw_rt->rt_rate = 0;
7879 		break;
7880 	}
7881 
7882 	/* antenna number */
7883 	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */
7884 
7885 	/* set the preamble flag if we have it */
7886 	if ((antennaAndPhy & 64))
7887 		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7888 
7889 	/* Set the size of the skb to the size of the frame */
7890 	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7891 
7892 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7893 
7894 	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7895 		dev->stats.rx_errors++;
7896 	else {			/* libipw_rx succeeded, so it now owns the SKB */
7897 		rxb->skb = NULL;
7898 		/* no LED during capture */
7899 	}
7900 }
7901 #endif
7902 
7903 #ifdef CONFIG_IPW2200_PROMISCUOUS
7904 #define libipw_is_probe_response(fc) \
7905    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7906     (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7907 
7908 #define libipw_is_management(fc) \
7909    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7910 
7911 #define libipw_is_control(fc) \
7912    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7913 
7914 #define libipw_is_data(fc) \
7915    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7916 
7917 #define libipw_is_assoc_request(fc) \
7918    ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7919 
7920 #define libipw_is_reassoc_request(fc) \
7921    ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7922 
7923 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7924 				      struct ipw_rx_mem_buffer *rxb,
7925 				      struct libipw_rx_stats *stats)
7926 {
7927 	struct net_device *dev = priv->prom_net_dev;
7928 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7929 	struct ipw_rx_frame *frame = &pkt->u.frame;
7930 	struct ipw_rt_hdr *ipw_rt;
7931 
7932 	/* First cache any information we need before we overwrite
7933 	 * the information provided in the skb from the hardware */
7934 	struct ieee80211_hdr *hdr;
7935 	u16 channel = frame->received_channel;
7936 	u8 phy_flags = frame->antennaAndPhy;
7937 	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7938 	s8 noise = (s8) le16_to_cpu(frame->noise);
7939 	u8 rate = frame->rate;
7940 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
7941 	struct sk_buff *skb;
7942 	int hdr_only = 0;
7943 	u16 filter = priv->prom_priv->filter;
7944 
7945 	/* If the filter is set to not include Rx frames then return */
7946 	if (filter & IPW_PROM_NO_RX)
7947 		return;
7948 
7949 	/* We received data from the HW, so stop the watchdog */
7950 	netif_trans_update(dev);
7951 
7952 	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7953 		dev->stats.rx_errors++;
7954 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7955 		return;
7956 	}
7957 
7958 	/* We only process data packets if the interface is open */
7959 	if (unlikely(!netif_running(dev))) {
7960 		dev->stats.rx_dropped++;
7961 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7962 		return;
7963 	}
7964 
7965 	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7966 	 * that now */
7967 	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7968 		/* FIXME: Should alloc bigger skb instead */
7969 		dev->stats.rx_dropped++;
7970 		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7971 		return;
7972 	}
7973 
7974 	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7975 	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
7976 		if (filter & IPW_PROM_NO_MGMT)
7977 			return;
7978 		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7979 			hdr_only = 1;
7980 	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
7981 		if (filter & IPW_PROM_NO_CTL)
7982 			return;
7983 		if (filter & IPW_PROM_CTL_HEADER_ONLY)
7984 			hdr_only = 1;
7985 	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
7986 		if (filter & IPW_PROM_NO_DATA)
7987 			return;
7988 		if (filter & IPW_PROM_DATA_HEADER_ONLY)
7989 			hdr_only = 1;
7990 	}
7991 
7992 	/* Copy the SKB since this is for the promiscuous side */
7993 	skb = skb_copy(rxb->skb, GFP_ATOMIC);
7994 	if (skb == NULL) {
7995 		IPW_ERROR("skb_clone failed for promiscuous copy.\n");
7996 		return;
7997 	}
7998 
7999 	/* copy the frame data to write after where the radiotap header goes */
8000 	ipw_rt = (void *)skb->data;
8001 
8002 	if (hdr_only)
8003 		len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8004 
8005 	memcpy(ipw_rt->payload, hdr, len);
8006 
8007 	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8008 	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
8009 	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt));	/* total header+data */
8010 
8011 	/* Set the size of the skb to the size of the frame */
8012 	skb_put(skb, sizeof(*ipw_rt) + len);
8013 
8014 	/* Big bitfield of all the fields we provide in radiotap */
8015 	ipw_rt->rt_hdr.it_present = cpu_to_le32(
8016 	     (1 << IEEE80211_RADIOTAP_TSFT) |
8017 	     (1 << IEEE80211_RADIOTAP_FLAGS) |
8018 	     (1 << IEEE80211_RADIOTAP_RATE) |
8019 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
8020 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8021 	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8022 	     (1 << IEEE80211_RADIOTAP_ANTENNA));
8023 
8024 	/* Zero the flags, we'll add to them as we go */
8025 	ipw_rt->rt_flags = 0;
8026 	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8027 			       frame->parent_tsf[2] << 16 |
8028 			       frame->parent_tsf[1] << 8  |
8029 			       frame->parent_tsf[0]);
8030 
8031 	/* Convert to DBM */
8032 	ipw_rt->rt_dbmsignal = signal;
8033 	ipw_rt->rt_dbmnoise = noise;
8034 
8035 	/* Convert the channel data and set the flags */
8036 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8037 	if (channel > 14) {	/* 802.11a */
8038 		ipw_rt->rt_chbitmask =
8039 		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8040 	} else if (phy_flags & (1 << 5)) {	/* 802.11b */
8041 		ipw_rt->rt_chbitmask =
8042 		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8043 	} else {		/* 802.11g */
8044 		ipw_rt->rt_chbitmask =
8045 		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8046 	}
8047 
8048 	/* set the rate in multiples of 500k/s */
8049 	switch (rate) {
8050 	case IPW_TX_RATE_1MB:
8051 		ipw_rt->rt_rate = 2;
8052 		break;
8053 	case IPW_TX_RATE_2MB:
8054 		ipw_rt->rt_rate = 4;
8055 		break;
8056 	case IPW_TX_RATE_5MB:
8057 		ipw_rt->rt_rate = 10;
8058 		break;
8059 	case IPW_TX_RATE_6MB:
8060 		ipw_rt->rt_rate = 12;
8061 		break;
8062 	case IPW_TX_RATE_9MB:
8063 		ipw_rt->rt_rate = 18;
8064 		break;
8065 	case IPW_TX_RATE_11MB:
8066 		ipw_rt->rt_rate = 22;
8067 		break;
8068 	case IPW_TX_RATE_12MB:
8069 		ipw_rt->rt_rate = 24;
8070 		break;
8071 	case IPW_TX_RATE_18MB:
8072 		ipw_rt->rt_rate = 36;
8073 		break;
8074 	case IPW_TX_RATE_24MB:
8075 		ipw_rt->rt_rate = 48;
8076 		break;
8077 	case IPW_TX_RATE_36MB:
8078 		ipw_rt->rt_rate = 72;
8079 		break;
8080 	case IPW_TX_RATE_48MB:
8081 		ipw_rt->rt_rate = 96;
8082 		break;
8083 	case IPW_TX_RATE_54MB:
8084 		ipw_rt->rt_rate = 108;
8085 		break;
8086 	default:
8087 		ipw_rt->rt_rate = 0;
8088 		break;
8089 	}
8090 
8091 	/* antenna number */
8092 	ipw_rt->rt_antenna = (phy_flags & 3);
8093 
8094 	/* set the preamble flag if we have it */
8095 	if (phy_flags & (1 << 6))
8096 		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8097 
8098 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8099 
8100 	if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8101 		dev->stats.rx_errors++;
8102 		dev_kfree_skb_any(skb);
8103 	}
8104 }
8105 #endif
8106 
8107 static int is_network_packet(struct ipw_priv *priv,
8108 				    struct libipw_hdr_4addr *header)
8109 {
8110 	/* Filter incoming packets to determine if they are targeted toward
8111 	 * this network, discarding packets coming from ourselves */
8112 	switch (priv->ieee->iw_mode) {
8113 	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
8114 		/* packets from our adapter are dropped (echo) */
8115 		if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
8116 			return 0;
8117 
8118 		/* {broad,multi}cast packets to our BSSID go through */
8119 		if (is_multicast_ether_addr(header->addr1))
8120 			return ether_addr_equal(header->addr3, priv->bssid);
8121 
8122 		/* packets to our adapter go through */
8123 		return ether_addr_equal(header->addr1,
8124 					priv->net_dev->dev_addr);
8125 
8126 	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
8127 		/* packets from our adapter are dropped (echo) */
8128 		if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
8129 			return 0;
8130 
8131 		/* {broad,multi}cast packets to our BSS go through */
8132 		if (is_multicast_ether_addr(header->addr1))
8133 			return ether_addr_equal(header->addr2, priv->bssid);
8134 
8135 		/* packets to our adapter go through */
8136 		return ether_addr_equal(header->addr1,
8137 					priv->net_dev->dev_addr);
8138 	}
8139 
8140 	return 1;
8141 }
8142 
8143 #define IPW_PACKET_RETRY_TIME HZ
8144 
8145 static  int is_duplicate_packet(struct ipw_priv *priv,
8146 				      struct libipw_hdr_4addr *header)
8147 {
8148 	u16 sc = le16_to_cpu(header->seq_ctl);
8149 	u16 seq = WLAN_GET_SEQ_SEQ(sc);
8150 	u16 frag = WLAN_GET_SEQ_FRAG(sc);
8151 	u16 *last_seq, *last_frag;
8152 	unsigned long *last_time;
8153 
8154 	switch (priv->ieee->iw_mode) {
8155 	case IW_MODE_ADHOC:
8156 		{
8157 			struct list_head *p;
8158 			struct ipw_ibss_seq *entry = NULL;
8159 			u8 *mac = header->addr2;
8160 			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8161 
8162 			list_for_each(p, &priv->ibss_mac_hash[index]) {
8163 				entry =
8164 				    list_entry(p, struct ipw_ibss_seq, list);
8165 				if (ether_addr_equal(entry->mac, mac))
8166 					break;
8167 			}
8168 			if (p == &priv->ibss_mac_hash[index]) {
8169 				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8170 				if (!entry) {
8171 					IPW_ERROR
8172 					    ("Cannot malloc new mac entry\n");
8173 					return 0;
8174 				}
8175 				memcpy(entry->mac, mac, ETH_ALEN);
8176 				entry->seq_num = seq;
8177 				entry->frag_num = frag;
8178 				entry->packet_time = jiffies;
8179 				list_add(&entry->list,
8180 					 &priv->ibss_mac_hash[index]);
8181 				return 0;
8182 			}
8183 			last_seq = &entry->seq_num;
8184 			last_frag = &entry->frag_num;
8185 			last_time = &entry->packet_time;
8186 			break;
8187 		}
8188 	case IW_MODE_INFRA:
8189 		last_seq = &priv->last_seq_num;
8190 		last_frag = &priv->last_frag_num;
8191 		last_time = &priv->last_packet_time;
8192 		break;
8193 	default:
8194 		return 0;
8195 	}
8196 	if ((*last_seq == seq) &&
8197 	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8198 		if (*last_frag == frag)
8199 			goto drop;
8200 		if (*last_frag + 1 != frag)
8201 			/* out-of-order fragment */
8202 			goto drop;
8203 	} else
8204 		*last_seq = seq;
8205 
8206 	*last_frag = frag;
8207 	*last_time = jiffies;
8208 	return 0;
8209 
8210       drop:
8211 	/* Comment this line now since we observed the card receives
8212 	 * duplicate packets but the FCTL_RETRY bit is not set in the
8213 	 * IBSS mode with fragmentation enabled.
8214 	 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8215 	return 1;
8216 }
8217 
8218 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8219 				   struct ipw_rx_mem_buffer *rxb,
8220 				   struct libipw_rx_stats *stats)
8221 {
8222 	struct sk_buff *skb = rxb->skb;
8223 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8224 	struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8225 	    (skb->data + IPW_RX_FRAME_SIZE);
8226 
8227 	libipw_rx_mgt(priv->ieee, header, stats);
8228 
8229 	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8230 	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8231 	      IEEE80211_STYPE_PROBE_RESP) ||
8232 	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8233 	      IEEE80211_STYPE_BEACON))) {
8234 		if (ether_addr_equal(header->addr3, priv->bssid))
8235 			ipw_add_station(priv, header->addr2);
8236 	}
8237 
8238 	if (priv->config & CFG_NET_STATS) {
8239 		IPW_DEBUG_HC("sending stat packet\n");
8240 
8241 		/* Set the size of the skb to the size of the full
8242 		 * ipw header and 802.11 frame */
8243 		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8244 			IPW_RX_FRAME_SIZE);
8245 
8246 		/* Advance past the ipw packet header to the 802.11 frame */
8247 		skb_pull(skb, IPW_RX_FRAME_SIZE);
8248 
8249 		/* Push the libipw_rx_stats before the 802.11 frame */
8250 		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8251 
8252 		skb->dev = priv->ieee->dev;
8253 
8254 		/* Point raw at the libipw_stats */
8255 		skb_reset_mac_header(skb);
8256 
8257 		skb->pkt_type = PACKET_OTHERHOST;
8258 		skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8259 		memset(skb->cb, 0, sizeof(rxb->skb->cb));
8260 		netif_rx(skb);
8261 		rxb->skb = NULL;
8262 	}
8263 }
8264 
8265 /*
8266  * Main entry function for receiving a packet with 80211 headers.  This
8267  * should be called when ever the FW has notified us that there is a new
8268  * skb in the receive queue.
8269  */
8270 static void ipw_rx(struct ipw_priv *priv)
8271 {
8272 	struct ipw_rx_mem_buffer *rxb;
8273 	struct ipw_rx_packet *pkt;
8274 	struct libipw_hdr_4addr *header;
8275 	u32 r, w, i;
8276 	u8 network_packet;
8277 	u8 fill_rx = 0;
8278 
8279 	r = ipw_read32(priv, IPW_RX_READ_INDEX);
8280 	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8281 	i = priv->rxq->read;
8282 
8283 	if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8284 		fill_rx = 1;
8285 
8286 	while (i != r) {
8287 		rxb = priv->rxq->queue[i];
8288 		if (unlikely(rxb == NULL)) {
8289 			printk(KERN_CRIT "Queue not allocated!\n");
8290 			break;
8291 		}
8292 		priv->rxq->queue[i] = NULL;
8293 
8294 		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8295 					    IPW_RX_BUF_SIZE,
8296 					    PCI_DMA_FROMDEVICE);
8297 
8298 		pkt = (struct ipw_rx_packet *)rxb->skb->data;
8299 		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8300 			     pkt->header.message_type,
8301 			     pkt->header.rx_seq_num, pkt->header.control_bits);
8302 
8303 		switch (pkt->header.message_type) {
8304 		case RX_FRAME_TYPE:	/* 802.11 frame */  {
8305 				struct libipw_rx_stats stats = {
8306 					.rssi = pkt->u.frame.rssi_dbm -
8307 					    IPW_RSSI_TO_DBM,
8308 					.signal =
8309 					    pkt->u.frame.rssi_dbm -
8310 					    IPW_RSSI_TO_DBM + 0x100,
8311 					.noise =
8312 					    le16_to_cpu(pkt->u.frame.noise),
8313 					.rate = pkt->u.frame.rate,
8314 					.mac_time = jiffies,
8315 					.received_channel =
8316 					    pkt->u.frame.received_channel,
8317 					.freq =
8318 					    (pkt->u.frame.
8319 					     control & (1 << 0)) ?
8320 					    LIBIPW_24GHZ_BAND :
8321 					    LIBIPW_52GHZ_BAND,
8322 					.len = le16_to_cpu(pkt->u.frame.length),
8323 				};
8324 
8325 				if (stats.rssi != 0)
8326 					stats.mask |= LIBIPW_STATMASK_RSSI;
8327 				if (stats.signal != 0)
8328 					stats.mask |= LIBIPW_STATMASK_SIGNAL;
8329 				if (stats.noise != 0)
8330 					stats.mask |= LIBIPW_STATMASK_NOISE;
8331 				if (stats.rate != 0)
8332 					stats.mask |= LIBIPW_STATMASK_RATE;
8333 
8334 				priv->rx_packets++;
8335 
8336 #ifdef CONFIG_IPW2200_PROMISCUOUS
8337 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8338 		ipw_handle_promiscuous_rx(priv, rxb, &stats);
8339 #endif
8340 
8341 #ifdef CONFIG_IPW2200_MONITOR
8342 				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8343 #ifdef CONFIG_IPW2200_RADIOTAP
8344 
8345                 ipw_handle_data_packet_monitor(priv,
8346 					       rxb,
8347 					       &stats);
8348 #else
8349 		ipw_handle_data_packet(priv, rxb,
8350 				       &stats);
8351 #endif
8352 					break;
8353 				}
8354 #endif
8355 
8356 				header =
8357 				    (struct libipw_hdr_4addr *)(rxb->skb->
8358 								   data +
8359 								   IPW_RX_FRAME_SIZE);
8360 				/* TODO: Check Ad-Hoc dest/source and make sure
8361 				 * that we are actually parsing these packets
8362 				 * correctly -- we should probably use the
8363 				 * frame control of the packet and disregard
8364 				 * the current iw_mode */
8365 
8366 				network_packet =
8367 				    is_network_packet(priv, header);
8368 				if (network_packet && priv->assoc_network) {
8369 					priv->assoc_network->stats.rssi =
8370 					    stats.rssi;
8371 					priv->exp_avg_rssi =
8372 					    exponential_average(priv->exp_avg_rssi,
8373 					    stats.rssi, DEPTH_RSSI);
8374 				}
8375 
8376 				IPW_DEBUG_RX("Frame: len=%u\n",
8377 					     le16_to_cpu(pkt->u.frame.length));
8378 
8379 				if (le16_to_cpu(pkt->u.frame.length) <
8380 				    libipw_get_hdrlen(le16_to_cpu(
8381 						    header->frame_ctl))) {
8382 					IPW_DEBUG_DROP
8383 					    ("Received packet is too small. "
8384 					     "Dropping.\n");
8385 					priv->net_dev->stats.rx_errors++;
8386 					priv->wstats.discard.misc++;
8387 					break;
8388 				}
8389 
8390 				switch (WLAN_FC_GET_TYPE
8391 					(le16_to_cpu(header->frame_ctl))) {
8392 
8393 				case IEEE80211_FTYPE_MGMT:
8394 					ipw_handle_mgmt_packet(priv, rxb,
8395 							       &stats);
8396 					break;
8397 
8398 				case IEEE80211_FTYPE_CTL:
8399 					break;
8400 
8401 				case IEEE80211_FTYPE_DATA:
8402 					if (unlikely(!network_packet ||
8403 						     is_duplicate_packet(priv,
8404 									 header)))
8405 					{
8406 						IPW_DEBUG_DROP("Dropping: "
8407 							       "%pM, "
8408 							       "%pM, "
8409 							       "%pM\n",
8410 							       header->addr1,
8411 							       header->addr2,
8412 							       header->addr3);
8413 						break;
8414 					}
8415 
8416 					ipw_handle_data_packet(priv, rxb,
8417 							       &stats);
8418 
8419 					break;
8420 				}
8421 				break;
8422 			}
8423 
8424 		case RX_HOST_NOTIFICATION_TYPE:{
8425 				IPW_DEBUG_RX
8426 				    ("Notification: subtype=%02X flags=%02X size=%d\n",
8427 				     pkt->u.notification.subtype,
8428 				     pkt->u.notification.flags,
8429 				     le16_to_cpu(pkt->u.notification.size));
8430 				ipw_rx_notification(priv, &pkt->u.notification);
8431 				break;
8432 			}
8433 
8434 		default:
8435 			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8436 				     pkt->header.message_type);
8437 			break;
8438 		}
8439 
8440 		/* For now we just don't re-use anything.  We can tweak this
8441 		 * later to try and re-use notification packets and SKBs that
8442 		 * fail to Rx correctly */
8443 		if (rxb->skb != NULL) {
8444 			dev_kfree_skb_any(rxb->skb);
8445 			rxb->skb = NULL;
8446 		}
8447 
8448 		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8449 				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8450 		list_add_tail(&rxb->list, &priv->rxq->rx_used);
8451 
8452 		i = (i + 1) % RX_QUEUE_SIZE;
8453 
8454 		/* If there are a lot of unsued frames, restock the Rx queue
8455 		 * so the ucode won't assert */
8456 		if (fill_rx) {
8457 			priv->rxq->read = i;
8458 			ipw_rx_queue_replenish(priv);
8459 		}
8460 	}
8461 
8462 	/* Backtrack one entry */
8463 	priv->rxq->read = i;
8464 	ipw_rx_queue_restock(priv);
8465 }
8466 
8467 #define DEFAULT_RTS_THRESHOLD     2304U
8468 #define MIN_RTS_THRESHOLD         1U
8469 #define MAX_RTS_THRESHOLD         2304U
8470 #define DEFAULT_BEACON_INTERVAL   100U
8471 #define	DEFAULT_SHORT_RETRY_LIMIT 7U
8472 #define	DEFAULT_LONG_RETRY_LIMIT  4U
8473 
8474 /**
8475  * ipw_sw_reset
8476  * @option: options to control different reset behaviour
8477  * 	    0 = reset everything except the 'disable' module_param
8478  * 	    1 = reset everything and print out driver info (for probe only)
8479  * 	    2 = reset everything
8480  */
8481 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8482 {
8483 	int band, modulation;
8484 	int old_mode = priv->ieee->iw_mode;
8485 
8486 	/* Initialize module parameter values here */
8487 	priv->config = 0;
8488 
8489 	/* We default to disabling the LED code as right now it causes
8490 	 * too many systems to lock up... */
8491 	if (!led_support)
8492 		priv->config |= CFG_NO_LED;
8493 
8494 	if (associate)
8495 		priv->config |= CFG_ASSOCIATE;
8496 	else
8497 		IPW_DEBUG_INFO("Auto associate disabled.\n");
8498 
8499 	if (auto_create)
8500 		priv->config |= CFG_ADHOC_CREATE;
8501 	else
8502 		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8503 
8504 	priv->config &= ~CFG_STATIC_ESSID;
8505 	priv->essid_len = 0;
8506 	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8507 
8508 	if (disable && option) {
8509 		priv->status |= STATUS_RF_KILL_SW;
8510 		IPW_DEBUG_INFO("Radio disabled.\n");
8511 	}
8512 
8513 	if (default_channel != 0) {
8514 		priv->config |= CFG_STATIC_CHANNEL;
8515 		priv->channel = default_channel;
8516 		IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8517 		/* TODO: Validate that provided channel is in range */
8518 	}
8519 #ifdef CONFIG_IPW2200_QOS
8520 	ipw_qos_init(priv, qos_enable, qos_burst_enable,
8521 		     burst_duration_CCK, burst_duration_OFDM);
8522 #endif				/* CONFIG_IPW2200_QOS */
8523 
8524 	switch (network_mode) {
8525 	case 1:
8526 		priv->ieee->iw_mode = IW_MODE_ADHOC;
8527 		priv->net_dev->type = ARPHRD_ETHER;
8528 
8529 		break;
8530 #ifdef CONFIG_IPW2200_MONITOR
8531 	case 2:
8532 		priv->ieee->iw_mode = IW_MODE_MONITOR;
8533 #ifdef CONFIG_IPW2200_RADIOTAP
8534 		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8535 #else
8536 		priv->net_dev->type = ARPHRD_IEEE80211;
8537 #endif
8538 		break;
8539 #endif
8540 	default:
8541 	case 0:
8542 		priv->net_dev->type = ARPHRD_ETHER;
8543 		priv->ieee->iw_mode = IW_MODE_INFRA;
8544 		break;
8545 	}
8546 
8547 	if (hwcrypto) {
8548 		priv->ieee->host_encrypt = 0;
8549 		priv->ieee->host_encrypt_msdu = 0;
8550 		priv->ieee->host_decrypt = 0;
8551 		priv->ieee->host_mc_decrypt = 0;
8552 	}
8553 	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8554 
8555 	/* IPW2200/2915 is abled to do hardware fragmentation. */
8556 	priv->ieee->host_open_frag = 0;
8557 
8558 	if ((priv->pci_dev->device == 0x4223) ||
8559 	    (priv->pci_dev->device == 0x4224)) {
8560 		if (option == 1)
8561 			printk(KERN_INFO DRV_NAME
8562 			       ": Detected Intel PRO/Wireless 2915ABG Network "
8563 			       "Connection\n");
8564 		priv->ieee->abg_true = 1;
8565 		band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8566 		modulation = LIBIPW_OFDM_MODULATION |
8567 		    LIBIPW_CCK_MODULATION;
8568 		priv->adapter = IPW_2915ABG;
8569 		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8570 	} else {
8571 		if (option == 1)
8572 			printk(KERN_INFO DRV_NAME
8573 			       ": Detected Intel PRO/Wireless 2200BG Network "
8574 			       "Connection\n");
8575 
8576 		priv->ieee->abg_true = 0;
8577 		band = LIBIPW_24GHZ_BAND;
8578 		modulation = LIBIPW_OFDM_MODULATION |
8579 		    LIBIPW_CCK_MODULATION;
8580 		priv->adapter = IPW_2200BG;
8581 		priv->ieee->mode = IEEE_G | IEEE_B;
8582 	}
8583 
8584 	priv->ieee->freq_band = band;
8585 	priv->ieee->modulation = modulation;
8586 
8587 	priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8588 
8589 	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8590 	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8591 
8592 	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8593 	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8594 	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8595 
8596 	/* If power management is turned on, default to AC mode */
8597 	priv->power_mode = IPW_POWER_AC;
8598 	priv->tx_power = IPW_TX_POWER_DEFAULT;
8599 
8600 	return old_mode == priv->ieee->iw_mode;
8601 }
8602 
8603 /*
8604  * This file defines the Wireless Extension handlers.  It does not
8605  * define any methods of hardware manipulation and relies on the
8606  * functions defined in ipw_main to provide the HW interaction.
8607  *
8608  * The exception to this is the use of the ipw_get_ordinal()
8609  * function used to poll the hardware vs. making unnecessary calls.
8610  *
8611  */
8612 
8613 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8614 {
8615 	if (channel == 0) {
8616 		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8617 		priv->config &= ~CFG_STATIC_CHANNEL;
8618 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8619 				"parameters.\n");
8620 		ipw_associate(priv);
8621 		return 0;
8622 	}
8623 
8624 	priv->config |= CFG_STATIC_CHANNEL;
8625 
8626 	if (priv->channel == channel) {
8627 		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8628 			       channel);
8629 		return 0;
8630 	}
8631 
8632 	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8633 	priv->channel = channel;
8634 
8635 #ifdef CONFIG_IPW2200_MONITOR
8636 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8637 		int i;
8638 		if (priv->status & STATUS_SCANNING) {
8639 			IPW_DEBUG_SCAN("Scan abort triggered due to "
8640 				       "channel change.\n");
8641 			ipw_abort_scan(priv);
8642 		}
8643 
8644 		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8645 			udelay(10);
8646 
8647 		if (priv->status & STATUS_SCANNING)
8648 			IPW_DEBUG_SCAN("Still scanning...\n");
8649 		else
8650 			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8651 				       1000 - i);
8652 
8653 		return 0;
8654 	}
8655 #endif				/* CONFIG_IPW2200_MONITOR */
8656 
8657 	/* Network configuration changed -- force [re]association */
8658 	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8659 	if (!ipw_disassociate(priv))
8660 		ipw_associate(priv);
8661 
8662 	return 0;
8663 }
8664 
8665 static int ipw_wx_set_freq(struct net_device *dev,
8666 			   struct iw_request_info *info,
8667 			   union iwreq_data *wrqu, char *extra)
8668 {
8669 	struct ipw_priv *priv = libipw_priv(dev);
8670 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8671 	struct iw_freq *fwrq = &wrqu->freq;
8672 	int ret = 0, i;
8673 	u8 channel, flags;
8674 	int band;
8675 
8676 	if (fwrq->m == 0) {
8677 		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8678 		mutex_lock(&priv->mutex);
8679 		ret = ipw_set_channel(priv, 0);
8680 		mutex_unlock(&priv->mutex);
8681 		return ret;
8682 	}
8683 	/* if setting by freq convert to channel */
8684 	if (fwrq->e == 1) {
8685 		channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8686 		if (channel == 0)
8687 			return -EINVAL;
8688 	} else
8689 		channel = fwrq->m;
8690 
8691 	if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8692 		return -EINVAL;
8693 
8694 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8695 		i = libipw_channel_to_index(priv->ieee, channel);
8696 		if (i == -1)
8697 			return -EINVAL;
8698 
8699 		flags = (band == LIBIPW_24GHZ_BAND) ?
8700 		    geo->bg[i].flags : geo->a[i].flags;
8701 		if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8702 			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8703 			return -EINVAL;
8704 		}
8705 	}
8706 
8707 	IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8708 	mutex_lock(&priv->mutex);
8709 	ret = ipw_set_channel(priv, channel);
8710 	mutex_unlock(&priv->mutex);
8711 	return ret;
8712 }
8713 
8714 static int ipw_wx_get_freq(struct net_device *dev,
8715 			   struct iw_request_info *info,
8716 			   union iwreq_data *wrqu, char *extra)
8717 {
8718 	struct ipw_priv *priv = libipw_priv(dev);
8719 
8720 	wrqu->freq.e = 0;
8721 
8722 	/* If we are associated, trying to associate, or have a statically
8723 	 * configured CHANNEL then return that; otherwise return ANY */
8724 	mutex_lock(&priv->mutex);
8725 	if (priv->config & CFG_STATIC_CHANNEL ||
8726 	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8727 		int i;
8728 
8729 		i = libipw_channel_to_index(priv->ieee, priv->channel);
8730 		BUG_ON(i == -1);
8731 		wrqu->freq.e = 1;
8732 
8733 		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8734 		case LIBIPW_52GHZ_BAND:
8735 			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8736 			break;
8737 
8738 		case LIBIPW_24GHZ_BAND:
8739 			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8740 			break;
8741 
8742 		default:
8743 			BUG();
8744 		}
8745 	} else
8746 		wrqu->freq.m = 0;
8747 
8748 	mutex_unlock(&priv->mutex);
8749 	IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8750 	return 0;
8751 }
8752 
8753 static int ipw_wx_set_mode(struct net_device *dev,
8754 			   struct iw_request_info *info,
8755 			   union iwreq_data *wrqu, char *extra)
8756 {
8757 	struct ipw_priv *priv = libipw_priv(dev);
8758 	int err = 0;
8759 
8760 	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8761 
8762 	switch (wrqu->mode) {
8763 #ifdef CONFIG_IPW2200_MONITOR
8764 	case IW_MODE_MONITOR:
8765 #endif
8766 	case IW_MODE_ADHOC:
8767 	case IW_MODE_INFRA:
8768 		break;
8769 	case IW_MODE_AUTO:
8770 		wrqu->mode = IW_MODE_INFRA;
8771 		break;
8772 	default:
8773 		return -EINVAL;
8774 	}
8775 	if (wrqu->mode == priv->ieee->iw_mode)
8776 		return 0;
8777 
8778 	mutex_lock(&priv->mutex);
8779 
8780 	ipw_sw_reset(priv, 0);
8781 
8782 #ifdef CONFIG_IPW2200_MONITOR
8783 	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8784 		priv->net_dev->type = ARPHRD_ETHER;
8785 
8786 	if (wrqu->mode == IW_MODE_MONITOR)
8787 #ifdef CONFIG_IPW2200_RADIOTAP
8788 		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8789 #else
8790 		priv->net_dev->type = ARPHRD_IEEE80211;
8791 #endif
8792 #endif				/* CONFIG_IPW2200_MONITOR */
8793 
8794 	/* Free the existing firmware and reset the fw_loaded
8795 	 * flag so ipw_load() will bring in the new firmware */
8796 	free_firmware();
8797 
8798 	priv->ieee->iw_mode = wrqu->mode;
8799 
8800 	schedule_work(&priv->adapter_restart);
8801 	mutex_unlock(&priv->mutex);
8802 	return err;
8803 }
8804 
8805 static int ipw_wx_get_mode(struct net_device *dev,
8806 			   struct iw_request_info *info,
8807 			   union iwreq_data *wrqu, char *extra)
8808 {
8809 	struct ipw_priv *priv = libipw_priv(dev);
8810 	mutex_lock(&priv->mutex);
8811 	wrqu->mode = priv->ieee->iw_mode;
8812 	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8813 	mutex_unlock(&priv->mutex);
8814 	return 0;
8815 }
8816 
8817 /* Values are in microsecond */
8818 static const s32 timeout_duration[] = {
8819 	350000,
8820 	250000,
8821 	75000,
8822 	37000,
8823 	25000,
8824 };
8825 
8826 static const s32 period_duration[] = {
8827 	400000,
8828 	700000,
8829 	1000000,
8830 	1000000,
8831 	1000000
8832 };
8833 
8834 static int ipw_wx_get_range(struct net_device *dev,
8835 			    struct iw_request_info *info,
8836 			    union iwreq_data *wrqu, char *extra)
8837 {
8838 	struct ipw_priv *priv = libipw_priv(dev);
8839 	struct iw_range *range = (struct iw_range *)extra;
8840 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8841 	int i = 0, j;
8842 
8843 	wrqu->data.length = sizeof(*range);
8844 	memset(range, 0, sizeof(*range));
8845 
8846 	/* 54Mbs == ~27 Mb/s real (802.11g) */
8847 	range->throughput = 27 * 1000 * 1000;
8848 
8849 	range->max_qual.qual = 100;
8850 	/* TODO: Find real max RSSI and stick here */
8851 	range->max_qual.level = 0;
8852 	range->max_qual.noise = 0;
8853 	range->max_qual.updated = 7;	/* Updated all three */
8854 
8855 	range->avg_qual.qual = 70;
8856 	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8857 	range->avg_qual.level = 0;	/* FIXME to real average level */
8858 	range->avg_qual.noise = 0;
8859 	range->avg_qual.updated = 7;	/* Updated all three */
8860 	mutex_lock(&priv->mutex);
8861 	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8862 
8863 	for (i = 0; i < range->num_bitrates; i++)
8864 		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8865 		    500000;
8866 
8867 	range->max_rts = DEFAULT_RTS_THRESHOLD;
8868 	range->min_frag = MIN_FRAG_THRESHOLD;
8869 	range->max_frag = MAX_FRAG_THRESHOLD;
8870 
8871 	range->encoding_size[0] = 5;
8872 	range->encoding_size[1] = 13;
8873 	range->num_encoding_sizes = 2;
8874 	range->max_encoding_tokens = WEP_KEYS;
8875 
8876 	/* Set the Wireless Extension versions */
8877 	range->we_version_compiled = WIRELESS_EXT;
8878 	range->we_version_source = 18;
8879 
8880 	i = 0;
8881 	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8882 		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8883 			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8884 			    (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8885 				continue;
8886 
8887 			range->freq[i].i = geo->bg[j].channel;
8888 			range->freq[i].m = geo->bg[j].freq * 100000;
8889 			range->freq[i].e = 1;
8890 			i++;
8891 		}
8892 	}
8893 
8894 	if (priv->ieee->mode & IEEE_A) {
8895 		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8896 			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8897 			    (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8898 				continue;
8899 
8900 			range->freq[i].i = geo->a[j].channel;
8901 			range->freq[i].m = geo->a[j].freq * 100000;
8902 			range->freq[i].e = 1;
8903 			i++;
8904 		}
8905 	}
8906 
8907 	range->num_channels = i;
8908 	range->num_frequency = i;
8909 
8910 	mutex_unlock(&priv->mutex);
8911 
8912 	/* Event capability (kernel + driver) */
8913 	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8914 				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8915 				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8916 				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8917 	range->event_capa[1] = IW_EVENT_CAPA_K_1;
8918 
8919 	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8920 		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8921 
8922 	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8923 
8924 	IPW_DEBUG_WX("GET Range\n");
8925 	return 0;
8926 }
8927 
8928 static int ipw_wx_set_wap(struct net_device *dev,
8929 			  struct iw_request_info *info,
8930 			  union iwreq_data *wrqu, char *extra)
8931 {
8932 	struct ipw_priv *priv = libipw_priv(dev);
8933 
8934 	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8935 		return -EINVAL;
8936 	mutex_lock(&priv->mutex);
8937 	if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
8938 	    is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
8939 		/* we disable mandatory BSSID association */
8940 		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8941 		priv->config &= ~CFG_STATIC_BSSID;
8942 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8943 				"parameters.\n");
8944 		ipw_associate(priv);
8945 		mutex_unlock(&priv->mutex);
8946 		return 0;
8947 	}
8948 
8949 	priv->config |= CFG_STATIC_BSSID;
8950 	if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
8951 		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8952 		mutex_unlock(&priv->mutex);
8953 		return 0;
8954 	}
8955 
8956 	IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
8957 		     wrqu->ap_addr.sa_data);
8958 
8959 	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8960 
8961 	/* Network configuration changed -- force [re]association */
8962 	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8963 	if (!ipw_disassociate(priv))
8964 		ipw_associate(priv);
8965 
8966 	mutex_unlock(&priv->mutex);
8967 	return 0;
8968 }
8969 
8970 static int ipw_wx_get_wap(struct net_device *dev,
8971 			  struct iw_request_info *info,
8972 			  union iwreq_data *wrqu, char *extra)
8973 {
8974 	struct ipw_priv *priv = libipw_priv(dev);
8975 
8976 	/* If we are associated, trying to associate, or have a statically
8977 	 * configured BSSID then return that; otherwise return ANY */
8978 	mutex_lock(&priv->mutex);
8979 	if (priv->config & CFG_STATIC_BSSID ||
8980 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8981 		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8982 		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8983 	} else
8984 		eth_zero_addr(wrqu->ap_addr.sa_data);
8985 
8986 	IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
8987 		     wrqu->ap_addr.sa_data);
8988 	mutex_unlock(&priv->mutex);
8989 	return 0;
8990 }
8991 
8992 static int ipw_wx_set_essid(struct net_device *dev,
8993 			    struct iw_request_info *info,
8994 			    union iwreq_data *wrqu, char *extra)
8995 {
8996 	struct ipw_priv *priv = libipw_priv(dev);
8997         int length;
8998 
8999         mutex_lock(&priv->mutex);
9000 
9001         if (!wrqu->essid.flags)
9002         {
9003                 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9004                 ipw_disassociate(priv);
9005                 priv->config &= ~CFG_STATIC_ESSID;
9006                 ipw_associate(priv);
9007                 mutex_unlock(&priv->mutex);
9008                 return 0;
9009         }
9010 
9011 	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9012 
9013 	priv->config |= CFG_STATIC_ESSID;
9014 
9015 	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9016 	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9017 		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9018 		mutex_unlock(&priv->mutex);
9019 		return 0;
9020 	}
9021 
9022 	IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length);
9023 
9024 	priv->essid_len = length;
9025 	memcpy(priv->essid, extra, priv->essid_len);
9026 
9027 	/* Network configuration changed -- force [re]association */
9028 	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9029 	if (!ipw_disassociate(priv))
9030 		ipw_associate(priv);
9031 
9032 	mutex_unlock(&priv->mutex);
9033 	return 0;
9034 }
9035 
9036 static int ipw_wx_get_essid(struct net_device *dev,
9037 			    struct iw_request_info *info,
9038 			    union iwreq_data *wrqu, char *extra)
9039 {
9040 	struct ipw_priv *priv = libipw_priv(dev);
9041 
9042 	/* If we are associated, trying to associate, or have a statically
9043 	 * configured ESSID then return that; otherwise return ANY */
9044 	mutex_lock(&priv->mutex);
9045 	if (priv->config & CFG_STATIC_ESSID ||
9046 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9047 		IPW_DEBUG_WX("Getting essid: '%*pE'\n",
9048 			     priv->essid_len, priv->essid);
9049 		memcpy(extra, priv->essid, priv->essid_len);
9050 		wrqu->essid.length = priv->essid_len;
9051 		wrqu->essid.flags = 1;	/* active */
9052 	} else {
9053 		IPW_DEBUG_WX("Getting essid: ANY\n");
9054 		wrqu->essid.length = 0;
9055 		wrqu->essid.flags = 0;	/* active */
9056 	}
9057 	mutex_unlock(&priv->mutex);
9058 	return 0;
9059 }
9060 
9061 static int ipw_wx_set_nick(struct net_device *dev,
9062 			   struct iw_request_info *info,
9063 			   union iwreq_data *wrqu, char *extra)
9064 {
9065 	struct ipw_priv *priv = libipw_priv(dev);
9066 
9067 	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9068 	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9069 		return -E2BIG;
9070 	mutex_lock(&priv->mutex);
9071 	wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
9072 	memset(priv->nick, 0, sizeof(priv->nick));
9073 	memcpy(priv->nick, extra, wrqu->data.length);
9074 	IPW_DEBUG_TRACE("<<\n");
9075 	mutex_unlock(&priv->mutex);
9076 	return 0;
9077 
9078 }
9079 
9080 static int ipw_wx_get_nick(struct net_device *dev,
9081 			   struct iw_request_info *info,
9082 			   union iwreq_data *wrqu, char *extra)
9083 {
9084 	struct ipw_priv *priv = libipw_priv(dev);
9085 	IPW_DEBUG_WX("Getting nick\n");
9086 	mutex_lock(&priv->mutex);
9087 	wrqu->data.length = strlen(priv->nick);
9088 	memcpy(extra, priv->nick, wrqu->data.length);
9089 	wrqu->data.flags = 1;	/* active */
9090 	mutex_unlock(&priv->mutex);
9091 	return 0;
9092 }
9093 
9094 static int ipw_wx_set_sens(struct net_device *dev,
9095 			    struct iw_request_info *info,
9096 			    union iwreq_data *wrqu, char *extra)
9097 {
9098 	struct ipw_priv *priv = libipw_priv(dev);
9099 	int err = 0;
9100 
9101 	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9102 	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9103 	mutex_lock(&priv->mutex);
9104 
9105 	if (wrqu->sens.fixed == 0)
9106 	{
9107 		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9108 		priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9109 		goto out;
9110 	}
9111 	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9112 	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9113 		err = -EINVAL;
9114 		goto out;
9115 	}
9116 
9117 	priv->roaming_threshold = wrqu->sens.value;
9118 	priv->disassociate_threshold = 3*wrqu->sens.value;
9119       out:
9120 	mutex_unlock(&priv->mutex);
9121 	return err;
9122 }
9123 
9124 static int ipw_wx_get_sens(struct net_device *dev,
9125 			    struct iw_request_info *info,
9126 			    union iwreq_data *wrqu, char *extra)
9127 {
9128 	struct ipw_priv *priv = libipw_priv(dev);
9129 	mutex_lock(&priv->mutex);
9130 	wrqu->sens.fixed = 1;
9131 	wrqu->sens.value = priv->roaming_threshold;
9132 	mutex_unlock(&priv->mutex);
9133 
9134 	IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9135 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9136 
9137 	return 0;
9138 }
9139 
9140 static int ipw_wx_set_rate(struct net_device *dev,
9141 			   struct iw_request_info *info,
9142 			   union iwreq_data *wrqu, char *extra)
9143 {
9144 	/* TODO: We should use semaphores or locks for access to priv */
9145 	struct ipw_priv *priv = libipw_priv(dev);
9146 	u32 target_rate = wrqu->bitrate.value;
9147 	u32 fixed, mask;
9148 
9149 	/* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9150 	/* value = X, fixed = 1 means only rate X */
9151 	/* value = X, fixed = 0 means all rates lower equal X */
9152 
9153 	if (target_rate == -1) {
9154 		fixed = 0;
9155 		mask = LIBIPW_DEFAULT_RATES_MASK;
9156 		/* Now we should reassociate */
9157 		goto apply;
9158 	}
9159 
9160 	mask = 0;
9161 	fixed = wrqu->bitrate.fixed;
9162 
9163 	if (target_rate == 1000000 || !fixed)
9164 		mask |= LIBIPW_CCK_RATE_1MB_MASK;
9165 	if (target_rate == 1000000)
9166 		goto apply;
9167 
9168 	if (target_rate == 2000000 || !fixed)
9169 		mask |= LIBIPW_CCK_RATE_2MB_MASK;
9170 	if (target_rate == 2000000)
9171 		goto apply;
9172 
9173 	if (target_rate == 5500000 || !fixed)
9174 		mask |= LIBIPW_CCK_RATE_5MB_MASK;
9175 	if (target_rate == 5500000)
9176 		goto apply;
9177 
9178 	if (target_rate == 6000000 || !fixed)
9179 		mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9180 	if (target_rate == 6000000)
9181 		goto apply;
9182 
9183 	if (target_rate == 9000000 || !fixed)
9184 		mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9185 	if (target_rate == 9000000)
9186 		goto apply;
9187 
9188 	if (target_rate == 11000000 || !fixed)
9189 		mask |= LIBIPW_CCK_RATE_11MB_MASK;
9190 	if (target_rate == 11000000)
9191 		goto apply;
9192 
9193 	if (target_rate == 12000000 || !fixed)
9194 		mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9195 	if (target_rate == 12000000)
9196 		goto apply;
9197 
9198 	if (target_rate == 18000000 || !fixed)
9199 		mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9200 	if (target_rate == 18000000)
9201 		goto apply;
9202 
9203 	if (target_rate == 24000000 || !fixed)
9204 		mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9205 	if (target_rate == 24000000)
9206 		goto apply;
9207 
9208 	if (target_rate == 36000000 || !fixed)
9209 		mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9210 	if (target_rate == 36000000)
9211 		goto apply;
9212 
9213 	if (target_rate == 48000000 || !fixed)
9214 		mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9215 	if (target_rate == 48000000)
9216 		goto apply;
9217 
9218 	if (target_rate == 54000000 || !fixed)
9219 		mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9220 	if (target_rate == 54000000)
9221 		goto apply;
9222 
9223 	IPW_DEBUG_WX("invalid rate specified, returning error\n");
9224 	return -EINVAL;
9225 
9226       apply:
9227 	IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9228 		     mask, fixed ? "fixed" : "sub-rates");
9229 	mutex_lock(&priv->mutex);
9230 	if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9231 		priv->config &= ~CFG_FIXED_RATE;
9232 		ipw_set_fixed_rate(priv, priv->ieee->mode);
9233 	} else
9234 		priv->config |= CFG_FIXED_RATE;
9235 
9236 	if (priv->rates_mask == mask) {
9237 		IPW_DEBUG_WX("Mask set to current mask.\n");
9238 		mutex_unlock(&priv->mutex);
9239 		return 0;
9240 	}
9241 
9242 	priv->rates_mask = mask;
9243 
9244 	/* Network configuration changed -- force [re]association */
9245 	IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9246 	if (!ipw_disassociate(priv))
9247 		ipw_associate(priv);
9248 
9249 	mutex_unlock(&priv->mutex);
9250 	return 0;
9251 }
9252 
9253 static int ipw_wx_get_rate(struct net_device *dev,
9254 			   struct iw_request_info *info,
9255 			   union iwreq_data *wrqu, char *extra)
9256 {
9257 	struct ipw_priv *priv = libipw_priv(dev);
9258 	mutex_lock(&priv->mutex);
9259 	wrqu->bitrate.value = priv->last_rate;
9260 	wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9261 	mutex_unlock(&priv->mutex);
9262 	IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9263 	return 0;
9264 }
9265 
9266 static int ipw_wx_set_rts(struct net_device *dev,
9267 			  struct iw_request_info *info,
9268 			  union iwreq_data *wrqu, char *extra)
9269 {
9270 	struct ipw_priv *priv = libipw_priv(dev);
9271 	mutex_lock(&priv->mutex);
9272 	if (wrqu->rts.disabled || !wrqu->rts.fixed)
9273 		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9274 	else {
9275 		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9276 		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
9277 			mutex_unlock(&priv->mutex);
9278 			return -EINVAL;
9279 		}
9280 		priv->rts_threshold = wrqu->rts.value;
9281 	}
9282 
9283 	ipw_send_rts_threshold(priv, priv->rts_threshold);
9284 	mutex_unlock(&priv->mutex);
9285 	IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9286 	return 0;
9287 }
9288 
9289 static int ipw_wx_get_rts(struct net_device *dev,
9290 			  struct iw_request_info *info,
9291 			  union iwreq_data *wrqu, char *extra)
9292 {
9293 	struct ipw_priv *priv = libipw_priv(dev);
9294 	mutex_lock(&priv->mutex);
9295 	wrqu->rts.value = priv->rts_threshold;
9296 	wrqu->rts.fixed = 0;	/* no auto select */
9297 	wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9298 	mutex_unlock(&priv->mutex);
9299 	IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9300 	return 0;
9301 }
9302 
9303 static int ipw_wx_set_txpow(struct net_device *dev,
9304 			    struct iw_request_info *info,
9305 			    union iwreq_data *wrqu, char *extra)
9306 {
9307 	struct ipw_priv *priv = libipw_priv(dev);
9308 	int err = 0;
9309 
9310 	mutex_lock(&priv->mutex);
9311 	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9312 		err = -EINPROGRESS;
9313 		goto out;
9314 	}
9315 
9316 	if (!wrqu->power.fixed)
9317 		wrqu->power.value = IPW_TX_POWER_DEFAULT;
9318 
9319 	if (wrqu->power.flags != IW_TXPOW_DBM) {
9320 		err = -EINVAL;
9321 		goto out;
9322 	}
9323 
9324 	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9325 	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
9326 		err = -EINVAL;
9327 		goto out;
9328 	}
9329 
9330 	priv->tx_power = wrqu->power.value;
9331 	err = ipw_set_tx_power(priv);
9332       out:
9333 	mutex_unlock(&priv->mutex);
9334 	return err;
9335 }
9336 
9337 static int ipw_wx_get_txpow(struct net_device *dev,
9338 			    struct iw_request_info *info,
9339 			    union iwreq_data *wrqu, char *extra)
9340 {
9341 	struct ipw_priv *priv = libipw_priv(dev);
9342 	mutex_lock(&priv->mutex);
9343 	wrqu->power.value = priv->tx_power;
9344 	wrqu->power.fixed = 1;
9345 	wrqu->power.flags = IW_TXPOW_DBM;
9346 	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9347 	mutex_unlock(&priv->mutex);
9348 
9349 	IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9350 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9351 
9352 	return 0;
9353 }
9354 
9355 static int ipw_wx_set_frag(struct net_device *dev,
9356 			   struct iw_request_info *info,
9357 			   union iwreq_data *wrqu, char *extra)
9358 {
9359 	struct ipw_priv *priv = libipw_priv(dev);
9360 	mutex_lock(&priv->mutex);
9361 	if (wrqu->frag.disabled || !wrqu->frag.fixed)
9362 		priv->ieee->fts = DEFAULT_FTS;
9363 	else {
9364 		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9365 		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9366 			mutex_unlock(&priv->mutex);
9367 			return -EINVAL;
9368 		}
9369 
9370 		priv->ieee->fts = wrqu->frag.value & ~0x1;
9371 	}
9372 
9373 	ipw_send_frag_threshold(priv, wrqu->frag.value);
9374 	mutex_unlock(&priv->mutex);
9375 	IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9376 	return 0;
9377 }
9378 
9379 static int ipw_wx_get_frag(struct net_device *dev,
9380 			   struct iw_request_info *info,
9381 			   union iwreq_data *wrqu, char *extra)
9382 {
9383 	struct ipw_priv *priv = libipw_priv(dev);
9384 	mutex_lock(&priv->mutex);
9385 	wrqu->frag.value = priv->ieee->fts;
9386 	wrqu->frag.fixed = 0;	/* no auto select */
9387 	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9388 	mutex_unlock(&priv->mutex);
9389 	IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9390 
9391 	return 0;
9392 }
9393 
9394 static int ipw_wx_set_retry(struct net_device *dev,
9395 			    struct iw_request_info *info,
9396 			    union iwreq_data *wrqu, char *extra)
9397 {
9398 	struct ipw_priv *priv = libipw_priv(dev);
9399 
9400 	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9401 		return -EINVAL;
9402 
9403 	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9404 		return 0;
9405 
9406 	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9407 		return -EINVAL;
9408 
9409 	mutex_lock(&priv->mutex);
9410 	if (wrqu->retry.flags & IW_RETRY_SHORT)
9411 		priv->short_retry_limit = (u8) wrqu->retry.value;
9412 	else if (wrqu->retry.flags & IW_RETRY_LONG)
9413 		priv->long_retry_limit = (u8) wrqu->retry.value;
9414 	else {
9415 		priv->short_retry_limit = (u8) wrqu->retry.value;
9416 		priv->long_retry_limit = (u8) wrqu->retry.value;
9417 	}
9418 
9419 	ipw_send_retry_limit(priv, priv->short_retry_limit,
9420 			     priv->long_retry_limit);
9421 	mutex_unlock(&priv->mutex);
9422 	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9423 		     priv->short_retry_limit, priv->long_retry_limit);
9424 	return 0;
9425 }
9426 
9427 static int ipw_wx_get_retry(struct net_device *dev,
9428 			    struct iw_request_info *info,
9429 			    union iwreq_data *wrqu, char *extra)
9430 {
9431 	struct ipw_priv *priv = libipw_priv(dev);
9432 
9433 	mutex_lock(&priv->mutex);
9434 	wrqu->retry.disabled = 0;
9435 
9436 	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9437 		mutex_unlock(&priv->mutex);
9438 		return -EINVAL;
9439 	}
9440 
9441 	if (wrqu->retry.flags & IW_RETRY_LONG) {
9442 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9443 		wrqu->retry.value = priv->long_retry_limit;
9444 	} else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9445 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9446 		wrqu->retry.value = priv->short_retry_limit;
9447 	} else {
9448 		wrqu->retry.flags = IW_RETRY_LIMIT;
9449 		wrqu->retry.value = priv->short_retry_limit;
9450 	}
9451 	mutex_unlock(&priv->mutex);
9452 
9453 	IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9454 
9455 	return 0;
9456 }
9457 
9458 static int ipw_wx_set_scan(struct net_device *dev,
9459 			   struct iw_request_info *info,
9460 			   union iwreq_data *wrqu, char *extra)
9461 {
9462 	struct ipw_priv *priv = libipw_priv(dev);
9463 	struct iw_scan_req *req = (struct iw_scan_req *)extra;
9464 	struct delayed_work *work = NULL;
9465 
9466 	mutex_lock(&priv->mutex);
9467 
9468 	priv->user_requested_scan = 1;
9469 
9470 	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9471 		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9472 			int len = min((int)req->essid_len,
9473 			              (int)sizeof(priv->direct_scan_ssid));
9474 			memcpy(priv->direct_scan_ssid, req->essid, len);
9475 			priv->direct_scan_ssid_len = len;
9476 			work = &priv->request_direct_scan;
9477 		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9478 			work = &priv->request_passive_scan;
9479 		}
9480 	} else {
9481 		/* Normal active broadcast scan */
9482 		work = &priv->request_scan;
9483 	}
9484 
9485 	mutex_unlock(&priv->mutex);
9486 
9487 	IPW_DEBUG_WX("Start scan\n");
9488 
9489 	schedule_delayed_work(work, 0);
9490 
9491 	return 0;
9492 }
9493 
9494 static int ipw_wx_get_scan(struct net_device *dev,
9495 			   struct iw_request_info *info,
9496 			   union iwreq_data *wrqu, char *extra)
9497 {
9498 	struct ipw_priv *priv = libipw_priv(dev);
9499 	return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9500 }
9501 
9502 static int ipw_wx_set_encode(struct net_device *dev,
9503 			     struct iw_request_info *info,
9504 			     union iwreq_data *wrqu, char *key)
9505 {
9506 	struct ipw_priv *priv = libipw_priv(dev);
9507 	int ret;
9508 	u32 cap = priv->capability;
9509 
9510 	mutex_lock(&priv->mutex);
9511 	ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9512 
9513 	/* In IBSS mode, we need to notify the firmware to update
9514 	 * the beacon info after we changed the capability. */
9515 	if (cap != priv->capability &&
9516 	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
9517 	    priv->status & STATUS_ASSOCIATED)
9518 		ipw_disassociate(priv);
9519 
9520 	mutex_unlock(&priv->mutex);
9521 	return ret;
9522 }
9523 
9524 static int ipw_wx_get_encode(struct net_device *dev,
9525 			     struct iw_request_info *info,
9526 			     union iwreq_data *wrqu, char *key)
9527 {
9528 	struct ipw_priv *priv = libipw_priv(dev);
9529 	return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9530 }
9531 
9532 static int ipw_wx_set_power(struct net_device *dev,
9533 			    struct iw_request_info *info,
9534 			    union iwreq_data *wrqu, char *extra)
9535 {
9536 	struct ipw_priv *priv = libipw_priv(dev);
9537 	int err;
9538 	mutex_lock(&priv->mutex);
9539 	if (wrqu->power.disabled) {
9540 		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9541 		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9542 		if (err) {
9543 			IPW_DEBUG_WX("failed setting power mode.\n");
9544 			mutex_unlock(&priv->mutex);
9545 			return err;
9546 		}
9547 		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9548 		mutex_unlock(&priv->mutex);
9549 		return 0;
9550 	}
9551 
9552 	switch (wrqu->power.flags & IW_POWER_MODE) {
9553 	case IW_POWER_ON:	/* If not specified */
9554 	case IW_POWER_MODE:	/* If set all mask */
9555 	case IW_POWER_ALL_R:	/* If explicitly state all */
9556 		break;
9557 	default:		/* Otherwise we don't support it */
9558 		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9559 			     wrqu->power.flags);
9560 		mutex_unlock(&priv->mutex);
9561 		return -EOPNOTSUPP;
9562 	}
9563 
9564 	/* If the user hasn't specified a power management mode yet, default
9565 	 * to BATTERY */
9566 	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9567 		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9568 	else
9569 		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9570 
9571 	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9572 	if (err) {
9573 		IPW_DEBUG_WX("failed setting power mode.\n");
9574 		mutex_unlock(&priv->mutex);
9575 		return err;
9576 	}
9577 
9578 	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9579 	mutex_unlock(&priv->mutex);
9580 	return 0;
9581 }
9582 
9583 static int ipw_wx_get_power(struct net_device *dev,
9584 			    struct iw_request_info *info,
9585 			    union iwreq_data *wrqu, char *extra)
9586 {
9587 	struct ipw_priv *priv = libipw_priv(dev);
9588 	mutex_lock(&priv->mutex);
9589 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9590 		wrqu->power.disabled = 1;
9591 	else
9592 		wrqu->power.disabled = 0;
9593 
9594 	mutex_unlock(&priv->mutex);
9595 	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9596 
9597 	return 0;
9598 }
9599 
9600 static int ipw_wx_set_powermode(struct net_device *dev,
9601 				struct iw_request_info *info,
9602 				union iwreq_data *wrqu, char *extra)
9603 {
9604 	struct ipw_priv *priv = libipw_priv(dev);
9605 	int mode = *(int *)extra;
9606 	int err;
9607 
9608 	mutex_lock(&priv->mutex);
9609 	if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9610 		mode = IPW_POWER_AC;
9611 
9612 	if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9613 		err = ipw_send_power_mode(priv, mode);
9614 		if (err) {
9615 			IPW_DEBUG_WX("failed setting power mode.\n");
9616 			mutex_unlock(&priv->mutex);
9617 			return err;
9618 		}
9619 		priv->power_mode = IPW_POWER_ENABLED | mode;
9620 	}
9621 	mutex_unlock(&priv->mutex);
9622 	return 0;
9623 }
9624 
9625 #define MAX_WX_STRING 80
9626 static int ipw_wx_get_powermode(struct net_device *dev,
9627 				struct iw_request_info *info,
9628 				union iwreq_data *wrqu, char *extra)
9629 {
9630 	struct ipw_priv *priv = libipw_priv(dev);
9631 	int level = IPW_POWER_LEVEL(priv->power_mode);
9632 	char *p = extra;
9633 
9634 	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9635 
9636 	switch (level) {
9637 	case IPW_POWER_AC:
9638 		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9639 		break;
9640 	case IPW_POWER_BATTERY:
9641 		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9642 		break;
9643 	default:
9644 		p += snprintf(p, MAX_WX_STRING - (p - extra),
9645 			      "(Timeout %dms, Period %dms)",
9646 			      timeout_duration[level - 1] / 1000,
9647 			      period_duration[level - 1] / 1000);
9648 	}
9649 
9650 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9651 		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9652 
9653 	wrqu->data.length = p - extra + 1;
9654 
9655 	return 0;
9656 }
9657 
9658 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9659 				    struct iw_request_info *info,
9660 				    union iwreq_data *wrqu, char *extra)
9661 {
9662 	struct ipw_priv *priv = libipw_priv(dev);
9663 	int mode = *(int *)extra;
9664 	u8 band = 0, modulation = 0;
9665 
9666 	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9667 		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9668 		return -EINVAL;
9669 	}
9670 	mutex_lock(&priv->mutex);
9671 	if (priv->adapter == IPW_2915ABG) {
9672 		priv->ieee->abg_true = 1;
9673 		if (mode & IEEE_A) {
9674 			band |= LIBIPW_52GHZ_BAND;
9675 			modulation |= LIBIPW_OFDM_MODULATION;
9676 		} else
9677 			priv->ieee->abg_true = 0;
9678 	} else {
9679 		if (mode & IEEE_A) {
9680 			IPW_WARNING("Attempt to set 2200BG into "
9681 				    "802.11a mode\n");
9682 			mutex_unlock(&priv->mutex);
9683 			return -EINVAL;
9684 		}
9685 
9686 		priv->ieee->abg_true = 0;
9687 	}
9688 
9689 	if (mode & IEEE_B) {
9690 		band |= LIBIPW_24GHZ_BAND;
9691 		modulation |= LIBIPW_CCK_MODULATION;
9692 	} else
9693 		priv->ieee->abg_true = 0;
9694 
9695 	if (mode & IEEE_G) {
9696 		band |= LIBIPW_24GHZ_BAND;
9697 		modulation |= LIBIPW_OFDM_MODULATION;
9698 	} else
9699 		priv->ieee->abg_true = 0;
9700 
9701 	priv->ieee->mode = mode;
9702 	priv->ieee->freq_band = band;
9703 	priv->ieee->modulation = modulation;
9704 	init_supported_rates(priv, &priv->rates);
9705 
9706 	/* Network configuration changed -- force [re]association */
9707 	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9708 	if (!ipw_disassociate(priv)) {
9709 		ipw_send_supported_rates(priv, &priv->rates);
9710 		ipw_associate(priv);
9711 	}
9712 
9713 	/* Update the band LEDs */
9714 	ipw_led_band_on(priv);
9715 
9716 	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9717 		     mode & IEEE_A ? 'a' : '.',
9718 		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9719 	mutex_unlock(&priv->mutex);
9720 	return 0;
9721 }
9722 
9723 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9724 				    struct iw_request_info *info,
9725 				    union iwreq_data *wrqu, char *extra)
9726 {
9727 	struct ipw_priv *priv = libipw_priv(dev);
9728 	mutex_lock(&priv->mutex);
9729 	switch (priv->ieee->mode) {
9730 	case IEEE_A:
9731 		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9732 		break;
9733 	case IEEE_B:
9734 		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9735 		break;
9736 	case IEEE_A | IEEE_B:
9737 		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9738 		break;
9739 	case IEEE_G:
9740 		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9741 		break;
9742 	case IEEE_A | IEEE_G:
9743 		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9744 		break;
9745 	case IEEE_B | IEEE_G:
9746 		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9747 		break;
9748 	case IEEE_A | IEEE_B | IEEE_G:
9749 		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9750 		break;
9751 	default:
9752 		strncpy(extra, "unknown", MAX_WX_STRING);
9753 		break;
9754 	}
9755 	extra[MAX_WX_STRING - 1] = '\0';
9756 
9757 	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9758 
9759 	wrqu->data.length = strlen(extra) + 1;
9760 	mutex_unlock(&priv->mutex);
9761 
9762 	return 0;
9763 }
9764 
9765 static int ipw_wx_set_preamble(struct net_device *dev,
9766 			       struct iw_request_info *info,
9767 			       union iwreq_data *wrqu, char *extra)
9768 {
9769 	struct ipw_priv *priv = libipw_priv(dev);
9770 	int mode = *(int *)extra;
9771 	mutex_lock(&priv->mutex);
9772 	/* Switching from SHORT -> LONG requires a disassociation */
9773 	if (mode == 1) {
9774 		if (!(priv->config & CFG_PREAMBLE_LONG)) {
9775 			priv->config |= CFG_PREAMBLE_LONG;
9776 
9777 			/* Network configuration changed -- force [re]association */
9778 			IPW_DEBUG_ASSOC
9779 			    ("[re]association triggered due to preamble change.\n");
9780 			if (!ipw_disassociate(priv))
9781 				ipw_associate(priv);
9782 		}
9783 		goto done;
9784 	}
9785 
9786 	if (mode == 0) {
9787 		priv->config &= ~CFG_PREAMBLE_LONG;
9788 		goto done;
9789 	}
9790 	mutex_unlock(&priv->mutex);
9791 	return -EINVAL;
9792 
9793       done:
9794 	mutex_unlock(&priv->mutex);
9795 	return 0;
9796 }
9797 
9798 static int ipw_wx_get_preamble(struct net_device *dev,
9799 			       struct iw_request_info *info,
9800 			       union iwreq_data *wrqu, char *extra)
9801 {
9802 	struct ipw_priv *priv = libipw_priv(dev);
9803 	mutex_lock(&priv->mutex);
9804 	if (priv->config & CFG_PREAMBLE_LONG)
9805 		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9806 	else
9807 		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9808 	mutex_unlock(&priv->mutex);
9809 	return 0;
9810 }
9811 
9812 #ifdef CONFIG_IPW2200_MONITOR
9813 static int ipw_wx_set_monitor(struct net_device *dev,
9814 			      struct iw_request_info *info,
9815 			      union iwreq_data *wrqu, char *extra)
9816 {
9817 	struct ipw_priv *priv = libipw_priv(dev);
9818 	int *parms = (int *)extra;
9819 	int enable = (parms[0] > 0);
9820 	mutex_lock(&priv->mutex);
9821 	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9822 	if (enable) {
9823 		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9824 #ifdef CONFIG_IPW2200_RADIOTAP
9825 			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9826 #else
9827 			priv->net_dev->type = ARPHRD_IEEE80211;
9828 #endif
9829 			schedule_work(&priv->adapter_restart);
9830 		}
9831 
9832 		ipw_set_channel(priv, parms[1]);
9833 	} else {
9834 		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9835 			mutex_unlock(&priv->mutex);
9836 			return 0;
9837 		}
9838 		priv->net_dev->type = ARPHRD_ETHER;
9839 		schedule_work(&priv->adapter_restart);
9840 	}
9841 	mutex_unlock(&priv->mutex);
9842 	return 0;
9843 }
9844 
9845 #endif				/* CONFIG_IPW2200_MONITOR */
9846 
9847 static int ipw_wx_reset(struct net_device *dev,
9848 			struct iw_request_info *info,
9849 			union iwreq_data *wrqu, char *extra)
9850 {
9851 	struct ipw_priv *priv = libipw_priv(dev);
9852 	IPW_DEBUG_WX("RESET\n");
9853 	schedule_work(&priv->adapter_restart);
9854 	return 0;
9855 }
9856 
9857 static int ipw_wx_sw_reset(struct net_device *dev,
9858 			   struct iw_request_info *info,
9859 			   union iwreq_data *wrqu, char *extra)
9860 {
9861 	struct ipw_priv *priv = libipw_priv(dev);
9862 	union iwreq_data wrqu_sec = {
9863 		.encoding = {
9864 			     .flags = IW_ENCODE_DISABLED,
9865 			     },
9866 	};
9867 	int ret;
9868 
9869 	IPW_DEBUG_WX("SW_RESET\n");
9870 
9871 	mutex_lock(&priv->mutex);
9872 
9873 	ret = ipw_sw_reset(priv, 2);
9874 	if (!ret) {
9875 		free_firmware();
9876 		ipw_adapter_restart(priv);
9877 	}
9878 
9879 	/* The SW reset bit might have been toggled on by the 'disable'
9880 	 * module parameter, so take appropriate action */
9881 	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9882 
9883 	mutex_unlock(&priv->mutex);
9884 	libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9885 	mutex_lock(&priv->mutex);
9886 
9887 	if (!(priv->status & STATUS_RF_KILL_MASK)) {
9888 		/* Configuration likely changed -- force [re]association */
9889 		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9890 				"reset.\n");
9891 		if (!ipw_disassociate(priv))
9892 			ipw_associate(priv);
9893 	}
9894 
9895 	mutex_unlock(&priv->mutex);
9896 
9897 	return 0;
9898 }
9899 
9900 /* Rebase the WE IOCTLs to zero for the handler array */
9901 static iw_handler ipw_wx_handlers[] = {
9902 	IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
9903 	IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
9904 	IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
9905 	IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
9906 	IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
9907 	IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
9908 	IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
9909 	IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
9910 	IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
9911 	IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
9912 	IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
9913 	IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
9914 	IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
9915 	IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
9916 	IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
9917 	IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
9918 	IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
9919 	IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
9920 	IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
9921 	IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
9922 	IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
9923 	IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
9924 	IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
9925 	IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
9926 	IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
9927 	IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
9928 	IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
9929 	IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
9930 	IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
9931 	IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
9932 	IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
9933 	IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
9934 	IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
9935 	IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
9936 	IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
9937 	IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
9938 	IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
9939 	IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
9940 	IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
9941 	IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
9942 	IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
9943 };
9944 
9945 enum {
9946 	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9947 	IPW_PRIV_GET_POWER,
9948 	IPW_PRIV_SET_MODE,
9949 	IPW_PRIV_GET_MODE,
9950 	IPW_PRIV_SET_PREAMBLE,
9951 	IPW_PRIV_GET_PREAMBLE,
9952 	IPW_PRIV_RESET,
9953 	IPW_PRIV_SW_RESET,
9954 #ifdef CONFIG_IPW2200_MONITOR
9955 	IPW_PRIV_SET_MONITOR,
9956 #endif
9957 };
9958 
9959 static struct iw_priv_args ipw_priv_args[] = {
9960 	{
9961 	 .cmd = IPW_PRIV_SET_POWER,
9962 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9963 	 .name = "set_power"},
9964 	{
9965 	 .cmd = IPW_PRIV_GET_POWER,
9966 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9967 	 .name = "get_power"},
9968 	{
9969 	 .cmd = IPW_PRIV_SET_MODE,
9970 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9971 	 .name = "set_mode"},
9972 	{
9973 	 .cmd = IPW_PRIV_GET_MODE,
9974 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9975 	 .name = "get_mode"},
9976 	{
9977 	 .cmd = IPW_PRIV_SET_PREAMBLE,
9978 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9979 	 .name = "set_preamble"},
9980 	{
9981 	 .cmd = IPW_PRIV_GET_PREAMBLE,
9982 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9983 	 .name = "get_preamble"},
9984 	{
9985 	 IPW_PRIV_RESET,
9986 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9987 	{
9988 	 IPW_PRIV_SW_RESET,
9989 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9990 #ifdef CONFIG_IPW2200_MONITOR
9991 	{
9992 	 IPW_PRIV_SET_MONITOR,
9993 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9994 #endif				/* CONFIG_IPW2200_MONITOR */
9995 };
9996 
9997 static iw_handler ipw_priv_handler[] = {
9998 	ipw_wx_set_powermode,
9999 	ipw_wx_get_powermode,
10000 	ipw_wx_set_wireless_mode,
10001 	ipw_wx_get_wireless_mode,
10002 	ipw_wx_set_preamble,
10003 	ipw_wx_get_preamble,
10004 	ipw_wx_reset,
10005 	ipw_wx_sw_reset,
10006 #ifdef CONFIG_IPW2200_MONITOR
10007 	ipw_wx_set_monitor,
10008 #endif
10009 };
10010 
10011 static struct iw_handler_def ipw_wx_handler_def = {
10012 	.standard = ipw_wx_handlers,
10013 	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
10014 	.num_private = ARRAY_SIZE(ipw_priv_handler),
10015 	.num_private_args = ARRAY_SIZE(ipw_priv_args),
10016 	.private = ipw_priv_handler,
10017 	.private_args = ipw_priv_args,
10018 	.get_wireless_stats = ipw_get_wireless_stats,
10019 };
10020 
10021 /*
10022  * Get wireless statistics.
10023  * Called by /proc/net/wireless
10024  * Also called by SIOCGIWSTATS
10025  */
10026 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10027 {
10028 	struct ipw_priv *priv = libipw_priv(dev);
10029 	struct iw_statistics *wstats;
10030 
10031 	wstats = &priv->wstats;
10032 
10033 	/* if hw is disabled, then ipw_get_ordinal() can't be called.
10034 	 * netdev->get_wireless_stats seems to be called before fw is
10035 	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
10036 	 * and associated; if not associcated, the values are all meaningless
10037 	 * anyway, so set them all to NULL and INVALID */
10038 	if (!(priv->status & STATUS_ASSOCIATED)) {
10039 		wstats->miss.beacon = 0;
10040 		wstats->discard.retries = 0;
10041 		wstats->qual.qual = 0;
10042 		wstats->qual.level = 0;
10043 		wstats->qual.noise = 0;
10044 		wstats->qual.updated = 7;
10045 		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10046 		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10047 		return wstats;
10048 	}
10049 
10050 	wstats->qual.qual = priv->quality;
10051 	wstats->qual.level = priv->exp_avg_rssi;
10052 	wstats->qual.noise = priv->exp_avg_noise;
10053 	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10054 	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10055 
10056 	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10057 	wstats->discard.retries = priv->last_tx_failures;
10058 	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10059 
10060 /*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10061 	goto fail_get_ordinal;
10062 	wstats->discard.retries += tx_retry; */
10063 
10064 	return wstats;
10065 }
10066 
10067 /* net device stuff */
10068 
10069 static  void init_sys_config(struct ipw_sys_config *sys_config)
10070 {
10071 	memset(sys_config, 0, sizeof(struct ipw_sys_config));
10072 	sys_config->bt_coexistence = 0;
10073 	sys_config->answer_broadcast_ssid_probe = 0;
10074 	sys_config->accept_all_data_frames = 0;
10075 	sys_config->accept_non_directed_frames = 1;
10076 	sys_config->exclude_unicast_unencrypted = 0;
10077 	sys_config->disable_unicast_decryption = 1;
10078 	sys_config->exclude_multicast_unencrypted = 0;
10079 	sys_config->disable_multicast_decryption = 1;
10080 	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10081 		antenna = CFG_SYS_ANTENNA_BOTH;
10082 	sys_config->antenna_diversity = antenna;
10083 	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
10084 	sys_config->dot11g_auto_detection = 0;
10085 	sys_config->enable_cts_to_self = 0;
10086 	sys_config->bt_coexist_collision_thr = 0;
10087 	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
10088 	sys_config->silence_threshold = 0x1e;
10089 }
10090 
10091 static int ipw_net_open(struct net_device *dev)
10092 {
10093 	IPW_DEBUG_INFO("dev->open\n");
10094 	netif_start_queue(dev);
10095 	return 0;
10096 }
10097 
10098 static int ipw_net_stop(struct net_device *dev)
10099 {
10100 	IPW_DEBUG_INFO("dev->close\n");
10101 	netif_stop_queue(dev);
10102 	return 0;
10103 }
10104 
10105 /*
10106 todo:
10107 
10108 modify to send one tfd per fragment instead of using chunking.  otherwise
10109 we need to heavily modify the libipw_skb_to_txb.
10110 */
10111 
10112 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10113 			     int pri)
10114 {
10115 	struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10116 	    txb->fragments[0]->data;
10117 	int i = 0;
10118 	struct tfd_frame *tfd;
10119 #ifdef CONFIG_IPW2200_QOS
10120 	int tx_id = ipw_get_tx_queue_number(priv, pri);
10121 	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10122 #else
10123 	struct clx2_tx_queue *txq = &priv->txq[0];
10124 #endif
10125 	struct clx2_queue *q = &txq->q;
10126 	u8 id, hdr_len, unicast;
10127 	int fc;
10128 
10129 	if (!(priv->status & STATUS_ASSOCIATED))
10130 		goto drop;
10131 
10132 	hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10133 	switch (priv->ieee->iw_mode) {
10134 	case IW_MODE_ADHOC:
10135 		unicast = !is_multicast_ether_addr(hdr->addr1);
10136 		id = ipw_find_station(priv, hdr->addr1);
10137 		if (id == IPW_INVALID_STATION) {
10138 			id = ipw_add_station(priv, hdr->addr1);
10139 			if (id == IPW_INVALID_STATION) {
10140 				IPW_WARNING("Attempt to send data to "
10141 					    "invalid cell: %pM\n",
10142 					    hdr->addr1);
10143 				goto drop;
10144 			}
10145 		}
10146 		break;
10147 
10148 	case IW_MODE_INFRA:
10149 	default:
10150 		unicast = !is_multicast_ether_addr(hdr->addr3);
10151 		id = 0;
10152 		break;
10153 	}
10154 
10155 	tfd = &txq->bd[q->first_empty];
10156 	txq->txb[q->first_empty] = txb;
10157 	memset(tfd, 0, sizeof(*tfd));
10158 	tfd->u.data.station_number = id;
10159 
10160 	tfd->control_flags.message_type = TX_FRAME_TYPE;
10161 	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10162 
10163 	tfd->u.data.cmd_id = DINO_CMD_TX;
10164 	tfd->u.data.len = cpu_to_le16(txb->payload_size);
10165 
10166 	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10167 		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10168 	else
10169 		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10170 
10171 	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10172 		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10173 
10174 	fc = le16_to_cpu(hdr->frame_ctl);
10175 	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10176 
10177 	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10178 
10179 	if (likely(unicast))
10180 		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10181 
10182 	if (txb->encrypted && !priv->ieee->host_encrypt) {
10183 		switch (priv->ieee->sec.level) {
10184 		case SEC_LEVEL_3:
10185 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10186 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10187 			/* XXX: ACK flag must be set for CCMP even if it
10188 			 * is a multicast/broadcast packet, because CCMP
10189 			 * group communication encrypted by GTK is
10190 			 * actually done by the AP. */
10191 			if (!unicast)
10192 				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10193 
10194 			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10195 			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10196 			tfd->u.data.key_index = 0;
10197 			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10198 			break;
10199 		case SEC_LEVEL_2:
10200 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10201 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10202 			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10203 			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10204 			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10205 			break;
10206 		case SEC_LEVEL_1:
10207 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10208 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10209 			tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10210 			if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10211 			    40)
10212 				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10213 			else
10214 				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10215 			break;
10216 		case SEC_LEVEL_0:
10217 			break;
10218 		default:
10219 			printk(KERN_ERR "Unknown security level %d\n",
10220 			       priv->ieee->sec.level);
10221 			break;
10222 		}
10223 	} else
10224 		/* No hardware encryption */
10225 		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10226 
10227 #ifdef CONFIG_IPW2200_QOS
10228 	if (fc & IEEE80211_STYPE_QOS_DATA)
10229 		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10230 #endif				/* CONFIG_IPW2200_QOS */
10231 
10232 	/* payload */
10233 	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10234 						 txb->nr_frags));
10235 	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10236 		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10237 	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10238 		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10239 			       i, le32_to_cpu(tfd->u.data.num_chunks),
10240 			       txb->fragments[i]->len - hdr_len);
10241 		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10242 			     i, tfd->u.data.num_chunks,
10243 			     txb->fragments[i]->len - hdr_len);
10244 		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10245 			   txb->fragments[i]->len - hdr_len);
10246 
10247 		tfd->u.data.chunk_ptr[i] =
10248 		    cpu_to_le32(pci_map_single
10249 				(priv->pci_dev,
10250 				 txb->fragments[i]->data + hdr_len,
10251 				 txb->fragments[i]->len - hdr_len,
10252 				 PCI_DMA_TODEVICE));
10253 		tfd->u.data.chunk_len[i] =
10254 		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
10255 	}
10256 
10257 	if (i != txb->nr_frags) {
10258 		struct sk_buff *skb;
10259 		u16 remaining_bytes = 0;
10260 		int j;
10261 
10262 		for (j = i; j < txb->nr_frags; j++)
10263 			remaining_bytes += txb->fragments[j]->len - hdr_len;
10264 
10265 		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10266 		       remaining_bytes);
10267 		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10268 		if (skb != NULL) {
10269 			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10270 			for (j = i; j < txb->nr_frags; j++) {
10271 				int size = txb->fragments[j]->len - hdr_len;
10272 
10273 				printk(KERN_INFO "Adding frag %d %d...\n",
10274 				       j, size);
10275 				skb_put_data(skb,
10276 					     txb->fragments[j]->data + hdr_len,
10277 					     size);
10278 			}
10279 			dev_kfree_skb_any(txb->fragments[i]);
10280 			txb->fragments[i] = skb;
10281 			tfd->u.data.chunk_ptr[i] =
10282 			    cpu_to_le32(pci_map_single
10283 					(priv->pci_dev, skb->data,
10284 					 remaining_bytes,
10285 					 PCI_DMA_TODEVICE));
10286 
10287 			le32_add_cpu(&tfd->u.data.num_chunks, 1);
10288 		}
10289 	}
10290 
10291 	/* kick DMA */
10292 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10293 	ipw_write32(priv, q->reg_w, q->first_empty);
10294 
10295 	if (ipw_tx_queue_space(q) < q->high_mark)
10296 		netif_stop_queue(priv->net_dev);
10297 
10298 	return NETDEV_TX_OK;
10299 
10300       drop:
10301 	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10302 	libipw_txb_free(txb);
10303 	return NETDEV_TX_OK;
10304 }
10305 
10306 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10307 {
10308 	struct ipw_priv *priv = libipw_priv(dev);
10309 #ifdef CONFIG_IPW2200_QOS
10310 	int tx_id = ipw_get_tx_queue_number(priv, pri);
10311 	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10312 #else
10313 	struct clx2_tx_queue *txq = &priv->txq[0];
10314 #endif				/* CONFIG_IPW2200_QOS */
10315 
10316 	if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10317 		return 1;
10318 
10319 	return 0;
10320 }
10321 
10322 #ifdef CONFIG_IPW2200_PROMISCUOUS
10323 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10324 				      struct libipw_txb *txb)
10325 {
10326 	struct libipw_rx_stats dummystats;
10327 	struct ieee80211_hdr *hdr;
10328 	u8 n;
10329 	u16 filter = priv->prom_priv->filter;
10330 	int hdr_only = 0;
10331 
10332 	if (filter & IPW_PROM_NO_TX)
10333 		return;
10334 
10335 	memset(&dummystats, 0, sizeof(dummystats));
10336 
10337 	/* Filtering of fragment chains is done against the first fragment */
10338 	hdr = (void *)txb->fragments[0]->data;
10339 	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10340 		if (filter & IPW_PROM_NO_MGMT)
10341 			return;
10342 		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10343 			hdr_only = 1;
10344 	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10345 		if (filter & IPW_PROM_NO_CTL)
10346 			return;
10347 		if (filter & IPW_PROM_CTL_HEADER_ONLY)
10348 			hdr_only = 1;
10349 	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10350 		if (filter & IPW_PROM_NO_DATA)
10351 			return;
10352 		if (filter & IPW_PROM_DATA_HEADER_ONLY)
10353 			hdr_only = 1;
10354 	}
10355 
10356 	for(n=0; n<txb->nr_frags; ++n) {
10357 		struct sk_buff *src = txb->fragments[n];
10358 		struct sk_buff *dst;
10359 		struct ieee80211_radiotap_header *rt_hdr;
10360 		int len;
10361 
10362 		if (hdr_only) {
10363 			hdr = (void *)src->data;
10364 			len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10365 		} else
10366 			len = src->len;
10367 
10368 		dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10369 		if (!dst)
10370 			continue;
10371 
10372 		rt_hdr = skb_put(dst, sizeof(*rt_hdr));
10373 
10374 		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10375 		rt_hdr->it_pad = 0;
10376 		rt_hdr->it_present = 0; /* after all, it's just an idea */
10377 		rt_hdr->it_present |=  cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10378 
10379 		*(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10380 			ieee80211chan2mhz(priv->channel));
10381 		if (priv->channel > 14) 	/* 802.11a */
10382 			*(__le16*)skb_put(dst, sizeof(u16)) =
10383 				cpu_to_le16(IEEE80211_CHAN_OFDM |
10384 					     IEEE80211_CHAN_5GHZ);
10385 		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10386 			*(__le16*)skb_put(dst, sizeof(u16)) =
10387 				cpu_to_le16(IEEE80211_CHAN_CCK |
10388 					     IEEE80211_CHAN_2GHZ);
10389 		else 		/* 802.11g */
10390 			*(__le16*)skb_put(dst, sizeof(u16)) =
10391 				cpu_to_le16(IEEE80211_CHAN_OFDM |
10392 				 IEEE80211_CHAN_2GHZ);
10393 
10394 		rt_hdr->it_len = cpu_to_le16(dst->len);
10395 
10396 		skb_copy_from_linear_data(src, skb_put(dst, len), len);
10397 
10398 		if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10399 			dev_kfree_skb_any(dst);
10400 	}
10401 }
10402 #endif
10403 
10404 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10405 					   struct net_device *dev, int pri)
10406 {
10407 	struct ipw_priv *priv = libipw_priv(dev);
10408 	unsigned long flags;
10409 	netdev_tx_t ret;
10410 
10411 	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10412 	spin_lock_irqsave(&priv->lock, flags);
10413 
10414 #ifdef CONFIG_IPW2200_PROMISCUOUS
10415 	if (rtap_iface && netif_running(priv->prom_net_dev))
10416 		ipw_handle_promiscuous_tx(priv, txb);
10417 #endif
10418 
10419 	ret = ipw_tx_skb(priv, txb, pri);
10420 	if (ret == NETDEV_TX_OK)
10421 		__ipw_led_activity_on(priv);
10422 	spin_unlock_irqrestore(&priv->lock, flags);
10423 
10424 	return ret;
10425 }
10426 
10427 static void ipw_net_set_multicast_list(struct net_device *dev)
10428 {
10429 
10430 }
10431 
10432 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10433 {
10434 	struct ipw_priv *priv = libipw_priv(dev);
10435 	struct sockaddr *addr = p;
10436 
10437 	if (!is_valid_ether_addr(addr->sa_data))
10438 		return -EADDRNOTAVAIL;
10439 	mutex_lock(&priv->mutex);
10440 	priv->config |= CFG_CUSTOM_MAC;
10441 	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10442 	printk(KERN_INFO "%s: Setting MAC to %pM\n",
10443 	       priv->net_dev->name, priv->mac_addr);
10444 	schedule_work(&priv->adapter_restart);
10445 	mutex_unlock(&priv->mutex);
10446 	return 0;
10447 }
10448 
10449 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10450 				    struct ethtool_drvinfo *info)
10451 {
10452 	struct ipw_priv *p = libipw_priv(dev);
10453 	char vers[64];
10454 	char date[32];
10455 	u32 len;
10456 
10457 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10458 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10459 
10460 	len = sizeof(vers);
10461 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10462 	len = sizeof(date);
10463 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10464 
10465 	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10466 		 vers, date);
10467 	strlcpy(info->bus_info, pci_name(p->pci_dev),
10468 		sizeof(info->bus_info));
10469 }
10470 
10471 static u32 ipw_ethtool_get_link(struct net_device *dev)
10472 {
10473 	struct ipw_priv *priv = libipw_priv(dev);
10474 	return (priv->status & STATUS_ASSOCIATED) != 0;
10475 }
10476 
10477 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10478 {
10479 	return IPW_EEPROM_IMAGE_SIZE;
10480 }
10481 
10482 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10483 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10484 {
10485 	struct ipw_priv *p = libipw_priv(dev);
10486 
10487 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10488 		return -EINVAL;
10489 	mutex_lock(&p->mutex);
10490 	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10491 	mutex_unlock(&p->mutex);
10492 	return 0;
10493 }
10494 
10495 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10496 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10497 {
10498 	struct ipw_priv *p = libipw_priv(dev);
10499 	int i;
10500 
10501 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10502 		return -EINVAL;
10503 	mutex_lock(&p->mutex);
10504 	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10505 	for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10506 		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10507 	mutex_unlock(&p->mutex);
10508 	return 0;
10509 }
10510 
10511 static const struct ethtool_ops ipw_ethtool_ops = {
10512 	.get_link = ipw_ethtool_get_link,
10513 	.get_drvinfo = ipw_ethtool_get_drvinfo,
10514 	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
10515 	.get_eeprom = ipw_ethtool_get_eeprom,
10516 	.set_eeprom = ipw_ethtool_set_eeprom,
10517 };
10518 
10519 static irqreturn_t ipw_isr(int irq, void *data)
10520 {
10521 	struct ipw_priv *priv = data;
10522 	u32 inta, inta_mask;
10523 
10524 	if (!priv)
10525 		return IRQ_NONE;
10526 
10527 	spin_lock(&priv->irq_lock);
10528 
10529 	if (!(priv->status & STATUS_INT_ENABLED)) {
10530 		/* IRQ is disabled */
10531 		goto none;
10532 	}
10533 
10534 	inta = ipw_read32(priv, IPW_INTA_RW);
10535 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10536 
10537 	if (inta == 0xFFFFFFFF) {
10538 		/* Hardware disappeared */
10539 		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10540 		goto none;
10541 	}
10542 
10543 	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10544 		/* Shared interrupt */
10545 		goto none;
10546 	}
10547 
10548 	/* tell the device to stop sending interrupts */
10549 	__ipw_disable_interrupts(priv);
10550 
10551 	/* ack current interrupts */
10552 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
10553 	ipw_write32(priv, IPW_INTA_RW, inta);
10554 
10555 	/* Cache INTA value for our tasklet */
10556 	priv->isr_inta = inta;
10557 
10558 	tasklet_schedule(&priv->irq_tasklet);
10559 
10560 	spin_unlock(&priv->irq_lock);
10561 
10562 	return IRQ_HANDLED;
10563       none:
10564 	spin_unlock(&priv->irq_lock);
10565 	return IRQ_NONE;
10566 }
10567 
10568 static void ipw_rf_kill(void *adapter)
10569 {
10570 	struct ipw_priv *priv = adapter;
10571 	unsigned long flags;
10572 
10573 	spin_lock_irqsave(&priv->lock, flags);
10574 
10575 	if (rf_kill_active(priv)) {
10576 		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10577 		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10578 		goto exit_unlock;
10579 	}
10580 
10581 	/* RF Kill is now disabled, so bring the device back up */
10582 
10583 	if (!(priv->status & STATUS_RF_KILL_MASK)) {
10584 		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10585 				  "device\n");
10586 
10587 		/* we can not do an adapter restart while inside an irq lock */
10588 		schedule_work(&priv->adapter_restart);
10589 	} else
10590 		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
10591 				  "enabled\n");
10592 
10593       exit_unlock:
10594 	spin_unlock_irqrestore(&priv->lock, flags);
10595 }
10596 
10597 static void ipw_bg_rf_kill(struct work_struct *work)
10598 {
10599 	struct ipw_priv *priv =
10600 		container_of(work, struct ipw_priv, rf_kill.work);
10601 	mutex_lock(&priv->mutex);
10602 	ipw_rf_kill(priv);
10603 	mutex_unlock(&priv->mutex);
10604 }
10605 
10606 static void ipw_link_up(struct ipw_priv *priv)
10607 {
10608 	priv->last_seq_num = -1;
10609 	priv->last_frag_num = -1;
10610 	priv->last_packet_time = 0;
10611 
10612 	netif_carrier_on(priv->net_dev);
10613 
10614 	cancel_delayed_work(&priv->request_scan);
10615 	cancel_delayed_work(&priv->request_direct_scan);
10616 	cancel_delayed_work(&priv->request_passive_scan);
10617 	cancel_delayed_work(&priv->scan_event);
10618 	ipw_reset_stats(priv);
10619 	/* Ensure the rate is updated immediately */
10620 	priv->last_rate = ipw_get_current_rate(priv);
10621 	ipw_gather_stats(priv);
10622 	ipw_led_link_up(priv);
10623 	notify_wx_assoc_event(priv);
10624 
10625 	if (priv->config & CFG_BACKGROUND_SCAN)
10626 		schedule_delayed_work(&priv->request_scan, HZ);
10627 }
10628 
10629 static void ipw_bg_link_up(struct work_struct *work)
10630 {
10631 	struct ipw_priv *priv =
10632 		container_of(work, struct ipw_priv, link_up);
10633 	mutex_lock(&priv->mutex);
10634 	ipw_link_up(priv);
10635 	mutex_unlock(&priv->mutex);
10636 }
10637 
10638 static void ipw_link_down(struct ipw_priv *priv)
10639 {
10640 	ipw_led_link_down(priv);
10641 	netif_carrier_off(priv->net_dev);
10642 	notify_wx_assoc_event(priv);
10643 
10644 	/* Cancel any queued work ... */
10645 	cancel_delayed_work(&priv->request_scan);
10646 	cancel_delayed_work(&priv->request_direct_scan);
10647 	cancel_delayed_work(&priv->request_passive_scan);
10648 	cancel_delayed_work(&priv->adhoc_check);
10649 	cancel_delayed_work(&priv->gather_stats);
10650 
10651 	ipw_reset_stats(priv);
10652 
10653 	if (!(priv->status & STATUS_EXIT_PENDING)) {
10654 		/* Queue up another scan... */
10655 		schedule_delayed_work(&priv->request_scan, 0);
10656 	} else
10657 		cancel_delayed_work(&priv->scan_event);
10658 }
10659 
10660 static void ipw_bg_link_down(struct work_struct *work)
10661 {
10662 	struct ipw_priv *priv =
10663 		container_of(work, struct ipw_priv, link_down);
10664 	mutex_lock(&priv->mutex);
10665 	ipw_link_down(priv);
10666 	mutex_unlock(&priv->mutex);
10667 }
10668 
10669 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10670 {
10671 	int ret = 0;
10672 
10673 	init_waitqueue_head(&priv->wait_command_queue);
10674 	init_waitqueue_head(&priv->wait_state);
10675 
10676 	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10677 	INIT_WORK(&priv->associate, ipw_bg_associate);
10678 	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10679 	INIT_WORK(&priv->system_config, ipw_system_config);
10680 	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10681 	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10682 	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10683 	INIT_WORK(&priv->up, ipw_bg_up);
10684 	INIT_WORK(&priv->down, ipw_bg_down);
10685 	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10686 	INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10687 	INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10688 	INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10689 	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10690 	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10691 	INIT_WORK(&priv->roam, ipw_bg_roam);
10692 	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10693 	INIT_WORK(&priv->link_up, ipw_bg_link_up);
10694 	INIT_WORK(&priv->link_down, ipw_bg_link_down);
10695 	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10696 	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10697 	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10698 	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10699 
10700 #ifdef CONFIG_IPW2200_QOS
10701 	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10702 #endif				/* CONFIG_IPW2200_QOS */
10703 
10704 	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10705 		     ipw_irq_tasklet, (unsigned long)priv);
10706 
10707 	return ret;
10708 }
10709 
10710 static void shim__set_security(struct net_device *dev,
10711 			       struct libipw_security *sec)
10712 {
10713 	struct ipw_priv *priv = libipw_priv(dev);
10714 	int i;
10715 	for (i = 0; i < 4; i++) {
10716 		if (sec->flags & (1 << i)) {
10717 			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10718 			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10719 			if (sec->key_sizes[i] == 0)
10720 				priv->ieee->sec.flags &= ~(1 << i);
10721 			else {
10722 				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10723 				       sec->key_sizes[i]);
10724 				priv->ieee->sec.flags |= (1 << i);
10725 			}
10726 			priv->status |= STATUS_SECURITY_UPDATED;
10727 		} else if (sec->level != SEC_LEVEL_1)
10728 			priv->ieee->sec.flags &= ~(1 << i);
10729 	}
10730 
10731 	if (sec->flags & SEC_ACTIVE_KEY) {
10732 		if (sec->active_key <= 3) {
10733 			priv->ieee->sec.active_key = sec->active_key;
10734 			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10735 		} else
10736 			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10737 		priv->status |= STATUS_SECURITY_UPDATED;
10738 	} else
10739 		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10740 
10741 	if ((sec->flags & SEC_AUTH_MODE) &&
10742 	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10743 		priv->ieee->sec.auth_mode = sec->auth_mode;
10744 		priv->ieee->sec.flags |= SEC_AUTH_MODE;
10745 		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10746 			priv->capability |= CAP_SHARED_KEY;
10747 		else
10748 			priv->capability &= ~CAP_SHARED_KEY;
10749 		priv->status |= STATUS_SECURITY_UPDATED;
10750 	}
10751 
10752 	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10753 		priv->ieee->sec.flags |= SEC_ENABLED;
10754 		priv->ieee->sec.enabled = sec->enabled;
10755 		priv->status |= STATUS_SECURITY_UPDATED;
10756 		if (sec->enabled)
10757 			priv->capability |= CAP_PRIVACY_ON;
10758 		else
10759 			priv->capability &= ~CAP_PRIVACY_ON;
10760 	}
10761 
10762 	if (sec->flags & SEC_ENCRYPT)
10763 		priv->ieee->sec.encrypt = sec->encrypt;
10764 
10765 	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10766 		priv->ieee->sec.level = sec->level;
10767 		priv->ieee->sec.flags |= SEC_LEVEL;
10768 		priv->status |= STATUS_SECURITY_UPDATED;
10769 	}
10770 
10771 	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10772 		ipw_set_hwcrypto_keys(priv);
10773 
10774 	/* To match current functionality of ipw2100 (which works well w/
10775 	 * various supplicants, we don't force a disassociate if the
10776 	 * privacy capability changes ... */
10777 #if 0
10778 	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10779 	    (((priv->assoc_request.capability &
10780 	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10781 	     (!(priv->assoc_request.capability &
10782 		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10783 		IPW_DEBUG_ASSOC("Disassociating due to capability "
10784 				"change.\n");
10785 		ipw_disassociate(priv);
10786 	}
10787 #endif
10788 }
10789 
10790 static int init_supported_rates(struct ipw_priv *priv,
10791 				struct ipw_supported_rates *rates)
10792 {
10793 	/* TODO: Mask out rates based on priv->rates_mask */
10794 
10795 	memset(rates, 0, sizeof(*rates));
10796 	/* configure supported rates */
10797 	switch (priv->ieee->freq_band) {
10798 	case LIBIPW_52GHZ_BAND:
10799 		rates->ieee_mode = IPW_A_MODE;
10800 		rates->purpose = IPW_RATE_CAPABILITIES;
10801 		ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10802 					LIBIPW_OFDM_DEFAULT_RATES_MASK);
10803 		break;
10804 
10805 	default:		/* Mixed or 2.4Ghz */
10806 		rates->ieee_mode = IPW_G_MODE;
10807 		rates->purpose = IPW_RATE_CAPABILITIES;
10808 		ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10809 				       LIBIPW_CCK_DEFAULT_RATES_MASK);
10810 		if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10811 			ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10812 						LIBIPW_OFDM_DEFAULT_RATES_MASK);
10813 		}
10814 		break;
10815 	}
10816 
10817 	return 0;
10818 }
10819 
10820 static int ipw_config(struct ipw_priv *priv)
10821 {
10822 	/* This is only called from ipw_up, which resets/reloads the firmware
10823 	   so, we don't need to first disable the card before we configure
10824 	   it */
10825 	if (ipw_set_tx_power(priv))
10826 		goto error;
10827 
10828 	/* initialize adapter address */
10829 	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10830 		goto error;
10831 
10832 	/* set basic system config settings */
10833 	init_sys_config(&priv->sys_config);
10834 
10835 	/* Support Bluetooth if we have BT h/w on board, and user wants to.
10836 	 * Does not support BT priority yet (don't abort or defer our Tx) */
10837 	if (bt_coexist) {
10838 		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10839 
10840 		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10841 			priv->sys_config.bt_coexistence
10842 			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10843 		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10844 			priv->sys_config.bt_coexistence
10845 			    |= CFG_BT_COEXISTENCE_OOB;
10846 	}
10847 
10848 #ifdef CONFIG_IPW2200_PROMISCUOUS
10849 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10850 		priv->sys_config.accept_all_data_frames = 1;
10851 		priv->sys_config.accept_non_directed_frames = 1;
10852 		priv->sys_config.accept_all_mgmt_bcpr = 1;
10853 		priv->sys_config.accept_all_mgmt_frames = 1;
10854 	}
10855 #endif
10856 
10857 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10858 		priv->sys_config.answer_broadcast_ssid_probe = 1;
10859 	else
10860 		priv->sys_config.answer_broadcast_ssid_probe = 0;
10861 
10862 	if (ipw_send_system_config(priv))
10863 		goto error;
10864 
10865 	init_supported_rates(priv, &priv->rates);
10866 	if (ipw_send_supported_rates(priv, &priv->rates))
10867 		goto error;
10868 
10869 	/* Set request-to-send threshold */
10870 	if (priv->rts_threshold) {
10871 		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10872 			goto error;
10873 	}
10874 #ifdef CONFIG_IPW2200_QOS
10875 	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10876 	ipw_qos_activate(priv, NULL);
10877 #endif				/* CONFIG_IPW2200_QOS */
10878 
10879 	if (ipw_set_random_seed(priv))
10880 		goto error;
10881 
10882 	/* final state transition to the RUN state */
10883 	if (ipw_send_host_complete(priv))
10884 		goto error;
10885 
10886 	priv->status |= STATUS_INIT;
10887 
10888 	ipw_led_init(priv);
10889 	ipw_led_radio_on(priv);
10890 	priv->notif_missed_beacons = 0;
10891 
10892 	/* Set hardware WEP key if it is configured. */
10893 	if ((priv->capability & CAP_PRIVACY_ON) &&
10894 	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
10895 	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10896 		ipw_set_hwcrypto_keys(priv);
10897 
10898 	return 0;
10899 
10900       error:
10901 	return -EIO;
10902 }
10903 
10904 /*
10905  * NOTE:
10906  *
10907  * These tables have been tested in conjunction with the
10908  * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10909  *
10910  * Altering this values, using it on other hardware, or in geographies
10911  * not intended for resale of the above mentioned Intel adapters has
10912  * not been tested.
10913  *
10914  * Remember to update the table in README.ipw2200 when changing this
10915  * table.
10916  *
10917  */
10918 static const struct libipw_geo ipw_geos[] = {
10919 	{			/* Restricted */
10920 	 "---",
10921 	 .bg_channels = 11,
10922 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10923 		{2427, 4}, {2432, 5}, {2437, 6},
10924 		{2442, 7}, {2447, 8}, {2452, 9},
10925 		{2457, 10}, {2462, 11}},
10926 	 },
10927 
10928 	{			/* Custom US/Canada */
10929 	 "ZZF",
10930 	 .bg_channels = 11,
10931 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10932 		{2427, 4}, {2432, 5}, {2437, 6},
10933 		{2442, 7}, {2447, 8}, {2452, 9},
10934 		{2457, 10}, {2462, 11}},
10935 	 .a_channels = 8,
10936 	 .a = {{5180, 36},
10937 	       {5200, 40},
10938 	       {5220, 44},
10939 	       {5240, 48},
10940 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10941 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10942 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10943 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
10944 	 },
10945 
10946 	{			/* Rest of World */
10947 	 "ZZD",
10948 	 .bg_channels = 13,
10949 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10950 		{2427, 4}, {2432, 5}, {2437, 6},
10951 		{2442, 7}, {2447, 8}, {2452, 9},
10952 		{2457, 10}, {2462, 11}, {2467, 12},
10953 		{2472, 13}},
10954 	 },
10955 
10956 	{			/* Custom USA & Europe & High */
10957 	 "ZZA",
10958 	 .bg_channels = 11,
10959 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10960 		{2427, 4}, {2432, 5}, {2437, 6},
10961 		{2442, 7}, {2447, 8}, {2452, 9},
10962 		{2457, 10}, {2462, 11}},
10963 	 .a_channels = 13,
10964 	 .a = {{5180, 36},
10965 	       {5200, 40},
10966 	       {5220, 44},
10967 	       {5240, 48},
10968 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10969 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10970 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10971 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10972 	       {5745, 149},
10973 	       {5765, 153},
10974 	       {5785, 157},
10975 	       {5805, 161},
10976 	       {5825, 165}},
10977 	 },
10978 
10979 	{			/* Custom NA & Europe */
10980 	 "ZZB",
10981 	 .bg_channels = 11,
10982 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10983 		{2427, 4}, {2432, 5}, {2437, 6},
10984 		{2442, 7}, {2447, 8}, {2452, 9},
10985 		{2457, 10}, {2462, 11}},
10986 	 .a_channels = 13,
10987 	 .a = {{5180, 36},
10988 	       {5200, 40},
10989 	       {5220, 44},
10990 	       {5240, 48},
10991 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10992 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10993 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10994 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10995 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
10996 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
10997 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
10998 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
10999 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11000 	 },
11001 
11002 	{			/* Custom Japan */
11003 	 "ZZC",
11004 	 .bg_channels = 11,
11005 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11006 		{2427, 4}, {2432, 5}, {2437, 6},
11007 		{2442, 7}, {2447, 8}, {2452, 9},
11008 		{2457, 10}, {2462, 11}},
11009 	 .a_channels = 4,
11010 	 .a = {{5170, 34}, {5190, 38},
11011 	       {5210, 42}, {5230, 46}},
11012 	 },
11013 
11014 	{			/* Custom */
11015 	 "ZZM",
11016 	 .bg_channels = 11,
11017 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11018 		{2427, 4}, {2432, 5}, {2437, 6},
11019 		{2442, 7}, {2447, 8}, {2452, 9},
11020 		{2457, 10}, {2462, 11}},
11021 	 },
11022 
11023 	{			/* Europe */
11024 	 "ZZE",
11025 	 .bg_channels = 13,
11026 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11027 		{2427, 4}, {2432, 5}, {2437, 6},
11028 		{2442, 7}, {2447, 8}, {2452, 9},
11029 		{2457, 10}, {2462, 11}, {2467, 12},
11030 		{2472, 13}},
11031 	 .a_channels = 19,
11032 	 .a = {{5180, 36},
11033 	       {5200, 40},
11034 	       {5220, 44},
11035 	       {5240, 48},
11036 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11037 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11038 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11039 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11040 	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11041 	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11042 	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11043 	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11044 	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11045 	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11046 	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11047 	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11048 	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11049 	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11050 	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11051 	 },
11052 
11053 	{			/* Custom Japan */
11054 	 "ZZJ",
11055 	 .bg_channels = 14,
11056 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11057 		{2427, 4}, {2432, 5}, {2437, 6},
11058 		{2442, 7}, {2447, 8}, {2452, 9},
11059 		{2457, 10}, {2462, 11}, {2467, 12},
11060 		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11061 	 .a_channels = 4,
11062 	 .a = {{5170, 34}, {5190, 38},
11063 	       {5210, 42}, {5230, 46}},
11064 	 },
11065 
11066 	{			/* Rest of World */
11067 	 "ZZR",
11068 	 .bg_channels = 14,
11069 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11070 		{2427, 4}, {2432, 5}, {2437, 6},
11071 		{2442, 7}, {2447, 8}, {2452, 9},
11072 		{2457, 10}, {2462, 11}, {2467, 12},
11073 		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11074 			     LIBIPW_CH_PASSIVE_ONLY}},
11075 	 },
11076 
11077 	{			/* High Band */
11078 	 "ZZH",
11079 	 .bg_channels = 13,
11080 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11081 		{2427, 4}, {2432, 5}, {2437, 6},
11082 		{2442, 7}, {2447, 8}, {2452, 9},
11083 		{2457, 10}, {2462, 11},
11084 		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11085 		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11086 	 .a_channels = 4,
11087 	 .a = {{5745, 149}, {5765, 153},
11088 	       {5785, 157}, {5805, 161}},
11089 	 },
11090 
11091 	{			/* Custom Europe */
11092 	 "ZZG",
11093 	 .bg_channels = 13,
11094 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11095 		{2427, 4}, {2432, 5}, {2437, 6},
11096 		{2442, 7}, {2447, 8}, {2452, 9},
11097 		{2457, 10}, {2462, 11},
11098 		{2467, 12}, {2472, 13}},
11099 	 .a_channels = 4,
11100 	 .a = {{5180, 36}, {5200, 40},
11101 	       {5220, 44}, {5240, 48}},
11102 	 },
11103 
11104 	{			/* Europe */
11105 	 "ZZK",
11106 	 .bg_channels = 13,
11107 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11108 		{2427, 4}, {2432, 5}, {2437, 6},
11109 		{2442, 7}, {2447, 8}, {2452, 9},
11110 		{2457, 10}, {2462, 11},
11111 		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11112 		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11113 	 .a_channels = 24,
11114 	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11115 	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11116 	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11117 	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11118 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11119 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11120 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11121 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11122 	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11123 	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11124 	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11125 	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11126 	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11127 	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11128 	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11129 	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11130 	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11131 	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11132 	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11133 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11134 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11135 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11136 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11137 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11138 	 },
11139 
11140 	{			/* Europe */
11141 	 "ZZL",
11142 	 .bg_channels = 11,
11143 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11144 		{2427, 4}, {2432, 5}, {2437, 6},
11145 		{2442, 7}, {2447, 8}, {2452, 9},
11146 		{2457, 10}, {2462, 11}},
11147 	 .a_channels = 13,
11148 	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11149 	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11150 	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11151 	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11152 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11153 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11154 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11155 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11156 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11157 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11158 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11159 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11160 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11161 	 }
11162 };
11163 
11164 static void ipw_set_geo(struct ipw_priv *priv)
11165 {
11166 	int j;
11167 
11168 	for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11169 		if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11170 			    ipw_geos[j].name, 3))
11171 			break;
11172 	}
11173 
11174 	if (j == ARRAY_SIZE(ipw_geos)) {
11175 		IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11176 			    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11177 			    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11178 			    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11179 		j = 0;
11180 	}
11181 
11182 	libipw_set_geo(priv->ieee, &ipw_geos[j]);
11183 }
11184 
11185 #define MAX_HW_RESTARTS 5
11186 static int ipw_up(struct ipw_priv *priv)
11187 {
11188 	int rc, i;
11189 
11190 	/* Age scan list entries found before suspend */
11191 	if (priv->suspend_time) {
11192 		libipw_networks_age(priv->ieee, priv->suspend_time);
11193 		priv->suspend_time = 0;
11194 	}
11195 
11196 	if (priv->status & STATUS_EXIT_PENDING)
11197 		return -EIO;
11198 
11199 	if (cmdlog && !priv->cmdlog) {
11200 		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11201 				       GFP_KERNEL);
11202 		if (priv->cmdlog == NULL) {
11203 			IPW_ERROR("Error allocating %d command log entries.\n",
11204 				  cmdlog);
11205 			return -ENOMEM;
11206 		} else {
11207 			priv->cmdlog_len = cmdlog;
11208 		}
11209 	}
11210 
11211 	for (i = 0; i < MAX_HW_RESTARTS; i++) {
11212 		/* Load the microcode, firmware, and eeprom.
11213 		 * Also start the clocks. */
11214 		rc = ipw_load(priv);
11215 		if (rc) {
11216 			IPW_ERROR("Unable to load firmware: %d\n", rc);
11217 			return rc;
11218 		}
11219 
11220 		ipw_init_ordinals(priv);
11221 		if (!(priv->config & CFG_CUSTOM_MAC))
11222 			eeprom_parse_mac(priv, priv->mac_addr);
11223 		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11224 
11225 		ipw_set_geo(priv);
11226 
11227 		if (priv->status & STATUS_RF_KILL_SW) {
11228 			IPW_WARNING("Radio disabled by module parameter.\n");
11229 			return 0;
11230 		} else if (rf_kill_active(priv)) {
11231 			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11232 				    "Kill switch must be turned off for "
11233 				    "wireless networking to work.\n");
11234 			schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11235 			return 0;
11236 		}
11237 
11238 		rc = ipw_config(priv);
11239 		if (!rc) {
11240 			IPW_DEBUG_INFO("Configured device on count %i\n", i);
11241 
11242 			/* If configure to try and auto-associate, kick
11243 			 * off a scan. */
11244 			schedule_delayed_work(&priv->request_scan, 0);
11245 
11246 			return 0;
11247 		}
11248 
11249 		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11250 		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11251 			       i, MAX_HW_RESTARTS);
11252 
11253 		/* We had an error bringing up the hardware, so take it
11254 		 * all the way back down so we can try again */
11255 		ipw_down(priv);
11256 	}
11257 
11258 	/* tried to restart and config the device for as long as our
11259 	 * patience could withstand */
11260 	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11261 
11262 	return -EIO;
11263 }
11264 
11265 static void ipw_bg_up(struct work_struct *work)
11266 {
11267 	struct ipw_priv *priv =
11268 		container_of(work, struct ipw_priv, up);
11269 	mutex_lock(&priv->mutex);
11270 	ipw_up(priv);
11271 	mutex_unlock(&priv->mutex);
11272 }
11273 
11274 static void ipw_deinit(struct ipw_priv *priv)
11275 {
11276 	int i;
11277 
11278 	if (priv->status & STATUS_SCANNING) {
11279 		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11280 		ipw_abort_scan(priv);
11281 	}
11282 
11283 	if (priv->status & STATUS_ASSOCIATED) {
11284 		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11285 		ipw_disassociate(priv);
11286 	}
11287 
11288 	ipw_led_shutdown(priv);
11289 
11290 	/* Wait up to 1s for status to change to not scanning and not
11291 	 * associated (disassociation can take a while for a ful 802.11
11292 	 * exchange */
11293 	for (i = 1000; i && (priv->status &
11294 			     (STATUS_DISASSOCIATING |
11295 			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11296 		udelay(10);
11297 
11298 	if (priv->status & (STATUS_DISASSOCIATING |
11299 			    STATUS_ASSOCIATED | STATUS_SCANNING))
11300 		IPW_DEBUG_INFO("Still associated or scanning...\n");
11301 	else
11302 		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11303 
11304 	/* Attempt to disable the card */
11305 	ipw_send_card_disable(priv, 0);
11306 
11307 	priv->status &= ~STATUS_INIT;
11308 }
11309 
11310 static void ipw_down(struct ipw_priv *priv)
11311 {
11312 	int exit_pending = priv->status & STATUS_EXIT_PENDING;
11313 
11314 	priv->status |= STATUS_EXIT_PENDING;
11315 
11316 	if (ipw_is_init(priv))
11317 		ipw_deinit(priv);
11318 
11319 	/* Wipe out the EXIT_PENDING status bit if we are not actually
11320 	 * exiting the module */
11321 	if (!exit_pending)
11322 		priv->status &= ~STATUS_EXIT_PENDING;
11323 
11324 	/* tell the device to stop sending interrupts */
11325 	ipw_disable_interrupts(priv);
11326 
11327 	/* Clear all bits but the RF Kill */
11328 	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11329 	netif_carrier_off(priv->net_dev);
11330 
11331 	ipw_stop_nic(priv);
11332 
11333 	ipw_led_radio_off(priv);
11334 }
11335 
11336 static void ipw_bg_down(struct work_struct *work)
11337 {
11338 	struct ipw_priv *priv =
11339 		container_of(work, struct ipw_priv, down);
11340 	mutex_lock(&priv->mutex);
11341 	ipw_down(priv);
11342 	mutex_unlock(&priv->mutex);
11343 }
11344 
11345 static int ipw_wdev_init(struct net_device *dev)
11346 {
11347 	int i, rc = 0;
11348 	struct ipw_priv *priv = libipw_priv(dev);
11349 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11350 	struct wireless_dev *wdev = &priv->ieee->wdev;
11351 
11352 	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11353 
11354 	/* fill-out priv->ieee->bg_band */
11355 	if (geo->bg_channels) {
11356 		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11357 
11358 		bg_band->band = NL80211_BAND_2GHZ;
11359 		bg_band->n_channels = geo->bg_channels;
11360 		bg_band->channels = kcalloc(geo->bg_channels,
11361 					    sizeof(struct ieee80211_channel),
11362 					    GFP_KERNEL);
11363 		if (!bg_band->channels) {
11364 			rc = -ENOMEM;
11365 			goto out;
11366 		}
11367 		/* translate geo->bg to bg_band.channels */
11368 		for (i = 0; i < geo->bg_channels; i++) {
11369 			bg_band->channels[i].band = NL80211_BAND_2GHZ;
11370 			bg_band->channels[i].center_freq = geo->bg[i].freq;
11371 			bg_band->channels[i].hw_value = geo->bg[i].channel;
11372 			bg_band->channels[i].max_power = geo->bg[i].max_power;
11373 			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11374 				bg_band->channels[i].flags |=
11375 					IEEE80211_CHAN_NO_IR;
11376 			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11377 				bg_band->channels[i].flags |=
11378 					IEEE80211_CHAN_NO_IR;
11379 			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11380 				bg_band->channels[i].flags |=
11381 					IEEE80211_CHAN_RADAR;
11382 			/* No equivalent for LIBIPW_CH_80211H_RULES,
11383 			   LIBIPW_CH_UNIFORM_SPREADING, or
11384 			   LIBIPW_CH_B_ONLY... */
11385 		}
11386 		/* point at bitrate info */
11387 		bg_band->bitrates = ipw2200_bg_rates;
11388 		bg_band->n_bitrates = ipw2200_num_bg_rates;
11389 
11390 		wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
11391 	}
11392 
11393 	/* fill-out priv->ieee->a_band */
11394 	if (geo->a_channels) {
11395 		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11396 
11397 		a_band->band = NL80211_BAND_5GHZ;
11398 		a_band->n_channels = geo->a_channels;
11399 		a_band->channels = kcalloc(geo->a_channels,
11400 					   sizeof(struct ieee80211_channel),
11401 					   GFP_KERNEL);
11402 		if (!a_band->channels) {
11403 			rc = -ENOMEM;
11404 			goto out;
11405 		}
11406 		/* translate geo->a to a_band.channels */
11407 		for (i = 0; i < geo->a_channels; i++) {
11408 			a_band->channels[i].band = NL80211_BAND_5GHZ;
11409 			a_band->channels[i].center_freq = geo->a[i].freq;
11410 			a_band->channels[i].hw_value = geo->a[i].channel;
11411 			a_band->channels[i].max_power = geo->a[i].max_power;
11412 			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11413 				a_band->channels[i].flags |=
11414 					IEEE80211_CHAN_NO_IR;
11415 			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11416 				a_band->channels[i].flags |=
11417 					IEEE80211_CHAN_NO_IR;
11418 			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11419 				a_band->channels[i].flags |=
11420 					IEEE80211_CHAN_RADAR;
11421 			/* No equivalent for LIBIPW_CH_80211H_RULES,
11422 			   LIBIPW_CH_UNIFORM_SPREADING, or
11423 			   LIBIPW_CH_B_ONLY... */
11424 		}
11425 		/* point at bitrate info */
11426 		a_band->bitrates = ipw2200_a_rates;
11427 		a_band->n_bitrates = ipw2200_num_a_rates;
11428 
11429 		wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
11430 	}
11431 
11432 	wdev->wiphy->cipher_suites = ipw_cipher_suites;
11433 	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11434 
11435 	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11436 
11437 	/* With that information in place, we can now register the wiphy... */
11438 	if (wiphy_register(wdev->wiphy))
11439 		rc = -EIO;
11440 out:
11441 	return rc;
11442 }
11443 
11444 /* PCI driver stuff */
11445 static const struct pci_device_id card_ids[] = {
11446 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11447 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11448 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11449 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11450 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11451 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11452 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11453 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11454 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11455 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11456 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11457 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11458 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11459 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11460 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11461 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11462 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11463 	{PCI_VDEVICE(INTEL, 0x104f), 0},
11464 	{PCI_VDEVICE(INTEL, 0x4220), 0},	/* BG */
11465 	{PCI_VDEVICE(INTEL, 0x4221), 0},	/* BG */
11466 	{PCI_VDEVICE(INTEL, 0x4223), 0},	/* ABG */
11467 	{PCI_VDEVICE(INTEL, 0x4224), 0},	/* ABG */
11468 
11469 	/* required last entry */
11470 	{0,}
11471 };
11472 
11473 MODULE_DEVICE_TABLE(pci, card_ids);
11474 
11475 static struct attribute *ipw_sysfs_entries[] = {
11476 	&dev_attr_rf_kill.attr,
11477 	&dev_attr_direct_dword.attr,
11478 	&dev_attr_indirect_byte.attr,
11479 	&dev_attr_indirect_dword.attr,
11480 	&dev_attr_mem_gpio_reg.attr,
11481 	&dev_attr_command_event_reg.attr,
11482 	&dev_attr_nic_type.attr,
11483 	&dev_attr_status.attr,
11484 	&dev_attr_cfg.attr,
11485 	&dev_attr_error.attr,
11486 	&dev_attr_event_log.attr,
11487 	&dev_attr_cmd_log.attr,
11488 	&dev_attr_eeprom_delay.attr,
11489 	&dev_attr_ucode_version.attr,
11490 	&dev_attr_rtc.attr,
11491 	&dev_attr_scan_age.attr,
11492 	&dev_attr_led.attr,
11493 	&dev_attr_speed_scan.attr,
11494 	&dev_attr_net_stats.attr,
11495 	&dev_attr_channels.attr,
11496 #ifdef CONFIG_IPW2200_PROMISCUOUS
11497 	&dev_attr_rtap_iface.attr,
11498 	&dev_attr_rtap_filter.attr,
11499 #endif
11500 	NULL
11501 };
11502 
11503 static struct attribute_group ipw_attribute_group = {
11504 	.name = NULL,		/* put in device directory */
11505 	.attrs = ipw_sysfs_entries,
11506 };
11507 
11508 #ifdef CONFIG_IPW2200_PROMISCUOUS
11509 static int ipw_prom_open(struct net_device *dev)
11510 {
11511 	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11512 	struct ipw_priv *priv = prom_priv->priv;
11513 
11514 	IPW_DEBUG_INFO("prom dev->open\n");
11515 	netif_carrier_off(dev);
11516 
11517 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11518 		priv->sys_config.accept_all_data_frames = 1;
11519 		priv->sys_config.accept_non_directed_frames = 1;
11520 		priv->sys_config.accept_all_mgmt_bcpr = 1;
11521 		priv->sys_config.accept_all_mgmt_frames = 1;
11522 
11523 		ipw_send_system_config(priv);
11524 	}
11525 
11526 	return 0;
11527 }
11528 
11529 static int ipw_prom_stop(struct net_device *dev)
11530 {
11531 	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11532 	struct ipw_priv *priv = prom_priv->priv;
11533 
11534 	IPW_DEBUG_INFO("prom dev->stop\n");
11535 
11536 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11537 		priv->sys_config.accept_all_data_frames = 0;
11538 		priv->sys_config.accept_non_directed_frames = 0;
11539 		priv->sys_config.accept_all_mgmt_bcpr = 0;
11540 		priv->sys_config.accept_all_mgmt_frames = 0;
11541 
11542 		ipw_send_system_config(priv);
11543 	}
11544 
11545 	return 0;
11546 }
11547 
11548 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11549 					    struct net_device *dev)
11550 {
11551 	IPW_DEBUG_INFO("prom dev->xmit\n");
11552 	dev_kfree_skb(skb);
11553 	return NETDEV_TX_OK;
11554 }
11555 
11556 static const struct net_device_ops ipw_prom_netdev_ops = {
11557 	.ndo_open 		= ipw_prom_open,
11558 	.ndo_stop		= ipw_prom_stop,
11559 	.ndo_start_xmit		= ipw_prom_hard_start_xmit,
11560 	.ndo_set_mac_address 	= eth_mac_addr,
11561 	.ndo_validate_addr	= eth_validate_addr,
11562 };
11563 
11564 static int ipw_prom_alloc(struct ipw_priv *priv)
11565 {
11566 	int rc = 0;
11567 
11568 	if (priv->prom_net_dev)
11569 		return -EPERM;
11570 
11571 	priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11572 	if (priv->prom_net_dev == NULL)
11573 		return -ENOMEM;
11574 
11575 	priv->prom_priv = libipw_priv(priv->prom_net_dev);
11576 	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11577 	priv->prom_priv->priv = priv;
11578 
11579 	strcpy(priv->prom_net_dev->name, "rtap%d");
11580 	memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11581 
11582 	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11583 	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11584 
11585 	priv->prom_net_dev->min_mtu = 68;
11586 	priv->prom_net_dev->max_mtu = LIBIPW_DATA_LEN;
11587 
11588 	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11589 	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11590 
11591 	rc = register_netdev(priv->prom_net_dev);
11592 	if (rc) {
11593 		free_libipw(priv->prom_net_dev, 1);
11594 		priv->prom_net_dev = NULL;
11595 		return rc;
11596 	}
11597 
11598 	return 0;
11599 }
11600 
11601 static void ipw_prom_free(struct ipw_priv *priv)
11602 {
11603 	if (!priv->prom_net_dev)
11604 		return;
11605 
11606 	unregister_netdev(priv->prom_net_dev);
11607 	free_libipw(priv->prom_net_dev, 1);
11608 
11609 	priv->prom_net_dev = NULL;
11610 }
11611 
11612 #endif
11613 
11614 static const struct net_device_ops ipw_netdev_ops = {
11615 	.ndo_open		= ipw_net_open,
11616 	.ndo_stop		= ipw_net_stop,
11617 	.ndo_set_rx_mode	= ipw_net_set_multicast_list,
11618 	.ndo_set_mac_address	= ipw_net_set_mac_address,
11619 	.ndo_start_xmit		= libipw_xmit,
11620 	.ndo_validate_addr	= eth_validate_addr,
11621 };
11622 
11623 static int ipw_pci_probe(struct pci_dev *pdev,
11624 				   const struct pci_device_id *ent)
11625 {
11626 	int err = 0;
11627 	struct net_device *net_dev;
11628 	void __iomem *base;
11629 	u32 length, val;
11630 	struct ipw_priv *priv;
11631 	int i;
11632 
11633 	net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11634 	if (net_dev == NULL) {
11635 		err = -ENOMEM;
11636 		goto out;
11637 	}
11638 
11639 	priv = libipw_priv(net_dev);
11640 	priv->ieee = netdev_priv(net_dev);
11641 
11642 	priv->net_dev = net_dev;
11643 	priv->pci_dev = pdev;
11644 	ipw_debug_level = debug;
11645 	spin_lock_init(&priv->irq_lock);
11646 	spin_lock_init(&priv->lock);
11647 	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11648 		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11649 
11650 	mutex_init(&priv->mutex);
11651 	if (pci_enable_device(pdev)) {
11652 		err = -ENODEV;
11653 		goto out_free_libipw;
11654 	}
11655 
11656 	pci_set_master(pdev);
11657 
11658 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11659 	if (!err)
11660 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11661 	if (err) {
11662 		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11663 		goto out_pci_disable_device;
11664 	}
11665 
11666 	pci_set_drvdata(pdev, priv);
11667 
11668 	err = pci_request_regions(pdev, DRV_NAME);
11669 	if (err)
11670 		goto out_pci_disable_device;
11671 
11672 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
11673 	 * PCI Tx retries from interfering with C3 CPU state */
11674 	pci_read_config_dword(pdev, 0x40, &val);
11675 	if ((val & 0x0000ff00) != 0)
11676 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11677 
11678 	length = pci_resource_len(pdev, 0);
11679 	priv->hw_len = length;
11680 
11681 	base = pci_ioremap_bar(pdev, 0);
11682 	if (!base) {
11683 		err = -ENODEV;
11684 		goto out_pci_release_regions;
11685 	}
11686 
11687 	priv->hw_base = base;
11688 	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11689 	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11690 
11691 	err = ipw_setup_deferred_work(priv);
11692 	if (err) {
11693 		IPW_ERROR("Unable to setup deferred work\n");
11694 		goto out_iounmap;
11695 	}
11696 
11697 	ipw_sw_reset(priv, 1);
11698 
11699 	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11700 	if (err) {
11701 		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11702 		goto out_iounmap;
11703 	}
11704 
11705 	SET_NETDEV_DEV(net_dev, &pdev->dev);
11706 
11707 	mutex_lock(&priv->mutex);
11708 
11709 	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11710 	priv->ieee->set_security = shim__set_security;
11711 	priv->ieee->is_queue_full = ipw_net_is_queue_full;
11712 
11713 #ifdef CONFIG_IPW2200_QOS
11714 	priv->ieee->is_qos_active = ipw_is_qos_active;
11715 	priv->ieee->handle_probe_response = ipw_handle_beacon;
11716 	priv->ieee->handle_beacon = ipw_handle_probe_response;
11717 	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11718 #endif				/* CONFIG_IPW2200_QOS */
11719 
11720 	priv->ieee->perfect_rssi = -20;
11721 	priv->ieee->worst_rssi = -85;
11722 
11723 	net_dev->netdev_ops = &ipw_netdev_ops;
11724 	priv->wireless_data.spy_data = &priv->ieee->spy_data;
11725 	net_dev->wireless_data = &priv->wireless_data;
11726 	net_dev->wireless_handlers = &ipw_wx_handler_def;
11727 	net_dev->ethtool_ops = &ipw_ethtool_ops;
11728 
11729 	net_dev->min_mtu = 68;
11730 	net_dev->max_mtu = LIBIPW_DATA_LEN;
11731 
11732 	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11733 	if (err) {
11734 		IPW_ERROR("failed to create sysfs device attributes\n");
11735 		mutex_unlock(&priv->mutex);
11736 		goto out_release_irq;
11737 	}
11738 
11739 	if (ipw_up(priv)) {
11740 		mutex_unlock(&priv->mutex);
11741 		err = -EIO;
11742 		goto out_remove_sysfs;
11743 	}
11744 
11745 	mutex_unlock(&priv->mutex);
11746 
11747 	err = ipw_wdev_init(net_dev);
11748 	if (err) {
11749 		IPW_ERROR("failed to register wireless device\n");
11750 		goto out_remove_sysfs;
11751 	}
11752 
11753 	err = register_netdev(net_dev);
11754 	if (err) {
11755 		IPW_ERROR("failed to register network device\n");
11756 		goto out_unregister_wiphy;
11757 	}
11758 
11759 #ifdef CONFIG_IPW2200_PROMISCUOUS
11760 	if (rtap_iface) {
11761 	        err = ipw_prom_alloc(priv);
11762 		if (err) {
11763 			IPW_ERROR("Failed to register promiscuous network "
11764 				  "device (error %d).\n", err);
11765 			unregister_netdev(priv->net_dev);
11766 			goto out_unregister_wiphy;
11767 		}
11768 	}
11769 #endif
11770 
11771 	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11772 	       "channels, %d 802.11a channels)\n",
11773 	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11774 	       priv->ieee->geo.a_channels);
11775 
11776 	return 0;
11777 
11778       out_unregister_wiphy:
11779 	wiphy_unregister(priv->ieee->wdev.wiphy);
11780 	kfree(priv->ieee->a_band.channels);
11781 	kfree(priv->ieee->bg_band.channels);
11782       out_remove_sysfs:
11783 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11784       out_release_irq:
11785 	free_irq(pdev->irq, priv);
11786       out_iounmap:
11787 	iounmap(priv->hw_base);
11788       out_pci_release_regions:
11789 	pci_release_regions(pdev);
11790       out_pci_disable_device:
11791 	pci_disable_device(pdev);
11792       out_free_libipw:
11793 	free_libipw(priv->net_dev, 0);
11794       out:
11795 	return err;
11796 }
11797 
11798 static void ipw_pci_remove(struct pci_dev *pdev)
11799 {
11800 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11801 	struct list_head *p, *q;
11802 	int i;
11803 
11804 	if (!priv)
11805 		return;
11806 
11807 	mutex_lock(&priv->mutex);
11808 
11809 	priv->status |= STATUS_EXIT_PENDING;
11810 	ipw_down(priv);
11811 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11812 
11813 	mutex_unlock(&priv->mutex);
11814 
11815 	unregister_netdev(priv->net_dev);
11816 
11817 	if (priv->rxq) {
11818 		ipw_rx_queue_free(priv, priv->rxq);
11819 		priv->rxq = NULL;
11820 	}
11821 	ipw_tx_queue_free(priv);
11822 
11823 	if (priv->cmdlog) {
11824 		kfree(priv->cmdlog);
11825 		priv->cmdlog = NULL;
11826 	}
11827 
11828 	/* make sure all works are inactive */
11829 	cancel_delayed_work_sync(&priv->adhoc_check);
11830 	cancel_work_sync(&priv->associate);
11831 	cancel_work_sync(&priv->disassociate);
11832 	cancel_work_sync(&priv->system_config);
11833 	cancel_work_sync(&priv->rx_replenish);
11834 	cancel_work_sync(&priv->adapter_restart);
11835 	cancel_delayed_work_sync(&priv->rf_kill);
11836 	cancel_work_sync(&priv->up);
11837 	cancel_work_sync(&priv->down);
11838 	cancel_delayed_work_sync(&priv->request_scan);
11839 	cancel_delayed_work_sync(&priv->request_direct_scan);
11840 	cancel_delayed_work_sync(&priv->request_passive_scan);
11841 	cancel_delayed_work_sync(&priv->scan_event);
11842 	cancel_delayed_work_sync(&priv->gather_stats);
11843 	cancel_work_sync(&priv->abort_scan);
11844 	cancel_work_sync(&priv->roam);
11845 	cancel_delayed_work_sync(&priv->scan_check);
11846 	cancel_work_sync(&priv->link_up);
11847 	cancel_work_sync(&priv->link_down);
11848 	cancel_delayed_work_sync(&priv->led_link_on);
11849 	cancel_delayed_work_sync(&priv->led_link_off);
11850 	cancel_delayed_work_sync(&priv->led_act_off);
11851 	cancel_work_sync(&priv->merge_networks);
11852 
11853 	/* Free MAC hash list for ADHOC */
11854 	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11855 		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11856 			list_del(p);
11857 			kfree(list_entry(p, struct ipw_ibss_seq, list));
11858 		}
11859 	}
11860 
11861 	kfree(priv->error);
11862 	priv->error = NULL;
11863 
11864 #ifdef CONFIG_IPW2200_PROMISCUOUS
11865 	ipw_prom_free(priv);
11866 #endif
11867 
11868 	free_irq(pdev->irq, priv);
11869 	iounmap(priv->hw_base);
11870 	pci_release_regions(pdev);
11871 	pci_disable_device(pdev);
11872 	/* wiphy_unregister needs to be here, before free_libipw */
11873 	wiphy_unregister(priv->ieee->wdev.wiphy);
11874 	kfree(priv->ieee->a_band.channels);
11875 	kfree(priv->ieee->bg_band.channels);
11876 	free_libipw(priv->net_dev, 0);
11877 	free_firmware();
11878 }
11879 
11880 #ifdef CONFIG_PM
11881 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11882 {
11883 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11884 	struct net_device *dev = priv->net_dev;
11885 
11886 	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11887 
11888 	/* Take down the device; powers it off, etc. */
11889 	ipw_down(priv);
11890 
11891 	/* Remove the PRESENT state of the device */
11892 	netif_device_detach(dev);
11893 
11894 	pci_save_state(pdev);
11895 	pci_disable_device(pdev);
11896 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
11897 
11898 	priv->suspend_at = get_seconds();
11899 
11900 	return 0;
11901 }
11902 
11903 static int ipw_pci_resume(struct pci_dev *pdev)
11904 {
11905 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11906 	struct net_device *dev = priv->net_dev;
11907 	int err;
11908 	u32 val;
11909 
11910 	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11911 
11912 	pci_set_power_state(pdev, PCI_D0);
11913 	err = pci_enable_device(pdev);
11914 	if (err) {
11915 		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11916 		       dev->name);
11917 		return err;
11918 	}
11919 	pci_restore_state(pdev);
11920 
11921 	/*
11922 	 * Suspend/Resume resets the PCI configuration space, so we have to
11923 	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11924 	 * from interfering with C3 CPU state. pci_restore_state won't help
11925 	 * here since it only restores the first 64 bytes pci config header.
11926 	 */
11927 	pci_read_config_dword(pdev, 0x40, &val);
11928 	if ((val & 0x0000ff00) != 0)
11929 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11930 
11931 	/* Set the device back into the PRESENT state; this will also wake
11932 	 * the queue of needed */
11933 	netif_device_attach(dev);
11934 
11935 	priv->suspend_time = get_seconds() - priv->suspend_at;
11936 
11937 	/* Bring the device back up */
11938 	schedule_work(&priv->up);
11939 
11940 	return 0;
11941 }
11942 #endif
11943 
11944 static void ipw_pci_shutdown(struct pci_dev *pdev)
11945 {
11946 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11947 
11948 	/* Take down the device; powers it off, etc. */
11949 	ipw_down(priv);
11950 
11951 	pci_disable_device(pdev);
11952 }
11953 
11954 /* driver initialization stuff */
11955 static struct pci_driver ipw_driver = {
11956 	.name = DRV_NAME,
11957 	.id_table = card_ids,
11958 	.probe = ipw_pci_probe,
11959 	.remove = ipw_pci_remove,
11960 #ifdef CONFIG_PM
11961 	.suspend = ipw_pci_suspend,
11962 	.resume = ipw_pci_resume,
11963 #endif
11964 	.shutdown = ipw_pci_shutdown,
11965 };
11966 
11967 static int __init ipw_init(void)
11968 {
11969 	int ret;
11970 
11971 	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11972 	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11973 
11974 	ret = pci_register_driver(&ipw_driver);
11975 	if (ret) {
11976 		IPW_ERROR("Unable to initialize PCI module\n");
11977 		return ret;
11978 	}
11979 
11980 	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11981 	if (ret) {
11982 		IPW_ERROR("Unable to create driver sysfs file\n");
11983 		pci_unregister_driver(&ipw_driver);
11984 		return ret;
11985 	}
11986 
11987 	return ret;
11988 }
11989 
11990 static void __exit ipw_exit(void)
11991 {
11992 	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11993 	pci_unregister_driver(&ipw_driver);
11994 }
11995 
11996 module_param(disable, int, 0444);
11997 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11998 
11999 module_param(associate, int, 0444);
12000 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12001 
12002 module_param(auto_create, int, 0444);
12003 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12004 
12005 module_param_named(led, led_support, int, 0444);
12006 MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
12007 
12008 module_param(debug, int, 0444);
12009 MODULE_PARM_DESC(debug, "debug output mask");
12010 
12011 module_param_named(channel, default_channel, int, 0444);
12012 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
12013 
12014 #ifdef CONFIG_IPW2200_PROMISCUOUS
12015 module_param(rtap_iface, int, 0444);
12016 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12017 #endif
12018 
12019 #ifdef CONFIG_IPW2200_QOS
12020 module_param(qos_enable, int, 0444);
12021 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalitis");
12022 
12023 module_param(qos_burst_enable, int, 0444);
12024 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12025 
12026 module_param(qos_no_ack_mask, int, 0444);
12027 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12028 
12029 module_param(burst_duration_CCK, int, 0444);
12030 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12031 
12032 module_param(burst_duration_OFDM, int, 0444);
12033 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12034 #endif				/* CONFIG_IPW2200_QOS */
12035 
12036 #ifdef CONFIG_IPW2200_MONITOR
12037 module_param_named(mode, network_mode, int, 0444);
12038 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12039 #else
12040 module_param_named(mode, network_mode, int, 0444);
12041 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12042 #endif
12043 
12044 module_param(bt_coexist, int, 0444);
12045 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12046 
12047 module_param(hwcrypto, int, 0444);
12048 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12049 
12050 module_param(cmdlog, int, 0444);
12051 MODULE_PARM_DESC(cmdlog,
12052 		 "allocate a ring buffer for logging firmware commands");
12053 
12054 module_param(roaming, int, 0444);
12055 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12056 
12057 module_param(antenna, int, 0444);
12058 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
12059 
12060 module_exit(ipw_exit);
12061 module_init(ipw_init);
12062