1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3 
4   Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
5 
6   802.11 status code portion of this file from ethereal-0.10.6:
7     Copyright 2000, Axis Communications AB
8     Ethereal - Network traffic analyzer
9     By Gerald Combs <gerald@ethereal.com>
10     Copyright 1998 Gerald Combs
11 
12 
13   Contact Information:
14   Intel Linux Wireless <ilw@linux.intel.com>
15   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
16 
17 ******************************************************************************/
18 
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <net/cfg80211-wext.h>
22 #include "ipw2200.h"
23 #include "ipw.h"
24 
25 
/* Version-string suffix letters: each enabled build option appends one
 * character, so the reported driver version encodes the configuration. */
#ifndef KBUILD_EXTMOD
#define VK "k"		/* built in-kernel (not as an external module) */
#else
#define VK
#endif

#ifdef CONFIG_IPW2200_DEBUG
#define VD "d"		/* debug support compiled in */
#else
#define VD
#endif

#ifdef CONFIG_IPW2200_MONITOR
#define VM "m"		/* monitor mode compiled in */
#else
#define VM
#endif

#ifdef CONFIG_IPW2200_PROMISCUOUS
#define VP "p"		/* promiscuous/rtap support compiled in */
#else
#define VP
#endif

#ifdef CONFIG_IPW2200_RADIOTAP
#define VR "r"		/* radiotap support compiled in */
#else
#define VR
#endif

#ifdef CONFIG_IPW2200_QOS
#define VQ "q"		/* 802.11 QoS support compiled in */
#else
#define VQ
#endif

/* Full version string: base number plus the option-letter suffixes above */
#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
#define DRV_VERSION     IPW2200_VERSION

/* Ethertype for 802.11 statistics frames (one above ETH_P_80211_RAW) */
#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
68 
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
/* Firmware images requested at runtime, one per operating mode */
MODULE_FIRMWARE("ipw2200-ibss.fw");
#ifdef CONFIG_IPW2200_MONITOR
MODULE_FIRMWARE("ipw2200-sniffer.fw");
#endif
MODULE_FIRMWARE("ipw2200-bss.fw");

/* Driver tunables; their exact semantics are defined by the code that
 * consumes them later in this file. */
static int cmdlog = 0;		/* NOTE(review): appears to control firmware command logging; 0 = off -- confirm at use site */
static int debug = 0;
static int default_channel = 0;
static int network_mode = 0;

static u32 ipw_debug_level;	/* bitmask gating IPW_DEBUG_* output (see printk_buf()) */
static int associate;
static int auto_create = 1;
static int led_support = 1;
static int disable = 0;
static int bt_coexist = 0;
static int hwcrypto = 0;
static int roaming = 1;
/* Band letters indexed by mode; '?' for unknown */
static const char ipw_modes[] = {
	'a', 'b', 'g', '?'
};
static int antenna = CFG_SYS_ANTENNA_BOTH;

#ifdef CONFIG_IPW2200_PROMISCUOUS
static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
#endif
100 
/* Legacy bitrate table exported to cfg80211.  Bitrates are in units of
 * 100 kbit/s per struct ieee80211_rate (10 => 1 Mb/s): entries 0-3 are
 * the CCK (b) rates, entries 4-11 the OFDM rates -- see the index
 * macros below. */
static struct ieee80211_rate ipw2200_rates[] = {
	{ .bitrate = 10 },
	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60 },
	{ .bitrate = 90 },
	{ .bitrate = 120 },
	{ .bitrate = 180 },
	{ .bitrate = 240 },
	{ .bitrate = 360 },
	{ .bitrate = 480 },
	{ .bitrate = 540 }
};

/* a band: the 8 OFDM entries; b/g band: all 12 entries */
#define ipw2200_a_rates		(ipw2200_rates + 4)
#define ipw2200_num_a_rates	8
#define ipw2200_bg_rates	(ipw2200_rates + 0)
#define ipw2200_num_bg_rates	12
120 
/* Ugly macro to convert literal channel numbers into their MHz equivalents.
 * There are certainly some conditions that will break this (like feeding it
 * '30') but they shouldn't arise since nothing talks on channel 30.
 * Mapping: channels 1-13 -> 2407 + chan*5 MHz, channel 14 -> 2484 MHz
 * (special-cased), anything above 14 -> (chan + 1000) * 5 MHz (5 GHz). */
#define ieee80211chan2mhz(x) \
	(((x) <= 14) ? \
	(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
	((x) + 1000) * 5)
128 
#ifdef CONFIG_IPW2200_QOS
/* QoS module parameters; all default to disabled/zero */
static int qos_enable = 0;
static int qos_burst_enable = 0;
static int qos_no_ack_mask = 0;
static int burst_duration_CCK = 0;
static int burst_duration_OFDM = 0;

/* Per-TX-queue 802.11e parameter tables.  Row order follows
 * struct libipw_qos_parameters -- CW min, CW max, AIFS, ACM, TXOP limit,
 * one column per TX queue, as the initializer macro names indicate.
 * QOS_* tables apply when QoS is active, DEF_* otherwise; separate
 * variants exist for the OFDM and CCK PHYs. */
static struct libipw_qos_parameters def_qos_parameters_OFDM = {
	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
	 QOS_TX3_CW_MIN_OFDM},
	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
	 QOS_TX3_CW_MAX_OFDM},
	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
};

static struct libipw_qos_parameters def_qos_parameters_CCK = {
	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
	 QOS_TX3_CW_MIN_CCK},
	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
	 QOS_TX3_CW_MAX_CCK},
	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
	 QOS_TX3_TXOP_LIMIT_CCK}
};

static struct libipw_qos_parameters def_parameters_OFDM = {
	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
	 DEF_TX3_CW_MIN_OFDM},
	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
	 DEF_TX3_CW_MAX_OFDM},
	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
};

static struct libipw_qos_parameters def_parameters_CCK = {
	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
	 DEF_TX3_CW_MIN_CCK},
	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
	 DEF_TX3_CW_MAX_CCK},
	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
	 DEF_TX3_TXOP_LIMIT_CCK}
};

/* OUI carried in QoS information elements (00:50:F2, the OUI used by WMM) */
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };

/* Map frame priority (0-7, array index) to one of the four TX queues */
static int from_priority_to_tx_queue[] = {
	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
};

static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);

static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
				       *qos_param);
static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
				     *qos_param);
#endif				/* CONFIG_IPW2200_QOS */
194 
/* Forward declarations; all are defined later in this file */
static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
static void ipw_remove_current_network(struct ipw_priv *priv);
static void ipw_rx(struct ipw_priv *priv);
static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
				struct clx2_tx_queue *txq, int qindex);
static int ipw_queue_reset(struct ipw_priv *priv);

static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, const void *buf,
			     int len, int sync);

static void ipw_tx_queue_free(struct ipw_priv *);

static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
static void ipw_rx_queue_replenish(void *);
static int ipw_up(struct ipw_priv *);
static void ipw_bg_up(struct work_struct *work);
static void ipw_down(struct ipw_priv *);
static void ipw_bg_down(struct work_struct *work);
static int ipw_config(struct ipw_priv *);
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *prates);
static void ipw_set_hwcrypto_keys(struct ipw_priv *);
static void ipw_send_wep_keys(struct ipw_priv *, int);
219 
220 static int snprint_line(char *buf, size_t count,
221 			const u8 * data, u32 len, u32 ofs)
222 {
223 	int out, i, j, l;
224 	char c;
225 
226 	out = scnprintf(buf, count, "%08X", ofs);
227 
228 	for (l = 0, i = 0; i < 2; i++) {
229 		out += scnprintf(buf + out, count - out, " ");
230 		for (j = 0; j < 8 && l < len; j++, l++)
231 			out += scnprintf(buf + out, count - out, "%02X ",
232 					data[(i * 8 + j)]);
233 		for (; j < 8; j++)
234 			out += scnprintf(buf + out, count - out, "   ");
235 	}
236 
237 	out += scnprintf(buf + out, count - out, " ");
238 	for (l = 0, i = 0; i < 2; i++) {
239 		out += scnprintf(buf + out, count - out, " ");
240 		for (j = 0; j < 8 && l < len; j++, l++) {
241 			c = data[(i * 8 + j)];
242 			if (!isascii(c) || !isprint(c))
243 				c = '.';
244 
245 			out += scnprintf(buf + out, count - out, "%c", c);
246 		}
247 
248 		for (; j < 8; j++)
249 			out += scnprintf(buf + out, count - out, " ");
250 	}
251 
252 	return out;
253 }
254 
255 static void printk_buf(int level, const u8 * data, u32 len)
256 {
257 	char line[81];
258 	u32 ofs = 0;
259 	if (!(ipw_debug_level & level))
260 		return;
261 
262 	while (len) {
263 		snprint_line(line, sizeof(line), &data[ofs],
264 			     min(len, 16U), ofs);
265 		printk(KERN_DEBUG "%s\n", line);
266 		ofs += 16;
267 		len -= min(len, 16U);
268 	}
269 }
270 
271 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
272 {
273 	size_t out = size;
274 	u32 ofs = 0;
275 	int total = 0;
276 
277 	while (size && len) {
278 		out = snprint_line(output, size, &data[ofs],
279 				   min_t(size_t, len, 16U), ofs);
280 
281 		ofs += 16;
282 		output += out;
283 		size -= out;
284 		len -= min_t(size_t, len, 16U);
285 		total += out;
286 	}
287 	return total;
288 }
289 
/* alias for 32-bit indirect read (for SRAM/reg above 4K); the debug
 * print happens inside _ipw_read_reg32() itself, so the alias adds none */
static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)

/* alias for 8-bit indirect read (for SRAM/reg above 4K); the debug
 * print happens inside _ipw_read_reg8() itself, so the alias adds none */
static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
297 
/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper:
 * logs register and value, then performs the raw indirect write */
static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
{
	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg8(a, b, c);
}

/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
{
	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg16(a, b, c);
}

/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
{
	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
		     __LINE__, (u32) (b), (u32) (c));
	_ipw_write_reg32(a, b, c);
}
324 
/* 8-bit direct write (low 4K) */
static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
		u8 val)
{
	writeb(val, ipw->hw_base + ofs);
}

/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper.
 * These wrappers are macros (not functions) so __FILE__/__LINE__ report
 * the call site rather than this header area. */
#define ipw_write8(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
			__LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write8(ipw, ofs, val); \
} while (0)

/* 16-bit direct write (low 4K) */
static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
		u16 val)
{
	writew(val, ipw->hw_base + ofs);
}

/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
#define ipw_write16(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
			__LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write16(ipw, ofs, val); \
} while (0)

/* 32-bit direct write (low 4K) */
static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
		u32 val)
{
	writel(val, ipw->hw_base + ofs);
}

/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
#define ipw_write32(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
			__LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write32(ipw, ofs, val); \
} while (0)
366 
/* 8-bit direct read (low 4K) */
static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
{
	return readb(ipw->hw_base + ofs);
}

/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper.
 * Statement-expression macro: logs the call site, then yields the value
 * read, so it can be used wherever the plain read could. */
#define ipw_read8(ipw, ofs) ({ \
	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
			(u32)(ofs)); \
	_ipw_read8(ipw, ofs); \
})

/* 16-bit direct read (low 4K) */
static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
{
	return readw(ipw->hw_base + ofs);
}

/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
#define ipw_read16(ipw, ofs) ({ \
	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
			(u32)(ofs)); \
	_ipw_read16(ipw, ofs); \
})

/* 32-bit direct read (low 4K) */
static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
{
	return readl(ipw->hw_base + ofs);
}

/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
#define ipw_read32(ipw, ofs) ({ \
	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
			(u32)(ofs)); \
	_ipw_read32(ipw, ofs); \
})
405 
static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
#define ipw_read_indirect(a, b, c, d) ({ \
	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
			__LINE__, (u32)(b), (u32)(d)); \
	_ipw_read_indirect(a, b, c, d); \
})

/* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
				int num);
#define ipw_write_indirect(a, b, c, d) do { \
	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
			__LINE__, (u32)(b), (u32)(d)); \
	_ipw_write_indirect(a, b, c, d); \
} while (0)
422 
/* 32-bit indirect write (above 4K): latch the address, then the data */
static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
{
	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
}

/* 8-bit indirect write (above 4K): latch the dword-aligned address,
 * then write through the byte lane selected by the low two reg bits */
static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
{
	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = reg - aligned_addr;

	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
}

/* 16-bit indirect write (above 4K) */
static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
{
	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	/* NOTE(review): masking off bit 0 rounds an odd reg down to a
	 * 16-bit lane boundary -- confirm callers never pass odd offsets */
	u32 dif_len = (reg - aligned_addr) & (~0x1ul);

	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
}
452 
453 /* 8-bit indirect read (above 4K) */
454 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
455 {
456 	u32 word;
457 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
458 	IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
459 	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
460 	return (word >> ((reg & 0x3) * 8)) & 0xff;
461 }
462 
463 /* 32-bit indirect read (above 4K) */
464 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
465 {
466 	u32 value;
467 
468 	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
469 
470 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
471 	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
472 	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
473 	return value;
474 }
475 
476 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
477 /*    for area above 1st 4K of SRAM/reg space */
478 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
479 			       int num)
480 {
481 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
482 	u32 dif_len = addr - aligned_addr;
483 	u32 i;
484 
485 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
486 
487 	if (num <= 0) {
488 		return;
489 	}
490 
491 	/* Read the first dword (or portion) byte by byte */
492 	if (unlikely(dif_len)) {
493 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
494 		/* Start reading at aligned_addr + dif_len */
495 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
496 			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
497 		aligned_addr += 4;
498 	}
499 
500 	/* Read all of the middle dwords as dwords, with auto-increment */
501 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
502 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
503 		*(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
504 
505 	/* Read the last dword (or portion) byte by byte */
506 	if (unlikely(num)) {
507 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508 		for (i = 0; num > 0; i++, num--)
509 			*buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
510 	}
511 }
512 
513 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
514 /*    for area above 1st 4K of SRAM/reg space */
515 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
516 				int num)
517 {
518 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
519 	u32 dif_len = addr - aligned_addr;
520 	u32 i;
521 
522 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
523 
524 	if (num <= 0) {
525 		return;
526 	}
527 
528 	/* Write the first dword (or portion) byte by byte */
529 	if (unlikely(dif_len)) {
530 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
531 		/* Start writing at aligned_addr + dif_len */
532 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
533 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
534 		aligned_addr += 4;
535 	}
536 
537 	/* Write all of the middle dwords as dwords, with auto-increment */
538 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
539 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
540 		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
541 
542 	/* Write the last dword (or portion) byte by byte */
543 	if (unlikely(num)) {
544 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545 		for (i = 0; num > 0; i++, num--, buf++)
546 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
547 	}
548 }
549 
/* General purpose, no alignment requirement, iterative (multi-byte) write, */
/*    for 1st 4K of SRAM/regs space */
static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
			     int num)
{
	memcpy_toio((priv->hw_base + addr), buf, num);
}

/* Set bit(s) in low 4K of SRAM/regs; non-atomic read-modify-write */
static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
{
	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
}

/* Clear bit(s) in low 4K of SRAM/regs; non-atomic read-modify-write */
static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
{
	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
}
569 
570 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
571 {
572 	if (priv->status & STATUS_INT_ENABLED)
573 		return;
574 	priv->status |= STATUS_INT_ENABLED;
575 	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
576 }
577 
578 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
579 {
580 	if (!(priv->status & STATUS_INT_ENABLED))
581 		return;
582 	priv->status &= ~STATUS_INT_ENABLED;
583 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
584 }
585 
/* Locked wrapper: enable interrupts under priv->irq_lock */
static inline void ipw_enable_interrupts(struct ipw_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	__ipw_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}

/* Locked wrapper: disable interrupts under priv->irq_lock */
static inline void ipw_disable_interrupts(struct ipw_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	__ipw_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}
603 
604 static char *ipw_error_desc(u32 val)
605 {
606 	switch (val) {
607 	case IPW_FW_ERROR_OK:
608 		return "ERROR_OK";
609 	case IPW_FW_ERROR_FAIL:
610 		return "ERROR_FAIL";
611 	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
612 		return "MEMORY_UNDERFLOW";
613 	case IPW_FW_ERROR_MEMORY_OVERFLOW:
614 		return "MEMORY_OVERFLOW";
615 	case IPW_FW_ERROR_BAD_PARAM:
616 		return "BAD_PARAM";
617 	case IPW_FW_ERROR_BAD_CHECKSUM:
618 		return "BAD_CHECKSUM";
619 	case IPW_FW_ERROR_NMI_INTERRUPT:
620 		return "NMI_INTERRUPT";
621 	case IPW_FW_ERROR_BAD_DATABASE:
622 		return "BAD_DATABASE";
623 	case IPW_FW_ERROR_ALLOC_FAIL:
624 		return "ALLOC_FAIL";
625 	case IPW_FW_ERROR_DMA_UNDERRUN:
626 		return "DMA_UNDERRUN";
627 	case IPW_FW_ERROR_DMA_STATUS:
628 		return "DMA_STATUS";
629 	case IPW_FW_ERROR_DINO_ERROR:
630 		return "DINO_ERROR";
631 	case IPW_FW_ERROR_EEPROM_ERROR:
632 		return "EEPROM_ERROR";
633 	case IPW_FW_ERROR_SYSASSERT:
634 		return "SYSASSERT";
635 	case IPW_FW_ERROR_FATAL_ERROR:
636 		return "FATAL_ERROR";
637 	default:
638 		return "UNKNOWN_ERROR";
639 	}
640 }
641 
642 static void ipw_dump_error_log(struct ipw_priv *priv,
643 			       struct ipw_fw_error *error)
644 {
645 	u32 i;
646 
647 	if (!error) {
648 		IPW_ERROR("Error allocating and capturing error log.  "
649 			  "Nothing to dump.\n");
650 		return;
651 	}
652 
653 	IPW_ERROR("Start IPW Error Log Dump:\n");
654 	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
655 		  error->status, error->config);
656 
657 	for (i = 0; i < error->elem_len; i++)
658 		IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
659 			  ipw_error_desc(error->elem[i].desc),
660 			  error->elem[i].time,
661 			  error->elem[i].blink1,
662 			  error->elem[i].blink2,
663 			  error->elem[i].link1,
664 			  error->elem[i].link2, error->elem[i].data);
665 	for (i = 0; i < error->log_len; i++)
666 		IPW_ERROR("%i\t0x%08x\t%i\n",
667 			  error->log[i].time,
668 			  error->log[i].data, error->log[i].event);
669 }
670 
671 static inline int ipw_is_init(struct ipw_priv *priv)
672 {
673 	return (priv->status & STATUS_INIT) ? 1 : 0;
674 }
675 
/*
 * ipw_get_ordinal - read one device "ordinal" (statistic/config value)
 * @priv: driver state; ordinal tables must have been initialized by
 *        ipw_init_ordinals()
 * @ord:  ordinal id; high bits (IPW_ORD_TABLE_ID_MASK) select table
 *        0/1/2, low bits (IPW_ORD_TABLE_VALUE_MASK) select the entry
 * @val:  output buffer for the value
 * @len:  in: capacity of @val in bytes; out: bytes needed/written
 *
 * Returns 0 on success; -EINVAL on bad arguments, uninitialized tables,
 * an unknown table id, an out-of-range ordinal, or a too-small buffer
 * (for table 2, *@len is first updated to the required size).
 */
static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
{
	u32 addr, field_info, field_len, field_count, total_len;

	IPW_DEBUG_ORD("ordinal = %i\n", ord);

	if (!priv || !val || !len) {
		IPW_DEBUG_ORD("Invalid argument\n");
		return -EINVAL;
	}

	/* verify device ordinal tables have been initialized */
	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
		IPW_DEBUG_ORD("Access ordinals before initialization\n");
		return -EINVAL;
	}

	switch (IPW_ORD_TABLE_ID_MASK & ord) {
	case IPW_ORD_TABLE_0_MASK:
		/*
		 * TABLE 0: Direct access to a table of 32 bit values
		 *
		 * This is a very simple table with the data directly
		 * read from the table
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		/* NOTE(review): '>' (not '>=') allows ord == table0_len;
		 * confirm the table length semantics make that valid */
		if (ord > priv->table0_len) {
			IPW_DEBUG_ORD("ordinal value (%i) longer then "
				      "max (%i)\n", ord, priv->table0_len);
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
			      ord, priv->table0_addr + (ord << 2));

		*len = sizeof(u32);
		ord <<= 2;	/* entries are dwords: byte offset = ord * 4 */
		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
		break;

	case IPW_ORD_TABLE_1_MASK:
		/*
		 * TABLE 1: Indirect access to a table of 32 bit values
		 *
		 * This is a fairly large table of u32 values each
		 * representing starting addr for the data (which is
		 * also a u32)
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table1_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		*((u32 *) val) =
		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
		*len = sizeof(u32);
		break;

	case IPW_ORD_TABLE_2_MASK:
		/*
		 * TABLE 2: Indirect access to a table of variable sized values
		 *
		 * This table consist of six values, each containing
		 *     - dword containing the starting offset of the data
		 *     - dword containing the lengh in the first 16bits
		 *       and the count in the second 16bits
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table2_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}

		/* get the address of statistic */
		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));

		/* get the second DW of statistics ;
		 * two 16-bit words - first is length, second is count */
		field_info =
		    ipw_read_reg32(priv,
				   priv->table2_addr + (ord << 3) +
				   sizeof(u32));

		/* get each entry length */
		/* NOTE(review): type-punning a u32 through u16* -- byte
		 * order assumption; low half = length on little-endian */
		field_len = *((u16 *) & field_info);

		/* get number of entries */
		field_count = *(((u16 *) & field_info) + 1);

		/* abort if not enough memory */
		total_len = field_len * field_count;
		if (total_len > *len) {
			/* report the required size back to the caller */
			*len = total_len;
			return -EINVAL;
		}

		*len = total_len;
		if (!total_len)
			return 0;

		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
			      "field_info = 0x%08x\n",
			      addr, total_len, field_info);
		ipw_read_indirect(priv, addr, val, total_len);
		break;

	default:
		IPW_DEBUG_ORD("Invalid ordinal!\n");
		return -EINVAL;

	}

	return 0;
}
817 
/* Cache the addresses and lengths of the three device ordinal tables
 * used by ipw_get_ordinal().  Table 0 lives at a fixed low address
 * (direct read); the addresses of tables 1 and 2 are read from the
 * device, and their lengths via indirect reads. */
static void ipw_init_ordinals(struct ipw_priv *priv)
{
	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
	priv->table0_len = ipw_read32(priv, priv->table0_addr);

	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
		      priv->table0_addr, priv->table0_len);

	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);

	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
		      priv->table1_addr, priv->table1_len);

	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
	priv->table2_len &= 0x0000ffff;	/* use first two bytes */

	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
		      priv->table2_addr, priv->table2_len);

}
840 
841 static u32 ipw_register_toggle(u32 reg)
842 {
843 	reg &= ~IPW_START_STANDBY;
844 	if (reg & IPW_GATE_ODMA)
845 		reg &= ~IPW_GATE_ODMA;
846 	if (reg & IPW_GATE_IDMA)
847 		reg &= ~IPW_GATE_IDMA;
848 	if (reg & IPW_GATE_ADMA)
849 		reg &= ~IPW_GATE_ADMA;
850 	return reg;
851 }
852 
853 /*
854  * LED behavior:
855  * - On radio ON, turn on any LEDs that require to be on during start
856  * - On initialization, start unassociated blink
857  * - On association, disable unassociated blink
858  * - On disassociation, start unassociated blink
859  * - On radio OFF, turn off any LEDs started during radio on
860  *
861  */
862 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
863 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
864 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
865 
866 static void ipw_led_link_on(struct ipw_priv *priv)
867 {
868 	unsigned long flags;
869 	u32 led;
870 
871 	/* If configured to not use LEDs, or nic_type is 1,
872 	 * then we don't toggle a LINK led */
873 	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
874 		return;
875 
876 	spin_lock_irqsave(&priv->lock, flags);
877 
878 	if (!(priv->status & STATUS_RF_KILL_MASK) &&
879 	    !(priv->status & STATUS_LED_LINK_ON)) {
880 		IPW_DEBUG_LED("Link LED On\n");
881 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
882 		led |= priv->led_association_on;
883 
884 		led = ipw_register_toggle(led);
885 
886 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
887 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
888 
889 		priv->status |= STATUS_LED_LINK_ON;
890 
891 		/* If we aren't associated, schedule turning the LED off */
892 		if (!(priv->status & STATUS_ASSOCIATED))
893 			schedule_delayed_work(&priv->led_link_off,
894 					      LD_TIME_LINK_ON);
895 	}
896 
897 	spin_unlock_irqrestore(&priv->lock, flags);
898 }
899 
/* Work-queue wrapper: run ipw_led_link_on() under priv->mutex */
static void ipw_bg_led_link_on(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, led_link_on.work);
	mutex_lock(&priv->mutex);
	ipw_led_link_on(priv);
	mutex_unlock(&priv->mutex);
}
908 
/* Turn the link LED off (under priv->lock).  When unassociated with the
 * radio on, re-arm the delayed switch-on so the LED keeps blinking. */
static void ipw_led_link_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured not to use LEDs, or nic type is 1,
	 * then we don't toggle the LINK led. */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_LINK_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_association_off;	/* mask out the association bit */
		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Link LED Off\n");

		priv->status &= ~STATUS_LED_LINK_ON;

		/* If we aren't associated and the radio is on, schedule
		 * turning the LED on (blink while unassociated) */
		if (!(priv->status & STATUS_RF_KILL_MASK) &&
		    !(priv->status & STATUS_ASSOCIATED))
			schedule_delayed_work(&priv->led_link_on,
					      LD_TIME_LINK_OFF);

	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
944 
/* Work-queue wrapper: run ipw_led_link_off() under priv->mutex */
static void ipw_bg_led_link_off(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, led_link_off.work);
	mutex_lock(&priv->mutex);
	ipw_led_link_off(priv);
	mutex_unlock(&priv->mutex);
}
953 
954 static void __ipw_led_activity_on(struct ipw_priv *priv)
955 {
956 	u32 led;
957 
958 	if (priv->config & CFG_NO_LED)
959 		return;
960 
961 	if (priv->status & STATUS_RF_KILL_MASK)
962 		return;
963 
964 	if (!(priv->status & STATUS_LED_ACT_ON)) {
965 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
966 		led |= priv->led_activity_on;
967 
968 		led = ipw_register_toggle(led);
969 
970 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
971 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
972 
973 		IPW_DEBUG_LED("Activity LED On\n");
974 
975 		priv->status |= STATUS_LED_ACT_ON;
976 
977 		cancel_delayed_work(&priv->led_act_off);
978 		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
979 	} else {
980 		/* Reschedule LED off for full time period */
981 		cancel_delayed_work(&priv->led_act_off);
982 		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
983 	}
984 }
985 
#if 0
/* Locked wrapper around __ipw_led_activity_on(); currently compiled out
 * (no callers in this build), kept for reference. */
void ipw_led_activity_on(struct ipw_priv *priv)
{
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	__ipw_led_activity_on(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
#endif  /*  0  */
995 
996 static void ipw_led_activity_off(struct ipw_priv *priv)
997 {
998 	unsigned long flags;
999 	u32 led;
1000 
1001 	if (priv->config & CFG_NO_LED)
1002 		return;
1003 
1004 	spin_lock_irqsave(&priv->lock, flags);
1005 
1006 	if (priv->status & STATUS_LED_ACT_ON) {
1007 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
1008 		led &= priv->led_activity_off;
1009 
1010 		led = ipw_register_toggle(led);
1011 
1012 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1013 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
1014 
1015 		IPW_DEBUG_LED("Activity LED Off\n");
1016 
1017 		priv->status &= ~STATUS_LED_ACT_ON;
1018 	}
1019 
1020 	spin_unlock_irqrestore(&priv->lock, flags);
1021 }
1022 
/* Workqueue wrapper: run ipw_led_activity_off() under priv->mutex. */
static void ipw_bg_led_activity_off(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, led_act_off.work);
	mutex_lock(&priv->mutex);
	ipw_led_activity_off(priv);
	mutex_unlock(&priv->mutex);
}
1031 
1032 static void ipw_led_band_on(struct ipw_priv *priv)
1033 {
1034 	unsigned long flags;
1035 	u32 led;
1036 
1037 	/* Only nic type 1 supports mode LEDs */
1038 	if (priv->config & CFG_NO_LED ||
1039 	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1040 		return;
1041 
1042 	spin_lock_irqsave(&priv->lock, flags);
1043 
1044 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1045 	if (priv->assoc_network->mode == IEEE_A) {
1046 		led |= priv->led_ofdm_on;
1047 		led &= priv->led_association_off;
1048 		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1049 	} else if (priv->assoc_network->mode == IEEE_G) {
1050 		led |= priv->led_ofdm_on;
1051 		led |= priv->led_association_on;
1052 		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1053 	} else {
1054 		led &= priv->led_ofdm_off;
1055 		led |= priv->led_association_on;
1056 		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1057 	}
1058 
1059 	led = ipw_register_toggle(led);
1060 
1061 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1062 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1063 
1064 	spin_unlock_irqrestore(&priv->lock, flags);
1065 }
1066 
1067 static void ipw_led_band_off(struct ipw_priv *priv)
1068 {
1069 	unsigned long flags;
1070 	u32 led;
1071 
1072 	/* Only nic type 1 supports mode LEDs */
1073 	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1074 		return;
1075 
1076 	spin_lock_irqsave(&priv->lock, flags);
1077 
1078 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1079 	led &= priv->led_ofdm_off;
1080 	led &= priv->led_association_off;
1081 
1082 	led = ipw_register_toggle(led);
1083 
1084 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1085 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1086 
1087 	spin_unlock_irqrestore(&priv->lock, flags);
1088 }
1089 
/* Radio turned on: reuse the link-LED-on path. */
static void ipw_led_radio_on(struct ipw_priv *priv)
{
	ipw_led_link_on(priv);
}
1094 
/* Radio turned off: extinguish both the activity and link LEDs. */
static void ipw_led_radio_off(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);
}
1100 
/* Association established: light the link LED. */
static void ipw_led_link_up(struct ipw_priv *priv)
{
	/* Set the Link Led on for all nic types */
	ipw_led_link_on(priv);
}
1106 
/* Association lost: turn activity and link LEDs off.  If RF-kill is
 * active, also run the radio-off path (which repeats the same two calls;
 * harmless since both are idempotent no-ops when the LEDs are off). */
static void ipw_led_link_down(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);

	if (priv->status & STATUS_RF_KILL_MASK)
		ipw_led_radio_off(priv);
}
1115 
/* Read the NIC type from EEPROM and set up the LED pin masks for this
 * hardware variant, then put the LEDs in the correct initial state. */
static void ipw_led_init(struct ipw_priv *priv)
{
	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];

	/* Set the default PINs for the link and activity leds */
	priv->led_activity_on = IPW_ACTIVITY_LED;
	priv->led_activity_off = ~(IPW_ACTIVITY_LED);

	priv->led_association_on = IPW_ASSOCIATED_LED;
	priv->led_association_off = ~(IPW_ASSOCIATED_LED);

	/* Set the default PINs for the OFDM leds */
	priv->led_ofdm_on = IPW_OFDM_LED;
	priv->led_ofdm_off = ~(IPW_OFDM_LED);

	switch (priv->nic_type) {
	case EEPROM_NIC_TYPE_1:
		/* In this NIC type, the LEDs are reversed.... */
		priv->led_activity_on = IPW_ASSOCIATED_LED;
		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
		priv->led_association_on = IPW_ACTIVITY_LED;
		priv->led_association_off = ~(IPW_ACTIVITY_LED);

		if (!(priv->config & CFG_NO_LED))
			ipw_led_band_on(priv);

		/* And we don't blink link LEDs for this nic, so
		 * just return here */
		return;

	case EEPROM_NIC_TYPE_3:
	case EEPROM_NIC_TYPE_2:
	case EEPROM_NIC_TYPE_4:
	case EEPROM_NIC_TYPE_0:
		break;

	default:
		/* Unknown hardware: fall back to type 0 behavior. */
		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
			       priv->nic_type);
		priv->nic_type = EEPROM_NIC_TYPE_0;
		break;
	}

	if (!(priv->config & CFG_NO_LED)) {
		if (priv->status & STATUS_ASSOCIATED)
			ipw_led_link_on(priv);
		else
			ipw_led_link_off(priv);
	}
}
1166 
/* Turn every LED off and cancel all pending LED work items. */
static void ipw_led_shutdown(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);
	ipw_led_band_off(priv);
	cancel_delayed_work(&priv->led_link_on);
	cancel_delayed_work(&priv->led_link_off);
	cancel_delayed_work(&priv->led_act_off);
}
1176 
1177 /*
1178  * The following adds a new attribute to the sysfs representation
1179  * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1180  * used for controlling the debug level.
1181  *
1182  * See the level definitions in ipw for details.
1183  */
/* sysfs driver attribute "debug_level" (show): print the current debug
 * level mask in hex. */
static ssize_t debug_level_show(struct device_driver *d, char *buf)
{
	return sprintf(buf, "0x%08X\n", ipw_debug_level);
}
1188 
/* sysfs driver attribute "debug_level" (store): accepts decimal, or hex
 * with an optional "x"/"0x" prefix.
 * NOTE(review): an input such as "1x" silently parses to 0 because the
 * prefix check inspects p[1] before validating p[0]; confirm before
 * relying on exotic inputs. */
static ssize_t debug_level_store(struct device_driver *d, const char *buf,
				 size_t count)
{
	char *p = (char *)buf;
	u32 val;

	/* Skip an optional "x"/"0x" prefix and parse hex, else decimal. */
	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
		p++;
		if (p[0] == 'x' || p[0] == 'X')
			p++;
		val = simple_strtoul(p, &p, 16);
	} else
		val = simple_strtoul(p, &p, 10);
	if (p == buf)
		printk(KERN_INFO DRV_NAME
		       ": %s is not in hex or decimal form.\n", buf);
	else
		ipw_debug_level = val;

	return strnlen(buf, count);
}
static DRIVER_ATTR_RW(debug_level);
1211 
/* Return the number of entries in the firmware event log. */
static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
{
	/* length = 1st dword in log */
	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
}
1217 
/* Copy log_len firmware event-log entries from device memory into 'log'.
 * The source starts two dwords past the log base (skipping the length
 * header).  No-op when log_len is zero. */
static void ipw_capture_event_log(struct ipw_priv *priv,
				  u32 log_len, struct ipw_event *log)
{
	u32 base;

	if (log_len) {
		base = ipw_read32(priv, IPW_EVENT_LOG);
		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
				  (u8 *) log, sizeof(*log) * log_len);
	}
}
1229 
/* Snapshot the firmware error state (error elements + event log) into a
 * freshly allocated struct ipw_fw_error; returns NULL on allocation
 * failure.  Uses GFP_ATOMIC so it is safe from the interrupt tasklet.
 *
 * NOTE(review): elem_len and log_len come straight from device registers
 * and are not range-checked before sizing the allocation -- confirm the
 * hardware bounds them. */
static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
{
	struct ipw_fw_error *error;
	u32 log_len = ipw_get_event_log_len(priv);
	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
	u32 elem_len = ipw_read_reg32(priv, base);

	/* Single buffer: header, then elem table, then event log; the
	 * elem/log pointers are carved out of the trailing payload. */
	error = kmalloc(sizeof(*error) +
			sizeof(*error->elem) * elem_len +
			sizeof(*error->log) * log_len, GFP_ATOMIC);
	if (!error) {
		IPW_ERROR("Memory allocation for firmware error log "
			  "failed.\n");
		return NULL;
	}
	error->jiffies = jiffies;
	error->status = priv->status;
	error->config = priv->config;
	error->elem_len = elem_len;
	error->log_len = log_len;
	error->elem = (struct ipw_error_elem *)error->payload;
	error->log = (struct ipw_event *)(error->elem + elem_len);

	ipw_capture_event_log(priv, log_len, error->log);

	if (elem_len)
		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
				  sizeof(*error->elem) * elem_len);

	return error;
}
1261 
1262 static ssize_t event_log_show(struct device *d,
1263 			      struct device_attribute *attr, char *buf)
1264 {
1265 	struct ipw_priv *priv = dev_get_drvdata(d);
1266 	u32 log_len = ipw_get_event_log_len(priv);
1267 	u32 log_size;
1268 	struct ipw_event *log;
1269 	u32 len = 0, i;
1270 
1271 	/* not using min() because of its strict type checking */
1272 	log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1273 			sizeof(*log) * log_len : PAGE_SIZE;
1274 	log = kzalloc(log_size, GFP_KERNEL);
1275 	if (!log) {
1276 		IPW_ERROR("Unable to allocate memory for log\n");
1277 		return 0;
1278 	}
1279 	log_len = log_size / sizeof(*log);
1280 	ipw_capture_event_log(priv, log_len, log);
1281 
1282 	len += scnprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1283 	for (i = 0; i < log_len; i++)
1284 		len += scnprintf(buf + len, PAGE_SIZE - len,
1285 				"\n%08X%08X%08X",
1286 				log[i].time, log[i].event, log[i].data);
1287 	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
1288 	kfree(log);
1289 	return len;
1290 }
1291 
1292 static DEVICE_ATTR_RO(event_log);
1293 
/* sysfs "error" (show): dump the captured firmware error snapshot:
 * header (jiffies/status/config/elem_len), then one line per error
 * element, then the event-log count and one line per log entry.
 * Empty read when no error has been captured. */
static ssize_t error_show(struct device *d,
			  struct device_attribute *attr, char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	u32 len = 0, i;
	if (!priv->error)
		return 0;
	len += scnprintf(buf + len, PAGE_SIZE - len,
			"%08lX%08X%08X%08X",
			priv->error->jiffies,
			priv->error->status,
			priv->error->config, priv->error->elem_len);
	for (i = 0; i < priv->error->elem_len; i++)
		len += scnprintf(buf + len, PAGE_SIZE - len,
				"\n%08X%08X%08X%08X%08X%08X%08X",
				priv->error->elem[i].time,
				priv->error->elem[i].desc,
				priv->error->elem[i].blink1,
				priv->error->elem[i].blink2,
				priv->error->elem[i].link1,
				priv->error->elem[i].link2,
				priv->error->elem[i].data);

	len += scnprintf(buf + len, PAGE_SIZE - len,
			"\n%08X", priv->error->log_len);
	for (i = 0; i < priv->error->log_len; i++)
		len += scnprintf(buf + len, PAGE_SIZE - len,
				"\n%08X%08X%08X",
				priv->error->log[i].time,
				priv->error->log[i].event,
				priv->error->log[i].data);
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}
1328 
/* sysfs "error" (store): any write discards the captured error snapshot
 * so a subsequent firmware error can be recorded. */
static ssize_t error_store(struct device *d,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	kfree(priv->error);
	priv->error = NULL;
	return count;
}

static DEVICE_ATTR_RW(error);
1341 
/* sysfs "cmd_log" (RO): walk the circular host-command log (oldest entry
 * first) and print jiffies/retcode/cmd/len plus a hex dump of each
 * command's parameters, until the buffer page fills up. */
static ssize_t cmd_log_show(struct device *d,
			    struct device_attribute *attr, char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	u32 len = 0, i;
	if (!priv->cmdlog)
		return 0;
	/* Start one past the write position (= oldest entry) and wrap. */
	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
	     (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
	     i = (i + 1) % priv->cmdlog_len) {
		len +=
		    scnprintf(buf + len, PAGE_SIZE - len,
			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
			     priv->cmdlog[i].cmd.len);
		len +=
		    snprintk_buf(buf + len, PAGE_SIZE - len,
				 (u8 *) priv->cmdlog[i].cmd.param,
				 priv->cmdlog[i].cmd.len);
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

static DEVICE_ATTR_RO(cmd_log);
1368 
1369 #ifdef CONFIG_IPW2200_PROMISCUOUS
1370 static void ipw_prom_free(struct ipw_priv *priv);
1371 static int ipw_prom_alloc(struct ipw_priv *priv);
/* sysfs "rtap_iface" (store): "0" unregisters the promiscuous/radiotap
 * network interface (refused while it is up), "1" creates it.  Anything
 * else is rejected with -EINVAL. */
static ssize_t rtap_iface_store(struct device *d,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	int rc = 0;

	if (count < 1)
		return -EINVAL;

	switch (buf[0]) {
	case '0':
		if (!rtap_iface)
			return count;

		if (netif_running(priv->prom_net_dev)) {
			IPW_WARNING("Interface is up.  Cannot unregister.\n");
			return count;
		}

		ipw_prom_free(priv);
		rtap_iface = 0;
		break;

	case '1':
		if (rtap_iface)
			return count;

		rc = ipw_prom_alloc(priv);
		if (!rc)
			rtap_iface = 1;
		break;

	default:
		return -EINVAL;
	}

	if (rc) {
		IPW_ERROR("Failed to register promiscuous network "
			  "device (error %d).\n", rc);
	}

	return count;
}
1416 
/* sysfs "rtap_iface" (show): print the radiotap interface name, or "-1"
 * when no promiscuous interface has been created. */
static ssize_t rtap_iface_show(struct device *d,
			struct device_attribute *attr,
			char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	if (rtap_iface)
		return sprintf(buf, "%s", priv->prom_net_dev->name);
	else {
		buf[0] = '-';
		buf[1] = '1';
		buf[2] = '\0';
		return 3;
	}
}

static DEVICE_ATTR_ADMIN_RW(rtap_iface);
1433 
/* sysfs "rtap_filter" (store): set the promiscuous capture filter mask.
 * Fails with -EPERM unless the rtap interface has been created first. */
static ssize_t rtap_filter_store(struct device *d,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (!priv->prom_priv) {
		IPW_ERROR("Attempting to set filter without "
			  "rtap_iface enabled.\n");
		return -EPERM;
	}

	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);

	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
		       BIT_ARG16(priv->prom_priv->filter));

	return count;
}
1453 
/* sysfs "rtap_filter" (show): print the capture filter mask in hex
 * (0 when no rtap interface exists). */
static ssize_t rtap_filter_show(struct device *d,
			struct device_attribute *attr,
			char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "0x%04X",
		       priv->prom_priv ? priv->prom_priv->filter : 0);
}

static DEVICE_ATTR_ADMIN_RW(rtap_filter);
1464 #endif
1465 
/* sysfs "scan_age" (show): print the scan-result aging threshold. */
static ssize_t scan_age_show(struct device *d, struct device_attribute *attr,
			     char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "%d\n", priv->ieee->scan_age);
}
1472 
/* sysfs "scan_age" (store): parse a decimal or ("x"/"0x"-prefixed) hex
 * value from a bounded local copy of the input and update the scan-result
 * aging threshold.  Invalid input is logged and ignored. */
static ssize_t scan_age_store(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	struct net_device *dev = priv->net_dev;
	char buffer[] = "00000000";
	unsigned long len =
	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
	unsigned long val;
	char *p = buffer;

	IPW_DEBUG_INFO("enter\n");

	/* Copy at most 8 chars and NUL-terminate before parsing. */
	strncpy(buffer, buf, len);
	buffer[len] = 0;

	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
		p++;
		if (p[0] == 'x' || p[0] == 'X')
			p++;
		val = simple_strtoul(p, &p, 16);
	} else
		val = simple_strtoul(p, &p, 10);
	if (p == buffer) {
		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
	} else {
		priv->ieee->scan_age = val;
		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
	}

	IPW_DEBUG_INFO("exit\n");
	return len;
}

static DEVICE_ATTR_RW(scan_age);
1508 
/* sysfs "led" (show): 1 if LED control is enabled, 0 if disabled. */
static ssize_t led_show(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
}
1515 
1516 static ssize_t led_store(struct device *d, struct device_attribute *attr,
1517 			 const char *buf, size_t count)
1518 {
1519 	struct ipw_priv *priv = dev_get_drvdata(d);
1520 
1521 	IPW_DEBUG_INFO("enter\n");
1522 
1523 	if (count == 0)
1524 		return 0;
1525 
1526 	if (*buf == 0) {
1527 		IPW_DEBUG_LED("Disabling LED control.\n");
1528 		priv->config |= CFG_NO_LED;
1529 		ipw_led_shutdown(priv);
1530 	} else {
1531 		IPW_DEBUG_LED("Enabling LED control.\n");
1532 		priv->config &= ~CFG_NO_LED;
1533 		ipw_led_init(priv);
1534 	}
1535 
1536 	IPW_DEBUG_INFO("exit\n");
1537 	return count;
1538 }
1539 
1540 static DEVICE_ATTR_RW(led);
1541 
/* sysfs "status" (RO): print the driver status bitmask in hex. */
static ssize_t status_show(struct device *d,
			   struct device_attribute *attr, char *buf)
{
	struct ipw_priv *p = dev_get_drvdata(d);
	return sprintf(buf, "0x%08x\n", (int)p->status);
}

static DEVICE_ATTR_RO(status);
1550 
/* sysfs "cfg" (RO): print the driver configuration bitmask in hex. */
static ssize_t cfg_show(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct ipw_priv *p = dev_get_drvdata(d);
	return sprintf(buf, "0x%08x\n", (int)p->config);
}

static DEVICE_ATTR_RO(cfg);
1559 
/* sysfs "nic_type" (RO): print the EEPROM-reported NIC type. */
static ssize_t nic_type_show(struct device *d,
			     struct device_attribute *attr, char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "TYPE: %d\n", priv->nic_type);
}

static DEVICE_ATTR_RO(nic_type);
1568 
/* sysfs "ucode_version" (RO): query the microcode version ordinal and
 * print it in hex; empty read if the query fails. */
static ssize_t ucode_version_show(struct device *d,
				  struct device_attribute *attr, char *buf)
{
	u32 len = sizeof(u32), tmp = 0;
	struct ipw_priv *p = dev_get_drvdata(d);

	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
		return 0;

	return sprintf(buf, "0x%08x\n", tmp);
}

static DEVICE_ATTR_RO(ucode_version);
1582 
/* sysfs "rtc" (RO): query the firmware RTC ordinal and print it in hex;
 * empty read if the query fails. */
static ssize_t rtc_show(struct device *d, struct device_attribute *attr,
			char *buf)
{
	u32 len = sizeof(u32), tmp = 0;
	struct ipw_priv *p = dev_get_drvdata(d);

	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
		return 0;

	return sprintf(buf, "0x%08x\n", tmp);
}

static DEVICE_ATTR_RO(rtc);
1596 
1597 /*
1598  * Add a device attribute to view/control the delay between eeprom
1599  * operations.
1600  */
/* sysfs "eeprom_delay" (show): print the delay between EEPROM
 * operations. */
static ssize_t eeprom_delay_show(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	struct ipw_priv *p = dev_get_drvdata(d);
	int n = p->eeprom_delay;
	return sprintf(buf, "%i\n", n);
}
/* sysfs "eeprom_delay" (store): parse an integer; unparseable input
 * leaves the previous delay unchanged (sscanf result is not checked). */
static ssize_t eeprom_delay_store(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ipw_priv *p = dev_get_drvdata(d);
	sscanf(buf, "%i", &p->eeprom_delay);
	return strnlen(buf, count);
}

static DEVICE_ATTR_RW(eeprom_delay);
1618 
/* sysfs "command_event_reg" (show): read and print the internal
 * command/event register. */
static ssize_t command_event_reg_show(struct device *d,
				      struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *p = dev_get_drvdata(d);

	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
	return sprintf(buf, "0x%08x\n", reg);
}
1628 static ssize_t command_event_reg_store(struct device *d,
1629 				       struct device_attribute *attr,
1630 				       const char *buf, size_t count)
1631 {
1632 	u32 reg;
1633 	struct ipw_priv *p = dev_get_drvdata(d);
1634 
1635 	sscanf(buf, "%x", &reg);
1636 	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1637 	return strnlen(buf, count);
1638 }
1639 
1640 static DEVICE_ATTR_RW(command_event_reg);
1641 
/* sysfs "mem_gpio_reg" (show): read and print GPIO register 0x301100. */
static ssize_t mem_gpio_reg_show(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *p = dev_get_drvdata(d);

	reg = ipw_read_reg32(p, 0x301100);
	return sprintf(buf, "0x%08x\n", reg);
}
1651 static ssize_t mem_gpio_reg_store(struct device *d,
1652 				  struct device_attribute *attr,
1653 				  const char *buf, size_t count)
1654 {
1655 	u32 reg;
1656 	struct ipw_priv *p = dev_get_drvdata(d);
1657 
1658 	sscanf(buf, "%x", &reg);
1659 	ipw_write_reg32(p, 0x301100, reg);
1660 	return strnlen(buf, count);
1661 }
1662 
1663 static DEVICE_ATTR_RW(mem_gpio_reg);
1664 
/* sysfs "indirect_dword" (show): read the previously stored indirect
 * address and print its 32-bit value; 0 until an address is stored. */
static ssize_t indirect_dword_show(struct device *d,
				   struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (priv->status & STATUS_INDIRECT_DWORD)
		reg = ipw_read_reg32(priv, priv->indirect_dword);
	else
		reg = 0;

	return sprintf(buf, "0x%08x\n", reg);
}
/* sysfs "indirect_dword" (store): remember a hex indirect address for
 * the next show.  NOTE(review): sscanf result unchecked -- a bad input
 * still sets STATUS_INDIRECT_DWORD with the previous address. */
static ssize_t indirect_dword_store(struct device *d,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->indirect_dword);
	priv->status |= STATUS_INDIRECT_DWORD;
	return strnlen(buf, count);
}

static DEVICE_ATTR_RW(indirect_dword);
1690 
/* sysfs "indirect_byte" (show): read the previously stored indirect
 * address and print its 8-bit value; 0 until an address is stored. */
static ssize_t indirect_byte_show(struct device *d,
				  struct device_attribute *attr, char *buf)
{
	u8 reg = 0;
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (priv->status & STATUS_INDIRECT_BYTE)
		reg = ipw_read_reg8(priv, priv->indirect_byte);
	else
		reg = 0;

	return sprintf(buf, "0x%02x\n", reg);
}
/* sysfs "indirect_byte" (store): remember a hex indirect address for the
 * next show.  NOTE(review): sscanf result unchecked, as above. */
static ssize_t indirect_byte_store(struct device *d,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->indirect_byte);
	priv->status |= STATUS_INDIRECT_BYTE;
	return strnlen(buf, count);
}

static DEVICE_ATTR_RW(indirect_byte);
1716 
/* sysfs "direct_dword" (show): read the previously stored direct address
 * and print its 32-bit value; 0 until an address is stored. */
static ssize_t direct_dword_show(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (priv->status & STATUS_DIRECT_DWORD)
		reg = ipw_read32(priv, priv->direct_dword);
	else
		reg = 0;

	return sprintf(buf, "0x%08x\n", reg);
}
/* sysfs "direct_dword" (store): remember a hex direct address for the
 * next show.  NOTE(review): sscanf result unchecked, as above. */
static ssize_t direct_dword_store(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->direct_dword);
	priv->status |= STATUS_DIRECT_DWORD;
	return strnlen(buf, count);
}

static DEVICE_ATTR_RW(direct_dword);
1742 
1743 static int rf_kill_active(struct ipw_priv *priv)
1744 {
1745 	if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1746 		priv->status |= STATUS_RF_KILL_HW;
1747 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1748 	} else {
1749 		priv->status &= ~STATUS_RF_KILL_HW;
1750 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1751 	}
1752 
1753 	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1754 }
1755 
/* sysfs "rf_kill" (show): report the combined SW/HW kill state. */
static ssize_t rf_kill_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	/* 0 - RF kill not enabled
	   1 - SW based RF kill active (sysfs)
	   2 - HW based RF kill active
	   3 - Both HW and SW based RF kill active */
	struct ipw_priv *priv = dev_get_drvdata(d);
	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
	    (rf_kill_active(priv) ? 0x2 : 0x0);
	return sprintf(buf, "%i\n", val);
}
1768 
/* Set or clear the software RF-kill state.  Returns 0 if the requested
 * state matches the current one (no action), 1 if a transition was
 * started.  Disabling cancels all scan work and schedules the adapter
 * down; enabling brings the adapter up unless the HW switch still holds
 * the radio off, in which case the rf_kill poll timer is re-armed. */
static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
{
	if ((disable_radio ? 1 : 0) ==
	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
		return 0;

	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
			  disable_radio ? "OFF" : "ON");

	if (disable_radio) {
		priv->status |= STATUS_RF_KILL_SW;

		/* Stop all pending scan work before taking the device down */
		cancel_delayed_work(&priv->request_scan);
		cancel_delayed_work(&priv->request_direct_scan);
		cancel_delayed_work(&priv->request_passive_scan);
		cancel_delayed_work(&priv->scan_event);
		schedule_work(&priv->down);
	} else {
		priv->status &= ~STATUS_RF_KILL_SW;
		if (rf_kill_active(priv)) {
			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
					  "disabled by HW switch\n");
			/* Make sure the RF_KILL check timer is running */
			cancel_delayed_work(&priv->rf_kill);
			schedule_delayed_work(&priv->rf_kill,
					      round_jiffies_relative(2 * HZ));
		} else
			schedule_work(&priv->up);
	}

	return 1;
}
1801 
/* sysfs "rf_kill" (store): "1" engages software RF kill, anything else
 * releases it. */
static ssize_t rf_kill_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	ipw_radio_kill_sw(priv, buf[0] == '1');

	return count;
}

static DEVICE_ATTR_RW(rf_kill);
1813 
/* sysfs "speed_scan" (show): print the zero-terminated channel list when
 * speed scan is enabled, or "0" when disabled. */
static ssize_t speed_scan_show(struct device *d, struct device_attribute *attr,
			       char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	int pos = 0, len = 0;
	if (priv->config & CFG_SPEED_SCAN) {
		while (priv->speed_scan[pos] != 0)
			len += sprintf(&buf[len], "%d ",
				       priv->speed_scan[pos++]);
		return len + sprintf(&buf[len], "\n");
	}

	return sprintf(buf, "0\n");
}
1828 
/* sysfs "speed_scan" (store): parse a space-separated channel list,
 * keeping only channels valid for the current geography.  An empty or
 * all-invalid list (or a leading "0") disables speed scan. */
static ssize_t speed_scan_store(struct device *d, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	int channel, pos = 0;
	const char *p = buf;

	/* list of space separated channels to scan, optionally ending with 0 */
	while ((channel = simple_strtol(p, NULL, 0))) {
		if (pos == MAX_SPEED_SCAN - 1) {
			/* List full: terminate and stop parsing. */
			priv->speed_scan[pos] = 0;
			break;
		}

		if (libipw_is_valid_channel(priv->ieee, channel))
			priv->speed_scan[pos++] = channel;
		else
			IPW_WARNING("Skipping invalid channel request: %d\n",
				    channel);
		p = strchr(p, ' ');
		if (!p)
			break;
		while (*p == ' ' || *p == '\t')
			p++;
	}

	if (pos == 0)
		priv->config &= ~CFG_SPEED_SCAN;
	else {
		priv->speed_scan_pos = 0;
		priv->config |= CFG_SPEED_SCAN;
	}

	return count;
}

static DEVICE_ATTR_RW(speed_scan);
1866 
/* sysfs "net_stats" (show): '1' if network statistics mode is on. */
static ssize_t net_stats_show(struct device *d, struct device_attribute *attr,
			      char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
}
1873 
/* sysfs "net_stats" (store): "1" enables network statistics mode,
 * anything else disables it. */
static ssize_t net_stats_store(struct device *d, struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	if (buf[0] == '1')
		priv->config |= CFG_NET_STATS;
	else
		priv->config &= ~CFG_NET_STATS;

	return count;
}

static DEVICE_ATTR_RW(net_stats);
1887 
/* sysfs "channels" (admin RO): list the 2.4GHz and 5.2GHz channels for
 * the current geography with their radar/IBSS/passive/B-only flags. */
static ssize_t channels_show(struct device *d,
			     struct device_attribute *attr,
			     char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
	int len = 0, i;

	len = sprintf(&buf[len],
		      "Displaying %d channels in 2.4Ghz band "
		      "(802.11bg):\n", geo->bg_channels);

	for (i = 0; i < geo->bg_channels; i++) {
		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
			       geo->bg[i].channel,
			       geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
			       " (radar spectrum)" : "",
			       ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
				(geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
			       ? "" : ", IBSS",
			       geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
			       "passive only" : "active/passive",
			       geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
			       "B" : "B/G");
	}

	len += sprintf(&buf[len],
		       "Displaying %d channels in 5.2Ghz band "
		       "(802.11a):\n", geo->a_channels);
	for (i = 0; i < geo->a_channels; i++) {
		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
			       geo->a[i].channel,
			       geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
			       " (radar spectrum)" : "",
			       ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
				(geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
			       ? "" : ", IBSS",
			       geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
			       "passive only" : "active/passive");
	}

	return len;
}

static DEVICE_ATTR_ADMIN_RO(channels);
1933 
/* Send a wireless-extensions SIOCGIWAP event to userspace: the current
 * BSSID when associated, or the all-zero address on disassociation. */
static void notify_wx_assoc_event(struct ipw_priv *priv)
{
	union iwreq_data wrqu;
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	if (priv->status & STATUS_ASSOCIATED)
		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
	else
		eth_zero_addr(wrqu.ap_addr.sa_data);
	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
1944 
1945 static void ipw_irq_tasklet(struct tasklet_struct *t)
1946 {
1947 	struct ipw_priv *priv = from_tasklet(priv, t, irq_tasklet);
1948 	u32 inta, inta_mask, handled = 0;
1949 	unsigned long flags;
1950 
1951 	spin_lock_irqsave(&priv->irq_lock, flags);
1952 
1953 	inta = ipw_read32(priv, IPW_INTA_RW);
1954 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1955 
1956 	if (inta == 0xFFFFFFFF) {
1957 		/* Hardware disappeared */
1958 		IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1959 		/* Only handle the cached INTA values */
1960 		inta = 0;
1961 	}
1962 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
1963 
1964 	/* Add any cached INTA values that need to be handled */
1965 	inta |= priv->isr_inta;
1966 
1967 	spin_unlock_irqrestore(&priv->irq_lock, flags);
1968 
1969 	spin_lock_irqsave(&priv->lock, flags);
1970 
1971 	/* handle all the justifications for the interrupt */
1972 	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1973 		ipw_rx(priv);
1974 		handled |= IPW_INTA_BIT_RX_TRANSFER;
1975 	}
1976 
1977 	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1978 		IPW_DEBUG_HC("Command completed.\n");
1979 		ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1980 		priv->status &= ~STATUS_HCMD_ACTIVE;
1981 		wake_up_interruptible(&priv->wait_command_queue);
1982 		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1983 	}
1984 
1985 	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1986 		IPW_DEBUG_TX("TX_QUEUE_1\n");
1987 		ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1988 		handled |= IPW_INTA_BIT_TX_QUEUE_1;
1989 	}
1990 
1991 	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1992 		IPW_DEBUG_TX("TX_QUEUE_2\n");
1993 		ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1994 		handled |= IPW_INTA_BIT_TX_QUEUE_2;
1995 	}
1996 
1997 	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1998 		IPW_DEBUG_TX("TX_QUEUE_3\n");
1999 		ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2000 		handled |= IPW_INTA_BIT_TX_QUEUE_3;
2001 	}
2002 
2003 	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2004 		IPW_DEBUG_TX("TX_QUEUE_4\n");
2005 		ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2006 		handled |= IPW_INTA_BIT_TX_QUEUE_4;
2007 	}
2008 
2009 	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2010 		IPW_WARNING("STATUS_CHANGE\n");
2011 		handled |= IPW_INTA_BIT_STATUS_CHANGE;
2012 	}
2013 
2014 	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2015 		IPW_WARNING("TX_PERIOD_EXPIRED\n");
2016 		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2017 	}
2018 
2019 	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2020 		IPW_WARNING("HOST_CMD_DONE\n");
2021 		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2022 	}
2023 
2024 	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2025 		IPW_WARNING("FW_INITIALIZATION_DONE\n");
2026 		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2027 	}
2028 
2029 	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2030 		IPW_WARNING("PHY_OFF_DONE\n");
2031 		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2032 	}
2033 
2034 	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2035 		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2036 		priv->status |= STATUS_RF_KILL_HW;
2037 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2038 		wake_up_interruptible(&priv->wait_command_queue);
2039 		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2040 		cancel_delayed_work(&priv->request_scan);
2041 		cancel_delayed_work(&priv->request_direct_scan);
2042 		cancel_delayed_work(&priv->request_passive_scan);
2043 		cancel_delayed_work(&priv->scan_event);
2044 		schedule_work(&priv->link_down);
2045 		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2046 		handled |= IPW_INTA_BIT_RF_KILL_DONE;
2047 	}
2048 
2049 	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2050 		IPW_WARNING("Firmware error detected.  Restarting.\n");
2051 		if (priv->error) {
2052 			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2053 			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2054 				struct ipw_fw_error *error =
2055 				    ipw_alloc_error_log(priv);
2056 				ipw_dump_error_log(priv, error);
2057 				kfree(error);
2058 			}
2059 		} else {
2060 			priv->error = ipw_alloc_error_log(priv);
2061 			if (priv->error)
2062 				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2063 			else
2064 				IPW_DEBUG_FW("Error allocating sysfs 'error' "
2065 					     "log.\n");
2066 			if (ipw_debug_level & IPW_DL_FW_ERRORS)
2067 				ipw_dump_error_log(priv, priv->error);
2068 		}
2069 
2070 		/* XXX: If hardware encryption is for WPA/WPA2,
2071 		 * we have to notify the supplicant. */
2072 		if (priv->ieee->sec.encrypt) {
2073 			priv->status &= ~STATUS_ASSOCIATED;
2074 			notify_wx_assoc_event(priv);
2075 		}
2076 
2077 		/* Keep the restart process from trying to send host
2078 		 * commands by clearing the INIT status bit */
2079 		priv->status &= ~STATUS_INIT;
2080 
2081 		/* Cancel currently queued command. */
2082 		priv->status &= ~STATUS_HCMD_ACTIVE;
2083 		wake_up_interruptible(&priv->wait_command_queue);
2084 
2085 		schedule_work(&priv->adapter_restart);
2086 		handled |= IPW_INTA_BIT_FATAL_ERROR;
2087 	}
2088 
2089 	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2090 		IPW_ERROR("Parity error\n");
2091 		handled |= IPW_INTA_BIT_PARITY_ERROR;
2092 	}
2093 
2094 	if (handled != inta) {
2095 		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2096 	}
2097 
2098 	spin_unlock_irqrestore(&priv->lock, flags);
2099 
2100 	/* enable all interrupts */
2101 	ipw_enable_interrupts(priv);
2102 }
2103 
/* Expand to a case label returning the stringified command name. */
#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
/* Map a host-command opcode to its symbolic name for debug/error logs.
 * Returns the static string "UNKNOWN" for unrecognized opcodes; the
 * returned pointer is always a string literal and must not be freed. */
static char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IPW_CMD(HOST_COMPLETE);
		IPW_CMD(POWER_DOWN);
		IPW_CMD(SYSTEM_CONFIG);
		IPW_CMD(MULTICAST_ADDRESS);
		IPW_CMD(SSID);
		IPW_CMD(ADAPTER_ADDRESS);
		IPW_CMD(PORT_TYPE);
		IPW_CMD(RTS_THRESHOLD);
		IPW_CMD(FRAG_THRESHOLD);
		IPW_CMD(POWER_MODE);
		IPW_CMD(WEP_KEY);
		IPW_CMD(TGI_TX_KEY);
		IPW_CMD(SCAN_REQUEST);
		IPW_CMD(SCAN_REQUEST_EXT);
		IPW_CMD(ASSOCIATE);
		IPW_CMD(SUPPORTED_RATES);
		IPW_CMD(SCAN_ABORT);
		IPW_CMD(TX_FLUSH);
		IPW_CMD(QOS_PARAMETERS);
		IPW_CMD(DINO_CONFIG);
		IPW_CMD(RSN_CAPABILITIES);
		IPW_CMD(RX_KEY);
		IPW_CMD(CARD_DISABLE);
		IPW_CMD(SEED_NUMBER);
		IPW_CMD(TX_POWER);
		IPW_CMD(COUNTRY_INFO);
		IPW_CMD(AIRONET_INFO);
		IPW_CMD(AP_TX_POWER);
		IPW_CMD(CCKM_INFO);
		IPW_CMD(CCX_VER_INFO);
		IPW_CMD(SET_CALIBRATION);
		IPW_CMD(SENSITIVITY_CALIB);
		IPW_CMD(RETRY_LIMIT);
		IPW_CMD(IPW_PRE_POWER_DOWN);
		IPW_CMD(VAP_BEACON_TEMPLATE);
		IPW_CMD(VAP_DTIM_PERIOD);
		IPW_CMD(EXT_SUPPORTED_RATES);
		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
		IPW_CMD(VAP_QUIET_INTERVALS);
		IPW_CMD(VAP_CHANNEL_SWITCH);
		IPW_CMD(VAP_MANDATORY_CHANNELS);
		IPW_CMD(VAP_CELL_PWR_LIMIT);
		IPW_CMD(VAP_CF_PARAM_SET);
		IPW_CMD(VAP_SET_BEACONING_STATE);
		IPW_CMD(MEASUREMENT);
		IPW_CMD(POWER_CAPABILITY);
		IPW_CMD(SUPPORTED_CHANNELS);
		IPW_CMD(TPC_REPORT);
		IPW_CMD(WME_INFO);
		IPW_CMD(PRODUCTION_COMMAND);
	default:
		return "UNKNOWN";
	}
}
2162 
2163 #define HOST_COMPLETE_TIMEOUT HZ
2164 
/*
 * Send a host command to the firmware and wait (interruptibly, up to
 * HOST_COMPLETE_TIMEOUT) for its completion interrupt.
 *
 * Only one host command may be in flight at a time; concurrency is
 * gated by the STATUS_HCMD_ACTIVE bit under priv->lock.  Completion is
 * signalled by the IPW_INTA_BIT_TX_CMD_QUEUE interrupt handler, which
 * clears STATUS_HCMD_ACTIVE and wakes priv->wait_command_queue.
 *
 * Returns 0 on success, -EAGAIN if a command is already pending, and
 * -EIO on timeout or when RF-kill aborts the exchange.  The outcome is
 * also recorded in the cmdlog ring buffer when cmdlog is enabled.
 */
static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
{
	int rc = 0;
	unsigned long flags;
	unsigned long now, end;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->status & STATUS_HCMD_ACTIVE) {
		IPW_ERROR("Failed to send %s: Already sending a command.\n",
			  get_cmd_string(cmd->cmd));
		spin_unlock_irqrestore(&priv->lock, flags);
		return -EAGAIN;
	}

	priv->status |= STATUS_HCMD_ACTIVE;

	/* Record the outgoing command in the debug ring buffer; the
	 * retcode slot is filled in at exit once the result is known. */
	if (priv->cmdlog) {
		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
		       cmd->len);
		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
	}

	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
		     priv->status);

#ifndef DEBUG_CMD_WEP_KEY
	/* Never dump WEP key material to the debug log. */
	if (cmd->cmd == IPW_CMD_WEP_KEY)
		IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
	else
#endif
		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);

	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
	if (rc) {
		priv->status &= ~STATUS_HCMD_ACTIVE;
		IPW_ERROR("Failed to send %s: Reason %d\n",
			  get_cmd_string(cmd->cmd), rc);
		spin_unlock_irqrestore(&priv->lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Wait for completion; if interrupted by a signal before the
	 * deadline, resume waiting for the remaining time. */
	now = jiffies;
	end = now + HOST_COMPLETE_TIMEOUT;
again:
	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
					      !(priv->
						status & STATUS_HCMD_ACTIVE),
					      end - now);
	if (rc < 0) {
		now = jiffies;
		if (time_before(now, end))
			goto again;
		rc = 0;		/* deadline passed: treat as timeout below */
	}

	if (rc == 0) {
		/* Re-check under the lock: the completion interrupt may
		 * have fired between the wait expiring and this point. */
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->status & STATUS_HCMD_ACTIVE) {
			IPW_ERROR("Failed to send %s: Command timed out.\n",
				  get_cmd_string(cmd->cmd));
			priv->status &= ~STATUS_HCMD_ACTIVE;
			spin_unlock_irqrestore(&priv->lock, flags);
			rc = -EIO;
			goto exit;
		}
		spin_unlock_irqrestore(&priv->lock, flags);
	} else
		rc = 0;

	if (priv->status & STATUS_RF_KILL_HW) {
		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
			  get_cmd_string(cmd->cmd));
		rc = -EIO;
		goto exit;
	}

      exit:
	if (priv->cmdlog) {
		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
		priv->cmdlog_pos %= priv->cmdlog_len;
	}
	return rc;
}
2253 
2254 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2255 {
2256 	struct host_cmd cmd = {
2257 		.cmd = command,
2258 	};
2259 
2260 	return __ipw_send_cmd(priv, &cmd);
2261 }
2262 
2263 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2264 			    const void *data)
2265 {
2266 	struct host_cmd cmd = {
2267 		.cmd = command,
2268 		.len = len,
2269 		.param = data,
2270 	};
2271 
2272 	return __ipw_send_cmd(priv, &cmd);
2273 }
2274 
2275 static int ipw_send_host_complete(struct ipw_priv *priv)
2276 {
2277 	if (!priv) {
2278 		IPW_ERROR("Invalid args\n");
2279 		return -1;
2280 	}
2281 
2282 	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2283 }
2284 
/* Push the driver's cached system configuration (priv->sys_config)
 * down to the firmware. */
static int ipw_send_system_config(struct ipw_priv *priv)
{
	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
				sizeof(priv->sys_config),
				&priv->sys_config);
}
2291 
2292 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2293 {
2294 	if (!priv || !ssid) {
2295 		IPW_ERROR("Invalid args\n");
2296 		return -1;
2297 	}
2298 
2299 	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2300 				ssid);
2301 }
2302 
2303 static int ipw_send_adapter_address(struct ipw_priv *priv, const u8 * mac)
2304 {
2305 	if (!priv || !mac) {
2306 		IPW_ERROR("Invalid args\n");
2307 		return -1;
2308 	}
2309 
2310 	IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2311 		       priv->net_dev->name, mac);
2312 
2313 	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2314 }
2315 
2316 static void ipw_adapter_restart(void *adapter)
2317 {
2318 	struct ipw_priv *priv = adapter;
2319 
2320 	if (priv->status & STATUS_RF_KILL_MASK)
2321 		return;
2322 
2323 	ipw_down(priv);
2324 
2325 	if (priv->assoc_network &&
2326 	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2327 		ipw_remove_current_network(priv);
2328 
2329 	if (ipw_up(priv)) {
2330 		IPW_ERROR("Failed to up device\n");
2331 		return;
2332 	}
2333 }
2334 
2335 static void ipw_bg_adapter_restart(struct work_struct *work)
2336 {
2337 	struct ipw_priv *priv =
2338 		container_of(work, struct ipw_priv, adapter_restart);
2339 	mutex_lock(&priv->mutex);
2340 	ipw_adapter_restart(priv);
2341 	mutex_unlock(&priv->mutex);
2342 }
2343 
2344 static void ipw_abort_scan(struct ipw_priv *priv);
2345 
2346 #define IPW_SCAN_CHECK_WATCHDOG	(5 * HZ)
2347 
2348 static void ipw_scan_check(void *data)
2349 {
2350 	struct ipw_priv *priv = data;
2351 
2352 	if (priv->status & STATUS_SCAN_ABORTING) {
2353 		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2354 			       "adapter after (%dms).\n",
2355 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2356 		schedule_work(&priv->adapter_restart);
2357 	} else if (priv->status & STATUS_SCANNING) {
2358 		IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2359 			       "after (%dms).\n",
2360 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2361 		ipw_abort_scan(priv);
2362 		schedule_delayed_work(&priv->scan_check, HZ);
2363 	}
2364 }
2365 
2366 static void ipw_bg_scan_check(struct work_struct *work)
2367 {
2368 	struct ipw_priv *priv =
2369 		container_of(work, struct ipw_priv, scan_check.work);
2370 	mutex_lock(&priv->mutex);
2371 	ipw_scan_check(priv);
2372 	mutex_unlock(&priv->mutex);
2373 }
2374 
/* Submit an extended scan request structure to the firmware. */
static int ipw_send_scan_request_ext(struct ipw_priv *priv,
				     struct ipw_scan_request_ext *request)
{
	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
				sizeof(*request), request);
}
2381 
2382 static int ipw_send_scan_abort(struct ipw_priv *priv)
2383 {
2384 	if (!priv) {
2385 		IPW_ERROR("Invalid args\n");
2386 		return -1;
2387 	}
2388 
2389 	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2390 }
2391 
2392 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2393 {
2394 	struct ipw_sensitivity_calib calib = {
2395 		.beacon_rssi_raw = cpu_to_le16(sens),
2396 	};
2397 
2398 	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2399 				&calib);
2400 }
2401 
2402 static int ipw_send_associate(struct ipw_priv *priv,
2403 			      struct ipw_associate *associate)
2404 {
2405 	if (!priv || !associate) {
2406 		IPW_ERROR("Invalid args\n");
2407 		return -1;
2408 	}
2409 
2410 	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2411 				associate);
2412 }
2413 
2414 static int ipw_send_supported_rates(struct ipw_priv *priv,
2415 				    struct ipw_supported_rates *rates)
2416 {
2417 	if (!priv || !rates) {
2418 		IPW_ERROR("Invalid args\n");
2419 		return -1;
2420 	}
2421 
2422 	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2423 				rates);
2424 }
2425 
2426 static int ipw_set_random_seed(struct ipw_priv *priv)
2427 {
2428 	u32 val;
2429 
2430 	if (!priv) {
2431 		IPW_ERROR("Invalid args\n");
2432 		return -1;
2433 	}
2434 
2435 	get_random_bytes(&val, sizeof(val));
2436 
2437 	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2438 }
2439 
2440 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2441 {
2442 	__le32 v = cpu_to_le32(phy_off);
2443 	if (!priv) {
2444 		IPW_ERROR("Invalid args\n");
2445 		return -1;
2446 	}
2447 
2448 	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2449 }
2450 
2451 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2452 {
2453 	if (!priv || !power) {
2454 		IPW_ERROR("Invalid args\n");
2455 		return -1;
2456 	}
2457 
2458 	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2459 }
2460 
/* Program transmit power for every channel in the current regulatory
 * geography.  Each channel gets the lesser of the regulatory maximum
 * and the user-configured priv->tx_power (a max_power of 0 means "no
 * regulatory limit recorded", in which case priv->tx_power is used
 * as-is).  Returns 0 on success, -EIO if any band fails to program. */
static int ipw_set_tx_power(struct ipw_priv *priv)
{
	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
	struct ipw_tx_power tx_power;
	s8 max_power;
	int i;

	memset(&tx_power, 0, sizeof(tx_power));

	/* configure device for 'G' band */
	tx_power.ieee_mode = IPW_G_MODE;
	tx_power.num_channels = geo->bg_channels;
	for (i = 0; i < geo->bg_channels; i++) {
		max_power = geo->bg[i].max_power;
		tx_power.channels_tx_power[i].channel_number =
		    geo->bg[i].channel;
		tx_power.channels_tx_power[i].tx_power = max_power ?
		    min(max_power, priv->tx_power) : priv->tx_power;
	}
	if (ipw_send_tx_power(priv, &tx_power))
		return -EIO;

	/* configure device to also handle 'B' band -- reuses the same
	 * bg channel table, only the ieee_mode field changes */
	tx_power.ieee_mode = IPW_B_MODE;
	if (ipw_send_tx_power(priv, &tx_power))
		return -EIO;

	/* configure device to also handle 'A' band */
	if (priv->ieee->abg_true) {
		tx_power.ieee_mode = IPW_A_MODE;
		tx_power.num_channels = geo->a_channels;
		for (i = 0; i < tx_power.num_channels; i++) {
			max_power = geo->a[i].max_power;
			tx_power.channels_tx_power[i].channel_number =
			    geo->a[i].channel;
			tx_power.channels_tx_power[i].tx_power = max_power ?
			    min(max_power, priv->tx_power) : priv->tx_power;
		}
		if (ipw_send_tx_power(priv, &tx_power))
			return -EIO;
	}
	return 0;
}
2504 
2505 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2506 {
2507 	struct ipw_rts_threshold rts_threshold = {
2508 		.rts_threshold = cpu_to_le16(rts),
2509 	};
2510 
2511 	if (!priv) {
2512 		IPW_ERROR("Invalid args\n");
2513 		return -1;
2514 	}
2515 
2516 	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2517 				sizeof(rts_threshold), &rts_threshold);
2518 }
2519 
2520 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2521 {
2522 	struct ipw_frag_threshold frag_threshold = {
2523 		.frag_threshold = cpu_to_le16(frag),
2524 	};
2525 
2526 	if (!priv) {
2527 		IPW_ERROR("Invalid args\n");
2528 		return -1;
2529 	}
2530 
2531 	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2532 				sizeof(frag_threshold), &frag_threshold);
2533 }
2534 
2535 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2536 {
2537 	__le32 param;
2538 
2539 	if (!priv) {
2540 		IPW_ERROR("Invalid args\n");
2541 		return -1;
2542 	}
2543 
2544 	/* If on battery, set to 3, if AC set to CAM, else user
2545 	 * level */
2546 	switch (mode) {
2547 	case IPW_POWER_BATTERY:
2548 		param = cpu_to_le32(IPW_POWER_INDEX_3);
2549 		break;
2550 	case IPW_POWER_AC:
2551 		param = cpu_to_le32(IPW_POWER_MODE_CAM);
2552 		break;
2553 	default:
2554 		param = cpu_to_le32(mode);
2555 		break;
2556 	}
2557 
2558 	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2559 				&param);
2560 }
2561 
2562 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2563 {
2564 	struct ipw_retry_limit retry_limit = {
2565 		.short_retry_limit = slimit,
2566 		.long_retry_limit = llimit
2567 	};
2568 
2569 	if (!priv) {
2570 		IPW_ERROR("Invalid args\n");
2571 		return -1;
2572 	}
2573 
2574 	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2575 				&retry_limit);
2576 }
2577 
2578 /*
2579  * The IPW device contains a Microwire compatible EEPROM that stores
2580  * various data like the MAC address.  Usually the firmware has exclusive
2581  * access to the eeprom, but during device initialization (before the
2582  * device driver has sent the HostComplete command to the firmware) the
2583  * device driver has read access to the EEPROM by way of indirect addressing
2584  * through a couple of memory mapped registers.
2585  *
2586  * The following is a simplified implementation for pulling data out of the
2587  * eeprom, along with some helper functions to find information in
2588  * the per device private data's copy of the eeprom.
2589  *
2590  * NOTE: To better understand how these functions work (i.e what is a chip
2591  *       select and why do have to keep driving the eeprom clock?), read
2592  *       just about any data sheet for a Microwire compatible EEPROM.
2593  */
2594 
2595 /* write a 32 bit value into the indirect accessor register */
/* write a 32 bit value into the indirect accessor register; each write
 * drives the Microwire CS/SK/DI lines encoded in @data */
static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
{
	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);

	/* the eeprom requires some time to complete the operation */
	udelay(p->eeprom_delay);
}
2603 
2604 /* perform a chip select operation */
/* perform a chip select operation: raise CS and pulse the clock once
 * to get the EEPROM's attention (sequence order is significant) */
static void eeprom_cs(struct ipw_priv *priv)
{
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
}
2612 
/* deassert chip select: drop CS and give one trailing clock pulse
 * (the original comment wrongly said "perform a chip select") */
static void eeprom_disable_cs(struct ipw_priv *priv)
{
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_SK);
}
2620 
/* push a single bit down to the eeprom: present the bit on DI with CS
 * held, then clock it in with a rising edge on SK */
static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
{
	int d = (bit ? EEPROM_BIT_DI : 0);
	eeprom_write_reg(p, EEPROM_BIT_CS | d);
	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
}
2628 
/* push an opcode followed by an address down to the eeprom:
 * start bit (1), then the 2-bit opcode MSB first, then an
 * 8-bit address MSB first */
static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
{
	int i;

	eeprom_cs(priv);
	eeprom_write_bit(priv, 1);
	eeprom_write_bit(priv, op & 2);
	eeprom_write_bit(priv, op & 1);
	for (i = 7; i >= 0; i--) {
		eeprom_write_bit(priv, addr & (1 << i));
	}
}
2642 
/* pull 16 bits off the eeprom, one bit at a time: issue a READ for
 * @addr, then clock out 16 data bits MSB first on the DO line */
static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
{
	int i;
	u16 r = 0;

	/* Send READ Opcode */
	eeprom_op(priv, EEPROM_CMD_READ, addr);

	/* Send dummy bit */
	eeprom_write_reg(priv, EEPROM_BIT_CS);

	/* Read the byte off the eeprom one bit at a time: pulse SK,
	 * then sample DO from the access register */
	for (i = 0; i < 16; i++) {
		u32 data = 0;
		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
		eeprom_write_reg(priv, EEPROM_BIT_CS);
		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
	}

	/* Send another dummy bit */
	eeprom_write_reg(priv, 0);
	eeprom_disable_cs(priv);

	return r;
}
2670 
/* helper function for pulling the mac address out of the private */
/* data's copy of the eeprom data; @mac must hold ETH_ALEN bytes   */
static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
{
	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
}
2677 
/* Snapshot the whole EEPROM (128 16-bit words = 256 bytes) into
 * priv->eeprom, stored little-endian to match the on-wire layout. */
static void ipw_read_eeprom(struct ipw_priv *priv)
{
	int i;
	__le16 *eeprom = (__le16 *) priv->eeprom;

	IPW_DEBUG_TRACE(">>\n");

	/* read entire contents of eeprom into private buffer */
	for (i = 0; i < 128; i++)
		eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));

	IPW_DEBUG_TRACE("<<\n");
}
2691 
2692 /*
2693  * Either the device driver (i.e. the host) or the firmware can
2694  * load eeprom data into the designated region in SRAM.  If neither
2695  * happens then the FW will shutdown with a fatal error.
2696  *
 * In order to signal the FW to load the EEPROM itself, the
 * EEPROM_LOAD_DISABLE region of shared SRAM needs to be set non-zero.
2699  */
/* Initialize the EEPROM image region of shared SRAM: either copy the
 * host's cached EEPROM data into SRAM, or (if the cached copy looks
 * invalid) tell the firmware to load the EEPROM itself. */
static void ipw_eeprom_init_sram(struct ipw_priv *priv)
{
	int i;

	IPW_DEBUG_TRACE(">>\n");

	/*
	   If the data looks correct, then copy it to our private
	   copy.  Otherwise let the firmware know to perform the operation
	   on its own.
	 */
	if (priv->eeprom[EEPROM_VERSION] != 0) {
		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");

		/* write the eeprom data to sram */
		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);

		/* Do not load eeprom data on fatal error or suspend */
		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
	} else {
		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");

		/* Load eeprom data on fatal error or suspend */
		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
	}

	IPW_DEBUG_TRACE("<<\n");
}
2729 
2730 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2731 {
2732 	count >>= 2;
2733 	if (!count)
2734 		return;
2735 	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2736 	while (count--)
2737 		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2738 }
2739 
/* Clear the shared-SRAM DMA command-block array so no stale
 * descriptors are executed when the engine is (re)started. */
static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
{
	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
			CB_NUMBER_OF_ELEMENTS_SMALL *
			sizeof(struct command_block));
}
2746 
/* Prepare the firmware-load DMA engine: clear the command blocks and
 * point the engine at their base address.  No transfers are started
 * yet (that happens in ipw_fw_dma_kick()).  Always returns 0. */
static int ipw_fw_dma_enable(struct ipw_priv *priv)
{				/* start dma engine but no transfers yet */

	IPW_DEBUG_FW(">> :\n");

	/* Start the dma */
	ipw_fw_dma_reset_command_blocks(priv);

	/* Write CB base address */
	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);

	IPW_DEBUG_FW("<< :\n");
	return 0;
}
2761 
2762 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2763 {
2764 	u32 control = 0;
2765 
2766 	IPW_DEBUG_FW(">> :\n");
2767 
2768 	/* set the Stop and Abort bit */
2769 	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2770 	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2771 	priv->sram_desc.last_cb_index = 0;
2772 
2773 	IPW_DEBUG_FW("<<\n");
2774 }
2775 
2776 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2777 					  struct command_block *cb)
2778 {
2779 	u32 address =
2780 	    IPW_SHARED_SRAM_DMA_CONTROL +
2781 	    (sizeof(struct command_block) * index);
2782 	IPW_DEBUG_FW(">> :\n");
2783 
2784 	ipw_write_indirect(priv, address, (u8 *) cb,
2785 			   (int)sizeof(struct command_block));
2786 
2787 	IPW_DEBUG_FW("<< :\n");
2788 	return 0;
2789 
2790 }
2791 
/* Flush all staged command blocks to shared SRAM, take the bus master
 * out of reset, and set the DMA engine's start bit.  Always returns
 * 0; completion is polled separately in ipw_fw_dma_wait(). */
static int ipw_fw_dma_kick(struct ipw_priv *priv)
{
	u32 control = 0;
	u32 index = 0;

	IPW_DEBUG_FW(">> :\n");

	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
		ipw_fw_dma_write_command_block(priv, index,
					       &priv->sram_desc.cb_list[index]);

	/* Enable the DMA in the CSR register */
	ipw_clear_bit(priv, IPW_RESET_REG,
		      IPW_RESET_REG_MASTER_DISABLED |
		      IPW_RESET_REG_STOP_MASTER);

	/* Set the Start bit. */
	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);

	IPW_DEBUG_FW("<< :\n");
	return 0;
}
2815 
2816 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2817 {
2818 	u32 address;
2819 	u32 register_value = 0;
2820 	u32 cb_fields_address = 0;
2821 
2822 	IPW_DEBUG_FW(">> :\n");
2823 	address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2824 	IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2825 
2826 	/* Read the DMA Controlor register */
2827 	register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2828 	IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2829 
2830 	/* Print the CB values */
2831 	cb_fields_address = address;
2832 	register_value = ipw_read_reg32(priv, cb_fields_address);
2833 	IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2834 
2835 	cb_fields_address += sizeof(u32);
2836 	register_value = ipw_read_reg32(priv, cb_fields_address);
2837 	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2838 
2839 	cb_fields_address += sizeof(u32);
2840 	register_value = ipw_read_reg32(priv, cb_fields_address);
2841 	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2842 			  register_value);
2843 
2844 	cb_fields_address += sizeof(u32);
2845 	register_value = ipw_read_reg32(priv, cb_fields_address);
2846 	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2847 
2848 	IPW_DEBUG_FW(">> :\n");
2849 }
2850 
2851 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2852 {
2853 	u32 current_cb_address = 0;
2854 	u32 current_cb_index = 0;
2855 
2856 	IPW_DEBUG_FW("<< :\n");
2857 	current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2858 
2859 	current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2860 	    sizeof(struct command_block);
2861 
2862 	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2863 			  current_cb_index, current_cb_address);
2864 
2865 	IPW_DEBUG_FW(">> :\n");
2866 	return current_cb_index;
2867 
2868 }
2869 
/* Stage one DMA command block (a single transfer of @length bytes
 * from @src_address to @dest_address) in the driver's descriptor
 * list.  The block is only copied to SRAM later, by
 * ipw_fw_dma_kick().  Returns 0 on success, -1 if the command-block
 * array is full. */
static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
					u32 src_address,
					u32 dest_address,
					u32 length,
					int interrupt_enabled, int is_last)
{

	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
	    CB_DEST_SIZE_LONG;
	struct command_block *cb;
	u32 last_cb_element = 0;

	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
			  src_address, dest_address, length);

	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
		return -1;

	last_cb_element = priv->sram_desc.last_cb_index;
	cb = &priv->sram_desc.cb_list[last_cb_element];
	priv->sram_desc.last_cb_index++;

	/* Calculate the new CB control word */
	if (interrupt_enabled)
		control |= CB_INT_ENABLED;

	if (is_last)
		control |= CB_LAST_VALID;

	/* the transfer length is carried in the low bits of control */
	control |= length;

	/* Calculate the CB Element's checksum value */
	cb->status = control ^ src_address ^ dest_address;

	/* Copy the Source and Destination addresses */
	cb->dest_addr = dest_address;
	cb->source_addr = src_address;

	/* Copy the Control Word last -- the CB_VALID bit in it marks
	 * the descriptor as ready, so it must be written after the
	 * other fields are in place */
	cb->control = control;

	return 0;
}
2914 
2915 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2916 				 int nr, u32 dest_address, u32 len)
2917 {
2918 	int ret, i;
2919 	u32 size;
2920 
2921 	IPW_DEBUG_FW(">>\n");
2922 	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2923 			  nr, dest_address, len);
2924 
2925 	for (i = 0; i < nr; i++) {
2926 		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2927 		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2928 						   dest_address +
2929 						   i * CB_MAX_LENGTH, size,
2930 						   0, 0);
2931 		if (ret) {
2932 			IPW_DEBUG_FW_INFO(": Failed\n");
2933 			return -1;
2934 		} else
2935 			IPW_DEBUG_FW_INFO(": Added new cb\n");
2936 	}
2937 
2938 	IPW_DEBUG_FW("<<\n");
2939 	return 0;
2940 }
2941 
/* Busy-wait for the DMA engine to work through all staged command
 * blocks, polling the current-CB index every 50us.  The watchdog only
 * counts polls during which the engine made no forward progress; 400
 * stalled polls (~20ms) is treated as a timeout.  The engine is
 * stopped on both success and failure.  Returns 0 on success, -1 on
 * timeout. */
static int ipw_fw_dma_wait(struct ipw_priv *priv)
{
	u32 current_index = 0, previous_index;
	u32 watchdog = 0;

	IPW_DEBUG_FW(">> :\n");

	current_index = ipw_fw_dma_command_block_index(priv);
	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
			  (int)priv->sram_desc.last_cb_index);

	while (current_index < priv->sram_desc.last_cb_index) {
		udelay(50);
		previous_index = current_index;
		current_index = ipw_fw_dma_command_block_index(priv);

		/* progress was made: reset the stall counter */
		if (previous_index < current_index) {
			watchdog = 0;
			continue;
		}
		if (++watchdog > 400) {
			IPW_DEBUG_FW_INFO("Timeout\n");
			ipw_fw_dma_dump_command_block(priv);
			ipw_fw_dma_abort(priv);
			return -1;
		}
	}

	ipw_fw_dma_abort(priv);

	/*Disable the DMA in the CSR register */
	ipw_set_bit(priv, IPW_RESET_REG,
		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);

	IPW_DEBUG_FW("<< dmaWaitSync\n");
	return 0;
}
2979 
2980 static void ipw_remove_current_network(struct ipw_priv *priv)
2981 {
2982 	struct list_head *element, *safe;
2983 	struct libipw_network *network = NULL;
2984 	unsigned long flags;
2985 
2986 	spin_lock_irqsave(&priv->ieee->lock, flags);
2987 	list_for_each_safe(element, safe, &priv->ieee->network_list) {
2988 		network = list_entry(element, struct libipw_network, list);
2989 		if (ether_addr_equal(network->bssid, priv->bssid)) {
2990 			list_del(element);
2991 			list_add_tail(&network->list,
2992 				      &priv->ieee->network_free_list);
2993 		}
2994 	}
2995 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
2996 }
2997 
/* Poll @addr until every bit in @mask reads back set.  @timeout is in
 * msec, attempted in 10-msec quanta.  Returns the elapsed time in
 * msec on success, -ETIME on timeout.  Note the do-while: the
 * register is sampled at least once even if timeout <= 0. */
static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
			       int timeout)
{
	int i = 0;

	do {
		if ((ipw_read32(priv, addr) & mask) == mask)
			return i;
		mdelay(10);
		i += 10;
	} while (i < timeout);

	return -ETIME;
}
3013 
3014 /* These functions load the firmware and micro code for the operation of
3015  * the ipw hardware.  It assumes the buffer has all the bits for the
3016  * image and the caller is handling the memory allocation and clean up.
3017  */
3018 
/* Request that the bus master stop and wait (up to 100ms) for the
 * hardware to confirm.  Returns the elapsed time in msec on success,
 * -1 if the master never reported disabled. */
static int ipw_stop_master(struct ipw_priv *priv)
{
	int rc;

	IPW_DEBUG_TRACE(">>\n");
	/* stop master. typical delay - 0 */
	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);

	/* timeout is in msec, polled in 10-msec quanta */
	rc = ipw_poll_bit(priv, IPW_RESET_REG,
			  IPW_RESET_REG_MASTER_DISABLED, 100);
	if (rc < 0) {
		IPW_ERROR("wait for stop master failed after 100ms\n");
		return -1;
	}

	IPW_DEBUG_INFO("stop master %dms\n", rc);

	return rc;
}
3039 
/* Release the embedded (ARC) processor from reset, with generous
 * settle delays on either side of the register write. */
static void ipw_arc_release(struct ipw_priv *priv)
{
	IPW_DEBUG_TRACE(">>\n");
	mdelay(5);

	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);

	/* no one knows timing, for safety add some delay */
	mdelay(5);
}
3050 
/* Header preceding each chunk in a firmware image: the device-memory
 * load address and the payload length (both little-endian,
 * presumably matching the firmware file layout -- TODO confirm
 * against the image format documentation). */
struct fw_chunk {
	__le32 address;
	__le32 length;
};
3055 
/*
 * Upload the DINO microcode word-by-word and wait for its "alive"
 * response.
 *
 * @param data  ucode image, an array of little-endian 16-bit words
 * @param len   image length in bytes
 * @return 0 on success; -EINVAL if the alive response is invalid;
 *         -ETIME if DINO never answers; negative if the master state
 *         machine could not be stopped first.
 */
static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
{
	int rc = 0, i, addr;
	u8 cr = 0;
	__le16 *image;

	image = (__le16 *) data;

	IPW_DEBUG_TRACE(">>\n");

	rc = ipw_stop_master(priv);

	if (rc < 0)
		return rc;

	/* zero the shared SRAM region before loading */
	for (addr = IPW_SHARED_LOWER_BOUND;
	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
		ipw_write32(priv, addr, 0);
	}

	/* no ucode (yet) */
	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
	/* destroy DMA queues */
	/* reset sequence */

	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
	ipw_arc_release(priv);
	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
	mdelay(1);

	/* reset PHY */
	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
	mdelay(1);

	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
	mdelay(1);

	/* enable ucode store */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
	mdelay(1);

	/* write ucode */
	/*
	 * @bug
	 * Do NOT set indirect address register once and then
	 * store data to indirect data register in the loop.
	 * It seems very reasonable, but in this case DINO do not
	 * accept ucode. It is essential to set address each time.
	 */
	/* load new ipw uCode */
	for (i = 0; i < len / 2; i++)
		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
				le16_to_cpu(image[i]));

	/* enable DINO */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);

	/* this is where the igx / win driver deviates from the VAP driver. */

	/* wait (up to 100 ms, 1 ms granularity) for the alive response */
	for (i = 0; i < 100; i++) {
		/* poll for incoming data */
		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
		if (cr & DINO_RXFIFO_DATA)
			break;
		mdelay(1);
	}

	if (cr & DINO_RXFIFO_DATA) {
		/* alive_command_response size is NOT a multiple of 4;
		 * round the buffer up to whole 32-bit words */
		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];

		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
			response_buffer[i] =
			    cpu_to_le32(ipw_read_reg32(priv,
						       IPW_BASEBAND_RX_FIFO_READ));
		memcpy(&priv->dino_alive, response_buffer,
		       sizeof(priv->dino_alive));
		if (priv->dino_alive.alive_command == 1
		    && priv->dino_alive.ucode_valid == 1) {
			rc = 0;
			IPW_DEBUG_INFO
			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
			     "of %02d/%02d/%02d %02d:%02d\n",
			     priv->dino_alive.software_revision,
			     priv->dino_alive.software_revision,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.time_stamp[0],
			     priv->dino_alive.time_stamp[1],
			     priv->dino_alive.time_stamp[2],
			     priv->dino_alive.time_stamp[3],
			     priv->dino_alive.time_stamp[4]);
		} else {
			IPW_DEBUG_INFO("Microcode is not alive\n");
			rc = -EINVAL;
		}
	} else {
		IPW_DEBUG_INFO("No alive response from DINO\n");
		rc = -ETIME;
	}

	/* disable DINO, otherwise for some reason
	   firmware have problem getting alive resp. */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);

	return rc;
}
3166 
3167 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3168 {
3169 	int ret = -1;
3170 	int offset = 0;
3171 	struct fw_chunk *chunk;
3172 	int total_nr = 0;
3173 	int i;
3174 	struct dma_pool *pool;
3175 	void **virts;
3176 	dma_addr_t *phys;
3177 
3178 	IPW_DEBUG_TRACE("<< :\n");
3179 
3180 	virts = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(void *),
3181 			      GFP_KERNEL);
3182 	if (!virts)
3183 		return -ENOMEM;
3184 
3185 	phys = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(dma_addr_t),
3186 			     GFP_KERNEL);
3187 	if (!phys) {
3188 		kfree(virts);
3189 		return -ENOMEM;
3190 	}
3191 	pool = dma_pool_create("ipw2200", &priv->pci_dev->dev, CB_MAX_LENGTH, 0,
3192 			       0);
3193 	if (!pool) {
3194 		IPW_ERROR("dma_pool_create failed\n");
3195 		kfree(phys);
3196 		kfree(virts);
3197 		return -ENOMEM;
3198 	}
3199 
3200 	/* Start the Dma */
3201 	ret = ipw_fw_dma_enable(priv);
3202 
3203 	/* the DMA is already ready this would be a bug. */
3204 	BUG_ON(priv->sram_desc.last_cb_index > 0);
3205 
3206 	do {
3207 		u32 chunk_len;
3208 		u8 *start;
3209 		int size;
3210 		int nr = 0;
3211 
3212 		chunk = (struct fw_chunk *)(data + offset);
3213 		offset += sizeof(struct fw_chunk);
3214 		chunk_len = le32_to_cpu(chunk->length);
3215 		start = data + offset;
3216 
3217 		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3218 		for (i = 0; i < nr; i++) {
3219 			virts[total_nr] = dma_pool_alloc(pool, GFP_KERNEL,
3220 							 &phys[total_nr]);
3221 			if (!virts[total_nr]) {
3222 				ret = -ENOMEM;
3223 				goto out;
3224 			}
3225 			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3226 				     CB_MAX_LENGTH);
3227 			memcpy(virts[total_nr], start, size);
3228 			start += size;
3229 			total_nr++;
3230 			/* We don't support fw chunk larger than 64*8K */
3231 			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3232 		}
3233 
3234 		/* build DMA packet and queue up for sending */
3235 		/* dma to chunk->address, the chunk->length bytes from data +
3236 		 * offeset*/
3237 		/* Dma loading */
3238 		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3239 					    nr, le32_to_cpu(chunk->address),
3240 					    chunk_len);
3241 		if (ret) {
3242 			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3243 			goto out;
3244 		}
3245 
3246 		offset += chunk_len;
3247 	} while (offset < len);
3248 
3249 	/* Run the DMA and wait for the answer */
3250 	ret = ipw_fw_dma_kick(priv);
3251 	if (ret) {
3252 		IPW_ERROR("dmaKick Failed\n");
3253 		goto out;
3254 	}
3255 
3256 	ret = ipw_fw_dma_wait(priv);
3257 	if (ret) {
3258 		IPW_ERROR("dmaWaitSync Failed\n");
3259 		goto out;
3260 	}
3261  out:
3262 	for (i = 0; i < total_nr; i++)
3263 		dma_pool_free(pool, virts[i], phys[i]);
3264 
3265 	dma_pool_destroy(pool);
3266 	kfree(phys);
3267 	kfree(virts);
3268 
3269 	return ret;
3270 }
3271 
3272 /* stop nic */
3273 static int ipw_stop_nic(struct ipw_priv *priv)
3274 {
3275 	int rc = 0;
3276 
3277 	/* stop */
3278 	ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3279 
3280 	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3281 			  IPW_RESET_REG_MASTER_DISABLED, 500);
3282 	if (rc < 0) {
3283 		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3284 		return rc;
3285 	}
3286 
3287 	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3288 
3289 	return rc;
3290 }
3291 
/* Bring the NIC out of reset and allow firmware execution; the
 * counterpart of ipw_stop_nic(). */
static void ipw_start_nic(struct ipw_priv *priv)
{
	IPW_DEBUG_TRACE(">>\n");

	/* prvHwStartNic  release ARC */
	ipw_clear_bit(priv, IPW_RESET_REG,
		      IPW_RESET_REG_MASTER_DISABLED |
		      IPW_RESET_REG_STOP_MASTER |
		      CBD_RESET_REG_PRINCETON_RESET);

	/* enable power management */
	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);

	IPW_DEBUG_TRACE("<<\n");
}
3308 
3309 static int ipw_init_nic(struct ipw_priv *priv)
3310 {
3311 	int rc;
3312 
3313 	IPW_DEBUG_TRACE(">>\n");
3314 	/* reset */
3315 	/*prvHwInitNic */
3316 	/* set "initialization complete" bit to move adapter to D0 state */
3317 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3318 
3319 	/* low-level PLL activation */
3320 	ipw_write32(priv, IPW_READ_INT_REGISTER,
3321 		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3322 
3323 	/* wait for clock stabilization */
3324 	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3325 			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3326 	if (rc < 0)
3327 		IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3328 
3329 	/* assert SW reset */
3330 	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3331 
3332 	udelay(10);
3333 
3334 	/* set "initialization complete" bit to move adapter to D0 state */
3335 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3336 
3337 	IPW_DEBUG_TRACE(">>\n");
3338 	return 0;
3339 }
3340 
3341 /* Call this function from process context, it will sleep in request_firmware.
3342  * Probe is an ok place to call this from.
3343  */
3344 static int ipw_reset_nic(struct ipw_priv *priv)
3345 {
3346 	int rc = 0;
3347 	unsigned long flags;
3348 
3349 	IPW_DEBUG_TRACE(">>\n");
3350 
3351 	rc = ipw_init_nic(priv);
3352 
3353 	spin_lock_irqsave(&priv->lock, flags);
3354 	/* Clear the 'host command active' bit... */
3355 	priv->status &= ~STATUS_HCMD_ACTIVE;
3356 	wake_up_interruptible(&priv->wait_command_queue);
3357 	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3358 	wake_up_interruptible(&priv->wait_state);
3359 	spin_unlock_irqrestore(&priv->lock, flags);
3360 
3361 	IPW_DEBUG_TRACE("<<\n");
3362 	return rc;
3363 }
3364 
3365 
/* On-disk firmware layout: a header giving the sizes of the three
 * concatenated sections, followed by the section data itself. */
struct ipw_fw {
	__le32 ver;		/* version; major in high 16 bits (see ipw_get_fw) */
	__le32 boot_size;	/* boot image length in bytes */
	__le32 ucode_size;	/* microcode image length in bytes */
	__le32 fw_size;		/* runtime firmware length in bytes */
	u8 data[];		/* boot image, then ucode, then firmware */
};
3373 
3374 static int ipw_get_fw(struct ipw_priv *priv,
3375 		      const struct firmware **raw, const char *name)
3376 {
3377 	struct ipw_fw *fw;
3378 	int rc;
3379 
3380 	/* ask firmware_class module to get the boot firmware off disk */
3381 	rc = request_firmware(raw, name, &priv->pci_dev->dev);
3382 	if (rc < 0) {
3383 		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3384 		return rc;
3385 	}
3386 
3387 	if ((*raw)->size < sizeof(*fw)) {
3388 		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3389 		return -EINVAL;
3390 	}
3391 
3392 	fw = (void *)(*raw)->data;
3393 
3394 	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3395 	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3396 		IPW_ERROR("%s is too small or corrupt (%zd)\n",
3397 			  name, (*raw)->size);
3398 		return -EINVAL;
3399 	}
3400 
3401 	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3402 		       name,
3403 		       le32_to_cpu(fw->ver) >> 16,
3404 		       le32_to_cpu(fw->ver) & 0xff,
3405 		       (*raw)->size - sizeof(*fw));
3406 	return 0;
3407 }
3408 
3409 #define IPW_RX_BUF_SIZE (3000)
3410 
3411 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3412 				      struct ipw_rx_queue *rxq)
3413 {
3414 	unsigned long flags;
3415 	int i;
3416 
3417 	spin_lock_irqsave(&rxq->lock, flags);
3418 
3419 	INIT_LIST_HEAD(&rxq->rx_free);
3420 	INIT_LIST_HEAD(&rxq->rx_used);
3421 
3422 	/* Fill the rx_used queue with _all_ of the Rx buffers */
3423 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3424 		/* In the reset function, these buffers may have been allocated
3425 		 * to an SKB, so we need to unmap and free potential storage */
3426 		if (rxq->pool[i].skb != NULL) {
3427 			dma_unmap_single(&priv->pci_dev->dev,
3428 					 rxq->pool[i].dma_addr,
3429 					 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
3430 			dev_kfree_skb_irq(rxq->pool[i].skb);
3431 			rxq->pool[i].skb = NULL;
3432 		}
3433 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3434 	}
3435 
3436 	/* Set us so that we have processed and used all buffers, but have
3437 	 * not restocked the Rx queue with fresh buffers */
3438 	rxq->read = rxq->write = 0;
3439 	rxq->free_count = 0;
3440 	spin_unlock_irqrestore(&rxq->lock, flags);
3441 }
3442 
#ifdef CONFIG_PM
/* With PM, cache the most recently loaded firmware blob across
 * suspend/resume so resume does not have to hit the filesystem again.
 * 'raw' and 'fw_loaded' are only touched from ipw_load()/free_firmware(). */
static int fw_loaded = 0;
static const struct firmware *raw = NULL;

/* Drop the cached firmware blob, if any. */
static void free_firmware(void)
{
	if (fw_loaded) {
		release_firmware(raw);
		raw = NULL;
		fw_loaded = 0;
	}
}
#else
/* Without PM there is no cache, so freeing is a no-op. */
#define free_firmware() do {} while (0)
#endif
3458 
/* Load the boot image, microcode and runtime firmware into the adapter
 * and bring it to the point where interrupts are enabled and the Rx
 * queue is stocked.  Retries the whole sequence up to three times on a
 * parity error.  Sleeps (request_firmware); call from process context.
 * Returns 0 on success, negative errno on failure. */
static int ipw_load(struct ipw_priv *priv)
{
#ifndef CONFIG_PM
	const struct firmware *raw = NULL;
#endif
	struct ipw_fw *fw;
	u8 *boot_img, *ucode_img, *fw_img;
	u8 *name = NULL;
	int rc = 0, retries = 3;

	/* pick the image matching the current operating mode */
	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		name = "ipw2200-ibss.fw";
		break;
#ifdef CONFIG_IPW2200_MONITOR
	case IW_MODE_MONITOR:
		name = "ipw2200-sniffer.fw";
		break;
#endif
	case IW_MODE_INFRA:
		name = "ipw2200-bss.fw";
		break;
	}

	if (!name) {
		rc = -EINVAL;
		goto error;
	}

#ifdef CONFIG_PM
	/* with PM the blob is cached in the file-scope 'raw' */
	if (!fw_loaded) {
#endif
		rc = ipw_get_fw(priv, &raw, name);
		if (rc < 0)
			goto error;
#ifdef CONFIG_PM
	}
#endif

	/* locate the three concatenated sections inside the blob */
	fw = (void *)raw->data;
	boot_img = &fw->data[0];
	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
			   le32_to_cpu(fw->ucode_size)];

	if (!priv->rxq)
		priv->rxq = ipw_rx_queue_alloc(priv);
	else
		ipw_rx_queue_reset(priv, priv->rxq);
	if (!priv->rxq) {
		IPW_ERROR("Unable to initialize Rx queue\n");
		rc = -ENOMEM;
		goto error;
	}

      retry:
	/* Ensure interrupts are disabled */
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
	priv->status &= ~STATUS_INT_ENABLED;

	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

	ipw_stop_nic(priv);

	rc = ipw_reset_nic(priv);
	if (rc < 0) {
		IPW_ERROR("Unable to reset NIC\n");
		goto error;
	}

	/* clear NIC SRAM before loading anything */
	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);

	/* DMA the initial boot firmware into the device */
	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
		goto error;
	}

	/* kick start the device */
	ipw_start_nic(priv);

	/* wait for the device to finish its initial startup sequence */
	rc = ipw_poll_bit(priv, IPW_INTA_RW,
			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
	if (rc < 0) {
		IPW_ERROR("device failed to boot initial fw image\n");
		goto error;
	}
	IPW_DEBUG_INFO("initial device response after %dms\n", rc);

	/* ack fw init done interrupt */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);

	/* DMA the ucode into the device */
	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load ucode: %d\n", rc);
		goto error;
	}

	/* stop nic */
	ipw_stop_nic(priv);

	/* DMA bss firmware into the device */
	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load firmware: %d\n", rc);
		goto error;
	}
#ifdef CONFIG_PM
	/* blob is good: keep it cached for suspend/resume */
	fw_loaded = 1;
#endif

	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);

	rc = ipw_queue_reset(priv);
	if (rc < 0) {
		IPW_ERROR("Unable to initialize queues\n");
		goto error;
	}

	/* Ensure interrupts are disabled */
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

	/* kick start the device */
	ipw_start_nic(priv);

	/* a parity error at this point is retried a few times before
	 * giving up */
	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
		if (retries > 0) {
			IPW_WARNING("Parity error.  Retrying init.\n");
			retries--;
			goto retry;
		}

		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
		rc = -EIO;
		goto error;
	}

	/* wait for the device */
	rc = ipw_poll_bit(priv, IPW_INTA_RW,
			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
	if (rc < 0) {
		IPW_ERROR("device failed to start within 500ms\n");
		goto error;
	}
	IPW_DEBUG_INFO("device response after %dms\n", rc);

	/* ack fw init done interrupt */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);

	/* read eeprom data */
	priv->eeprom_delay = 1;
	ipw_read_eeprom(priv);
	/* initialize the eeprom region of sram */
	ipw_eeprom_init_sram(priv);

	/* enable interrupts */
	ipw_enable_interrupts(priv);

	/* Ensure our queue has valid packets */
	ipw_rx_queue_replenish(priv);

	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);

	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

#ifndef CONFIG_PM
	release_firmware(raw);
#endif
	return 0;

      error:
	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);
	release_firmware(raw);
#ifdef CONFIG_PM
	/* drop the cache so the next load starts fresh */
	fw_loaded = 0;
	raw = NULL;
#endif

	return rc;
}
3651 
3652 /*
3653  * DMA services
3654  *
3655  * Theory of operation
3656  *
 * A queue is a circular buffer with 'Read' and 'Write' pointers.
 * Two empty entries are always kept in the buffer to protect against
 * overflow.
 *
 * For the Tx queue there are low-mark and high-mark limits.  If, after
 * queuing a packet for Tx, the free space drops below the low mark, the
 * Tx queue is stopped.  When reclaiming packets (on the 'tx done' IRQ),
 * if the free space rises above the high mark, the Tx queue is resumed.
3664  *
3665  * The IPW operates with six queues, one receive queue in the device's
3666  * sram, one transmit queue for sending commands to the device firmware,
3667  * and four transmit queues for data.
3668  *
3669  * The four transmit queues allow for performing quality of service (qos)
3670  * transmissions as per the 802.11 protocol.  Currently Linux does not
3671  * provide a mechanism to the user for utilizing prioritized queues, so
3672  * we only utilize the first data transmit queue (queue1).
3673  */
3674 
3675 /*
3676  * Driver allocates buffers of this size for Rx
3677  */
3678 
3679 /*
3680  * ipw_rx_queue_space - Return number of free slots available in queue.
3681  */
3682 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3683 {
3684 	int s = q->read - q->write;
3685 	if (s <= 0)
3686 		s += RX_QUEUE_SIZE;
3687 	/* keep some buffer to not confuse full and empty queue */
3688 	s -= 2;
3689 	if (s < 0)
3690 		s = 0;
3691 	return s;
3692 }
3693 
3694 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3695 {
3696 	int s = q->last_used - q->first_empty;
3697 	if (s <= 0)
3698 		s += q->n_bd;
3699 	s -= 2;			/* keep some reserve to not confuse empty and full situations */
3700 	if (s < 0)
3701 		s = 0;
3702 	return s;
3703 }
3704 
/* Advance a ring index by one, wrapping back to 0 at n_bd. */
static inline int ipw_queue_inc_wrap(int index, int n_bd)
{
	index++;
	if (index == n_bd)
		return 0;
	return index;
}
3709 
3710 /*
3711  * Initialize common DMA queue structure
3712  *
3713  * @param q                queue to init
3714  * @param count            Number of BD's to allocate. Should be power of 2
3715  * @param read_register    Address for 'read' register
3716  *                         (not offset within BAR, full address)
3717  * @param write_register   Address for 'write' register
3718  *                         (not offset within BAR, full address)
3719  * @param base_register    Address for 'base' register
3720  *                         (not offset within BAR, full address)
3721  * @param size             Address for 'size' register
3722  *                         (not offset within BAR, full address)
3723  */
3724 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3725 			   int count, u32 read, u32 write, u32 base, u32 size)
3726 {
3727 	q->n_bd = count;
3728 
3729 	q->low_mark = q->n_bd / 4;
3730 	if (q->low_mark < 4)
3731 		q->low_mark = 4;
3732 
3733 	q->high_mark = q->n_bd / 8;
3734 	if (q->high_mark < 2)
3735 		q->high_mark = 2;
3736 
3737 	q->first_empty = q->last_used = 0;
3738 	q->reg_r = read;
3739 	q->reg_w = write;
3740 
3741 	ipw_write32(priv, base, q->dma_addr);
3742 	ipw_write32(priv, size, count);
3743 	ipw_write32(priv, read, 0);
3744 	ipw_write32(priv, write, 0);
3745 
3746 	_ipw_read32(priv, 0x90);
3747 }
3748 
3749 static int ipw_queue_tx_init(struct ipw_priv *priv,
3750 			     struct clx2_tx_queue *q,
3751 			     int count, u32 read, u32 write, u32 base, u32 size)
3752 {
3753 	struct pci_dev *dev = priv->pci_dev;
3754 
3755 	q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
3756 	if (!q->txb)
3757 		return -ENOMEM;
3758 
3759 	q->bd =
3760 	    dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
3761 			       &q->q.dma_addr, GFP_KERNEL);
3762 	if (!q->bd) {
3763 		IPW_ERROR("dma_alloc_coherent(%zd) failed\n",
3764 			  sizeof(q->bd[0]) * count);
3765 		kfree(q->txb);
3766 		q->txb = NULL;
3767 		return -ENOMEM;
3768 	}
3769 
3770 	ipw_queue_init(priv, &q->q, count, read, write, base, size);
3771 	return 0;
3772 }
3773 
3774 /*
3775  * Free one TFD, those at index [txq->q.last_used].
3776  * Do NOT advance any indexes
3777  *
3778  * @param dev
3779  * @param txq
3780  */
3781 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3782 				  struct clx2_tx_queue *txq)
3783 {
3784 	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3785 	struct pci_dev *dev = priv->pci_dev;
3786 	int i;
3787 
3788 	/* classify bd */
3789 	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3790 		/* nothing to cleanup after for host commands */
3791 		return;
3792 
3793 	/* sanity check */
3794 	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3795 		IPW_ERROR("Too many chunks: %i\n",
3796 			  le32_to_cpu(bd->u.data.num_chunks));
3797 		/* @todo issue fatal error, it is quite serious situation */
3798 		return;
3799 	}
3800 
3801 	/* unmap chunks if any */
3802 	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3803 		dma_unmap_single(&dev->dev,
3804 				 le32_to_cpu(bd->u.data.chunk_ptr[i]),
3805 				 le16_to_cpu(bd->u.data.chunk_len[i]),
3806 				 DMA_TO_DEVICE);
3807 		if (txq->txb[txq->q.last_used]) {
3808 			libipw_txb_free(txq->txb[txq->q.last_used]);
3809 			txq->txb[txq->q.last_used] = NULL;
3810 		}
3811 	}
3812 }
3813 
3814 /*
3815  * Deallocate DMA queue.
3816  *
3817  * Empty queue by removing and destroying all BD's.
3818  * Free all buffers.
3819  *
3820  * @param dev
3821  * @param q
3822  */
3823 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3824 {
3825 	struct clx2_queue *q = &txq->q;
3826 	struct pci_dev *dev = priv->pci_dev;
3827 
3828 	if (q->n_bd == 0)
3829 		return;
3830 
3831 	/* first, empty all BD's */
3832 	for (; q->first_empty != q->last_used;
3833 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3834 		ipw_queue_tx_free_tfd(priv, txq);
3835 	}
3836 
3837 	/* free buffers belonging to queue itself */
3838 	dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3839 			  q->dma_addr);
3840 	kfree(txq->txb);
3841 
3842 	/* 0 fill whole structure */
3843 	memset(txq, 0, sizeof(*txq));
3844 }
3845 
3846 /*
3847  * Destroy all DMA queues and structures
3848  *
3849  * @param priv
3850  */
3851 static void ipw_tx_queue_free(struct ipw_priv *priv)
3852 {
3853 	/* Tx CMD queue */
3854 	ipw_queue_tx_free(priv, &priv->txq_cmd);
3855 
3856 	/* Tx queues */
3857 	ipw_queue_tx_free(priv, &priv->txq[0]);
3858 	ipw_queue_tx_free(priv, &priv->txq[1]);
3859 	ipw_queue_tx_free(priv, &priv->txq[2]);
3860 	ipw_queue_tx_free(priv, &priv->txq[3]);
3861 }
3862 
3863 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3864 {
3865 	/* First 3 bytes are manufacturer */
3866 	bssid[0] = priv->mac_addr[0];
3867 	bssid[1] = priv->mac_addr[1];
3868 	bssid[2] = priv->mac_addr[2];
3869 
3870 	/* Last bytes are random */
3871 	get_random_bytes(&bssid[3], ETH_ALEN - 3);
3872 
3873 	bssid[0] &= 0xfe;	/* clear multicast bit */
3874 	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
3875 }
3876 
3877 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3878 {
3879 	struct ipw_station_entry entry;
3880 	int i;
3881 
3882 	for (i = 0; i < priv->num_stations; i++) {
3883 		if (ether_addr_equal(priv->stations[i], bssid)) {
3884 			/* Another node is active in network */
3885 			priv->missed_adhoc_beacons = 0;
3886 			if (!(priv->config & CFG_STATIC_CHANNEL))
3887 				/* when other nodes drop out, we drop out */
3888 				priv->config &= ~CFG_ADHOC_PERSIST;
3889 
3890 			return i;
3891 		}
3892 	}
3893 
3894 	if (i == MAX_STATIONS)
3895 		return IPW_INVALID_STATION;
3896 
3897 	IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3898 
3899 	entry.reserved = 0;
3900 	entry.support_mode = 0;
3901 	memcpy(entry.mac_addr, bssid, ETH_ALEN);
3902 	memcpy(priv->stations[i], bssid, ETH_ALEN);
3903 	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3904 			 &entry, sizeof(entry));
3905 	priv->num_stations++;
3906 
3907 	return i;
3908 }
3909 
3910 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3911 {
3912 	int i;
3913 
3914 	for (i = 0; i < priv->num_stations; i++)
3915 		if (ether_addr_equal(priv->stations[i], bssid))
3916 			return i;
3917 
3918 	return IPW_INVALID_STATION;
3919 }
3920 
3921 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3922 {
3923 	int err;
3924 
3925 	if (priv->status & STATUS_ASSOCIATING) {
3926 		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3927 		schedule_work(&priv->disassociate);
3928 		return;
3929 	}
3930 
3931 	if (!(priv->status & STATUS_ASSOCIATED)) {
3932 		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3933 		return;
3934 	}
3935 
3936 	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3937 			"on channel %d.\n",
3938 			priv->assoc_request.bssid,
3939 			priv->assoc_request.channel);
3940 
3941 	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3942 	priv->status |= STATUS_DISASSOCIATING;
3943 
3944 	if (quiet)
3945 		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3946 	else
3947 		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3948 
3949 	err = ipw_send_associate(priv, &priv->assoc_request);
3950 	if (err) {
3951 		IPW_DEBUG_HC("Attempt to send [dis]associate command "
3952 			     "failed.\n");
3953 		return;
3954 	}
3955 
3956 }
3957 
3958 static int ipw_disassociate(void *data)
3959 {
3960 	struct ipw_priv *priv = data;
3961 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3962 		return 0;
3963 	ipw_send_disassociate(data, 0);
3964 	netif_carrier_off(priv->net_dev);
3965 	return 1;
3966 }
3967 
/* Work handler: run ipw_disassociate() under the driver mutex. */
static void ipw_bg_disassociate(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, disassociate);
	mutex_lock(&priv->mutex);
	ipw_disassociate(priv);
	mutex_unlock(&priv->mutex);
}
3976 
/* Work handler: (re)send the system configuration to the firmware.  In
 * promiscuous builds, force accept-everything while the rtap interface
 * is running. */
static void ipw_system_config(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, system_config);

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;
	}
#endif

	ipw_send_system_config(priv);
}
3993 
/* Maps an IEEE 802.11 status code to a human-readable explanation. */
struct ipw_status_code {
	u16 status;		/* 802.11 status code value */
	const char *reason;	/* textual description */
};
3998 
/* IEEE 802.11 status codes and their meanings (derived from
 * ethereal-0.10.6; see the copyright block at the top of this file).
 * Looked up by ipw_get_status_code(). */
static const struct ipw_status_code ipw_status_codes[] = {
	{0x00, "Successful"},
	{0x01, "Unspecified failure"},
	{0x0A, "Cannot support all requested capabilities in the "
	 "Capability information field"},
	{0x0B, "Reassociation denied due to inability to confirm that "
	 "association exists"},
	{0x0C, "Association denied due to reason outside the scope of this "
	 "standard"},
	{0x0D,
	 "Responding station does not support the specified authentication "
	 "algorithm"},
	{0x0E,
	 "Received an Authentication frame with authentication sequence "
	 "transaction sequence number out of expected sequence"},
	{0x0F, "Authentication rejected because of challenge failure"},
	{0x10, "Authentication rejected due to timeout waiting for next "
	 "frame in sequence"},
	{0x11, "Association denied because AP is unable to handle additional "
	 "associated stations"},
	{0x12,
	 "Association denied due to requesting station not supporting all "
	 "of the datarates in the BSSBasicServiceSet Parameter"},
	{0x13,
	 "Association denied due to requesting station not supporting "
	 "short preamble operation"},
	{0x14,
	 "Association denied due to requesting station not supporting "
	 "PBCC encoding"},
	{0x15,
	 "Association denied due to requesting station not supporting "
	 "channel agility"},
	{0x19,
	 "Association denied due to requesting station not supporting "
	 "short slot operation"},
	{0x1A,
	 "Association denied due to requesting station not supporting "
	 "DSSS-OFDM operation"},
	{0x28, "Invalid Information Element"},
	{0x29, "Group Cipher is not valid"},
	{0x2A, "Pairwise Cipher is not valid"},
	{0x2B, "AKMP is not valid"},
	{0x2C, "Unsupported RSN IE version"},
	{0x2D, "Invalid RSN IE Capabilities"},
	{0x2E, "Cipher suite is rejected per security policy"},
};
4045 
4046 static const char *ipw_get_status_code(u16 status)
4047 {
4048 	int i;
4049 	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4050 		if (ipw_status_codes[i].status == (status & 0xff))
4051 			return ipw_status_codes[i].reason;
4052 	return "Unknown status value.";
4053 }
4054 
/* Zero a sliding-window average so it starts collecting fresh samples. */
static inline void average_init(struct average *avg)
{
	memset(avg, 0, sizeof(*avg));
}
4059 
4060 #define DEPTH_RSSI 8
4061 #define DEPTH_NOISE 16
4062 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4063 {
4064 	return ((depth-1)*prev_avg +  val)/depth;
4065 }
4066 
4067 static void average_add(struct average *avg, s16 val)
4068 {
4069 	avg->sum -= avg->entries[avg->pos];
4070 	avg->sum += val;
4071 	avg->entries[avg->pos++] = val;
4072 	if (unlikely(avg->pos == AVG_ENTRIES)) {
4073 		avg->init = 1;
4074 		avg->pos = 0;
4075 	}
4076 }
4077 
4078 static s16 average_value(struct average *avg)
4079 {
4080 	if (!unlikely(avg->init)) {
4081 		if (avg->pos)
4082 			return avg->sum / avg->pos;
4083 		return 0;
4084 	}
4085 
4086 	return avg->sum / AVG_ENTRIES;
4087 }
4088 
/* Reset link-quality statistics: re-seed the averaged signal levels,
 * snapshot the firmware-owned counters (they persist until a NIC
 * restart) and zero the driver-owned per-association counters. */
static void ipw_reset_stats(struct ipw_priv *priv)
{
	u32 len = sizeof(u32);

	priv->quality = 0;

	average_init(&priv->average_missed_beacons);
	priv->exp_avg_rssi = -60;
	/* noise appears to be stored with a +0x100 offset — the seed is
	 * -85 dBm plus that offset; confirm against the readers */
	priv->exp_avg_noise = -85 + 0x100;

	priv->last_rate = 0;
	priv->last_missed_beacons = 0;
	priv->last_rx_packets = 0;
	priv->last_tx_packets = 0;
	priv->last_tx_failures = 0;

	/* Firmware managed, reset only when NIC is restarted, so we have to
	 * normalize on the current value */
	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
			&priv->last_rx_err, &len);
	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
			&priv->last_tx_failures, &len);

	/* Driver managed, reset with each association */
	priv->missed_adhoc_beacons = 0;
	priv->missed_beacons = 0;
	priv->tx_packets = 0;
	priv->rx_packets = 0;

}
4119 
4120 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4121 {
4122 	u32 i = 0x80000000;
4123 	u32 mask = priv->rates_mask;
4124 	/* If currently associated in B mode, restrict the maximum
4125 	 * rate match to B rates */
4126 	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4127 		mask &= LIBIPW_CCK_RATES_MASK;
4128 
4129 	/* TODO: Verify that the rate is supported by the current rates
4130 	 * list. */
4131 
4132 	while (i && !(mask & i))
4133 		i >>= 1;
4134 	switch (i) {
4135 	case LIBIPW_CCK_RATE_1MB_MASK:
4136 		return 1000000;
4137 	case LIBIPW_CCK_RATE_2MB_MASK:
4138 		return 2000000;
4139 	case LIBIPW_CCK_RATE_5MB_MASK:
4140 		return 5500000;
4141 	case LIBIPW_OFDM_RATE_6MB_MASK:
4142 		return 6000000;
4143 	case LIBIPW_OFDM_RATE_9MB_MASK:
4144 		return 9000000;
4145 	case LIBIPW_CCK_RATE_11MB_MASK:
4146 		return 11000000;
4147 	case LIBIPW_OFDM_RATE_12MB_MASK:
4148 		return 12000000;
4149 	case LIBIPW_OFDM_RATE_18MB_MASK:
4150 		return 18000000;
4151 	case LIBIPW_OFDM_RATE_24MB_MASK:
4152 		return 24000000;
4153 	case LIBIPW_OFDM_RATE_36MB_MASK:
4154 		return 36000000;
4155 	case LIBIPW_OFDM_RATE_48MB_MASK:
4156 		return 48000000;
4157 	case LIBIPW_OFDM_RATE_54MB_MASK:
4158 		return 54000000;
4159 	}
4160 
4161 	if (priv->ieee->mode == IEEE_B)
4162 		return 11000000;
4163 	else
4164 		return 54000000;
4165 }
4166 
4167 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4168 {
4169 	u32 rate, len = sizeof(rate);
4170 	int err;
4171 
4172 	if (!(priv->status & STATUS_ASSOCIATED))
4173 		return 0;
4174 
4175 	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4176 		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4177 				      &len);
4178 		if (err) {
4179 			IPW_DEBUG_INFO("failed querying ordinals.\n");
4180 			return 0;
4181 		}
4182 	} else
4183 		return ipw_get_max_rate(priv);
4184 
4185 	switch (rate) {
4186 	case IPW_TX_RATE_1MB:
4187 		return 1000000;
4188 	case IPW_TX_RATE_2MB:
4189 		return 2000000;
4190 	case IPW_TX_RATE_5MB:
4191 		return 5500000;
4192 	case IPW_TX_RATE_6MB:
4193 		return 6000000;
4194 	case IPW_TX_RATE_9MB:
4195 		return 9000000;
4196 	case IPW_TX_RATE_11MB:
4197 		return 11000000;
4198 	case IPW_TX_RATE_12MB:
4199 		return 12000000;
4200 	case IPW_TX_RATE_18MB:
4201 		return 18000000;
4202 	case IPW_TX_RATE_24MB:
4203 		return 24000000;
4204 	case IPW_TX_RATE_36MB:
4205 		return 36000000;
4206 	case IPW_TX_RATE_48MB:
4207 		return 48000000;
4208 	case IPW_TX_RATE_54MB:
4209 		return 54000000;
4210 	}
4211 
4212 	return 0;
4213 }
4214 
4215 #define IPW_STATS_INTERVAL (2 * HZ)
4216 static void ipw_gather_stats(struct ipw_priv *priv)
4217 {
4218 	u32 rx_err, rx_err_delta, rx_packets_delta;
4219 	u32 tx_failures, tx_failures_delta, tx_packets_delta;
4220 	u32 missed_beacons_percent, missed_beacons_delta;
4221 	u32 quality = 0;
4222 	u32 len = sizeof(u32);
4223 	s16 rssi;
4224 	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4225 	    rate_quality;
4226 	u32 max_rate;
4227 
4228 	if (!(priv->status & STATUS_ASSOCIATED)) {
4229 		priv->quality = 0;
4230 		return;
4231 	}
4232 
4233 	/* Update the statistics */
4234 	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4235 			&priv->missed_beacons, &len);
4236 	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4237 	priv->last_missed_beacons = priv->missed_beacons;
4238 	if (priv->assoc_request.beacon_interval) {
4239 		missed_beacons_percent = missed_beacons_delta *
4240 		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4241 		    (IPW_STATS_INTERVAL * 10);
4242 	} else {
4243 		missed_beacons_percent = 0;
4244 	}
4245 	average_add(&priv->average_missed_beacons, missed_beacons_percent);
4246 
4247 	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4248 	rx_err_delta = rx_err - priv->last_rx_err;
4249 	priv->last_rx_err = rx_err;
4250 
4251 	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4252 	tx_failures_delta = tx_failures - priv->last_tx_failures;
4253 	priv->last_tx_failures = tx_failures;
4254 
4255 	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4256 	priv->last_rx_packets = priv->rx_packets;
4257 
4258 	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4259 	priv->last_tx_packets = priv->tx_packets;
4260 
4261 	/* Calculate quality based on the following:
4262 	 *
4263 	 * Missed beacon: 100% = 0, 0% = 70% missed
4264 	 * Rate: 60% = 1Mbs, 100% = Max
4265 	 * Rx and Tx errors represent a straight % of total Rx/Tx
4266 	 * RSSI: 100% = > -50,  0% = < -80
4267 	 * Rx errors: 100% = 0, 0% = 50% missed
4268 	 *
4269 	 * The lowest computed quality is used.
4270 	 *
4271 	 */
4272 #define BEACON_THRESHOLD 5
4273 	beacon_quality = 100 - missed_beacons_percent;
4274 	if (beacon_quality < BEACON_THRESHOLD)
4275 		beacon_quality = 0;
4276 	else
4277 		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4278 		    (100 - BEACON_THRESHOLD);
4279 	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4280 			beacon_quality, missed_beacons_percent);
4281 
4282 	priv->last_rate = ipw_get_current_rate(priv);
4283 	max_rate = ipw_get_max_rate(priv);
4284 	rate_quality = priv->last_rate * 40 / max_rate + 60;
4285 	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4286 			rate_quality, priv->last_rate / 1000000);
4287 
4288 	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4289 		rx_quality = 100 - (rx_err_delta * 100) /
4290 		    (rx_packets_delta + rx_err_delta);
4291 	else
4292 		rx_quality = 100;
4293 	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
4294 			rx_quality, rx_err_delta, rx_packets_delta);
4295 
4296 	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4297 		tx_quality = 100 - (tx_failures_delta * 100) /
4298 		    (tx_packets_delta + tx_failures_delta);
4299 	else
4300 		tx_quality = 100;
4301 	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
4302 			tx_quality, tx_failures_delta, tx_packets_delta);
4303 
4304 	rssi = priv->exp_avg_rssi;
4305 	signal_quality =
4306 	    (100 *
4307 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4308 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4309 	     (priv->ieee->perfect_rssi - rssi) *
4310 	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4311 	      62 * (priv->ieee->perfect_rssi - rssi))) /
4312 	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4313 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4314 	if (signal_quality > 100)
4315 		signal_quality = 100;
4316 	else if (signal_quality < 1)
4317 		signal_quality = 0;
4318 
4319 	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4320 			signal_quality, rssi);
4321 
4322 	quality = min(rx_quality, signal_quality);
4323 	quality = min(tx_quality, quality);
4324 	quality = min(rate_quality, quality);
4325 	quality = min(beacon_quality, quality);
4326 	if (quality == beacon_quality)
4327 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4328 				quality);
4329 	if (quality == rate_quality)
4330 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4331 				quality);
4332 	if (quality == tx_quality)
4333 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4334 				quality);
4335 	if (quality == rx_quality)
4336 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4337 				quality);
4338 	if (quality == signal_quality)
4339 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4340 				quality);
4341 
4342 	priv->quality = quality;
4343 
4344 	schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4345 }
4346 
4347 static void ipw_bg_gather_stats(struct work_struct *work)
4348 {
4349 	struct ipw_priv *priv =
4350 		container_of(work, struct ipw_priv, gather_stats.work);
4351 	mutex_lock(&priv->mutex);
4352 	ipw_gather_stats(priv);
4353 	mutex_unlock(&priv->mutex);
4354 }
4355 
/* Missed beacon behavior:
 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
 * Above disassociate threshold, give up and stop scanning.
 * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
static void ipw_handle_missed_beacon(struct ipw_priv *priv,
					    int missed_count)
{
	/* Record the firmware-reported consecutive missed beacon count. */
	priv->notif_missed_beacons = missed_count;

	if (missed_count > priv->disassociate_threshold &&
	    priv->status & STATUS_ASSOCIATED) {
		/* If associated and we've hit the missed
		 * beacon threshold, disassociate, turn
		 * off roaming, and abort any active scans */
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
			  IPW_DL_STATE | IPW_DL_ASSOC,
			  "Missed beacon: %d - disassociate\n", missed_count);
		priv->status &= ~STATUS_ROAMING;
		if (priv->status & STATUS_SCANNING) {
			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
				  IPW_DL_STATE,
				  "Aborting scan with missed beacon.\n");
			schedule_work(&priv->abort_scan);
		}

		schedule_work(&priv->disassociate);
		return;
	}

	if (priv->status & STATUS_ROAMING) {
		/* If we are currently roaming, then just
		 * print a debug statement... */
		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
			  "Missed beacon: %d - roam in progress\n",
			  missed_count);
		return;
	}

	/* 'roaming' is the file-scope module flag enabling roam behavior
	 * (presumably a module parameter -- declared outside this view). */
	if (roaming &&
	    (missed_count > priv->roaming_threshold &&
	     missed_count <= priv->disassociate_threshold)) {
		/* If we are not already roaming, set the ROAM
		 * bit in the status and kick off a scan.
		 * This can happen several times before we reach
		 * disassociate_threshold. */
		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
			  "Missed beacon: %d - initiate "
			  "roaming\n", missed_count);
		if (!(priv->status & STATUS_ROAMING)) {
			priv->status |= STATUS_ROAMING;
			if (!(priv->status & STATUS_SCANNING))
				schedule_delayed_work(&priv->request_scan, 0);
		}
		return;
	}

	if (priv->status & STATUS_SCANNING &&
	    missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
		/* Stop scan to keep fw from getting
		 * stuck (only if we aren't roaming --
		 * otherwise we'll never scan more than 2 or 3
		 * channels..) */
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
			  "Aborting scan with missed beacon.\n");
		schedule_work(&priv->abort_scan);
	}

	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
}
4426 
4427 static void ipw_scan_event(struct work_struct *work)
4428 {
4429 	union iwreq_data wrqu;
4430 
4431 	struct ipw_priv *priv =
4432 		container_of(work, struct ipw_priv, scan_event.work);
4433 
4434 	wrqu.data.length = 0;
4435 	wrqu.data.flags = 0;
4436 	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4437 }
4438 
4439 static void handle_scan_event(struct ipw_priv *priv)
4440 {
4441 	/* Only userspace-requested scan completion events go out immediately */
4442 	if (!priv->user_requested_scan) {
4443 		schedule_delayed_work(&priv->scan_event,
4444 				      round_jiffies_relative(msecs_to_jiffies(4000)));
4445 	} else {
4446 		priv->user_requested_scan = 0;
4447 		mod_delayed_work(system_wq, &priv->scan_event, 0);
4448 	}
4449 }
4450 
4451 /*
4452  * Handle host notification packet.
4453  * Called from interrupt routine
4454  */
4455 static void ipw_rx_notification(struct ipw_priv *priv,
4456 				       struct ipw_rx_notification *notif)
4457 {
4458 	u16 size = le16_to_cpu(notif->size);
4459 
4460 	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4461 
4462 	switch (notif->subtype) {
4463 	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4464 			struct notif_association *assoc = &notif->u.assoc;
4465 
4466 			switch (assoc->state) {
4467 			case CMAS_ASSOCIATED:{
4468 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4469 						  IPW_DL_ASSOC,
4470 						  "associated: '%*pE' %pM\n",
4471 						  priv->essid_len, priv->essid,
4472 						  priv->bssid);
4473 
4474 					switch (priv->ieee->iw_mode) {
4475 					case IW_MODE_INFRA:
4476 						memcpy(priv->ieee->bssid,
4477 						       priv->bssid, ETH_ALEN);
4478 						break;
4479 
4480 					case IW_MODE_ADHOC:
4481 						memcpy(priv->ieee->bssid,
4482 						       priv->bssid, ETH_ALEN);
4483 
4484 						/* clear out the station table */
4485 						priv->num_stations = 0;
4486 
4487 						IPW_DEBUG_ASSOC
4488 						    ("queueing adhoc check\n");
4489 						schedule_delayed_work(
4490 							&priv->adhoc_check,
4491 							le16_to_cpu(priv->
4492 							assoc_request.
4493 							beacon_interval));
4494 						break;
4495 					}
4496 
4497 					priv->status &= ~STATUS_ASSOCIATING;
4498 					priv->status |= STATUS_ASSOCIATED;
4499 					schedule_work(&priv->system_config);
4500 
4501 #ifdef CONFIG_IPW2200_QOS
4502 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4503 			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4504 					if ((priv->status & STATUS_AUTH) &&
4505 					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
4506 					     == IEEE80211_STYPE_ASSOC_RESP)) {
4507 						if ((sizeof
4508 						     (struct
4509 						      libipw_assoc_response)
4510 						     <= size)
4511 						    && (size <= 2314)) {
4512 							struct
4513 							libipw_rx_stats
4514 							    stats = {
4515 								.len = size - 1,
4516 							};
4517 
4518 							IPW_DEBUG_QOS
4519 							    ("QoS Associate "
4520 							     "size %d\n", size);
4521 							libipw_rx_mgt(priv->
4522 									 ieee,
4523 									 (struct
4524 									  libipw_hdr_4addr
4525 									  *)
4526 									 &notif->u.raw, &stats);
4527 						}
4528 					}
4529 #endif
4530 
4531 					schedule_work(&priv->link_up);
4532 
4533 					break;
4534 				}
4535 
4536 			case CMAS_AUTHENTICATED:{
4537 					if (priv->
4538 					    status & (STATUS_ASSOCIATED |
4539 						      STATUS_AUTH)) {
4540 						struct notif_authenticate *auth
4541 						    = &notif->u.auth;
4542 						IPW_DEBUG(IPW_DL_NOTIF |
4543 							  IPW_DL_STATE |
4544 							  IPW_DL_ASSOC,
4545 							  "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n",
4546 							  priv->essid_len,
4547 							  priv->essid,
4548 							  priv->bssid,
4549 							  le16_to_cpu(auth->status),
4550 							  ipw_get_status_code
4551 							  (le16_to_cpu
4552 							   (auth->status)));
4553 
4554 						priv->status &=
4555 						    ~(STATUS_ASSOCIATING |
4556 						      STATUS_AUTH |
4557 						      STATUS_ASSOCIATED);
4558 
4559 						schedule_work(&priv->link_down);
4560 						break;
4561 					}
4562 
4563 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4564 						  IPW_DL_ASSOC,
4565 						  "authenticated: '%*pE' %pM\n",
4566 						  priv->essid_len, priv->essid,
4567 						  priv->bssid);
4568 					break;
4569 				}
4570 
4571 			case CMAS_INIT:{
4572 					if (priv->status & STATUS_AUTH) {
4573 						struct
4574 						    libipw_assoc_response
4575 						*resp;
4576 						resp =
4577 						    (struct
4578 						     libipw_assoc_response
4579 						     *)&notif->u.raw;
4580 						IPW_DEBUG(IPW_DL_NOTIF |
4581 							  IPW_DL_STATE |
4582 							  IPW_DL_ASSOC,
4583 							  "association failed (0x%04X): %s\n",
4584 							  le16_to_cpu(resp->status),
4585 							  ipw_get_status_code
4586 							  (le16_to_cpu
4587 							   (resp->status)));
4588 					}
4589 
4590 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4591 						  IPW_DL_ASSOC,
4592 						  "disassociated: '%*pE' %pM\n",
4593 						  priv->essid_len, priv->essid,
4594 						  priv->bssid);
4595 
4596 					priv->status &=
4597 					    ~(STATUS_DISASSOCIATING |
4598 					      STATUS_ASSOCIATING |
4599 					      STATUS_ASSOCIATED | STATUS_AUTH);
4600 					if (priv->assoc_network
4601 					    && (priv->assoc_network->
4602 						capability &
4603 						WLAN_CAPABILITY_IBSS))
4604 						ipw_remove_current_network
4605 						    (priv);
4606 
4607 					schedule_work(&priv->link_down);
4608 
4609 					break;
4610 				}
4611 
4612 			case CMAS_RX_ASSOC_RESP:
4613 				break;
4614 
4615 			default:
4616 				IPW_ERROR("assoc: unknown (%d)\n",
4617 					  assoc->state);
4618 				break;
4619 			}
4620 
4621 			break;
4622 		}
4623 
4624 	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4625 			struct notif_authenticate *auth = &notif->u.auth;
4626 			switch (auth->state) {
4627 			case CMAS_AUTHENTICATED:
4628 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4629 					  "authenticated: '%*pE' %pM\n",
4630 					  priv->essid_len, priv->essid,
4631 					  priv->bssid);
4632 				priv->status |= STATUS_AUTH;
4633 				break;
4634 
4635 			case CMAS_INIT:
4636 				if (priv->status & STATUS_AUTH) {
4637 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4638 						  IPW_DL_ASSOC,
4639 						  "authentication failed (0x%04X): %s\n",
4640 						  le16_to_cpu(auth->status),
4641 						  ipw_get_status_code(le16_to_cpu
4642 								      (auth->
4643 								       status)));
4644 				}
4645 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4646 					  IPW_DL_ASSOC,
4647 					  "deauthenticated: '%*pE' %pM\n",
4648 					  priv->essid_len, priv->essid,
4649 					  priv->bssid);
4650 
4651 				priv->status &= ~(STATUS_ASSOCIATING |
4652 						  STATUS_AUTH |
4653 						  STATUS_ASSOCIATED);
4654 
4655 				schedule_work(&priv->link_down);
4656 				break;
4657 
4658 			case CMAS_TX_AUTH_SEQ_1:
4659 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4660 					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4661 				break;
4662 			case CMAS_RX_AUTH_SEQ_2:
4663 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4664 					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4665 				break;
4666 			case CMAS_AUTH_SEQ_1_PASS:
4667 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4668 					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4669 				break;
4670 			case CMAS_AUTH_SEQ_1_FAIL:
4671 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4672 					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4673 				break;
4674 			case CMAS_TX_AUTH_SEQ_3:
4675 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4676 					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4677 				break;
4678 			case CMAS_RX_AUTH_SEQ_4:
4679 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4680 					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4681 				break;
4682 			case CMAS_AUTH_SEQ_2_PASS:
4683 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4684 					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4685 				break;
4686 			case CMAS_AUTH_SEQ_2_FAIL:
4687 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4688 					  IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4689 				break;
4690 			case CMAS_TX_ASSOC:
4691 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4692 					  IPW_DL_ASSOC, "TX_ASSOC\n");
4693 				break;
4694 			case CMAS_RX_ASSOC_RESP:
4695 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4696 					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4697 
4698 				break;
4699 			case CMAS_ASSOCIATED:
4700 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4701 					  IPW_DL_ASSOC, "ASSOCIATED\n");
4702 				break;
4703 			default:
4704 				IPW_DEBUG_NOTIF("auth: failure - %d\n",
4705 						auth->state);
4706 				break;
4707 			}
4708 			break;
4709 		}
4710 
4711 	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4712 			struct notif_channel_result *x =
4713 			    &notif->u.channel_result;
4714 
4715 			if (size == sizeof(*x)) {
4716 				IPW_DEBUG_SCAN("Scan result for channel %d\n",
4717 					       x->channel_num);
4718 			} else {
4719 				IPW_DEBUG_SCAN("Scan result of wrong size %d "
4720 					       "(should be %zd)\n",
4721 					       size, sizeof(*x));
4722 			}
4723 			break;
4724 		}
4725 
4726 	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4727 			struct notif_scan_complete *x = &notif->u.scan_complete;
4728 			if (size == sizeof(*x)) {
4729 				IPW_DEBUG_SCAN
4730 				    ("Scan completed: type %d, %d channels, "
4731 				     "%d status\n", x->scan_type,
4732 				     x->num_channels, x->status);
4733 			} else {
4734 				IPW_ERROR("Scan completed of wrong size %d "
4735 					  "(should be %zd)\n",
4736 					  size, sizeof(*x));
4737 			}
4738 
4739 			priv->status &=
4740 			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4741 
4742 			wake_up_interruptible(&priv->wait_state);
4743 			cancel_delayed_work(&priv->scan_check);
4744 
4745 			if (priv->status & STATUS_EXIT_PENDING)
4746 				break;
4747 
4748 			priv->ieee->scans++;
4749 
4750 #ifdef CONFIG_IPW2200_MONITOR
4751 			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4752 				priv->status |= STATUS_SCAN_FORCED;
4753 				schedule_delayed_work(&priv->request_scan, 0);
4754 				break;
4755 			}
4756 			priv->status &= ~STATUS_SCAN_FORCED;
4757 #endif				/* CONFIG_IPW2200_MONITOR */
4758 
4759 			/* Do queued direct scans first */
4760 			if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4761 				schedule_delayed_work(&priv->request_direct_scan, 0);
4762 
4763 			if (!(priv->status & (STATUS_ASSOCIATED |
4764 					      STATUS_ASSOCIATING |
4765 					      STATUS_ROAMING |
4766 					      STATUS_DISASSOCIATING)))
4767 				schedule_work(&priv->associate);
4768 			else if (priv->status & STATUS_ROAMING) {
4769 				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4770 					/* If a scan completed and we are in roam mode, then
4771 					 * the scan that completed was the one requested as a
4772 					 * result of entering roam... so, schedule the
4773 					 * roam work */
4774 					schedule_work(&priv->roam);
4775 				else
4776 					/* Don't schedule if we aborted the scan */
4777 					priv->status &= ~STATUS_ROAMING;
4778 			} else if (priv->status & STATUS_SCAN_PENDING)
4779 				schedule_delayed_work(&priv->request_scan, 0);
4780 			else if (priv->config & CFG_BACKGROUND_SCAN
4781 				 && priv->status & STATUS_ASSOCIATED)
4782 				schedule_delayed_work(&priv->request_scan,
4783 						      round_jiffies_relative(HZ));
4784 
4785 			/* Send an empty event to user space.
4786 			 * We don't send the received data on the event because
4787 			 * it would require us to do complex transcoding, and
4788 			 * we want to minimise the work done in the irq handler
4789 			 * Use a request to extract the data.
4790 			 * Also, we generate this even for any scan, regardless
4791 			 * on how the scan was initiated. User space can just
4792 			 * sync on periodic scan to get fresh data...
4793 			 * Jean II */
4794 			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4795 				handle_scan_event(priv);
4796 			break;
4797 		}
4798 
4799 	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4800 			struct notif_frag_length *x = &notif->u.frag_len;
4801 
4802 			if (size == sizeof(*x))
4803 				IPW_ERROR("Frag length: %d\n",
4804 					  le16_to_cpu(x->frag_length));
4805 			else
4806 				IPW_ERROR("Frag length of wrong size %d "
4807 					  "(should be %zd)\n",
4808 					  size, sizeof(*x));
4809 			break;
4810 		}
4811 
4812 	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4813 			struct notif_link_deterioration *x =
4814 			    &notif->u.link_deterioration;
4815 
4816 			if (size == sizeof(*x)) {
4817 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4818 					"link deterioration: type %d, cnt %d\n",
4819 					x->silence_notification_type,
4820 					x->silence_count);
4821 				memcpy(&priv->last_link_deterioration, x,
4822 				       sizeof(*x));
4823 			} else {
4824 				IPW_ERROR("Link Deterioration of wrong size %d "
4825 					  "(should be %zd)\n",
4826 					  size, sizeof(*x));
4827 			}
4828 			break;
4829 		}
4830 
4831 	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4832 			IPW_ERROR("Dino config\n");
4833 			if (priv->hcmd
4834 			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4835 				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4836 
4837 			break;
4838 		}
4839 
4840 	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4841 			struct notif_beacon_state *x = &notif->u.beacon_state;
4842 			if (size != sizeof(*x)) {
4843 				IPW_ERROR
4844 				    ("Beacon state of wrong size %d (should "
4845 				     "be %zd)\n", size, sizeof(*x));
4846 				break;
4847 			}
4848 
4849 			if (le32_to_cpu(x->state) ==
4850 			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4851 				ipw_handle_missed_beacon(priv,
4852 							 le32_to_cpu(x->
4853 								     number));
4854 
4855 			break;
4856 		}
4857 
4858 	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4859 			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4860 			if (size == sizeof(*x)) {
4861 				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4862 					  "0x%02x station %d\n",
4863 					  x->key_state, x->security_type,
4864 					  x->station_index);
4865 				break;
4866 			}
4867 
4868 			IPW_ERROR
4869 			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
4870 			     size, sizeof(*x));
4871 			break;
4872 		}
4873 
4874 	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4875 			struct notif_calibration *x = &notif->u.calibration;
4876 
4877 			if (size == sizeof(*x)) {
4878 				memcpy(&priv->calib, x, sizeof(*x));
4879 				IPW_DEBUG_INFO("TODO: Calibration\n");
4880 				break;
4881 			}
4882 
4883 			IPW_ERROR
4884 			    ("Calibration of wrong size %d (should be %zd)\n",
4885 			     size, sizeof(*x));
4886 			break;
4887 		}
4888 
4889 	case HOST_NOTIFICATION_NOISE_STATS:{
4890 			if (size == sizeof(u32)) {
4891 				priv->exp_avg_noise =
4892 				    exponential_average(priv->exp_avg_noise,
4893 				    (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4894 				    DEPTH_NOISE);
4895 				break;
4896 			}
4897 
4898 			IPW_ERROR
4899 			    ("Noise stat is wrong size %d (should be %zd)\n",
4900 			     size, sizeof(u32));
4901 			break;
4902 		}
4903 
4904 	default:
4905 		IPW_DEBUG_NOTIF("Unknown notification: "
4906 				"subtype=%d,flags=0x%2x,size=%d\n",
4907 				notif->subtype, notif->flags, size);
4908 	}
4909 }
4910 
4911 /*
4912  * Destroys all DMA structures and initialise them again
4913  *
4914  * @param priv
4915  * @return error code
4916  */
4917 static int ipw_queue_reset(struct ipw_priv *priv)
4918 {
4919 	int rc = 0;
4920 	/* @todo customize queue sizes */
4921 	int nTx = 64, nTxCmd = 8;
4922 	ipw_tx_queue_free(priv);
4923 	/* Tx CMD queue */
4924 	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4925 			       IPW_TX_CMD_QUEUE_READ_INDEX,
4926 			       IPW_TX_CMD_QUEUE_WRITE_INDEX,
4927 			       IPW_TX_CMD_QUEUE_BD_BASE,
4928 			       IPW_TX_CMD_QUEUE_BD_SIZE);
4929 	if (rc) {
4930 		IPW_ERROR("Tx Cmd queue init failed\n");
4931 		goto error;
4932 	}
4933 	/* Tx queue(s) */
4934 	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4935 			       IPW_TX_QUEUE_0_READ_INDEX,
4936 			       IPW_TX_QUEUE_0_WRITE_INDEX,
4937 			       IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4938 	if (rc) {
4939 		IPW_ERROR("Tx 0 queue init failed\n");
4940 		goto error;
4941 	}
4942 	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4943 			       IPW_TX_QUEUE_1_READ_INDEX,
4944 			       IPW_TX_QUEUE_1_WRITE_INDEX,
4945 			       IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4946 	if (rc) {
4947 		IPW_ERROR("Tx 1 queue init failed\n");
4948 		goto error;
4949 	}
4950 	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4951 			       IPW_TX_QUEUE_2_READ_INDEX,
4952 			       IPW_TX_QUEUE_2_WRITE_INDEX,
4953 			       IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4954 	if (rc) {
4955 		IPW_ERROR("Tx 2 queue init failed\n");
4956 		goto error;
4957 	}
4958 	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4959 			       IPW_TX_QUEUE_3_READ_INDEX,
4960 			       IPW_TX_QUEUE_3_WRITE_INDEX,
4961 			       IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4962 	if (rc) {
4963 		IPW_ERROR("Tx 3 queue init failed\n");
4964 		goto error;
4965 	}
4966 	/* statistics */
4967 	priv->rx_bufs_min = 0;
4968 	priv->rx_pend_max = 0;
4969 	return rc;
4970 
4971       error:
4972 	ipw_tx_queue_free(priv);
4973 	return rc;
4974 }
4975 
4976 /*
4977  * Reclaim Tx queue entries no more used by NIC.
4978  *
4979  * When FW advances 'R' index, all entries between old and
4980  * new 'R' index need to be reclaimed. As result, some free space
4981  * forms. If there is enough free space (> low mark), wake Tx queue.
4982  *
4983  * @note Need to protect against garbage in 'R' index
4984  * @param priv
4985  * @param txq
4986  * @param qindex
4987  * @return Number of used entries remains in the queue
4988  */
4989 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4990 				struct clx2_tx_queue *txq, int qindex)
4991 {
4992 	u32 hw_tail;
4993 	int used;
4994 	struct clx2_queue *q = &txq->q;
4995 
4996 	hw_tail = ipw_read32(priv, q->reg_r);
4997 	if (hw_tail >= q->n_bd) {
4998 		IPW_ERROR
4999 		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5000 		     hw_tail, q->n_bd);
5001 		goto done;
5002 	}
5003 	for (; q->last_used != hw_tail;
5004 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5005 		ipw_queue_tx_free_tfd(priv, txq);
5006 		priv->tx_packets++;
5007 	}
5008       done:
5009 	if ((ipw_tx_queue_space(q) > q->low_mark) &&
5010 	    (qindex >= 0))
5011 		netif_wake_queue(priv->net_dev);
5012 	used = q->first_empty - q->last_used;
5013 	if (used < 0)
5014 		used += q->n_bd;
5015 
5016 	return used;
5017 }
5018 
/*
 * Queue a host command on the command Tx queue.
 *
 * Builds a TFD carrying @hcmd with @len bytes of payload copied from
 * @buf, advances the write index, and pokes the firmware.
 *
 * Returns 0 on success or -EBUSY when the command queue is full.
 *
 * NOTE(review): @len is copied into the TFD payload without a bounds
 * check; callers are trusted to stay within the TFD payload area --
 * verify against the tfd_command layout in the header.
 */
static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, const void *buf,
			     int len, int sync)
{
	struct clx2_tx_queue *txq = &priv->txq_cmd;
	struct clx2_queue *q = &txq->q;
	struct tfd_frame *tfd;

	/* Async callers need an extra free slot headroom (presumably so a
	 * later synchronous command can't be starved) -- TODO confirm. */
	if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
		IPW_ERROR("No space for Tx\n");
		return -EBUSY;
	}

	/* Command TFDs carry no skb, so clear the txb slot. */
	tfd = &txq->bd[q->first_empty];
	txq->txb[q->first_empty] = NULL;

	memset(tfd, 0, sizeof(*tfd));
	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
	priv->hcmd_seq++;
	tfd->u.cmd.index = hcmd;
	tfd->u.cmd.length = len;
	memcpy(tfd->u.cmd.payload, buf, len);
	/* Publish the new write index to the firmware. */
	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
	ipw_write32(priv, q->reg_w, q->first_empty);
	/* Read-back, presumably to flush the posted write -- confirm. */
	_ipw_read32(priv, 0x90);

	return 0;
}
5047 
5048 /*
5049  * Rx theory of operation
5050  *
5051  * The host allocates 32 DMA target addresses and passes the host address
5052  * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5053  * 0 to 31
5054  *
5055  * Rx Queue Indexes
5056  * The host/firmware share two index registers for managing the Rx buffers.
5057  *
5058  * The READ index maps to the first position that the firmware may be writing
5059  * to -- the driver can read up to (but not including) this position and get
5060  * good data.
5061  * The READ index is managed by the firmware once the card is enabled.
5062  *
5063  * The WRITE index maps to the last position the driver has read from -- the
5064  * position preceding WRITE is the last slot the firmware can place a packet.
5065  *
5066  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5067  * WRITE = READ.
5068  *
5069  * During initialization the host sets up the READ queue position to the first
5070  * INDEX position, and WRITE to the last (READ - 1 wrapped)
5071  *
5072  * When the firmware places a packet in a buffer it will advance the READ index
5073  * and fire the RX interrupt.  The driver can then query the READ index and
5074  * process as many packets as possible, moving the WRITE index forward as it
5075  * resets the Rx queue buffers with new memory.
5076  *
5077  * The management in the driver is as follows:
5078  * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
5079  *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the ipw->rxq->rx_free.
5081  * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5082  *   ipw->rxq is replenished and the READ INDEX is updated (updating the
5083  *   'processed' and 'read' driver indexes as well)
5084  * + A received packet is processed and handed to the kernel network stack,
5085  *   detached from the ipw->rxq.  The driver 'processed' index is updated.
5086  * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5087  *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5088  *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
5089  *   were enough free buffers and RX_STALLED is set it is cleared.
5090  *
5091  *
5092  * Driver sequence:
5093  *
5094  * ipw_rx_queue_alloc()       Allocates rx_free
5095  * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
5096  *                            ipw_rx_queue_restock
5097  * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
5098  *                            queue, updates firmware pointers, and updates
5099  *                            the WRITE index.  If insufficient rx_free buffers
5100  *                            are available, schedules ipw_rx_queue_replenish
5101  *
5102  * -- enable interrupts --
5103  * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
5104  *                            READ INDEX, detaching the SKB from the pool.
5105  *                            Moves the packet buffer from queue to rx_used.
5106  *                            Calls ipw_rx_queue_restock to refill any empty
5107  *                            slots.
5108  * ...
5109  *
5110  */
5111 
5112 /*
5113  * If there are slots in the RX queue that  need to be restocked,
5114  * and we have free pre-allocated buffers, fill the ranks as much
5115  * as we can pulling from rx_free.
5116  *
5117  * This moves the 'write' index forward to catch up with 'processed', and
5118  * also updates the memory address in the firmware to reference the new
5119  * target buffer.
5120  */
5121 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5122 {
5123 	struct ipw_rx_queue *rxq = priv->rxq;
5124 	struct list_head *element;
5125 	struct ipw_rx_mem_buffer *rxb;
5126 	unsigned long flags;
5127 	int write;
5128 
5129 	spin_lock_irqsave(&rxq->lock, flags);
5130 	write = rxq->write;
5131 	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5132 		element = rxq->rx_free.next;
5133 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5134 		list_del(element);
5135 
5136 		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5137 			    rxb->dma_addr);
5138 		rxq->queue[rxq->write] = rxb;
5139 		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5140 		rxq->free_count--;
5141 	}
5142 	spin_unlock_irqrestore(&rxq->lock, flags);
5143 
5144 	/* If the pre-allocated buffer pool is dropping low, schedule to
5145 	 * refill it */
5146 	if (rxq->free_count <= RX_LOW_WATERMARK)
5147 		schedule_work(&priv->rx_replenish);
5148 
5149 	/* If we've added more space for the firmware to place data, tell it */
5150 	if (write != rxq->write)
5151 		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5152 }
5153 
5154 /*
5155  * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5156  * Also restock the Rx queue via ipw_rx_queue_restock.
5157  *
5158  * This is called as a scheduled work item (except for during initialization)
5159  */
5160 static void ipw_rx_queue_replenish(void *data)
5161 {
5162 	struct ipw_priv *priv = data;
5163 	struct ipw_rx_queue *rxq = priv->rxq;
5164 	struct list_head *element;
5165 	struct ipw_rx_mem_buffer *rxb;
5166 	unsigned long flags;
5167 
5168 	spin_lock_irqsave(&rxq->lock, flags);
5169 	while (!list_empty(&rxq->rx_used)) {
5170 		element = rxq->rx_used.next;
5171 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5172 		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5173 		if (!rxb->skb) {
5174 			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5175 			       priv->net_dev->name);
5176 			/* We don't reschedule replenish work here -- we will
5177 			 * call the restock method and if it still needs
5178 			 * more buffers it will schedule replenish */
5179 			break;
5180 		}
5181 		list_del(element);
5182 
5183 		rxb->dma_addr =
5184 		    dma_map_single(&priv->pci_dev->dev, rxb->skb->data,
5185 				   IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
5186 
5187 		list_add_tail(&rxb->list, &rxq->rx_free);
5188 		rxq->free_count++;
5189 	}
5190 	spin_unlock_irqrestore(&rxq->lock, flags);
5191 
5192 	ipw_rx_queue_restock(priv);
5193 }
5194 
/* Work wrapper: run the Rx replenish pass with priv->mutex held. */
static void ipw_bg_rx_queue_replenish(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, rx_replenish);
	mutex_lock(&priv->mutex);
	ipw_rx_queue_replenish(priv);
	mutex_unlock(&priv->mutex);
}
5203 
5204 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5205  * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5206  * This free routine walks the list of POOL entries and if SKB is set to
5207  * non NULL it is unmapped and freed
5208  */
5209 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5210 {
5211 	int i;
5212 
5213 	if (!rxq)
5214 		return;
5215 
5216 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5217 		if (rxq->pool[i].skb != NULL) {
5218 			dma_unmap_single(&priv->pci_dev->dev,
5219 					 rxq->pool[i].dma_addr,
5220 					 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
5221 			dev_kfree_skb(rxq->pool[i].skb);
5222 		}
5223 	}
5224 
5225 	kfree(rxq);
5226 }
5227 
5228 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5229 {
5230 	struct ipw_rx_queue *rxq;
5231 	int i;
5232 
5233 	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5234 	if (unlikely(!rxq)) {
5235 		IPW_ERROR("memory allocation failed\n");
5236 		return NULL;
5237 	}
5238 	spin_lock_init(&rxq->lock);
5239 	INIT_LIST_HEAD(&rxq->rx_free);
5240 	INIT_LIST_HEAD(&rxq->rx_used);
5241 
5242 	/* Fill the rx_used queue with _all_ of the Rx buffers */
5243 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5244 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5245 
5246 	/* Set us so that we have processed and used all buffers, but have
5247 	 * not restocked the Rx queue with fresh buffers */
5248 	rxq->read = rxq->write = 0;
5249 	rxq->free_count = 0;
5250 
5251 	return rxq;
5252 }
5253 
5254 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5255 {
5256 	rate &= ~LIBIPW_BASIC_RATE_MASK;
5257 	if (ieee_mode == IEEE_A) {
5258 		switch (rate) {
5259 		case LIBIPW_OFDM_RATE_6MB:
5260 			return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5261 			    1 : 0;
5262 		case LIBIPW_OFDM_RATE_9MB:
5263 			return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5264 			    1 : 0;
5265 		case LIBIPW_OFDM_RATE_12MB:
5266 			return priv->
5267 			    rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5268 		case LIBIPW_OFDM_RATE_18MB:
5269 			return priv->
5270 			    rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5271 		case LIBIPW_OFDM_RATE_24MB:
5272 			return priv->
5273 			    rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5274 		case LIBIPW_OFDM_RATE_36MB:
5275 			return priv->
5276 			    rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5277 		case LIBIPW_OFDM_RATE_48MB:
5278 			return priv->
5279 			    rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5280 		case LIBIPW_OFDM_RATE_54MB:
5281 			return priv->
5282 			    rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5283 		default:
5284 			return 0;
5285 		}
5286 	}
5287 
5288 	/* B and G mixed */
5289 	switch (rate) {
5290 	case LIBIPW_CCK_RATE_1MB:
5291 		return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5292 	case LIBIPW_CCK_RATE_2MB:
5293 		return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5294 	case LIBIPW_CCK_RATE_5MB:
5295 		return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5296 	case LIBIPW_CCK_RATE_11MB:
5297 		return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5298 	}
5299 
5300 	/* If we are limited to B modulations, bail at this point */
5301 	if (ieee_mode == IEEE_B)
5302 		return 0;
5303 
5304 	/* G */
5305 	switch (rate) {
5306 	case LIBIPW_OFDM_RATE_6MB:
5307 		return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5308 	case LIBIPW_OFDM_RATE_9MB:
5309 		return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5310 	case LIBIPW_OFDM_RATE_12MB:
5311 		return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5312 	case LIBIPW_OFDM_RATE_18MB:
5313 		return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5314 	case LIBIPW_OFDM_RATE_24MB:
5315 		return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5316 	case LIBIPW_OFDM_RATE_36MB:
5317 		return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5318 	case LIBIPW_OFDM_RATE_48MB:
5319 		return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5320 	case LIBIPW_OFDM_RATE_54MB:
5321 		return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5322 	}
5323 
5324 	return 0;
5325 }
5326 
5327 static int ipw_compatible_rates(struct ipw_priv *priv,
5328 				const struct libipw_network *network,
5329 				struct ipw_supported_rates *rates)
5330 {
5331 	int num_rates, i;
5332 
5333 	memset(rates, 0, sizeof(*rates));
5334 	num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5335 	rates->num_rates = 0;
5336 	for (i = 0; i < num_rates; i++) {
5337 		if (!ipw_is_rate_in_mask(priv, network->mode,
5338 					 network->rates[i])) {
5339 
5340 			if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5341 				IPW_DEBUG_SCAN("Adding masked mandatory "
5342 					       "rate %02X\n",
5343 					       network->rates[i]);
5344 				rates->supported_rates[rates->num_rates++] =
5345 				    network->rates[i];
5346 				continue;
5347 			}
5348 
5349 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5350 				       network->rates[i], priv->rates_mask);
5351 			continue;
5352 		}
5353 
5354 		rates->supported_rates[rates->num_rates++] = network->rates[i];
5355 	}
5356 
5357 	num_rates = min(network->rates_ex_len,
5358 			(u8) (IPW_MAX_RATES - num_rates));
5359 	for (i = 0; i < num_rates; i++) {
5360 		if (!ipw_is_rate_in_mask(priv, network->mode,
5361 					 network->rates_ex[i])) {
5362 			if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5363 				IPW_DEBUG_SCAN("Adding masked mandatory "
5364 					       "rate %02X\n",
5365 					       network->rates_ex[i]);
5366 				rates->supported_rates[rates->num_rates++] =
5367 				    network->rates[i];
5368 				continue;
5369 			}
5370 
5371 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5372 				       network->rates_ex[i], priv->rates_mask);
5373 			continue;
5374 		}
5375 
5376 		rates->supported_rates[rates->num_rates++] =
5377 		    network->rates_ex[i];
5378 	}
5379 
5380 	return 1;
5381 }
5382 
5383 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5384 				  const struct ipw_supported_rates *src)
5385 {
5386 	u8 i;
5387 	for (i = 0; i < src->num_rates; i++)
5388 		dest->supported_rates[i] = src->supported_rates[i];
5389 	dest->num_rates = src->num_rates;
5390 }
5391 
5392 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5393  * mask should ever be used -- right now all callers to add the scan rates are
5394  * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5395 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5396 				   u8 modulation, u32 rate_mask)
5397 {
5398 	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5399 	    LIBIPW_BASIC_RATE_MASK : 0;
5400 
5401 	if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5402 		rates->supported_rates[rates->num_rates++] =
5403 		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5404 
5405 	if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5406 		rates->supported_rates[rates->num_rates++] =
5407 		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5408 
5409 	if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5410 		rates->supported_rates[rates->num_rates++] = basic_mask |
5411 		    LIBIPW_CCK_RATE_5MB;
5412 
5413 	if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5414 		rates->supported_rates[rates->num_rates++] = basic_mask |
5415 		    LIBIPW_CCK_RATE_11MB;
5416 }
5417 
5418 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5419 				    u8 modulation, u32 rate_mask)
5420 {
5421 	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5422 	    LIBIPW_BASIC_RATE_MASK : 0;
5423 
5424 	if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5425 		rates->supported_rates[rates->num_rates++] = basic_mask |
5426 		    LIBIPW_OFDM_RATE_6MB;
5427 
5428 	if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5429 		rates->supported_rates[rates->num_rates++] =
5430 		    LIBIPW_OFDM_RATE_9MB;
5431 
5432 	if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5433 		rates->supported_rates[rates->num_rates++] = basic_mask |
5434 		    LIBIPW_OFDM_RATE_12MB;
5435 
5436 	if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5437 		rates->supported_rates[rates->num_rates++] =
5438 		    LIBIPW_OFDM_RATE_18MB;
5439 
5440 	if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5441 		rates->supported_rates[rates->num_rates++] = basic_mask |
5442 		    LIBIPW_OFDM_RATE_24MB;
5443 
5444 	if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5445 		rates->supported_rates[rates->num_rates++] =
5446 		    LIBIPW_OFDM_RATE_36MB;
5447 
5448 	if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5449 		rates->supported_rates[rates->num_rates++] =
5450 		    LIBIPW_OFDM_RATE_48MB;
5451 
5452 	if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5453 		rates->supported_rates[rates->num_rates++] =
5454 		    LIBIPW_OFDM_RATE_54MB;
5455 }
5456 
/* Result holder for the network-selection routines below: the best
 * candidate found so far and the rate set negotiated with it. */
struct ipw_network_match {
	struct libipw_network *network;
	struct ipw_supported_rates rates;
};
5461 
/*
 * Evaluate @network as a candidate IBSS to merge into, applying the same
 * style of filters used for association: capability, ESSID, timestamps,
 * scan age, channel, privacy, BSSID and rate compatibility.
 *
 * @match holds the best candidate seen so far and is updated in place when
 * @network passes every check.  @roaming selects ESSID comparison against
 * the current network rather than against the configured ESSID.
 *
 * Returns 1 if @network became the new match, 0 if it was rejected.
 */
static int ipw_find_adhoc_network(struct ipw_priv *priv,
				  struct ipw_network_match *match,
				  struct libipw_network *network,
				  int roaming)
{
	struct ipw_supported_rates rates;

	/* Verify that this network's capability is compatible with the
	 * current mode (AdHoc or Infrastructure) */
	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
				network->ssid_len, network->ssid,
				network->bssid);
		return 0;
	}

	if (unlikely(roaming)) {
		/* If we are roaming, check that this is a valid network to
		 * try and roam to */
		if ((network->ssid_len != match->network->ssid_len) ||
		    memcmp(network->ssid, match->network->ssid,
			   network->ssid_len)) {
			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
					network->ssid_len, network->ssid,
					network->bssid);
			return 0;
		}
	} else {
		/* If an ESSID has been configured then compare the broadcast
		 * ESSID to ours */
		if ((priv->config & CFG_STATIC_ESSID) &&
		    ((network->ssid_len != priv->essid_len) ||
		     memcmp(network->ssid, priv->essid,
			    min(network->ssid_len, priv->essid_len)))) {
			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
					network->ssid_len, network->ssid,
					network->bssid, priv->essid_len,
					priv->essid);
			return 0;
		}
	}

	/* If the old network rate is better than this one, don't bother
	 * testing everything else. */

	/* NOTE(review): this compares beacon TSF timestamps, not rates as
	 * the comment above says -- presumably the IBSS merge rule keeps
	 * the network with the larger TSF; confirm intended semantics. */
	if (network->time_stamp[0] < match->network->time_stamp[0]) {
		IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
				match->network->ssid_len, match->network->ssid);
		return 0;
	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
		IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
				match->network->ssid_len, match->network->ssid);
		return 0;
	}

	/* Now go through and see if the requested network is valid... */
	if (priv->ieee->scan_age != 0 &&
	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n",
				network->ssid_len, network->ssid,
				network->bssid,
				jiffies_to_msecs(jiffies -
						 network->last_scanned));
		return 0;
	}

	if ((priv->config & CFG_STATIC_CHANNEL) &&
	    (network->channel != priv->channel)) {
		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
				network->ssid_len, network->ssid,
				network->bssid,
				network->channel, priv->channel);
		return 0;
	}

	/* Verify privacy compatibility */
	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
				network->ssid_len, network->ssid,
				network->bssid,
				priv->
				capability & CAP_PRIVACY_ON ? "on" : "off",
				network->
				capability & WLAN_CAPABILITY_PRIVACY ? "on" :
				"off");
		return 0;
	}

	/* Already on this BSSID -- nothing to merge to */
	if (ether_addr_equal(network->bssid, priv->bssid)) {
		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n",
				network->ssid_len, network->ssid,
				network->bssid, priv->bssid);
		return 0;
	}

	/* Filter out any incompatible freq / mode combinations */
	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
				network->ssid_len, network->ssid,
				network->bssid);
		return 0;
	}

	/* Ensure that the rates supported by the driver are compatible with
	 * this AP, including verification of basic rates (mandatory) */
	if (!ipw_compatible_rates(priv, network, &rates)) {
		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
				network->ssid_len, network->ssid,
				network->bssid);
		return 0;
	}

	if (rates.num_rates == 0) {
		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
				network->ssid_len, network->ssid,
				network->bssid);
		return 0;
	}

	/* TODO: Perform any further minimal comparative tests.  We do not
	 * want to put too much policy logic here; intelligent scan selection
	 * should occur within a generic IEEE 802.11 user space tool.  */

	/* Set up 'new' AP to this network */
	ipw_copy_rates(&match->rates, &rates);
	match->network = network;
	IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n",
			network->ssid_len, network->ssid, network->bssid);

	return 1;
}
5595 
5596 static void ipw_merge_adhoc_network(struct work_struct *work)
5597 {
5598 	struct ipw_priv *priv =
5599 		container_of(work, struct ipw_priv, merge_networks);
5600 	struct libipw_network *network = NULL;
5601 	struct ipw_network_match match = {
5602 		.network = priv->assoc_network
5603 	};
5604 
5605 	if ((priv->status & STATUS_ASSOCIATED) &&
5606 	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5607 		/* First pass through ROAM process -- look for a better
5608 		 * network */
5609 		unsigned long flags;
5610 
5611 		spin_lock_irqsave(&priv->ieee->lock, flags);
5612 		list_for_each_entry(network, &priv->ieee->network_list, list) {
5613 			if (network != priv->assoc_network)
5614 				ipw_find_adhoc_network(priv, &match, network,
5615 						       1);
5616 		}
5617 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
5618 
5619 		if (match.network == priv->assoc_network) {
5620 			IPW_DEBUG_MERGE("No better ADHOC in this network to "
5621 					"merge to.\n");
5622 			return;
5623 		}
5624 
5625 		mutex_lock(&priv->mutex);
5626 		if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5627 			IPW_DEBUG_MERGE("remove network %*pE\n",
5628 					priv->essid_len, priv->essid);
5629 			ipw_remove_current_network(priv);
5630 		}
5631 
5632 		ipw_disassociate(priv);
5633 		priv->assoc_network = match.network;
5634 		mutex_unlock(&priv->mutex);
5635 		return;
5636 	}
5637 }
5638 
/*
 * Evaluate @network as an association candidate, filtering on capability,
 * ESSID, signal strength, association-attempt storming, scan age, channel,
 * privacy, BSSID, mode/geo validity and rate compatibility.
 *
 * @match holds the best candidate seen so far and is updated in place when
 * @network passes every check and has a stronger signal.  @roaming selects
 * ESSID comparison against the current network rather than the configured
 * ESSID.
 *
 * Returns 1 if @network became the new match, 0 if it was rejected.
 */
static int ipw_best_network(struct ipw_priv *priv,
			    struct ipw_network_match *match,
			    struct libipw_network *network, int roaming)
{
	struct ipw_supported_rates rates;

	/* Verify that this network's capability is compatible with the
	 * current mode (AdHoc or Infrastructure) */
	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
				network->ssid_len, network->ssid,
				network->bssid);
		return 0;
	}

	if (unlikely(roaming)) {
		/* If we are roaming, check that this is a valid network to
		 * try and roam to */
		if ((network->ssid_len != match->network->ssid_len) ||
		    memcmp(network->ssid, match->network->ssid,
			   network->ssid_len)) {
			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
					network->ssid_len, network->ssid,
					network->bssid);
			return 0;
		}
	} else {
		/* If an ESSID has been configured then compare the broadcast
		 * ESSID to ours */
		if ((priv->config & CFG_STATIC_ESSID) &&
		    ((network->ssid_len != priv->essid_len) ||
		     memcmp(network->ssid, priv->essid,
			    min(network->ssid_len, priv->essid_len)))) {
			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
					network->ssid_len, network->ssid,
					network->bssid, priv->essid_len,
					priv->essid);
			return 0;
		}
	}

	/* If the old network rate is better than this one, don't bother
	 * testing everything else. */
	if (match->network && match->network->stats.rssi > network->stats.rssi) {
		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n",
				network->ssid_len, network->ssid,
				network->bssid, match->network->ssid_len,
				match->network->ssid, match->network->bssid);
		return 0;
	}

	/* If this network has already had an association attempt within the
	 * last 3 seconds, do not try and associate again... */
	if (network->last_associate &&
	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n",
				network->ssid_len, network->ssid,
				network->bssid,
				jiffies_to_msecs(jiffies -
						 network->last_associate));
		return 0;
	}

	/* Now go through and see if the requested network is valid... */
	if (priv->ieee->scan_age != 0 &&
	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n",
				network->ssid_len, network->ssid,
				network->bssid,
				jiffies_to_msecs(jiffies -
						 network->last_scanned));
		return 0;
	}

	if ((priv->config & CFG_STATIC_CHANNEL) &&
	    (network->channel != priv->channel)) {
		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
				network->ssid_len, network->ssid,
				network->bssid,
				network->channel, priv->channel);
		return 0;
	}

	/* Verify privacy compatibility */
	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
				network->ssid_len, network->ssid,
				network->bssid,
				priv->capability & CAP_PRIVACY_ON ? "on" :
				"off",
				network->capability &
				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
		return 0;
	}

	if ((priv->config & CFG_STATIC_BSSID) &&
	    !ether_addr_equal(network->bssid, priv->bssid)) {
		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n",
				network->ssid_len, network->ssid,
				network->bssid, priv->bssid);
		return 0;
	}

	/* Filter out any incompatible freq / mode combinations */
	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
				network->ssid_len, network->ssid,
				network->bssid);
		return 0;
	}

	/* Filter out invalid channel in current GEO */
	if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n",
				network->ssid_len, network->ssid,
				network->bssid);
		return 0;
	}

	/* Ensure that the rates supported by the driver are compatible with
	 * this AP, including verification of basic rates (mandatory) */
	if (!ipw_compatible_rates(priv, network, &rates)) {
		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
				network->ssid_len, network->ssid,
				network->bssid);
		return 0;
	}

	if (rates.num_rates == 0) {
		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
				network->ssid_len, network->ssid,
				network->bssid);
		return 0;
	}

	/* TODO: Perform any further minimal comparative tests.  We do not
	 * want to put too much policy logic here; intelligent scan selection
	 * should occur within a generic IEEE 802.11 user space tool.  */

	/* Set up 'new' AP to this network */
	ipw_copy_rates(&match->rates, &rates);
	match->network = network;

	IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n",
			network->ssid_len, network->ssid, network->bssid);

	return 1;
}
5791 
/*
 * Populate @network with the parameters of a brand-new IBSS to create:
 * pick a band/mode and (if necessary) a valid channel, generate a BSSID,
 * and fill in ESSID, capability, rates and default timing parameters
 * from the driver's current configuration.
 */
static void ipw_adhoc_create(struct ipw_priv *priv,
			     struct libipw_network *network)
{
	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
	int i;

	/*
	 * For the purposes of scanning, we can set our wireless mode
	 * to trigger scans across combinations of bands, but when it
	 * comes to creating a new ad-hoc network, we have to tell the FW
	 * exactly which band to use.
	 *
	 * We also have the possibility of an invalid channel for the
	 * chosen band.  Attempting to create a new ad-hoc network
	 * with an invalid channel for wireless mode will trigger a
	 * FW fatal error.
	 *
	 */
	switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
	case LIBIPW_52GHZ_BAND:
		network->mode = IEEE_A;
		i = libipw_channel_to_index(priv->ieee, priv->channel);
		/* libipw_is_valid_channel just validated the channel, so an
		 * index lookup failure would be a driver logic error */
		BUG_ON(i == -1);
		if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
			IPW_WARNING("Overriding invalid channel\n");
			priv->channel = geo->a[0].channel;
		}
		break;

	case LIBIPW_24GHZ_BAND:
		if (priv->ieee->mode & IEEE_G)
			network->mode = IEEE_G;
		else
			network->mode = IEEE_B;
		i = libipw_channel_to_index(priv->ieee, priv->channel);
		BUG_ON(i == -1);
		if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
			IPW_WARNING("Overriding invalid channel\n");
			priv->channel = geo->bg[0].channel;
		}
		break;

	default:
		/* Configured channel is in neither band: fall back to the
		 * first channel of the best mode the hardware supports */
		IPW_WARNING("Overriding invalid channel\n");
		if (priv->ieee->mode & IEEE_A) {
			network->mode = IEEE_A;
			priv->channel = geo->a[0].channel;
		} else if (priv->ieee->mode & IEEE_G) {
			network->mode = IEEE_G;
			priv->channel = geo->bg[0].channel;
		} else {
			network->mode = IEEE_B;
			priv->channel = geo->bg[0].channel;
		}
		break;
	}

	network->channel = priv->channel;
	priv->config |= CFG_ADHOC_PERSIST;
	ipw_create_bssid(priv, network->bssid);
	network->ssid_len = priv->essid_len;
	memcpy(network->ssid, priv->essid, priv->essid_len);
	memset(&network->stats, 0, sizeof(network->stats));
	network->capability = WLAN_CAPABILITY_IBSS;
	if (!(priv->config & CFG_PREAMBLE_LONG))
		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
	if (priv->capability & CAP_PRIVACY_ON)
		network->capability |= WLAN_CAPABILITY_PRIVACY;
	/* Split our configured rates between the primary supported-rates
	 * element and the extended-rates element */
	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
	memcpy(network->rates_ex,
	       &priv->rates.supported_rates[network->rates_len],
	       network->rates_ex_len);
	network->last_scanned = 0;
	network->flags = 0;
	network->last_associate = 0;
	network->time_stamp[0] = 0;
	network->time_stamp[1] = 0;
	network->beacon_interval = 100;	/* Default */
	network->listen_interval = 10;	/* Default */
	network->atim_window = 0;	/* Default */
	network->wpa_ie_len = 0;
	network->rsn_ie_len = 0;
}
5877 
5878 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5879 {
5880 	struct ipw_tgi_tx_key key;
5881 
5882 	if (!(priv->ieee->sec.flags & (1 << index)))
5883 		return;
5884 
5885 	key.key_id = index;
5886 	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5887 	key.security_type = type;
5888 	key.station_index = 0;	/* always 0 for BSS */
5889 	key.flags = 0;
5890 	/* 0 for new key; previous value of counter (after fatal error) */
5891 	key.tx_counter[0] = cpu_to_le32(0);
5892 	key.tx_counter[1] = cpu_to_le32(0);
5893 
5894 	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5895 }
5896 
5897 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5898 {
5899 	struct ipw_wep_key key;
5900 	int i;
5901 
5902 	key.cmd_id = DINO_CMD_WEP_KEY;
5903 	key.seq_num = 0;
5904 
5905 	/* Note: AES keys cannot be set for multiple times.
5906 	 * Only set it at the first time. */
5907 	for (i = 0; i < 4; i++) {
5908 		key.key_index = i | type;
5909 		if (!(priv->ieee->sec.flags & (1 << i))) {
5910 			key.key_size = 0;
5911 			continue;
5912 		}
5913 
5914 		key.key_size = priv->ieee->sec.key_sizes[i];
5915 		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5916 
5917 		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5918 	}
5919 }
5920 
5921 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5922 {
5923 	if (priv->ieee->host_encrypt)
5924 		return;
5925 
5926 	switch (level) {
5927 	case SEC_LEVEL_3:
5928 		priv->sys_config.disable_unicast_decryption = 0;
5929 		priv->ieee->host_decrypt = 0;
5930 		break;
5931 	case SEC_LEVEL_2:
5932 		priv->sys_config.disable_unicast_decryption = 1;
5933 		priv->ieee->host_decrypt = 1;
5934 		break;
5935 	case SEC_LEVEL_1:
5936 		priv->sys_config.disable_unicast_decryption = 0;
5937 		priv->ieee->host_decrypt = 0;
5938 		break;
5939 	case SEC_LEVEL_0:
5940 		priv->sys_config.disable_unicast_decryption = 1;
5941 		break;
5942 	default:
5943 		break;
5944 	}
5945 }
5946 
5947 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5948 {
5949 	if (priv->ieee->host_encrypt)
5950 		return;
5951 
5952 	switch (level) {
5953 	case SEC_LEVEL_3:
5954 		priv->sys_config.disable_multicast_decryption = 0;
5955 		break;
5956 	case SEC_LEVEL_2:
5957 		priv->sys_config.disable_multicast_decryption = 1;
5958 		break;
5959 	case SEC_LEVEL_1:
5960 		priv->sys_config.disable_multicast_decryption = 0;
5961 		break;
5962 	case SEC_LEVEL_0:
5963 		priv->sys_config.disable_multicast_decryption = 1;
5964 		break;
5965 	default:
5966 		break;
5967 	}
5968 }
5969 
/*
 * Program the firmware with the currently-negotiated keys according to
 * the security level, enabling hardware crypto where the device can do
 * the work (CCMP fully, TKIP TX key only, WEP fully).
 */
static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
{
	switch (priv->ieee->sec.level) {
	case SEC_LEVEL_3:
		/* CCMP: load the active TX key into the firmware */
		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
			ipw_send_tgi_tx_key(priv,
					    DCT_FLAG_EXT_SECURITY_CCM,
					    priv->ieee->sec.active_key);

		/* Group keys go to the firmware only when the host is not
		 * doing multicast decryption itself */
		if (!priv->ieee->host_mc_decrypt)
			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
		break;
	case SEC_LEVEL_2:
		/* TKIP: only the TX key is offloaded; no group keys are
		 * sent here (MIC handling stays on the host) */
		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
			ipw_send_tgi_tx_key(priv,
					    DCT_FLAG_EXT_SECURITY_TKIP,
					    priv->ieee->sec.active_key);
		break;
	case SEC_LEVEL_1:
		/* WEP: load all keys and (re)enable hardware decryption */
		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
		break;
	case SEC_LEVEL_0:
	default:
		/* no security: nothing to program */
		break;
	}
}
5998 
5999 static void ipw_adhoc_check(void *data)
6000 {
6001 	struct ipw_priv *priv = data;
6002 
6003 	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6004 	    !(priv->config & CFG_ADHOC_PERSIST)) {
6005 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6006 			  IPW_DL_STATE | IPW_DL_ASSOC,
6007 			  "Missed beacon: %d - disassociate\n",
6008 			  priv->missed_adhoc_beacons);
6009 		ipw_remove_current_network(priv);
6010 		ipw_disassociate(priv);
6011 		return;
6012 	}
6013 
6014 	schedule_delayed_work(&priv->adhoc_check,
6015 			      le16_to_cpu(priv->assoc_request.beacon_interval));
6016 }
6017 
6018 static void ipw_bg_adhoc_check(struct work_struct *work)
6019 {
6020 	struct ipw_priv *priv =
6021 		container_of(work, struct ipw_priv, adhoc_check.work);
6022 	mutex_lock(&priv->mutex);
6023 	ipw_adhoc_check(priv);
6024 	mutex_unlock(&priv->mutex);
6025 }
6026 
6027 static void ipw_debug_config(struct ipw_priv *priv)
6028 {
6029 	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6030 		       "[CFG 0x%08X]\n", priv->config);
6031 	if (priv->config & CFG_STATIC_CHANNEL)
6032 		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6033 	else
6034 		IPW_DEBUG_INFO("Channel unlocked.\n");
6035 	if (priv->config & CFG_STATIC_ESSID)
6036 		IPW_DEBUG_INFO("ESSID locked to '%*pE'\n",
6037 			       priv->essid_len, priv->essid);
6038 	else
6039 		IPW_DEBUG_INFO("ESSID unlocked.\n");
6040 	if (priv->config & CFG_STATIC_BSSID)
6041 		IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6042 	else
6043 		IPW_DEBUG_INFO("BSSID unlocked.\n");
6044 	if (priv->capability & CAP_PRIVACY_ON)
6045 		IPW_DEBUG_INFO("PRIVACY on\n");
6046 	else
6047 		IPW_DEBUG_INFO("PRIVACY off\n");
6048 	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6049 }
6050 
6051 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6052 {
6053 	/* TODO: Verify that this works... */
6054 	struct ipw_fixed_rate fr;
6055 	u32 reg;
6056 	u16 mask = 0;
6057 	u16 new_tx_rates = priv->rates_mask;
6058 
6059 	/* Identify 'current FW band' and match it with the fixed
6060 	 * Tx rates */
6061 
6062 	switch (priv->ieee->freq_band) {
6063 	case LIBIPW_52GHZ_BAND:	/* A only */
6064 		/* IEEE_A */
6065 		if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6066 			/* Invalid fixed rate mask */
6067 			IPW_DEBUG_WX
6068 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6069 			new_tx_rates = 0;
6070 			break;
6071 		}
6072 
6073 		new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6074 		break;
6075 
6076 	default:		/* 2.4Ghz or Mixed */
6077 		/* IEEE_B */
6078 		if (mode == IEEE_B) {
6079 			if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6080 				/* Invalid fixed rate mask */
6081 				IPW_DEBUG_WX
6082 				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6083 				new_tx_rates = 0;
6084 			}
6085 			break;
6086 		}
6087 
6088 		/* IEEE_G */
6089 		if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6090 				    LIBIPW_OFDM_RATES_MASK)) {
6091 			/* Invalid fixed rate mask */
6092 			IPW_DEBUG_WX
6093 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6094 			new_tx_rates = 0;
6095 			break;
6096 		}
6097 
6098 		if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6099 			mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6100 			new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6101 		}
6102 
6103 		if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6104 			mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6105 			new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6106 		}
6107 
6108 		if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6109 			mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6110 			new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6111 		}
6112 
6113 		new_tx_rates |= mask;
6114 		break;
6115 	}
6116 
6117 	fr.tx_rates = cpu_to_le16(new_tx_rates);
6118 
6119 	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6120 	ipw_write_reg32(priv, reg, *(u32 *) & fr);
6121 }
6122 
6123 static void ipw_abort_scan(struct ipw_priv *priv)
6124 {
6125 	int err;
6126 
6127 	if (priv->status & STATUS_SCAN_ABORTING) {
6128 		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6129 		return;
6130 	}
6131 	priv->status |= STATUS_SCAN_ABORTING;
6132 
6133 	err = ipw_send_scan_abort(priv);
6134 	if (err)
6135 		IPW_DEBUG_HC("Request to abort scan failed.\n");
6136 }
6137 
/*
 * Fill scan->channels_list with every channel supported by the current
 * geography, tagging each one with the requested scan type (channels
 * flagged passive-only always get IPW_SCAN_PASSIVE_FULL_DWELL_SCAN).
 *
 * List format per band: one header byte encoding (band << 6) | count,
 * followed by 'count' channel numbers.  The index is therefore advanced
 * *before* each channel is stored, reserving slot 'start' for the header
 * that is written once the count is known.
 */
static void ipw_add_scan_channels(struct ipw_priv *priv,
				  struct ipw_scan_request_ext *scan,
				  int scan_type)
{
	int channel_index = 0;
	const struct libipw_geo *geo;
	int i;

	geo = libipw_get_geo(priv->ieee);

	if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
		int start = channel_index;	/* header slot for the A band */
		for (i = 0; i < geo->a_channels; i++) {
			/* don't scan the channel we are already on */
			if ((priv->status & STATUS_ASSOCIATED) &&
			    geo->a[i].channel == priv->channel)
				continue;
			channel_index++;
			scan->channels_list[channel_index] = geo->a[i].channel;
			ipw_set_scan_type(scan, channel_index,
					  geo->a[i].
					  flags & LIBIPW_CH_PASSIVE_ONLY ?
					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
					  scan_type);
		}

		if (start != channel_index) {
			/* at least one channel added - write the band header */
			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
			    (channel_index - start);
			channel_index++;
		}
	}

	if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
		int start = channel_index;	/* header slot for the B/G band */
		if (priv->config & CFG_SPEED_SCAN) {
			int index;
			/* per-channel flag: set once the channel has been
			 * added to this scan request */
			u8 channels[LIBIPW_24GHZ_CHANNELS] = {
				/* nop out the list */
				[0] = 0
			};

			u8 channel;
			while (channel_index < IPW_SCAN_CHANNELS - 1) {
				channel =
				    priv->speed_scan[priv->speed_scan_pos];
				if (channel == 0) {
					/* hit the list terminator - wrap to
					 * the start of the rotation list.
					 * NOTE(review): if speed_scan[0] is
					 * itself 0, channels[channel - 1]
					 * below underflows; assumes the list
					 * is non-empty - confirm at callers. */
					priv->speed_scan_pos = 0;
					channel = priv->speed_scan[0];
				}
				if ((priv->status & STATUS_ASSOCIATED) &&
				    channel == priv->channel) {
					priv->speed_scan_pos++;
					continue;
				}

				/* If this channel has already been
				 * added in scan, break from loop
				 * and this will be the first channel
				 * in the next scan.
				 */
				if (channels[channel - 1] != 0)
					break;

				channels[channel - 1] = 1;
				priv->speed_scan_pos++;
				channel_index++;
				scan->channels_list[channel_index] = channel;
				index =
				    libipw_channel_to_index(priv->ieee, channel);
				ipw_set_scan_type(scan, channel_index,
						  geo->bg[index].
						  flags &
						  LIBIPW_CH_PASSIVE_ONLY ?
						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
						  : scan_type);
			}
		} else {
			for (i = 0; i < geo->bg_channels; i++) {
				/* don't scan the channel we are already on */
				if ((priv->status & STATUS_ASSOCIATED) &&
				    geo->bg[i].channel == priv->channel)
					continue;
				channel_index++;
				scan->channels_list[channel_index] =
				    geo->bg[i].channel;
				ipw_set_scan_type(scan, channel_index,
						  geo->bg[i].
						  flags &
						  LIBIPW_CH_PASSIVE_ONLY ?
						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
						  : scan_type);
			}
		}

		if (start != channel_index) {
			/* at least one channel added - write the band header */
			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
			    (channel_index - start);
		}
	}
}
6237 
6238 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6239 {
6240 	/* staying on passive channels longer than the DTIM interval during a
6241 	 * scan, while associated, causes the firmware to cancel the scan
6242 	 * without notification. Hence, don't stay on passive channels longer
6243 	 * than the beacon interval.
6244 	 */
6245 	if (priv->status & STATUS_ASSOCIATED
6246 	    && priv->assoc_network->beacon_interval > 10)
6247 		return priv->assoc_network->beacon_interval - 10;
6248 	else
6249 		return 120;
6250 }
6251 
/*
 * Build and send a scan request to the firmware.
 * @type:   IW_SCAN_TYPE_PASSIVE or IW_SCAN_TYPE_ACTIVE
 * @direct: non-zero for a directed (SSID-specific) scan using
 *          priv->direct_scan_ssid
 *
 * If a scan cannot start right now (already scanning, abort pending,
 * RF-kill active) the request is recorded in priv->status as
 * STATUS_SCAN_PENDING / STATUS_DIRECT_SCAN_PENDING and retried later.
 * Returns 0 on success or when the request was queued/dropped, or a
 * negative error from the firmware command.
 */
static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
{
	struct ipw_scan_request_ext scan;
	int err = 0, scan_type;

	/* nothing to do while the device is not up or is going away */
	if (!(priv->status & STATUS_INIT) ||
	    (priv->status & STATUS_EXIT_PENDING))
		return 0;

	mutex_lock(&priv->mutex);

	if (direct && (priv->direct_scan_ssid_len == 0)) {
		IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
		goto done;
	}

	/* defer if a scan is already running */
	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_HC("Concurrent scan requested.  Queuing.\n");
		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
					STATUS_SCAN_PENDING;
		goto done;
	}

	/* defer if an abort is in flight (unless the scan is forced) */
	if (!(priv->status & STATUS_SCAN_FORCED) &&
	    priv->status & STATUS_SCAN_ABORTING) {
		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
					STATUS_SCAN_PENDING;
		goto done;
	}

	/* defer while the radio is killed */
	if (priv->status & STATUS_RF_KILL_MASK) {
		IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
					STATUS_SCAN_PENDING;
		goto done;
	}

	memset(&scan, 0, sizeof(scan));
	scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));

	if (type == IW_SCAN_TYPE_PASSIVE) {
		IPW_DEBUG_WX("use passive scanning\n");
		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
			cpu_to_le16(ipw_passive_dwell_time(priv));
		ipw_add_scan_channels(priv, &scan, scan_type);
		goto send_request;
	}

	/* Use active scan by default. */
	/* dwell times are in TU; speed scan hops faster with a longer
	 * broadcast dwell */
	if (priv->config & CFG_SPEED_SCAN)
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
			cpu_to_le16(30);
	else
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
			cpu_to_le16(20);

	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
		cpu_to_le16(20);

	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
		cpu_to_le16(ipw_passive_dwell_time(priv));
	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);

#ifdef CONFIG_IPW2200_MONITOR
	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		/* monitor mode: passively sit on a single channel; fall back
		 * to channel 9 in the 2.4GHz band if the configured channel
		 * is invalid */
		u8 channel;
		u8 band = 0;

		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
		case LIBIPW_52GHZ_BAND:
			band = (u8) (IPW_A_MODE << 6) | 1;
			channel = priv->channel;
			break;

		case LIBIPW_24GHZ_BAND:
			band = (u8) (IPW_B_MODE << 6) | 1;
			channel = priv->channel;
			break;

		default:
			band = (u8) (IPW_B_MODE << 6) | 1;
			channel = 9;
			break;
		}

		scan.channels_list[0] = band;
		scan.channels_list[1] = channel;
		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);

		/* NOTE:  The card will sit on this channel for this time
		 * period.  Scan aborts are timing sensitive and frequently
		 * result in firmware restarts.  As such, it is best to
		 * set a small dwell_time here and just keep re-issuing
		 * scans.  Otherwise fast channel hopping will not actually
		 * hop channels.
		 *
		 * TODO: Move SPEED SCAN support to all modes and bands */
		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
			cpu_to_le16(2000);
	} else {
#endif				/* CONFIG_IPW2200_MONITOR */
		/* Honor direct scans first, otherwise if we are roaming make
		 * this a direct scan for the current network.  Finally,
		 * ensure that every other scan is a fast channel hop scan */
		if (direct) {
			err = ipw_send_ssid(priv, priv->direct_scan_ssid,
			                    priv->direct_scan_ssid_len);
			if (err) {
				IPW_DEBUG_HC("Attempt to send SSID command  "
					     "failed\n");
				goto done;
			}

			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
		} else if ((priv->status & STATUS_ROAMING)
			   || (!(priv->status & STATUS_ASSOCIATED)
			       && (priv->config & CFG_STATIC_ESSID)
			       && (le32_to_cpu(scan.full_scan_index) % 2))) {
			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
			if (err) {
				IPW_DEBUG_HC("Attempt to send SSID command "
					     "failed.\n");
				goto done;
			}

			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
		} else
			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;

		ipw_add_scan_channels(priv, &scan, scan_type);
#ifdef CONFIG_IPW2200_MONITOR
	}
#endif

send_request:
	err = ipw_send_scan_request_ext(priv, &scan);
	if (err) {
		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
		goto done;
	}

	/* the scan is running; clear the corresponding pending flag and arm
	 * the watchdog that catches scans the firmware never completes */
	priv->status |= STATUS_SCANNING;
	if (direct) {
		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
		priv->direct_scan_ssid_len = 0;
	} else
		priv->status &= ~STATUS_SCAN_PENDING;

	schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
done:
	mutex_unlock(&priv->mutex);
	return err;
}
6408 
6409 static void ipw_request_passive_scan(struct work_struct *work)
6410 {
6411 	struct ipw_priv *priv =
6412 		container_of(work, struct ipw_priv, request_passive_scan.work);
6413 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6414 }
6415 
6416 static void ipw_request_scan(struct work_struct *work)
6417 {
6418 	struct ipw_priv *priv =
6419 		container_of(work, struct ipw_priv, request_scan.work);
6420 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6421 }
6422 
6423 static void ipw_request_direct_scan(struct work_struct *work)
6424 {
6425 	struct ipw_priv *priv =
6426 		container_of(work, struct ipw_priv, request_direct_scan.work);
6427 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6428 }
6429 
6430 static void ipw_bg_abort_scan(struct work_struct *work)
6431 {
6432 	struct ipw_priv *priv =
6433 		container_of(work, struct ipw_priv, abort_scan);
6434 	mutex_lock(&priv->mutex);
6435 	ipw_abort_scan(priv);
6436 	mutex_unlock(&priv->mutex);
6437 }
6438 
6439 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6440 {
6441 	/* This is called when wpa_supplicant loads and closes the driver
6442 	 * interface. */
6443 	priv->ieee->wpa_enabled = value;
6444 	return 0;
6445 }
6446 
6447 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6448 {
6449 	struct libipw_device *ieee = priv->ieee;
6450 	struct libipw_security sec = {
6451 		.flags = SEC_AUTH_MODE,
6452 	};
6453 	int ret = 0;
6454 
6455 	if (value & IW_AUTH_ALG_SHARED_KEY) {
6456 		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6457 		ieee->open_wep = 0;
6458 	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6459 		sec.auth_mode = WLAN_AUTH_OPEN;
6460 		ieee->open_wep = 1;
6461 	} else if (value & IW_AUTH_ALG_LEAP) {
6462 		sec.auth_mode = WLAN_AUTH_LEAP;
6463 		ieee->open_wep = 1;
6464 	} else
6465 		return -EINVAL;
6466 
6467 	if (ieee->set_security)
6468 		ieee->set_security(ieee->dev, &sec);
6469 	else
6470 		ret = -EOPNOTSUPP;
6471 
6472 	return ret;
6473 }
6474 
/*
 * Called when a WPA IE is (un)installed.  The IE itself (wpa_ie,
 * wpa_ie_len) is currently unused here; the function only ensures WPA
 * is flagged as enabled.
 */
static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
				int wpa_ie_len)
{
	/* make sure WPA is enabled */
	ipw_wpa_enable(priv, 1);
}
6481 
6482 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6483 			    char *capabilities, int length)
6484 {
6485 	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6486 
6487 	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6488 				capabilities);
6489 }
6490 
6491 /*
6492  * WE-18 support
6493  */
6494 
6495 /* SIOCSIWGENIE */
6496 static int ipw_wx_set_genie(struct net_device *dev,
6497 			    struct iw_request_info *info,
6498 			    union iwreq_data *wrqu, char *extra)
6499 {
6500 	struct ipw_priv *priv = libipw_priv(dev);
6501 	struct libipw_device *ieee = priv->ieee;
6502 	u8 *buf;
6503 	int err = 0;
6504 
6505 	if (wrqu->data.length > MAX_WPA_IE_LEN ||
6506 	    (wrqu->data.length && extra == NULL))
6507 		return -EINVAL;
6508 
6509 	if (wrqu->data.length) {
6510 		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6511 		if (buf == NULL) {
6512 			err = -ENOMEM;
6513 			goto out;
6514 		}
6515 
6516 		kfree(ieee->wpa_ie);
6517 		ieee->wpa_ie = buf;
6518 		ieee->wpa_ie_len = wrqu->data.length;
6519 	} else {
6520 		kfree(ieee->wpa_ie);
6521 		ieee->wpa_ie = NULL;
6522 		ieee->wpa_ie_len = 0;
6523 	}
6524 
6525 	ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6526       out:
6527 	return err;
6528 }
6529 
6530 /* SIOCGIWGENIE */
6531 static int ipw_wx_get_genie(struct net_device *dev,
6532 			    struct iw_request_info *info,
6533 			    union iwreq_data *wrqu, char *extra)
6534 {
6535 	struct ipw_priv *priv = libipw_priv(dev);
6536 	struct libipw_device *ieee = priv->ieee;
6537 	int err = 0;
6538 
6539 	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6540 		wrqu->data.length = 0;
6541 		goto out;
6542 	}
6543 
6544 	if (wrqu->data.length < ieee->wpa_ie_len) {
6545 		err = -E2BIG;
6546 		goto out;
6547 	}
6548 
6549 	wrqu->data.length = ieee->wpa_ie_len;
6550 	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6551 
6552       out:
6553 	return err;
6554 }
6555 
6556 static int wext_cipher2level(int cipher)
6557 {
6558 	switch (cipher) {
6559 	case IW_AUTH_CIPHER_NONE:
6560 		return SEC_LEVEL_0;
6561 	case IW_AUTH_CIPHER_WEP40:
6562 	case IW_AUTH_CIPHER_WEP104:
6563 		return SEC_LEVEL_1;
6564 	case IW_AUTH_CIPHER_TKIP:
6565 		return SEC_LEVEL_2;
6566 	case IW_AUTH_CIPHER_CCMP:
6567 		return SEC_LEVEL_3;
6568 	default:
6569 		return -1;
6570 	}
6571 }
6572 
6573 /* SIOCSIWAUTH */
/* SIOCSIWAUTH */
/*
 * Handle the wireless-extensions authentication parameter set requests
 * issued by wpa_supplicant.  Unknown parameters return -EOPNOTSUPP.
 */
static int ipw_wx_set_auth(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	struct libipw_device *ieee = priv->ieee;
	struct iw_param *param = &wrqu->param;
	struct lib80211_crypt_data *crypt;
	unsigned long flags;
	int ret = 0;

	switch (param->flags & IW_AUTH_INDEX) {
	case IW_AUTH_WPA_VERSION:
		/* accepted but not acted upon */
		break;
	case IW_AUTH_CIPHER_PAIRWISE:
		/* adjust hardware unicast decryption to the cipher level */
		ipw_set_hw_decrypt_unicast(priv,
					   wext_cipher2level(param->value));
		break;
	case IW_AUTH_CIPHER_GROUP:
		/* adjust hardware multicast decryption to the cipher level */
		ipw_set_hw_decrypt_multicast(priv,
					     wext_cipher2level(param->value));
		break;
	case IW_AUTH_KEY_MGMT:
		/*
		 * ipw2200 does not use these parameters
		 */
		break;

	case IW_AUTH_TKIP_COUNTERMEASURES:
		/* toggle the TKIP countermeasures flag on the active TX
		 * crypto context, if it supports flag manipulation */
		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
			break;

		flags = crypt->ops->get_flags(crypt->priv);

		if (param->value)
			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
		else
			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;

		crypt->ops->set_flags(flags, crypt->priv);

		break;

	case IW_AUTH_DROP_UNENCRYPTED:{
			/* HACK:
			 *
			 * wpa_supplicant calls set_wpa_enabled when the driver
			 * is loaded and unloaded, regardless of if WPA is being
			 * used.  No other calls are made which can be used to
			 * determine if encryption will be used or not prior to
			 * association being expected.  If encryption is not being
			 * used, drop_unencrypted is set to false, else true -- we
			 * can use this to determine if the CAP_PRIVACY_ON bit should
			 * be set.
			 */
			struct libipw_security sec = {
				.flags = SEC_ENABLED,
				.enabled = param->value,
			};
			priv->ieee->drop_unencrypted = param->value;
			/* We only change SEC_LEVEL for open mode. Others
			 * are set by ipw_wpa_set_encryption.
			 */
			if (!param->value) {
				sec.flags |= SEC_LEVEL;
				sec.level = SEC_LEVEL_0;
			} else {
				sec.flags |= SEC_LEVEL;
				sec.level = SEC_LEVEL_1;
			}
			if (priv->ieee->set_security)
				priv->ieee->set_security(priv->ieee->dev, &sec);
			break;
		}

	case IW_AUTH_80211_AUTH_ALG:
		ret = ipw_wpa_set_auth_algs(priv, param->value);
		break;

	case IW_AUTH_WPA_ENABLED:
		/* changing WPA state forces a reassociation */
		ret = ipw_wpa_enable(priv, param->value);
		ipw_disassociate(priv);
		break;

	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
		ieee->ieee802_1x = param->value;
		break;

	case IW_AUTH_PRIVACY_INVOKED:
		ieee->privacy_invoked = param->value;
		break;

	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
6672 
6673 /* SIOCGIWAUTH */
/* SIOCGIWAUTH */
/*
 * Report the current authentication parameters back to user space.
 * The WPA version / cipher / key-management values are owned by
 * wpa_supplicant and are refused with -EOPNOTSUPP.
 */
static int ipw_wx_get_auth(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	struct libipw_device *ieee = priv->ieee;
	struct lib80211_crypt_data *crypt;
	struct iw_param *param = &wrqu->param;

	switch (param->flags & IW_AUTH_INDEX) {
	case IW_AUTH_WPA_VERSION:
	case IW_AUTH_CIPHER_PAIRWISE:
	case IW_AUTH_CIPHER_GROUP:
	case IW_AUTH_KEY_MGMT:
		/*
		 * wpa_supplicant will control these internally
		 */
		return -EOPNOTSUPP;

	case IW_AUTH_TKIP_COUNTERMEASURES:
		/* read the countermeasures flag from the active TX crypto
		 * context; silently reports nothing if flags unsupported */
		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
		if (!crypt || !crypt->ops->get_flags)
			break;

		param->value = (crypt->ops->get_flags(crypt->priv) &
				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;

		break;

	case IW_AUTH_DROP_UNENCRYPTED:
		param->value = ieee->drop_unencrypted;
		break;

	case IW_AUTH_80211_AUTH_ALG:
		param->value = ieee->sec.auth_mode;
		break;

	case IW_AUTH_WPA_ENABLED:
		param->value = ieee->wpa_enabled;
		break;

	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
		param->value = ieee->ieee802_1x;
		break;

	case IW_AUTH_ROAMING_CONTROL:
	case IW_AUTH_PRIVACY_INVOKED:
		param->value = ieee->privacy_invoked;
		break;

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
6729 
6730 /* SIOCSIWENCODEEXT */
6731 static int ipw_wx_set_encodeext(struct net_device *dev,
6732 				struct iw_request_info *info,
6733 				union iwreq_data *wrqu, char *extra)
6734 {
6735 	struct ipw_priv *priv = libipw_priv(dev);
6736 	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6737 
6738 	if (hwcrypto) {
6739 		if (ext->alg == IW_ENCODE_ALG_TKIP) {
6740 			/* IPW HW can't build TKIP MIC,
6741 			   host decryption still needed */
6742 			if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6743 				priv->ieee->host_mc_decrypt = 1;
6744 			else {
6745 				priv->ieee->host_encrypt = 0;
6746 				priv->ieee->host_encrypt_msdu = 1;
6747 				priv->ieee->host_decrypt = 1;
6748 			}
6749 		} else {
6750 			priv->ieee->host_encrypt = 0;
6751 			priv->ieee->host_encrypt_msdu = 0;
6752 			priv->ieee->host_decrypt = 0;
6753 			priv->ieee->host_mc_decrypt = 0;
6754 		}
6755 	}
6756 
6757 	return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6758 }
6759 
6760 /* SIOCGIWENCODEEXT */
6761 static int ipw_wx_get_encodeext(struct net_device *dev,
6762 				struct iw_request_info *info,
6763 				union iwreq_data *wrqu, char *extra)
6764 {
6765 	struct ipw_priv *priv = libipw_priv(dev);
6766 	return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6767 }
6768 
6769 /* SIOCSIWMLME */
6770 static int ipw_wx_set_mlme(struct net_device *dev,
6771 			   struct iw_request_info *info,
6772 			   union iwreq_data *wrqu, char *extra)
6773 {
6774 	struct ipw_priv *priv = libipw_priv(dev);
6775 	struct iw_mlme *mlme = (struct iw_mlme *)extra;
6776 
6777 	switch (mlme->cmd) {
6778 	case IW_MLME_DEAUTH:
6779 		/* silently ignore */
6780 		break;
6781 
6782 	case IW_MLME_DISASSOC:
6783 		ipw_disassociate(priv);
6784 		break;
6785 
6786 	default:
6787 		return -EOPNOTSUPP;
6788 	}
6789 	return 0;
6790 }
6791 
6792 #ifdef CONFIG_IPW2200_QOS
6793 
6794 /* QoS */
6795 /*
6796 * get the modulation type of the current network or
6797 * the card current mode
6798 */
6799 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6800 {
6801 	u8 mode = 0;
6802 
6803 	if (priv->status & STATUS_ASSOCIATED) {
6804 		unsigned long flags;
6805 
6806 		spin_lock_irqsave(&priv->ieee->lock, flags);
6807 		mode = priv->assoc_network->mode;
6808 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6809 	} else {
6810 		mode = priv->ieee->mode;
6811 	}
6812 	IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6813 	return mode;
6814 }
6815 
6816 /*
6817 * Handle management frame beacon and probe response
6818 */
/*
 * Process QoS information carried in a beacon or probe response.
 * @active_network: non-zero when 'network' is the one we are associated
 *                  with.
 *
 * Activates/deactivates the network's QoS data, reschedules qos_activate
 * when the advertised parameter set changed, and - in IBSS mode - kicks
 * off a network merge when a matching peer IBSS is seen.  Always returns 0.
 */
static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
					 int active_network,
					 struct libipw_network *network)
{
	u32 size = sizeof(struct libipw_qos_parameters);

	/* in IBSS every supported peer is treated as active */
	if (network->capability & WLAN_CAPABILITY_IBSS)
		network->qos_data.active = network->qos_data.supported;

	if (network->flags & NETWORK_HAS_QOS_MASK) {
		if (active_network &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
			network->qos_data.active = network->qos_data.supported;

		/* re-activate QoS only when the AP bumped its parameter
		 * set count (i.e. the EDCA parameters actually changed) */
		if ((network->qos_data.active == 1) && (active_network == 1) &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
		    (network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			network->qos_data.old_param_count =
			    network->qos_data.param_count;
			schedule_work(&priv->qos_activate);
			IPW_DEBUG_QOS("QoS parameters change call "
				      "qos_activate\n");
		}
	} else {
		/* no QoS IE: fall back to the default parameter set for
		 * the network's modulation */
		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
			memcpy(&network->qos_data.parameters,
			       &def_parameters_CCK, size);
		else
			memcpy(&network->qos_data.parameters,
			       &def_parameters_OFDM, size);

		/* QoS just went away on the active network - reprogram */
		if ((network->qos_data.active == 1) && (active_network == 1)) {
			IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
			schedule_work(&priv->qos_activate);
		}

		network->qos_data.active = 0;
		network->qos_data.supported = 0;
	}
	/* IBSS merge: another IBSS with our SSID but a different BSSID */
	if ((priv->status & STATUS_ASSOCIATED) &&
	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
		if (!ether_addr_equal(network->bssid, priv->bssid))
			if (network->capability & WLAN_CAPABILITY_IBSS)
				if ((network->ssid_len ==
				     priv->assoc_network->ssid_len) &&
				    !memcmp(network->ssid,
					    priv->assoc_network->ssid,
					    network->ssid_len)) {
					schedule_work(&priv->merge_networks);
				}
	}

	return 0;
}
6874 
6875 /*
6876 * This function set up the firmware to support QoS. It sends
6877 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6878 */
/*
 * Program the firmware's QoS parameter tables (IPW_CMD_QOS_PARAMETERS).
 * @qos_network_data: QoS data of the target network, or NULL to use the
 *                    defaults for the current mode.
 *
 * Builds the CCK/OFDM default sets plus the "active" set, which is
 * chosen from the defaults, the IBSS configuration, or the network's
 * advertised parameters.  When the network does not support QoS, the
 * TXOP limits of the active set are overridden with the burst duration.
 * Returns the firmware command status.
 */
static int ipw_qos_activate(struct ipw_priv *priv,
			    struct libipw_qos_data *qos_network_data)
{
	int err;
	struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
	struct libipw_qos_parameters *active_one = NULL;
	u32 size = sizeof(struct libipw_qos_parameters);
	u32 burst_duration;
	int i;
	u8 type;

	type = ipw_qos_current_mode(priv);

	/* always ship the configured CCK and OFDM default sets */
	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);

	if (qos_network_data == NULL) {
		/* no network context: use the defaults for the current
		 * mode and force the burst-duration TXOP limits */
		if (type == IEEE_B) {
			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
			active_one = &def_parameters_CCK;
		} else
			active_one = &def_parameters_OFDM;

		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
		burst_duration = ipw_qos_get_burst_duration(priv);
		for (i = 0; i < QOS_QUEUE_NUM; i++)
			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
			    cpu_to_le16(burst_duration);
	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		/* IBSS: configured parameters when QoS is enabled,
		 * otherwise the compiled-in defaults */
		if (type == IEEE_B) {
			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
				      type);
			if (priv->qos_data.qos_enable == 0)
				active_one = &def_parameters_CCK;
			else
				active_one = priv->qos_data.def_qos_parm_CCK;
		} else {
			if (priv->qos_data.qos_enable == 0)
				active_one = &def_parameters_OFDM;
			else
				active_one = priv->qos_data.def_qos_parm_OFDM;
		}
		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
	} else {
		/* infrastructure: copy the network's advertised parameters
		 * under the ieee lock */
		unsigned long flags;
		int active;

		spin_lock_irqsave(&priv->ieee->lock, flags);
		active_one = &(qos_network_data->parameters);
		qos_network_data->old_param_count =
		    qos_network_data->param_count;
		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
		active = qos_network_data->supported;
		spin_unlock_irqrestore(&priv->ieee->lock, flags);

		/* network doesn't support QoS: fall back to burst TXOPs */
		if (active == 0) {
			burst_duration = ipw_qos_get_burst_duration(priv);
			for (i = 0; i < QOS_QUEUE_NUM; i++)
				qos_parameters[QOS_PARAM_SET_ACTIVE].
				    tx_op_limit[i] = cpu_to_le16(burst_duration);
		}
	}

	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
	err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
	if (err)
		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");

	return err;
}
6951 
6952 /*
6953 * send IPW_CMD_WME_INFO to the firmware
6954 */
6955 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6956 {
6957 	int ret = 0;
6958 	struct libipw_qos_information_element qos_info;
6959 
6960 	if (priv == NULL)
6961 		return -1;
6962 
6963 	qos_info.elementID = QOS_ELEMENT_ID;
6964 	qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
6965 
6966 	qos_info.version = QOS_VERSION_1;
6967 	qos_info.ac_info = 0;
6968 
6969 	memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6970 	qos_info.qui_type = QOS_OUI_TYPE;
6971 	qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6972 
6973 	ret = ipw_send_qos_info_command(priv, &qos_info);
6974 	if (ret != 0) {
6975 		IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6976 	}
6977 	return ret;
6978 }
6979 
6980 /*
6981 * Set the QoS parameter with the association request structure
6982 */
6983 static int ipw_qos_association(struct ipw_priv *priv,
6984 			       struct libipw_network *network)
6985 {
6986 	int err = 0;
6987 	struct libipw_qos_data *qos_data = NULL;
6988 	struct libipw_qos_data ibss_data = {
6989 		.supported = 1,
6990 		.active = 1,
6991 	};
6992 
6993 	switch (priv->ieee->iw_mode) {
6994 	case IW_MODE_ADHOC:
6995 		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6996 
6997 		qos_data = &ibss_data;
6998 		break;
6999 
7000 	case IW_MODE_INFRA:
7001 		qos_data = &network->qos_data;
7002 		break;
7003 
7004 	default:
7005 		BUG();
7006 		break;
7007 	}
7008 
7009 	err = ipw_qos_activate(priv, qos_data);
7010 	if (err) {
7011 		priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7012 		return err;
7013 	}
7014 
7015 	if (priv->qos_data.qos_enable && qos_data->supported) {
7016 		IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7017 		priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7018 		return ipw_qos_set_info_element(priv);
7019 	}
7020 
7021 	return 0;
7022 }
7023 
7024 /*
7025 * handling the beaconing responses. if we get different QoS setting
7026 * off the network from the associated setting, adjust the QoS
7027 * setting
7028 */
static void ipw_qos_association_resp(struct ipw_priv *priv,
				    struct libipw_network *network)
{
	unsigned long flags;
	u32 size = sizeof(struct libipw_qos_parameters);
	int set_qos_param = 0;

	/* Only meaningful while associated in infrastructure mode */
	if ((priv == NULL) || (network == NULL) ||
	    (priv->assoc_network == NULL))
		return;

	if (!(priv->status & STATUS_ASSOCIATED))
		return;

	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
		return;

	/* The ieee lock protects both the scanned network's and the
	 * associated network's QoS data while we copy/compare them */
	spin_lock_irqsave(&priv->ieee->lock, flags);
	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
		       sizeof(struct libipw_qos_data));
		priv->assoc_network->qos_data.active = 1;
		/* Re-activate only when the AP bumped its parameter set
		 * count, i.e. the EDCA parameters actually changed */
		if ((network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			set_qos_param = 1;
			network->qos_data.old_param_count =
			    network->qos_data.param_count;
		}

	} else {
		/* No QoS IE from the AP: fall back to the default
		 * parameter set for the modulation in use and mark QoS
		 * inactive/unsupported for this network */
		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
			memcpy(&priv->assoc_network->qos_data.parameters,
			       &def_parameters_CCK, size);
		else
			memcpy(&priv->assoc_network->qos_data.parameters,
			       &def_parameters_OFDM, size);
		priv->assoc_network->qos_data.active = 0;
		priv->assoc_network->qos_data.supported = 0;
		set_qos_param = 1;
	}

	spin_unlock_irqrestore(&priv->ieee->lock, flags);

	/* Push the new parameters to the firmware from process context,
	 * outside the spinlock */
	if (set_qos_param == 1)
		schedule_work(&priv->qos_activate);
}
7075 
7076 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7077 {
7078 	u32 ret = 0;
7079 
7080 	if (!priv)
7081 		return 0;
7082 
7083 	if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7084 		ret = priv->qos_data.burst_duration_CCK;
7085 	else
7086 		ret = priv->qos_data.burst_duration_OFDM;
7087 
7088 	return ret;
7089 }
7090 
7091 /*
7092 * Initialize the setting of QoS global
7093 */
7094 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7095 			 int burst_enable, u32 burst_duration_CCK,
7096 			 u32 burst_duration_OFDM)
7097 {
7098 	priv->qos_data.qos_enable = enable;
7099 
7100 	if (priv->qos_data.qos_enable) {
7101 		priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7102 		priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7103 		IPW_DEBUG_QOS("QoS is enabled\n");
7104 	} else {
7105 		priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7106 		priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7107 		IPW_DEBUG_QOS("QoS is not enabled\n");
7108 	}
7109 
7110 	priv->qos_data.burst_enable = burst_enable;
7111 
7112 	if (burst_enable) {
7113 		priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7114 		priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7115 	} else {
7116 		priv->qos_data.burst_duration_CCK = 0;
7117 		priv->qos_data.burst_duration_OFDM = 0;
7118 	}
7119 }
7120 
7121 /*
7122 * map the packet priority to the right TX Queue
7123 */
7124 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7125 {
7126 	if (priority > 7 || !priv->qos_data.qos_enable)
7127 		priority = 0;
7128 
7129 	return from_priority_to_tx_queue[priority] - 1;
7130 }
7131 
7132 static int ipw_is_qos_active(struct net_device *dev,
7133 			     struct sk_buff *skb)
7134 {
7135 	struct ipw_priv *priv = libipw_priv(dev);
7136 	struct libipw_qos_data *qos_data = NULL;
7137 	int active, supported;
7138 	u8 *daddr = skb->data + ETH_ALEN;
7139 	int unicast = !is_multicast_ether_addr(daddr);
7140 
7141 	if (!(priv->status & STATUS_ASSOCIATED))
7142 		return 0;
7143 
7144 	qos_data = &priv->assoc_network->qos_data;
7145 
7146 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7147 		if (unicast == 0)
7148 			qos_data->active = 0;
7149 		else
7150 			qos_data->active = qos_data->supported;
7151 	}
7152 	active = qos_data->active;
7153 	supported = qos_data->supported;
7154 	IPW_DEBUG_QOS("QoS  %d network is QoS active %d  supported %d  "
7155 		      "unicast %d\n",
7156 		      priv->qos_data.qos_enable, active, supported, unicast);
7157 	if (active && priv->qos_data.qos_enable)
7158 		return 1;
7159 
7160 	return 0;
7161 
7162 }
7163 /*
7164 * add QoS parameter to the TX command
7165 */
7166 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7167 					u16 priority,
7168 					struct tfd_data *tfd)
7169 {
7170 	int tx_queue_id = 0;
7171 
7172 
7173 	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7174 	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7175 
7176 	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7177 		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7178 		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7179 	}
7180 	return 0;
7181 }
7182 
7183 /*
7184 * background support to run QoS activate functionality
7185 */
7186 static void ipw_bg_qos_activate(struct work_struct *work)
7187 {
7188 	struct ipw_priv *priv =
7189 		container_of(work, struct ipw_priv, qos_activate);
7190 
7191 	mutex_lock(&priv->mutex);
7192 
7193 	if (priv->status & STATUS_ASSOCIATED)
7194 		ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7195 
7196 	mutex_unlock(&priv->mutex);
7197 }
7198 
7199 static int ipw_handle_probe_response(struct net_device *dev,
7200 				     struct libipw_probe_response *resp,
7201 				     struct libipw_network *network)
7202 {
7203 	struct ipw_priv *priv = libipw_priv(dev);
7204 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7205 			      (network == priv->assoc_network));
7206 
7207 	ipw_qos_handle_probe_response(priv, active_network, network);
7208 
7209 	return 0;
7210 }
7211 
7212 static int ipw_handle_beacon(struct net_device *dev,
7213 			     struct libipw_beacon *resp,
7214 			     struct libipw_network *network)
7215 {
7216 	struct ipw_priv *priv = libipw_priv(dev);
7217 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7218 			      (network == priv->assoc_network));
7219 
7220 	ipw_qos_handle_probe_response(priv, active_network, network);
7221 
7222 	return 0;
7223 }
7224 
static int ipw_handle_assoc_response(struct net_device *dev,
				     struct libipw_assoc_response *resp,
				     struct libipw_network *network)
{
	/* Re-sync our QoS state with what the AP granted */
	ipw_qos_association_resp(libipw_priv(dev), network);

	return 0;
}
7233 
7234 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7235 				       *qos_param)
7236 {
7237 	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7238 				sizeof(*qos_param) * 3, qos_param);
7239 }
7240 
7241 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7242 				     *qos_param)
7243 {
7244 	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7245 				qos_param);
7246 }
7247 
7248 #endif				/* CONFIG_IPW2200_QOS */
7249 
/*
 * Issue the full command sequence that associates us to @network:
 * SSID, supported rates, system config, rx sensitivity and finally the
 * associate command itself.  @rates is updated in place (mode/purpose)
 * before being sent.  @roaming requests HC_REASSOCIATE instead of
 * HC_ASSOCIATE when we are not starting a new IBSS.
 *
 * Returns 0 on success or the error of the first failing host command.
 */
static int ipw_associate_network(struct ipw_priv *priv,
				 struct libipw_network *network,
				 struct ipw_supported_rates *rates, int roaming)
{
	int err;

	if (priv->config & CFG_FIXED_RATE)
		ipw_set_fixed_rate(priv, network->mode);

	/* Adopt the target's ESSID unless one was statically configured */
	if (!(priv->config & CFG_STATIC_ESSID)) {
		priv->essid_len = min(network->ssid_len,
				      (u8) IW_ESSID_MAX_SIZE);
		memcpy(priv->essid, network->ssid, priv->essid_len);
	}

	network->last_associate = jiffies;

	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
	priv->assoc_request.channel = network->channel;
	priv->assoc_request.auth_key = 0;

	/* Select the 802.11 authentication algorithm (shared key, LEAP
	 * or open) from the privacy capability and security settings */
	if ((priv->capability & CAP_PRIVACY_ON) &&
	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
		priv->assoc_request.auth_key = priv->ieee->sec.active_key;

		if (priv->ieee->sec.level == SEC_LEVEL_1)
			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);

	} else if ((priv->capability & CAP_PRIVACY_ON) &&
		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
		priv->assoc_request.auth_type = AUTH_LEAP;
	else
		priv->assoc_request.auth_type = AUTH_OPEN;

	if (priv->ieee->wpa_ie_len) {
		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
				 priv->ieee->wpa_ie_len);
	}

	/*
	 * It is valid for our ieee device to support multiple modes, but
	 * when it comes to associating to a given network we have to choose
	 * just one mode.
	 */
	if (network->mode & priv->ieee->mode & IEEE_A)
		priv->assoc_request.ieee_mode = IPW_A_MODE;
	else if (network->mode & priv->ieee->mode & IEEE_G)
		priv->assoc_request.ieee_mode = IPW_G_MODE;
	else if (network->mode & priv->ieee->mode & IEEE_B)
		priv->assoc_request.ieee_mode = IPW_B_MODE;

	priv->assoc_request.capability = cpu_to_le16(network->capability);
	/* Use short preamble only if both the network offers it and we
	 * are not forced to long preamble by configuration */
	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
	    && !(priv->config & CFG_PREAMBLE_LONG)) {
		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
	} else {
		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;

		/* Clear the short preamble if we won't be supporting it */
		priv->assoc_request.capability &=
		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
	}

	/* Clear capability bits that aren't used in Ad Hoc */
	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->assoc_request.capability &=
		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);

	IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
			roaming ? "Rea" : "A",
			priv->essid_len, priv->essid,
			network->channel,
			ipw_modes[priv->assoc_request.ieee_mode],
			rates->num_rates,
			(priv->assoc_request.preamble_length ==
			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
			network->capability &
			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
			priv->capability & CAP_PRIVACY_ON ?
			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
			 "(open)") : "",
			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
			priv->capability & CAP_PRIVACY_ON ?
			'1' + priv->ieee->sec.active_key : '.',
			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');

	priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
	/* A zero timestamp in ad-hoc mode means no IBSS exists yet, so
	 * start one ourselves instead of joining */
	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
		priv->assoc_request.assoc_type = HC_IBSS_START;
		priv->assoc_request.assoc_tsf_msw = 0;
		priv->assoc_request.assoc_tsf_lsw = 0;
	} else {
		if (unlikely(roaming))
			priv->assoc_request.assoc_type = HC_REASSOCIATE;
		else
			priv->assoc_request.assoc_type = HC_ASSOCIATE;
		priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
		priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
	}

	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);

	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		eth_broadcast_addr(priv->assoc_request.dest);
		priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
	} else {
		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
		priv->assoc_request.atim_window = 0;
	}

	priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);

	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
	if (err) {
		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
		return err;
	}

	rates->ieee_mode = priv->assoc_request.ieee_mode;
	rates->purpose = IPW_RATE_CONNECT;
	ipw_send_supported_rates(priv, rates);

	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
		priv->sys_config.dot11g_auto_detection = 1;
	else
		priv->sys_config.dot11g_auto_detection = 0;

	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->sys_config.answer_broadcast_ssid_probe = 1;
	else
		priv->sys_config.answer_broadcast_ssid_probe = 0;

	err = ipw_send_system_config(priv);
	if (err) {
		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
		return err;
	}

	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
	if (err) {
		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
		return err;
	}

	/*
	 * If preemption is enabled, it is possible for the association
	 * to complete before we return from ipw_send_associate.  Therefore
	 * we have to be sure and update our private data first.
	 */
	priv->channel = network->channel;
	memcpy(priv->bssid, network->bssid, ETH_ALEN);
	priv->status |= STATUS_ASSOCIATING;
	priv->status &= ~STATUS_SECURITY_UPDATED;

	priv->assoc_network = network;

#ifdef CONFIG_IPW2200_QOS
	ipw_qos_association(priv, network);
#endif

	err = ipw_send_associate(priv, &priv->assoc_request);
	if (err) {
		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
		return err;
	}

	IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n",
		  priv->essid_len, priv->essid, priv->bssid);

	return 0;
}
7426 
static void ipw_roam(void *data)
{
	struct ipw_priv *priv = data;
	struct libipw_network *network = NULL;
	struct ipw_network_match match = {
		.network = priv->assoc_network
	};

	/* The roaming process is as follows:
	 *
	 * 1.  Missed beacon threshold triggers the roaming process by
	 *     setting the status ROAM bit and requesting a scan.
	 * 2.  When the scan completes, it schedules the ROAM work
	 * 3.  The ROAM work looks at all of the known networks for one that
	 *     is a better network than the currently associated.  If none
	 *     found, the ROAM process is over (ROAM bit cleared)
	 * 4.  If a better network is found, a disassociation request is
	 *     sent.
	 * 5.  When the disassociation completes, the roam work is again
	 *     scheduled.  The second time through, the driver is no longer
	 *     associated, and the newly selected network is sent an
	 *     association request.
	 * 6.  At this point, the roaming process is complete and the ROAM
	 *     status bit is cleared.
	 */

	/* If we are no longer associated, and the roaming bit is no longer
	 * set, then we are not actively roaming, so just return */
	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
		return;

	if (priv->status & STATUS_ASSOCIATED) {
		/* First pass through ROAM process -- look for a better
		 * network */
		unsigned long flags;
		u8 rssi = priv->assoc_network->stats.rssi;
		/* Temporarily floor our own network's rssi so
		 * ipw_best_network() can't pick it as the best candidate;
		 * restored below */
		priv->assoc_network->stats.rssi = -128;
		spin_lock_irqsave(&priv->ieee->lock, flags);
		list_for_each_entry(network, &priv->ieee->network_list, list) {
			if (network != priv->assoc_network)
				ipw_best_network(priv, &match, network, 1);
		}
		spin_unlock_irqrestore(&priv->ieee->lock, flags);
		priv->assoc_network->stats.rssi = rssi;

		if (match.network == priv->assoc_network) {
			IPW_DEBUG_ASSOC("No better APs in this network to "
					"roam to.\n");
			priv->status &= ~STATUS_ROAMING;
			ipw_debug_config(priv);
			return;
		}

		/* Better AP found: disassociate now; the disassociation
		 * completion reschedules this work for the second pass */
		ipw_send_disassociate(priv, 1);
		priv->assoc_network = match.network;

		return;
	}

	/* Second pass through ROAM process -- request association */
	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
	priv->status &= ~STATUS_ROAMING;
}
7491 
7492 static void ipw_bg_roam(struct work_struct *work)
7493 {
7494 	struct ipw_priv *priv =
7495 		container_of(work, struct ipw_priv, roam);
7496 	mutex_lock(&priv->mutex);
7497 	ipw_roam(priv);
7498 	mutex_unlock(&priv->mutex);
7499 }
7500 
/*
 * Scan the known-network list for the best match and start an
 * association attempt.  In ad-hoc mode with a static ESSID/channel and
 * no match, a new IBSS network is created instead.  Returns 1 if an
 * association attempt was started, 0 otherwise.
 */
static int ipw_associate(void *data)
{
	struct ipw_priv *priv = data;

	struct libipw_network *network = NULL;
	struct ipw_network_match match = {
		.network = NULL
	};
	struct ipw_supported_rates *rates;
	struct list_head *element;
	unsigned long flags;

	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
		return 0;
	}

	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
		IPW_DEBUG_ASSOC("Not attempting association (already in "
				"progress)\n");
		return 0;
	}

	if (priv->status & STATUS_DISASSOCIATING) {
		IPW_DEBUG_ASSOC("Not attempting association (in disassociating)\n");
		/* Retry once the disassociation has completed */
		schedule_work(&priv->associate);
		return 0;
	}

	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
				"initialized)\n");
		return 0;
	}

	if (!(priv->config & CFG_ASSOCIATE) &&
	    !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
		return 0;
	}

	/* Protect our use of the network_list */
	spin_lock_irqsave(&priv->ieee->lock, flags);
	list_for_each_entry(network, &priv->ieee->network_list, list)
	    ipw_best_network(priv, &match, network, 0);

	network = match.network;
	rates = &match.rates;

	/* No match: create our own IBSS if ad-hoc mode is fully
	 * configured (static ESSID and channel, creation allowed) */
	if (network == NULL &&
	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    priv->config & CFG_ADHOC_CREATE &&
	    priv->config & CFG_STATIC_ESSID &&
	    priv->config & CFG_STATIC_CHANNEL) {
		/* Use oldest network if the free list is empty */
		if (list_empty(&priv->ieee->network_free_list)) {
			struct libipw_network *oldest = NULL;
			struct libipw_network *target;

			list_for_each_entry(target, &priv->ieee->network_list, list) {
				if ((oldest == NULL) ||
				    (target->last_scanned < oldest->last_scanned))
					oldest = target;
			}

			/* If there are no more slots, expire the oldest */
			list_del(&oldest->list);
			target = oldest;
			IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n",
					target->ssid_len, target->ssid,
					target->bssid);
			list_add_tail(&target->list,
				      &priv->ieee->network_free_list);
		}

		element = priv->ieee->network_free_list.next;
		network = list_entry(element, struct libipw_network, list);
		ipw_adhoc_create(priv, network);
		rates = &priv->rates;
		list_del(element);
		list_add_tail(&network->list, &priv->ieee->network_list);
	}
	spin_unlock_irqrestore(&priv->ieee->lock, flags);

	/* If we reached the end of the list, then we don't have any valid
	 * matching APs */
	if (!network) {
		ipw_debug_config(priv);

		/* Schedule another scan and try again later */
		if (!(priv->status & STATUS_SCANNING)) {
			if (!(priv->config & CFG_SPEED_SCAN))
				schedule_delayed_work(&priv->request_scan,
						      SCAN_INTERVAL);
			else
				schedule_delayed_work(&priv->request_scan, 0);
		}

		return 0;
	}

	ipw_associate_network(priv, network, rates, 0);

	return 1;
}
7605 
7606 static void ipw_bg_associate(struct work_struct *work)
7607 {
7608 	struct ipw_priv *priv =
7609 		container_of(work, struct ipw_priv, associate);
7610 	mutex_lock(&priv->mutex);
7611 	ipw_associate(priv);
7612 	mutex_unlock(&priv->mutex);
7613 }
7614 
/*
 * The hardware decrypts frames in place but leaves the PROTECTED bit
 * set and the security framing (CCMP header/MIC or WEP IV/ICV) in the
 * frame.  Strip both so the stack sees a plain unprotected frame.
 */
static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
				      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	u16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);
	/* Nothing to do for frames that were never protected */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return;

	fc &= ~IEEE80211_FCTL_PROTECTED;
	hdr->frame_control = cpu_to_le16(fc);
	switch (priv->ieee->sec.level) {
	case SEC_LEVEL_3:
		/* Remove CCMP HDR: shift the payload over the 8-byte CCMP
		 * header, then trim header + MIC off the tail */
		memmove(skb->data + LIBIPW_3ADDR_LEN,
			skb->data + LIBIPW_3ADDR_LEN + 8,
			skb->len - LIBIPW_3ADDR_LEN - 8);
		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
		break;
	case SEC_LEVEL_2:
		break;
	case SEC_LEVEL_1:
		/* Remove IV: shift the payload over the 4-byte WEP IV,
		 * then trim IV + ICV off the tail */
		memmove(skb->data + LIBIPW_3ADDR_LEN,
			skb->data + LIBIPW_3ADDR_LEN + 4,
			skb->len - LIBIPW_3ADDR_LEN - 4);
		skb_trim(skb, skb->len - 8);	/* IV + ICV */
		break;
	case SEC_LEVEL_0:
		break;
	default:
		printk(KERN_ERR "Unknown security level %d\n",
		       priv->ieee->sec.level);
		break;
	}
}
7653 
/*
 * Deliver a received data frame to the libipw stack: validate the
 * frame length against the skb, strip the hardware rx header, undo any
 * leftover security framing from hardware decryption, and pass the skb
 * up.  On success libipw_rx() takes ownership of the skb.
 */
static void ipw_handle_data_packet(struct ipw_priv *priv,
				   struct ipw_rx_mem_buffer *rxb,
				   struct libipw_rx_stats *stats)
{
	struct net_device *dev = priv->net_dev;
	struct libipw_hdr_4addr *hdr;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;

	/* We received data from the HW, so stop the watchdog */
	netif_trans_update(dev);

	/* We only process data packets if the
	 * interface is open */
	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
		     skb_tailroom(rxb->skb))) {
		dev->stats.rx_errors++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
		return;
	} else if (unlikely(!netif_running(priv->net_dev))) {
		dev->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
		return;
	}

	/* Advance skb->data to the start of the actual payload */
	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));

	/* Set the size of the skb to the size of the frame */
	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));

	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);

	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
	hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
	    (is_multicast_ether_addr(hdr->addr1) ?
	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
		ipw_rebuild_decrypted_skb(priv, rxb->skb);

	if (!libipw_rx(priv->ieee, rxb->skb, stats))
		dev->stats.rx_errors++;
	else {			/* libipw_rx succeeded, so it now owns the SKB */
		rxb->skb = NULL;
		__ipw_led_activity_on(priv);
	}
}
7702 
7703 #ifdef CONFIG_IPW2200_RADIOTAP
/*
 * Monitor-mode rx path: replace the hardware rx header with a radiotap
 * header describing tsf/flags/rate/channel/signal/noise/antenna and
 * hand the frame to libipw_rx().  On success libipw_rx() owns the skb.
 */
static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
					   struct ipw_rx_mem_buffer *rxb,
					   struct libipw_rx_stats *stats)
{
	struct net_device *dev = priv->net_dev;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
	struct ipw_rx_frame *frame = &pkt->u.frame;

	/* initial pull of some data */
	u16 received_channel = frame->received_channel;
	u8 antennaAndPhy = frame->antennaAndPhy;
	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
	u16 pktrate = frame->rate;

	/* Magic struct that slots into the radiotap header -- no reason
	 * to build this manually element by element, we can write it much
	 * more efficiently than we can parse it. ORDER MATTERS HERE */
	struct ipw_rt_hdr *ipw_rt;

	unsigned short len = le16_to_cpu(pkt->u.frame.length);

	/* We received data from the HW, so stop the watchdog */
	netif_trans_update(dev);

	/* We only process data packets if the
	 * interface is open */
	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
		     skb_tailroom(rxb->skb))) {
		dev->stats.rx_errors++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
		return;
	} else if (unlikely(!netif_running(priv->net_dev))) {
		dev->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
		return;
	}

	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
	 * that now */
	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
		/* FIXME: Should alloc bigger skb instead */
		dev->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
		return;
	}

	/* copy the frame itself */
	/* Shift the frame so the radiotap header fits in front of it;
	 * regions may overlap, hence memmove */
	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
		rxb->skb->data + IPW_RX_FRAME_SIZE, len);

	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;

	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* total header+data */

	/* Big bitfield of all the fields we provide in radiotap */
	ipw_rt->rt_hdr.it_present = cpu_to_le32(
	     (1 << IEEE80211_RADIOTAP_TSFT) |
	     (1 << IEEE80211_RADIOTAP_FLAGS) |
	     (1 << IEEE80211_RADIOTAP_RATE) |
	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
	     (1 << IEEE80211_RADIOTAP_ANTENNA));

	/* Zero the flags, we'll add to them as we go */
	ipw_rt->rt_flags = 0;
	/* NOTE(review): only four TSF bytes are taken from the hw frame
	 * here, so just the low 32 bits of the 64-bit TSF are reported */
	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
			       frame->parent_tsf[2] << 16 |
			       frame->parent_tsf[1] << 8  |
			       frame->parent_tsf[0]);

	/* Convert signal to DBM */
	ipw_rt->rt_dbmsignal = antsignal;
	ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);

	/* Convert the channel data and set the flags */
	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
	if (received_channel > 14) {	/* 802.11a */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
	} else if (antennaAndPhy & 32) {	/* 802.11b */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
	} else {		/* 802.11g */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
	}

	/* set the rate in multiples of 500k/s */
	switch (pktrate) {
	case IPW_TX_RATE_1MB:
		ipw_rt->rt_rate = 2;
		break;
	case IPW_TX_RATE_2MB:
		ipw_rt->rt_rate = 4;
		break;
	case IPW_TX_RATE_5MB:
		ipw_rt->rt_rate = 10;
		break;
	case IPW_TX_RATE_6MB:
		ipw_rt->rt_rate = 12;
		break;
	case IPW_TX_RATE_9MB:
		ipw_rt->rt_rate = 18;
		break;
	case IPW_TX_RATE_11MB:
		ipw_rt->rt_rate = 22;
		break;
	case IPW_TX_RATE_12MB:
		ipw_rt->rt_rate = 24;
		break;
	case IPW_TX_RATE_18MB:
		ipw_rt->rt_rate = 36;
		break;
	case IPW_TX_RATE_24MB:
		ipw_rt->rt_rate = 48;
		break;
	case IPW_TX_RATE_36MB:
		ipw_rt->rt_rate = 72;
		break;
	case IPW_TX_RATE_48MB:
		ipw_rt->rt_rate = 96;
		break;
	case IPW_TX_RATE_54MB:
		ipw_rt->rt_rate = 108;
		break;
	default:
		ipw_rt->rt_rate = 0;
		break;
	}

	/* antenna number */
	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */

	/* set the preamble flag if we have it */
	if ((antennaAndPhy & 64))
		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;

	/* Set the size of the skb to the size of the frame */
	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));

	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);

	if (!libipw_rx(priv->ieee, rxb->skb, stats))
		dev->stats.rx_errors++;
	else {			/* libipw_rx succeeded, so it now owns the SKB */
		rxb->skb = NULL;
		/* no LED during capture */
	}
}
7859 #endif
7860 
7861 #ifdef CONFIG_IPW2200_PROMISCUOUS
/* Frame-classification helpers for the promiscuous rx path; fc is the
 * 802.11 frame_control field in host byte order. */
#define libipw_is_probe_response(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )

#define libipw_is_management(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)

#define libipw_is_control(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)

#define libipw_is_data(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)

#define libipw_is_assoc_request(fc) \
   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)

#define libipw_is_reassoc_request(fc) \
   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7880 
7881 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7882 				      struct ipw_rx_mem_buffer *rxb,
7883 				      struct libipw_rx_stats *stats)
7884 {
7885 	struct net_device *dev = priv->prom_net_dev;
7886 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7887 	struct ipw_rx_frame *frame = &pkt->u.frame;
7888 	struct ipw_rt_hdr *ipw_rt;
7889 
7890 	/* First cache any information we need before we overwrite
7891 	 * the information provided in the skb from the hardware */
7892 	struct ieee80211_hdr *hdr;
7893 	u16 channel = frame->received_channel;
7894 	u8 phy_flags = frame->antennaAndPhy;
7895 	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7896 	s8 noise = (s8) le16_to_cpu(frame->noise);
7897 	u8 rate = frame->rate;
7898 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
7899 	struct sk_buff *skb;
7900 	int hdr_only = 0;
7901 	u16 filter = priv->prom_priv->filter;
7902 
7903 	/* If the filter is set to not include Rx frames then return */
7904 	if (filter & IPW_PROM_NO_RX)
7905 		return;
7906 
7907 	/* We received data from the HW, so stop the watchdog */
7908 	netif_trans_update(dev);
7909 
7910 	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7911 		dev->stats.rx_errors++;
7912 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7913 		return;
7914 	}
7915 
7916 	/* We only process data packets if the interface is open */
7917 	if (unlikely(!netif_running(dev))) {
7918 		dev->stats.rx_dropped++;
7919 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7920 		return;
7921 	}
7922 
7923 	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7924 	 * that now */
7925 	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7926 		/* FIXME: Should alloc bigger skb instead */
7927 		dev->stats.rx_dropped++;
7928 		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7929 		return;
7930 	}
7931 
7932 	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7933 	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
7934 		if (filter & IPW_PROM_NO_MGMT)
7935 			return;
7936 		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7937 			hdr_only = 1;
7938 	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
7939 		if (filter & IPW_PROM_NO_CTL)
7940 			return;
7941 		if (filter & IPW_PROM_CTL_HEADER_ONLY)
7942 			hdr_only = 1;
7943 	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
7944 		if (filter & IPW_PROM_NO_DATA)
7945 			return;
7946 		if (filter & IPW_PROM_DATA_HEADER_ONLY)
7947 			hdr_only = 1;
7948 	}
7949 
7950 	/* Copy the SKB since this is for the promiscuous side */
7951 	skb = skb_copy(rxb->skb, GFP_ATOMIC);
7952 	if (skb == NULL) {
7953 		IPW_ERROR("skb_clone failed for promiscuous copy.\n");
7954 		return;
7955 	}
7956 
7957 	/* copy the frame data to write after where the radiotap header goes */
7958 	ipw_rt = (void *)skb->data;
7959 
7960 	if (hdr_only)
7961 		len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
7962 
7963 	memcpy(ipw_rt->payload, hdr, len);
7964 
7965 	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7966 	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7967 	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt));	/* total header+data */
7968 
7969 	/* Set the size of the skb to the size of the frame */
7970 	skb_put(skb, sizeof(*ipw_rt) + len);
7971 
7972 	/* Big bitfield of all the fields we provide in radiotap */
7973 	ipw_rt->rt_hdr.it_present = cpu_to_le32(
7974 	     (1 << IEEE80211_RADIOTAP_TSFT) |
7975 	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7976 	     (1 << IEEE80211_RADIOTAP_RATE) |
7977 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7978 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7979 	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7980 	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7981 
7982 	/* Zero the flags, we'll add to them as we go */
7983 	ipw_rt->rt_flags = 0;
7984 	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7985 			       frame->parent_tsf[2] << 16 |
7986 			       frame->parent_tsf[1] << 8  |
7987 			       frame->parent_tsf[0]);
7988 
7989 	/* Convert to DBM */
7990 	ipw_rt->rt_dbmsignal = signal;
7991 	ipw_rt->rt_dbmnoise = noise;
7992 
7993 	/* Convert the channel data and set the flags */
7994 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
7995 	if (channel > 14) {	/* 802.11a */
7996 		ipw_rt->rt_chbitmask =
7997 		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7998 	} else if (phy_flags & (1 << 5)) {	/* 802.11b */
7999 		ipw_rt->rt_chbitmask =
8000 		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8001 	} else {		/* 802.11g */
8002 		ipw_rt->rt_chbitmask =
8003 		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8004 	}
8005 
8006 	/* set the rate in multiples of 500k/s */
8007 	switch (rate) {
8008 	case IPW_TX_RATE_1MB:
8009 		ipw_rt->rt_rate = 2;
8010 		break;
8011 	case IPW_TX_RATE_2MB:
8012 		ipw_rt->rt_rate = 4;
8013 		break;
8014 	case IPW_TX_RATE_5MB:
8015 		ipw_rt->rt_rate = 10;
8016 		break;
8017 	case IPW_TX_RATE_6MB:
8018 		ipw_rt->rt_rate = 12;
8019 		break;
8020 	case IPW_TX_RATE_9MB:
8021 		ipw_rt->rt_rate = 18;
8022 		break;
8023 	case IPW_TX_RATE_11MB:
8024 		ipw_rt->rt_rate = 22;
8025 		break;
8026 	case IPW_TX_RATE_12MB:
8027 		ipw_rt->rt_rate = 24;
8028 		break;
8029 	case IPW_TX_RATE_18MB:
8030 		ipw_rt->rt_rate = 36;
8031 		break;
8032 	case IPW_TX_RATE_24MB:
8033 		ipw_rt->rt_rate = 48;
8034 		break;
8035 	case IPW_TX_RATE_36MB:
8036 		ipw_rt->rt_rate = 72;
8037 		break;
8038 	case IPW_TX_RATE_48MB:
8039 		ipw_rt->rt_rate = 96;
8040 		break;
8041 	case IPW_TX_RATE_54MB:
8042 		ipw_rt->rt_rate = 108;
8043 		break;
8044 	default:
8045 		ipw_rt->rt_rate = 0;
8046 		break;
8047 	}
8048 
8049 	/* antenna number */
8050 	ipw_rt->rt_antenna = (phy_flags & 3);
8051 
8052 	/* set the preamble flag if we have it */
8053 	if (phy_flags & (1 << 6))
8054 		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8055 
8056 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8057 
8058 	if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8059 		dev->stats.rx_errors++;
8060 		dev_kfree_skb_any(skb);
8061 	}
8062 }
8063 #endif
8064 
8065 static int is_network_packet(struct ipw_priv *priv,
8066 				    struct libipw_hdr_4addr *header)
8067 {
8068 	/* Filter incoming packets to determine if they are targeted toward
8069 	 * this network, discarding packets coming from ourselves */
8070 	switch (priv->ieee->iw_mode) {
8071 	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
8072 		/* packets from our adapter are dropped (echo) */
8073 		if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
8074 			return 0;
8075 
8076 		/* {broad,multi}cast packets to our BSSID go through */
8077 		if (is_multicast_ether_addr(header->addr1))
8078 			return ether_addr_equal(header->addr3, priv->bssid);
8079 
8080 		/* packets to our adapter go through */
8081 		return ether_addr_equal(header->addr1,
8082 					priv->net_dev->dev_addr);
8083 
8084 	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
8085 		/* packets from our adapter are dropped (echo) */
8086 		if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
8087 			return 0;
8088 
8089 		/* {broad,multi}cast packets to our BSS go through */
8090 		if (is_multicast_ether_addr(header->addr1))
8091 			return ether_addr_equal(header->addr2, priv->bssid);
8092 
8093 		/* packets to our adapter go through */
8094 		return ether_addr_equal(header->addr1,
8095 					priv->net_dev->dev_addr);
8096 	}
8097 
8098 	return 1;
8099 }
8100 
8101 #define IPW_PACKET_RETRY_TIME HZ
8102 
/*
 * Detect retransmitted duplicates of a received frame.
 *
 * Compares the frame's sequence/fragment numbers against the last ones
 * seen from the same transmitter: per-peer state kept in a hash list in
 * ad-hoc mode, a single set of counters in infrastructure mode.  Returns
 * 1 when the frame is a duplicate (or out-of-order fragment) and should
 * be dropped, 0 when it should be processed.  Any other iw_mode never
 * reports a duplicate.
 */
static  int is_duplicate_packet(struct ipw_priv *priv,
				      struct libipw_hdr_4addr *header)
{
	u16 sc = le16_to_cpu(header->seq_ctl);
	u16 seq = WLAN_GET_SEQ_SEQ(sc);
	u16 frag = WLAN_GET_SEQ_FRAG(sc);
	u16 *last_seq, *last_frag;
	unsigned long *last_time;

	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		{
			struct list_head *p;
			struct ipw_ibss_seq *entry = NULL;
			u8 *mac = header->addr2;
			/* hash on the low byte of the source MAC */
			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;

			/* look up the per-peer state for this transmitter */
			list_for_each(p, &priv->ibss_mac_hash[index]) {
				entry =
				    list_entry(p, struct ipw_ibss_seq, list);
				if (ether_addr_equal(entry->mac, mac))
					break;
			}
			if (p == &priv->ibss_mac_hash[index]) {
				/* first frame from this peer: record its
				 * numbers and accept the frame */
				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
				if (!entry) {
					IPW_ERROR
					    ("Cannot malloc new mac entry\n");
					return 0;
				}
				memcpy(entry->mac, mac, ETH_ALEN);
				entry->seq_num = seq;
				entry->frag_num = frag;
				entry->packet_time = jiffies;
				list_add(&entry->list,
					 &priv->ibss_mac_hash[index]);
				return 0;
			}
			last_seq = &entry->seq_num;
			last_frag = &entry->frag_num;
			last_time = &entry->packet_time;
			break;
		}
	case IW_MODE_INFRA:
		last_seq = &priv->last_seq_num;
		last_frag = &priv->last_frag_num;
		last_time = &priv->last_packet_time;
		break;
	default:
		return 0;
	}
	/* Same sequence number inside the retry window: either an exact
	 * duplicate fragment or a fragment that arrived out of order */
	if ((*last_seq == seq) &&
	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
		if (*last_frag == frag)
			goto drop;
		if (*last_frag + 1 != frag)
			/* out-of-order fragment */
			goto drop;
	} else
		*last_seq = seq;

	*last_frag = frag;
	*last_time = jiffies;
	return 0;

      drop:
	/* Comment this line now since we observed the card receives
	 * duplicate packets but the FCTL_RETRY bit is not set in the
	 * IBSS mode with fragmentation enabled.
	 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
	return 1;
}
8175 
/*
 * Process a received 802.11 management frame.
 *
 * The frame is always handed to libipw for management processing.  In
 * ad-hoc mode, beacons and probe responses carrying our BSSID cause the
 * sender to be added to the station table.  When CFG_NET_STATS is set,
 * the raw frame (prefixed with the rx stats) is additionally delivered
 * up the stack as an ETH_P_80211_STATS packet, consuming rxb->skb.
 */
static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
				   struct ipw_rx_mem_buffer *rxb,
				   struct libipw_rx_stats *stats)
{
	struct sk_buff *skb = rxb->skb;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
	/* the 802.11 header follows the hardware's rx frame descriptor */
	struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
	    (skb->data + IPW_RX_FRAME_SIZE);

	libipw_rx_mgt(priv->ieee, header, stats);

	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
	      IEEE80211_STYPE_PROBE_RESP) ||
	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
	      IEEE80211_STYPE_BEACON))) {
		/* track peers that beacon on our IBSS */
		if (ether_addr_equal(header->addr3, priv->bssid))
			ipw_add_station(priv, header->addr2);
	}

	if (priv->config & CFG_NET_STATS) {
		IPW_DEBUG_HC("sending stat packet\n");

		/* Set the size of the skb to the size of the full
		 * ipw header and 802.11 frame */
		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
			IPW_RX_FRAME_SIZE);

		/* Advance past the ipw packet header to the 802.11 frame */
		skb_pull(skb, IPW_RX_FRAME_SIZE);

		/* Push the libipw_rx_stats before the 802.11 frame */
		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));

		skb->dev = priv->ieee->dev;

		/* Point raw at the libipw_stats */
		skb_reset_mac_header(skb);

		skb->pkt_type = PACKET_OTHERHOST;
		skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
		memset(skb->cb, 0, sizeof(rxb->skb->cb));
		netif_rx(skb);
		/* the skb is now owned by the network stack */
		rxb->skb = NULL;
	}
}
8222 
8223 /*
8224  * Main entry function for receiving a packet with 80211 headers.  This
8225  * should be called when ever the FW has notified us that there is a new
8226  * skb in the receive queue.
8227  */
/*
 * Drain the rx ring between our read cursor and the hardware's read
 * index, dispatching each packet by message type (802.11 frame or
 * firmware notification), then return the processed buffers to the
 * free pool and restock the ring.
 */
static void ipw_rx(struct ipw_priv *priv)
{
	struct ipw_rx_mem_buffer *rxb;
	struct ipw_rx_packet *pkt;
	struct libipw_hdr_4addr *header;
	u32 r, i;
	u8 network_packet;
	u8 fill_rx = 0;

	r = ipw_read32(priv, IPW_RX_READ_INDEX);
	/* the write index is read back but its value is not used */
	ipw_read32(priv, IPW_RX_WRITE_INDEX);
	i = priv->rxq->read;

	/* if many buffers are in flight, replenish inside the loop so the
	 * ucode never runs out of rx buffers */
	if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		rxb = priv->rxq->queue[i];
		if (unlikely(rxb == NULL)) {
			printk(KERN_CRIT "Queue not allocated!\n");
			break;
		}
		priv->rxq->queue[i] = NULL;

		dma_sync_single_for_cpu(&priv->pci_dev->dev, rxb->dma_addr,
					IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);

		pkt = (struct ipw_rx_packet *)rxb->skb->data;
		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
			     pkt->header.message_type,
			     pkt->header.rx_seq_num, pkt->header.control_bits);

		switch (pkt->header.message_type) {
		case RX_FRAME_TYPE:	/* 802.11 frame */  {
				struct libipw_rx_stats stats = {
					.rssi = pkt->u.frame.rssi_dbm -
					    IPW_RSSI_TO_DBM,
					.signal =
					    pkt->u.frame.rssi_dbm -
					    IPW_RSSI_TO_DBM + 0x100,
					.noise =
					    le16_to_cpu(pkt->u.frame.noise),
					.rate = pkt->u.frame.rate,
					.mac_time = jiffies,
					.received_channel =
					    pkt->u.frame.received_channel,
					.freq =
					    (pkt->u.frame.
					     control & (1 << 0)) ?
					    LIBIPW_24GHZ_BAND :
					    LIBIPW_52GHZ_BAND,
					.len = le16_to_cpu(pkt->u.frame.length),
				};

				/* only advertise the stats fields that
				 * carry meaningful (non-zero) values */
				if (stats.rssi != 0)
					stats.mask |= LIBIPW_STATMASK_RSSI;
				if (stats.signal != 0)
					stats.mask |= LIBIPW_STATMASK_SIGNAL;
				if (stats.noise != 0)
					stats.mask |= LIBIPW_STATMASK_NOISE;
				if (stats.rate != 0)
					stats.mask |= LIBIPW_STATMASK_RATE;

				priv->rx_packets++;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
		ipw_handle_promiscuous_rx(priv, rxb, &stats);
#endif

#ifdef CONFIG_IPW2200_MONITOR
				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
#ifdef CONFIG_IPW2200_RADIOTAP

                ipw_handle_data_packet_monitor(priv,
					       rxb,
					       &stats);
#else
		ipw_handle_data_packet(priv, rxb,
				       &stats);
#endif
					break;
				}
#endif

				header =
				    (struct libipw_hdr_4addr *)(rxb->skb->
								   data +
								   IPW_RX_FRAME_SIZE);
				/* TODO: Check Ad-Hoc dest/source and make sure
				 * that we are actually parsing these packets
				 * correctly -- we should probably use the
				 * frame control of the packet and disregard
				 * the current iw_mode */

				network_packet =
				    is_network_packet(priv, header);
				if (network_packet && priv->assoc_network) {
					priv->assoc_network->stats.rssi =
					    stats.rssi;
					priv->exp_avg_rssi =
					    exponential_average(priv->exp_avg_rssi,
					    stats.rssi, DEPTH_RSSI);
				}

				IPW_DEBUG_RX("Frame: len=%u\n",
					     le16_to_cpu(pkt->u.frame.length));

				/* frames shorter than their own 802.11
				 * header are corrupt */
				if (le16_to_cpu(pkt->u.frame.length) <
				    libipw_get_hdrlen(le16_to_cpu(
						    header->frame_ctl))) {
					IPW_DEBUG_DROP
					    ("Received packet is too small. "
					     "Dropping.\n");
					priv->net_dev->stats.rx_errors++;
					priv->wstats.discard.misc++;
					break;
				}

				switch (WLAN_FC_GET_TYPE
					(le16_to_cpu(header->frame_ctl))) {

				case IEEE80211_FTYPE_MGMT:
					ipw_handle_mgmt_packet(priv, rxb,
							       &stats);
					break;

				case IEEE80211_FTYPE_CTL:
					break;

				case IEEE80211_FTYPE_DATA:
					if (unlikely(!network_packet ||
						     is_duplicate_packet(priv,
									 header)))
					{
						IPW_DEBUG_DROP("Dropping: "
							       "%pM, "
							       "%pM, "
							       "%pM\n",
							       header->addr1,
							       header->addr2,
							       header->addr3);
						break;
					}

					ipw_handle_data_packet(priv, rxb,
							       &stats);

					break;
				}
				break;
			}

		case RX_HOST_NOTIFICATION_TYPE:{
				IPW_DEBUG_RX
				    ("Notification: subtype=%02X flags=%02X size=%d\n",
				     pkt->u.notification.subtype,
				     pkt->u.notification.flags,
				     le16_to_cpu(pkt->u.notification.size));
				ipw_rx_notification(priv, &pkt->u.notification);
				break;
			}

		default:
			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
				     pkt->header.message_type);
			break;
		}

		/* For now we just don't re-use anything.  We can tweak this
		 * later to try and re-use notification packets and SKBs that
		 * fail to Rx correctly */
		if (rxb->skb != NULL) {
			dev_kfree_skb_any(rxb->skb);
			rxb->skb = NULL;
		}

		dma_unmap_single(&priv->pci_dev->dev, rxb->dma_addr,
				 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
		list_add_tail(&rxb->list, &priv->rxq->rx_used);

		i = (i + 1) % RX_QUEUE_SIZE;

		/* If there are a lot of unused frames, restock the Rx queue
		 * so the ucode won't assert */
		if (fill_rx) {
			priv->rxq->read = i;
			ipw_rx_queue_replenish(priv);
		}
	}

	/* Backtrack one entry */
	priv->rxq->read = i;
	ipw_rx_queue_restock(priv);
}
8423 
8424 #define DEFAULT_RTS_THRESHOLD     2304U
8425 #define MIN_RTS_THRESHOLD         1U
8426 #define MAX_RTS_THRESHOLD         2304U
8427 #define DEFAULT_BEACON_INTERVAL   100U
8428 #define	DEFAULT_SHORT_RETRY_LIMIT 7U
8429 #define	DEFAULT_LONG_RETRY_LIMIT  4U
8430 
8431 /*
8432  * ipw_sw_reset
8433  * @option: options to control different reset behaviour
8434  * 	    0 = reset everything except the 'disable' module_param
8435  * 	    1 = reset everything and print out driver info (for probe only)
8436  * 	    2 = reset everything
8437  */
static int ipw_sw_reset(struct ipw_priv *priv, int option)
{
	int band, modulation;
	int old_mode = priv->ieee->iw_mode;

	/* Initialize module parameter values here */
	priv->config = 0;

	/* We default to disabling the LED code as right now it causes
	 * too many systems to lock up... */
	if (!led_support)
		priv->config |= CFG_NO_LED;

	if (associate)
		priv->config |= CFG_ASSOCIATE;
	else
		IPW_DEBUG_INFO("Auto associate disabled.\n");

	if (auto_create)
		priv->config |= CFG_ADHOC_CREATE;
	else
		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");

	/* any previously configured ESSID is forgotten */
	priv->config &= ~CFG_STATIC_ESSID;
	priv->essid_len = 0;
	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);

	/* honor the 'disable' module param only for non-zero options */
	if (disable && option) {
		priv->status |= STATUS_RF_KILL_SW;
		IPW_DEBUG_INFO("Radio disabled.\n");
	}

	if (default_channel != 0) {
		priv->config |= CFG_STATIC_CHANNEL;
		priv->channel = default_channel;
		IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
		/* TODO: Validate that provided channel is in range */
	}
#ifdef CONFIG_IPW2200_QOS
	ipw_qos_init(priv, qos_enable, qos_burst_enable,
		     burst_duration_CCK, burst_duration_OFDM);
#endif				/* CONFIG_IPW2200_QOS */

	/* select the operating mode requested via network_mode */
	switch (network_mode) {
	case 1:
		priv->ieee->iw_mode = IW_MODE_ADHOC;
		priv->net_dev->type = ARPHRD_ETHER;

		break;
#ifdef CONFIG_IPW2200_MONITOR
	case 2:
		priv->ieee->iw_mode = IW_MODE_MONITOR;
#ifdef CONFIG_IPW2200_RADIOTAP
		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
		priv->net_dev->type = ARPHRD_IEEE80211;
#endif
		break;
#endif
	default:
	case 0:
		priv->net_dev->type = ARPHRD_ETHER;
		priv->ieee->iw_mode = IW_MODE_INFRA;
		break;
	}

	/* with hardware crypto enabled, the host does no en/decryption */
	if (hwcrypto) {
		priv->ieee->host_encrypt = 0;
		priv->ieee->host_encrypt_msdu = 0;
		priv->ieee->host_decrypt = 0;
		priv->ieee->host_mc_decrypt = 0;
	}
	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");

	/* IPW2200/2915 is able to do hardware fragmentation. */
	priv->ieee->host_open_frag = 0;

	/* PCI device IDs 0x4223/0x4224 are the dual-band 2915ABG parts */
	if ((priv->pci_dev->device == 0x4223) ||
	    (priv->pci_dev->device == 0x4224)) {
		if (option == 1)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2915ABG Network "
			       "Connection\n");
		priv->ieee->abg_true = 1;
		band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
		modulation = LIBIPW_OFDM_MODULATION |
		    LIBIPW_CCK_MODULATION;
		priv->adapter = IPW_2915ABG;
		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
	} else {
		if (option == 1)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2200BG Network "
			       "Connection\n");

		priv->ieee->abg_true = 0;
		band = LIBIPW_24GHZ_BAND;
		modulation = LIBIPW_OFDM_MODULATION |
		    LIBIPW_CCK_MODULATION;
		priv->adapter = IPW_2200BG;
		priv->ieee->mode = IEEE_G | IEEE_B;
	}

	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;

	priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;

	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;

	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;

	/* If power management is turned on, default to AC mode */
	priv->power_mode = IPW_POWER_AC;
	priv->tx_power = IPW_TX_POWER_DEFAULT;

	/* non-zero when the interface mode did not change */
	return old_mode == priv->ieee->iw_mode;
}
8559 
8560 /*
8561  * This file defines the Wireless Extension handlers.  It does not
8562  * define any methods of hardware manipulation and relies on the
8563  * functions defined in ipw_main to provide the HW interaction.
8564  *
8565  * The exception to this is the use of the ipw_get_ordinal()
8566  * function used to poll the hardware vs. making unnecessary calls.
8567  *
8568  */
8569 
8570 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8571 {
8572 	if (channel == 0) {
8573 		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8574 		priv->config &= ~CFG_STATIC_CHANNEL;
8575 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8576 				"parameters.\n");
8577 		ipw_associate(priv);
8578 		return 0;
8579 	}
8580 
8581 	priv->config |= CFG_STATIC_CHANNEL;
8582 
8583 	if (priv->channel == channel) {
8584 		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8585 			       channel);
8586 		return 0;
8587 	}
8588 
8589 	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8590 	priv->channel = channel;
8591 
8592 #ifdef CONFIG_IPW2200_MONITOR
8593 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8594 		int i;
8595 		if (priv->status & STATUS_SCANNING) {
8596 			IPW_DEBUG_SCAN("Scan abort triggered due to "
8597 				       "channel change.\n");
8598 			ipw_abort_scan(priv);
8599 		}
8600 
8601 		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8602 			udelay(10);
8603 
8604 		if (priv->status & STATUS_SCANNING)
8605 			IPW_DEBUG_SCAN("Still scanning...\n");
8606 		else
8607 			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8608 				       1000 - i);
8609 
8610 		return 0;
8611 	}
8612 #endif				/* CONFIG_IPW2200_MONITOR */
8613 
8614 	/* Network configuration changed -- force [re]association */
8615 	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8616 	if (!ipw_disassociate(priv))
8617 		ipw_associate(priv);
8618 
8619 	return 0;
8620 }
8621 
8622 static int ipw_wx_set_freq(struct net_device *dev,
8623 			   struct iw_request_info *info,
8624 			   union iwreq_data *wrqu, char *extra)
8625 {
8626 	struct ipw_priv *priv = libipw_priv(dev);
8627 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8628 	struct iw_freq *fwrq = &wrqu->freq;
8629 	int ret = 0, i;
8630 	u8 channel, flags;
8631 	int band;
8632 
8633 	if (fwrq->m == 0) {
8634 		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8635 		mutex_lock(&priv->mutex);
8636 		ret = ipw_set_channel(priv, 0);
8637 		mutex_unlock(&priv->mutex);
8638 		return ret;
8639 	}
8640 	/* if setting by freq convert to channel */
8641 	if (fwrq->e == 1) {
8642 		channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8643 		if (channel == 0)
8644 			return -EINVAL;
8645 	} else
8646 		channel = fwrq->m;
8647 
8648 	if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8649 		return -EINVAL;
8650 
8651 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8652 		i = libipw_channel_to_index(priv->ieee, channel);
8653 		if (i == -1)
8654 			return -EINVAL;
8655 
8656 		flags = (band == LIBIPW_24GHZ_BAND) ?
8657 		    geo->bg[i].flags : geo->a[i].flags;
8658 		if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8659 			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8660 			return -EINVAL;
8661 		}
8662 	}
8663 
8664 	IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8665 	mutex_lock(&priv->mutex);
8666 	ret = ipw_set_channel(priv, channel);
8667 	mutex_unlock(&priv->mutex);
8668 	return ret;
8669 }
8670 
8671 static int ipw_wx_get_freq(struct net_device *dev,
8672 			   struct iw_request_info *info,
8673 			   union iwreq_data *wrqu, char *extra)
8674 {
8675 	struct ipw_priv *priv = libipw_priv(dev);
8676 
8677 	wrqu->freq.e = 0;
8678 
8679 	/* If we are associated, trying to associate, or have a statically
8680 	 * configured CHANNEL then return that; otherwise return ANY */
8681 	mutex_lock(&priv->mutex);
8682 	if (priv->config & CFG_STATIC_CHANNEL ||
8683 	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8684 		int i;
8685 
8686 		i = libipw_channel_to_index(priv->ieee, priv->channel);
8687 		BUG_ON(i == -1);
8688 		wrqu->freq.e = 1;
8689 
8690 		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8691 		case LIBIPW_52GHZ_BAND:
8692 			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8693 			break;
8694 
8695 		case LIBIPW_24GHZ_BAND:
8696 			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8697 			break;
8698 
8699 		default:
8700 			BUG();
8701 		}
8702 	} else
8703 		wrqu->freq.m = 0;
8704 
8705 	mutex_unlock(&priv->mutex);
8706 	IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8707 	return 0;
8708 }
8709 
/*
 * SIOCSIWMODE handler: switch between infrastructure, ad-hoc and (when
 * built in) monitor operating modes.  A mode change performs a soft
 * reset, releases the loaded firmware and schedules an adapter restart,
 * so the change takes effect asynchronously.
 */
static int ipw_wx_set_mode(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	int err = 0;

	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);

	/* validate the requested mode; AUTO is mapped to INFRA */
	switch (wrqu->mode) {
#ifdef CONFIG_IPW2200_MONITOR
	case IW_MODE_MONITOR:
#endif
	case IW_MODE_ADHOC:
	case IW_MODE_INFRA:
		break;
	case IW_MODE_AUTO:
		wrqu->mode = IW_MODE_INFRA;
		break;
	default:
		return -EINVAL;
	}
	/* no work needed when the mode is unchanged */
	if (wrqu->mode == priv->ieee->iw_mode)
		return 0;

	mutex_lock(&priv->mutex);

	ipw_sw_reset(priv, 0);

#ifdef CONFIG_IPW2200_MONITOR
	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
		priv->net_dev->type = ARPHRD_ETHER;

	/* monitor mode changes the ARP hardware type reported to
	 * userspace (radiotap framing when available) */
	if (wrqu->mode == IW_MODE_MONITOR)
#ifdef CONFIG_IPW2200_RADIOTAP
		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
		priv->net_dev->type = ARPHRD_IEEE80211;
#endif
#endif				/* CONFIG_IPW2200_MONITOR */

	/* Free the existing firmware and reset the fw_loaded
	 * flag so ipw_load() will bring in the new firmware */
	free_firmware();

	priv->ieee->iw_mode = wrqu->mode;

	/* the restart worker brings the device back up in the new mode */
	schedule_work(&priv->adapter_restart);
	mutex_unlock(&priv->mutex);
	return err;
}
8761 
8762 static int ipw_wx_get_mode(struct net_device *dev,
8763 			   struct iw_request_info *info,
8764 			   union iwreq_data *wrqu, char *extra)
8765 {
8766 	struct ipw_priv *priv = libipw_priv(dev);
8767 	mutex_lock(&priv->mutex);
8768 	wrqu->mode = priv->ieee->iw_mode;
8769 	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8770 	mutex_unlock(&priv->mutex);
8771 	return 0;
8772 }
8773 
/* Values are in microseconds.
 * NOTE(review): the two tables are parallel (one entry per index) and
 * appear to be indexed by power-saving level -- confirm against the
 * code that consumes them. */
static const s32 timeout_duration[] = {
	350000,
	250000,
	75000,
	37000,
	25000,
};

static const s32 period_duration[] = {
	400000,
	700000,
	1000000,
	1000000,
	1000000
};
8790 
/*
 * SIOCGIWRANGE handler: report device capabilities to userspace --
 * bit rates, thresholds, encryption key sizes, supported channels and
 * Wireless Extension event/encryption capability flags.
 */
static int ipw_wx_get_range(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	struct iw_range *range = (struct iw_range *)extra;
	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
	int i = 0, j;

	wrqu->data.length = sizeof(*range);
	memset(range, 0, sizeof(*range));

	/* 54Mbs == ~27 Mb/s real (802.11g) */
	range->throughput = 27 * 1000 * 1000;

	range->max_qual.qual = 100;
	/* TODO: Find real max RSSI and stick here */
	range->max_qual.level = 0;
	range->max_qual.noise = 0;
	range->max_qual.updated = 7;	/* Updated all three */

	range->avg_qual.qual = 70;
	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
	range->avg_qual.level = 0;	/* FIXME to real average level */
	range->avg_qual.noise = 0;
	range->avg_qual.updated = 7;	/* Updated all three */
	mutex_lock(&priv->mutex);
	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);

	/* rate values are stored in 500 kb/s units in the low 7 bits */
	for (i = 0; i < range->num_bitrates; i++)
		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
		    500000;

	range->max_rts = DEFAULT_RTS_THRESHOLD;
	range->min_frag = MIN_FRAG_THRESHOLD;
	range->max_frag = MAX_FRAG_THRESHOLD;

	range->encoding_size[0] = 5;	/* 40-bit WEP key */
	range->encoding_size[1] = 13;	/* 104-bit WEP key */
	range->num_encoding_sizes = 2;
	range->max_encoding_tokens = WEP_KEYS;

	/* Set the Wireless Extension versions */
	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 18;

	/* enumerate 2.4GHz channels, skipping passive-scan-only channels
	 * in ad-hoc mode since we may not transmit on them */
	i = 0;
	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
			    (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
				continue;

			range->freq[i].i = geo->bg[j].channel;
			range->freq[i].m = geo->bg[j].freq * 100000;
			range->freq[i].e = 1;
			i++;
		}
	}

	/* likewise for the 5GHz band on A-capable hardware */
	if (priv->ieee->mode & IEEE_A) {
		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
			    (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
				continue;

			range->freq[i].i = geo->a[j].channel;
			range->freq[i].m = geo->a[j].freq * 100000;
			range->freq[i].e = 1;
			i++;
		}
	}

	range->num_channels = i;
	range->num_frequency = i;

	mutex_unlock(&priv->mutex);

	/* Event capability (kernel + driver) */
	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
	range->event_capa[1] = IW_EVENT_CAPA_K_1;

	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;

	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;

	IPW_DEBUG_WX("GET Range\n");
	return 0;
}
8884 
8885 static int ipw_wx_set_wap(struct net_device *dev,
8886 			  struct iw_request_info *info,
8887 			  union iwreq_data *wrqu, char *extra)
8888 {
8889 	struct ipw_priv *priv = libipw_priv(dev);
8890 
8891 	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8892 		return -EINVAL;
8893 	mutex_lock(&priv->mutex);
8894 	if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
8895 	    is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
8896 		/* we disable mandatory BSSID association */
8897 		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8898 		priv->config &= ~CFG_STATIC_BSSID;
8899 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8900 				"parameters.\n");
8901 		ipw_associate(priv);
8902 		mutex_unlock(&priv->mutex);
8903 		return 0;
8904 	}
8905 
8906 	priv->config |= CFG_STATIC_BSSID;
8907 	if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
8908 		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8909 		mutex_unlock(&priv->mutex);
8910 		return 0;
8911 	}
8912 
8913 	IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
8914 		     wrqu->ap_addr.sa_data);
8915 
8916 	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8917 
8918 	/* Network configuration changed -- force [re]association */
8919 	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8920 	if (!ipw_disassociate(priv))
8921 		ipw_associate(priv);
8922 
8923 	mutex_unlock(&priv->mutex);
8924 	return 0;
8925 }
8926 
8927 static int ipw_wx_get_wap(struct net_device *dev,
8928 			  struct iw_request_info *info,
8929 			  union iwreq_data *wrqu, char *extra)
8930 {
8931 	struct ipw_priv *priv = libipw_priv(dev);
8932 
8933 	/* If we are associated, trying to associate, or have a statically
8934 	 * configured BSSID then return that; otherwise return ANY */
8935 	mutex_lock(&priv->mutex);
8936 	if (priv->config & CFG_STATIC_BSSID ||
8937 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8938 		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8939 		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8940 	} else
8941 		eth_zero_addr(wrqu->ap_addr.sa_data);
8942 
8943 	IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
8944 		     wrqu->ap_addr.sa_data);
8945 	mutex_unlock(&priv->mutex);
8946 	return 0;
8947 }
8948 
8949 static int ipw_wx_set_essid(struct net_device *dev,
8950 			    struct iw_request_info *info,
8951 			    union iwreq_data *wrqu, char *extra)
8952 {
8953 	struct ipw_priv *priv = libipw_priv(dev);
8954         int length;
8955 
8956         mutex_lock(&priv->mutex);
8957 
8958         if (!wrqu->essid.flags)
8959         {
8960                 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8961                 ipw_disassociate(priv);
8962                 priv->config &= ~CFG_STATIC_ESSID;
8963                 ipw_associate(priv);
8964                 mutex_unlock(&priv->mutex);
8965                 return 0;
8966         }
8967 
8968 	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
8969 
8970 	priv->config |= CFG_STATIC_ESSID;
8971 
8972 	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
8973 	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
8974 		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8975 		mutex_unlock(&priv->mutex);
8976 		return 0;
8977 	}
8978 
8979 	IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length);
8980 
8981 	priv->essid_len = length;
8982 	memcpy(priv->essid, extra, priv->essid_len);
8983 
8984 	/* Network configuration changed -- force [re]association */
8985 	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8986 	if (!ipw_disassociate(priv))
8987 		ipw_associate(priv);
8988 
8989 	mutex_unlock(&priv->mutex);
8990 	return 0;
8991 }
8992 
8993 static int ipw_wx_get_essid(struct net_device *dev,
8994 			    struct iw_request_info *info,
8995 			    union iwreq_data *wrqu, char *extra)
8996 {
8997 	struct ipw_priv *priv = libipw_priv(dev);
8998 
8999 	/* If we are associated, trying to associate, or have a statically
9000 	 * configured ESSID then return that; otherwise return ANY */
9001 	mutex_lock(&priv->mutex);
9002 	if (priv->config & CFG_STATIC_ESSID ||
9003 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9004 		IPW_DEBUG_WX("Getting essid: '%*pE'\n",
9005 			     priv->essid_len, priv->essid);
9006 		memcpy(extra, priv->essid, priv->essid_len);
9007 		wrqu->essid.length = priv->essid_len;
9008 		wrqu->essid.flags = 1;	/* active */
9009 	} else {
9010 		IPW_DEBUG_WX("Getting essid: ANY\n");
9011 		wrqu->essid.length = 0;
9012 		wrqu->essid.flags = 0;	/* active */
9013 	}
9014 	mutex_unlock(&priv->mutex);
9015 	return 0;
9016 }
9017 
9018 static int ipw_wx_set_nick(struct net_device *dev,
9019 			   struct iw_request_info *info,
9020 			   union iwreq_data *wrqu, char *extra)
9021 {
9022 	struct ipw_priv *priv = libipw_priv(dev);
9023 
9024 	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9025 	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9026 		return -E2BIG;
9027 	mutex_lock(&priv->mutex);
9028 	wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
9029 	memset(priv->nick, 0, sizeof(priv->nick));
9030 	memcpy(priv->nick, extra, wrqu->data.length);
9031 	IPW_DEBUG_TRACE("<<\n");
9032 	mutex_unlock(&priv->mutex);
9033 	return 0;
9034 
9035 }
9036 
9037 static int ipw_wx_get_nick(struct net_device *dev,
9038 			   struct iw_request_info *info,
9039 			   union iwreq_data *wrqu, char *extra)
9040 {
9041 	struct ipw_priv *priv = libipw_priv(dev);
9042 	IPW_DEBUG_WX("Getting nick\n");
9043 	mutex_lock(&priv->mutex);
9044 	wrqu->data.length = strlen(priv->nick);
9045 	memcpy(extra, priv->nick, wrqu->data.length);
9046 	wrqu->data.flags = 1;	/* active */
9047 	mutex_unlock(&priv->mutex);
9048 	return 0;
9049 }
9050 
9051 static int ipw_wx_set_sens(struct net_device *dev,
9052 			    struct iw_request_info *info,
9053 			    union iwreq_data *wrqu, char *extra)
9054 {
9055 	struct ipw_priv *priv = libipw_priv(dev);
9056 	int err = 0;
9057 
9058 	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9059 	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9060 	mutex_lock(&priv->mutex);
9061 
9062 	if (wrqu->sens.fixed == 0)
9063 	{
9064 		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9065 		priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9066 		goto out;
9067 	}
9068 	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9069 	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9070 		err = -EINVAL;
9071 		goto out;
9072 	}
9073 
9074 	priv->roaming_threshold = wrqu->sens.value;
9075 	priv->disassociate_threshold = 3*wrqu->sens.value;
9076       out:
9077 	mutex_unlock(&priv->mutex);
9078 	return err;
9079 }
9080 
9081 static int ipw_wx_get_sens(struct net_device *dev,
9082 			    struct iw_request_info *info,
9083 			    union iwreq_data *wrqu, char *extra)
9084 {
9085 	struct ipw_priv *priv = libipw_priv(dev);
9086 	mutex_lock(&priv->mutex);
9087 	wrqu->sens.fixed = 1;
9088 	wrqu->sens.value = priv->roaming_threshold;
9089 	mutex_unlock(&priv->mutex);
9090 
9091 	IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9092 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9093 
9094 	return 0;
9095 }
9096 
9097 static int ipw_wx_set_rate(struct net_device *dev,
9098 			   struct iw_request_info *info,
9099 			   union iwreq_data *wrqu, char *extra)
9100 {
9101 	/* TODO: We should use semaphores or locks for access to priv */
9102 	struct ipw_priv *priv = libipw_priv(dev);
9103 	u32 target_rate = wrqu->bitrate.value;
9104 	u32 fixed, mask;
9105 
9106 	/* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9107 	/* value = X, fixed = 1 means only rate X */
9108 	/* value = X, fixed = 0 means all rates lower equal X */
9109 
9110 	if (target_rate == -1) {
9111 		fixed = 0;
9112 		mask = LIBIPW_DEFAULT_RATES_MASK;
9113 		/* Now we should reassociate */
9114 		goto apply;
9115 	}
9116 
9117 	mask = 0;
9118 	fixed = wrqu->bitrate.fixed;
9119 
9120 	if (target_rate == 1000000 || !fixed)
9121 		mask |= LIBIPW_CCK_RATE_1MB_MASK;
9122 	if (target_rate == 1000000)
9123 		goto apply;
9124 
9125 	if (target_rate == 2000000 || !fixed)
9126 		mask |= LIBIPW_CCK_RATE_2MB_MASK;
9127 	if (target_rate == 2000000)
9128 		goto apply;
9129 
9130 	if (target_rate == 5500000 || !fixed)
9131 		mask |= LIBIPW_CCK_RATE_5MB_MASK;
9132 	if (target_rate == 5500000)
9133 		goto apply;
9134 
9135 	if (target_rate == 6000000 || !fixed)
9136 		mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9137 	if (target_rate == 6000000)
9138 		goto apply;
9139 
9140 	if (target_rate == 9000000 || !fixed)
9141 		mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9142 	if (target_rate == 9000000)
9143 		goto apply;
9144 
9145 	if (target_rate == 11000000 || !fixed)
9146 		mask |= LIBIPW_CCK_RATE_11MB_MASK;
9147 	if (target_rate == 11000000)
9148 		goto apply;
9149 
9150 	if (target_rate == 12000000 || !fixed)
9151 		mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9152 	if (target_rate == 12000000)
9153 		goto apply;
9154 
9155 	if (target_rate == 18000000 || !fixed)
9156 		mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9157 	if (target_rate == 18000000)
9158 		goto apply;
9159 
9160 	if (target_rate == 24000000 || !fixed)
9161 		mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9162 	if (target_rate == 24000000)
9163 		goto apply;
9164 
9165 	if (target_rate == 36000000 || !fixed)
9166 		mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9167 	if (target_rate == 36000000)
9168 		goto apply;
9169 
9170 	if (target_rate == 48000000 || !fixed)
9171 		mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9172 	if (target_rate == 48000000)
9173 		goto apply;
9174 
9175 	if (target_rate == 54000000 || !fixed)
9176 		mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9177 	if (target_rate == 54000000)
9178 		goto apply;
9179 
9180 	IPW_DEBUG_WX("invalid rate specified, returning error\n");
9181 	return -EINVAL;
9182 
9183       apply:
9184 	IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9185 		     mask, fixed ? "fixed" : "sub-rates");
9186 	mutex_lock(&priv->mutex);
9187 	if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9188 		priv->config &= ~CFG_FIXED_RATE;
9189 		ipw_set_fixed_rate(priv, priv->ieee->mode);
9190 	} else
9191 		priv->config |= CFG_FIXED_RATE;
9192 
9193 	if (priv->rates_mask == mask) {
9194 		IPW_DEBUG_WX("Mask set to current mask.\n");
9195 		mutex_unlock(&priv->mutex);
9196 		return 0;
9197 	}
9198 
9199 	priv->rates_mask = mask;
9200 
9201 	/* Network configuration changed -- force [re]association */
9202 	IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9203 	if (!ipw_disassociate(priv))
9204 		ipw_associate(priv);
9205 
9206 	mutex_unlock(&priv->mutex);
9207 	return 0;
9208 }
9209 
9210 static int ipw_wx_get_rate(struct net_device *dev,
9211 			   struct iw_request_info *info,
9212 			   union iwreq_data *wrqu, char *extra)
9213 {
9214 	struct ipw_priv *priv = libipw_priv(dev);
9215 	mutex_lock(&priv->mutex);
9216 	wrqu->bitrate.value = priv->last_rate;
9217 	wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9218 	mutex_unlock(&priv->mutex);
9219 	IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9220 	return 0;
9221 }
9222 
9223 static int ipw_wx_set_rts(struct net_device *dev,
9224 			  struct iw_request_info *info,
9225 			  union iwreq_data *wrqu, char *extra)
9226 {
9227 	struct ipw_priv *priv = libipw_priv(dev);
9228 	mutex_lock(&priv->mutex);
9229 	if (wrqu->rts.disabled || !wrqu->rts.fixed)
9230 		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9231 	else {
9232 		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9233 		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
9234 			mutex_unlock(&priv->mutex);
9235 			return -EINVAL;
9236 		}
9237 		priv->rts_threshold = wrqu->rts.value;
9238 	}
9239 
9240 	ipw_send_rts_threshold(priv, priv->rts_threshold);
9241 	mutex_unlock(&priv->mutex);
9242 	IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9243 	return 0;
9244 }
9245 
9246 static int ipw_wx_get_rts(struct net_device *dev,
9247 			  struct iw_request_info *info,
9248 			  union iwreq_data *wrqu, char *extra)
9249 {
9250 	struct ipw_priv *priv = libipw_priv(dev);
9251 	mutex_lock(&priv->mutex);
9252 	wrqu->rts.value = priv->rts_threshold;
9253 	wrqu->rts.fixed = 0;	/* no auto select */
9254 	wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9255 	mutex_unlock(&priv->mutex);
9256 	IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9257 	return 0;
9258 }
9259 
9260 static int ipw_wx_set_txpow(struct net_device *dev,
9261 			    struct iw_request_info *info,
9262 			    union iwreq_data *wrqu, char *extra)
9263 {
9264 	struct ipw_priv *priv = libipw_priv(dev);
9265 	int err = 0;
9266 
9267 	mutex_lock(&priv->mutex);
9268 	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9269 		err = -EINPROGRESS;
9270 		goto out;
9271 	}
9272 
9273 	if (!wrqu->power.fixed)
9274 		wrqu->power.value = IPW_TX_POWER_DEFAULT;
9275 
9276 	if (wrqu->power.flags != IW_TXPOW_DBM) {
9277 		err = -EINVAL;
9278 		goto out;
9279 	}
9280 
9281 	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9282 	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
9283 		err = -EINVAL;
9284 		goto out;
9285 	}
9286 
9287 	priv->tx_power = wrqu->power.value;
9288 	err = ipw_set_tx_power(priv);
9289       out:
9290 	mutex_unlock(&priv->mutex);
9291 	return err;
9292 }
9293 
9294 static int ipw_wx_get_txpow(struct net_device *dev,
9295 			    struct iw_request_info *info,
9296 			    union iwreq_data *wrqu, char *extra)
9297 {
9298 	struct ipw_priv *priv = libipw_priv(dev);
9299 	mutex_lock(&priv->mutex);
9300 	wrqu->power.value = priv->tx_power;
9301 	wrqu->power.fixed = 1;
9302 	wrqu->power.flags = IW_TXPOW_DBM;
9303 	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9304 	mutex_unlock(&priv->mutex);
9305 
9306 	IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9307 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9308 
9309 	return 0;
9310 }
9311 
9312 static int ipw_wx_set_frag(struct net_device *dev,
9313 			   struct iw_request_info *info,
9314 			   union iwreq_data *wrqu, char *extra)
9315 {
9316 	struct ipw_priv *priv = libipw_priv(dev);
9317 	mutex_lock(&priv->mutex);
9318 	if (wrqu->frag.disabled || !wrqu->frag.fixed)
9319 		priv->ieee->fts = DEFAULT_FTS;
9320 	else {
9321 		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9322 		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9323 			mutex_unlock(&priv->mutex);
9324 			return -EINVAL;
9325 		}
9326 
9327 		priv->ieee->fts = wrqu->frag.value & ~0x1;
9328 	}
9329 
9330 	ipw_send_frag_threshold(priv, wrqu->frag.value);
9331 	mutex_unlock(&priv->mutex);
9332 	IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9333 	return 0;
9334 }
9335 
9336 static int ipw_wx_get_frag(struct net_device *dev,
9337 			   struct iw_request_info *info,
9338 			   union iwreq_data *wrqu, char *extra)
9339 {
9340 	struct ipw_priv *priv = libipw_priv(dev);
9341 	mutex_lock(&priv->mutex);
9342 	wrqu->frag.value = priv->ieee->fts;
9343 	wrqu->frag.fixed = 0;	/* no auto select */
9344 	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9345 	mutex_unlock(&priv->mutex);
9346 	IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9347 
9348 	return 0;
9349 }
9350 
9351 static int ipw_wx_set_retry(struct net_device *dev,
9352 			    struct iw_request_info *info,
9353 			    union iwreq_data *wrqu, char *extra)
9354 {
9355 	struct ipw_priv *priv = libipw_priv(dev);
9356 
9357 	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9358 		return -EINVAL;
9359 
9360 	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9361 		return 0;
9362 
9363 	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9364 		return -EINVAL;
9365 
9366 	mutex_lock(&priv->mutex);
9367 	if (wrqu->retry.flags & IW_RETRY_SHORT)
9368 		priv->short_retry_limit = (u8) wrqu->retry.value;
9369 	else if (wrqu->retry.flags & IW_RETRY_LONG)
9370 		priv->long_retry_limit = (u8) wrqu->retry.value;
9371 	else {
9372 		priv->short_retry_limit = (u8) wrqu->retry.value;
9373 		priv->long_retry_limit = (u8) wrqu->retry.value;
9374 	}
9375 
9376 	ipw_send_retry_limit(priv, priv->short_retry_limit,
9377 			     priv->long_retry_limit);
9378 	mutex_unlock(&priv->mutex);
9379 	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9380 		     priv->short_retry_limit, priv->long_retry_limit);
9381 	return 0;
9382 }
9383 
9384 static int ipw_wx_get_retry(struct net_device *dev,
9385 			    struct iw_request_info *info,
9386 			    union iwreq_data *wrqu, char *extra)
9387 {
9388 	struct ipw_priv *priv = libipw_priv(dev);
9389 
9390 	mutex_lock(&priv->mutex);
9391 	wrqu->retry.disabled = 0;
9392 
9393 	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9394 		mutex_unlock(&priv->mutex);
9395 		return -EINVAL;
9396 	}
9397 
9398 	if (wrqu->retry.flags & IW_RETRY_LONG) {
9399 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9400 		wrqu->retry.value = priv->long_retry_limit;
9401 	} else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9402 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9403 		wrqu->retry.value = priv->short_retry_limit;
9404 	} else {
9405 		wrqu->retry.flags = IW_RETRY_LIMIT;
9406 		wrqu->retry.value = priv->short_retry_limit;
9407 	}
9408 	mutex_unlock(&priv->mutex);
9409 
9410 	IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9411 
9412 	return 0;
9413 }
9414 
9415 static int ipw_wx_set_scan(struct net_device *dev,
9416 			   struct iw_request_info *info,
9417 			   union iwreq_data *wrqu, char *extra)
9418 {
9419 	struct ipw_priv *priv = libipw_priv(dev);
9420 	struct iw_scan_req *req = (struct iw_scan_req *)extra;
9421 	struct delayed_work *work = NULL;
9422 
9423 	mutex_lock(&priv->mutex);
9424 
9425 	priv->user_requested_scan = 1;
9426 
9427 	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9428 		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9429 			int len = min((int)req->essid_len,
9430 			              (int)sizeof(priv->direct_scan_ssid));
9431 			memcpy(priv->direct_scan_ssid, req->essid, len);
9432 			priv->direct_scan_ssid_len = len;
9433 			work = &priv->request_direct_scan;
9434 		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9435 			work = &priv->request_passive_scan;
9436 		}
9437 	} else {
9438 		/* Normal active broadcast scan */
9439 		work = &priv->request_scan;
9440 	}
9441 
9442 	mutex_unlock(&priv->mutex);
9443 
9444 	IPW_DEBUG_WX("Start scan\n");
9445 
9446 	schedule_delayed_work(work, 0);
9447 
9448 	return 0;
9449 }
9450 
9451 static int ipw_wx_get_scan(struct net_device *dev,
9452 			   struct iw_request_info *info,
9453 			   union iwreq_data *wrqu, char *extra)
9454 {
9455 	struct ipw_priv *priv = libipw_priv(dev);
9456 	return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9457 }
9458 
9459 static int ipw_wx_set_encode(struct net_device *dev,
9460 			     struct iw_request_info *info,
9461 			     union iwreq_data *wrqu, char *key)
9462 {
9463 	struct ipw_priv *priv = libipw_priv(dev);
9464 	int ret;
9465 	u32 cap = priv->capability;
9466 
9467 	mutex_lock(&priv->mutex);
9468 	ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9469 
9470 	/* In IBSS mode, we need to notify the firmware to update
9471 	 * the beacon info after we changed the capability. */
9472 	if (cap != priv->capability &&
9473 	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
9474 	    priv->status & STATUS_ASSOCIATED)
9475 		ipw_disassociate(priv);
9476 
9477 	mutex_unlock(&priv->mutex);
9478 	return ret;
9479 }
9480 
9481 static int ipw_wx_get_encode(struct net_device *dev,
9482 			     struct iw_request_info *info,
9483 			     union iwreq_data *wrqu, char *key)
9484 {
9485 	struct ipw_priv *priv = libipw_priv(dev);
9486 	return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9487 }
9488 
9489 static int ipw_wx_set_power(struct net_device *dev,
9490 			    struct iw_request_info *info,
9491 			    union iwreq_data *wrqu, char *extra)
9492 {
9493 	struct ipw_priv *priv = libipw_priv(dev);
9494 	int err;
9495 	mutex_lock(&priv->mutex);
9496 	if (wrqu->power.disabled) {
9497 		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9498 		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9499 		if (err) {
9500 			IPW_DEBUG_WX("failed setting power mode.\n");
9501 			mutex_unlock(&priv->mutex);
9502 			return err;
9503 		}
9504 		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9505 		mutex_unlock(&priv->mutex);
9506 		return 0;
9507 	}
9508 
9509 	switch (wrqu->power.flags & IW_POWER_MODE) {
9510 	case IW_POWER_ON:	/* If not specified */
9511 	case IW_POWER_MODE:	/* If set all mask */
9512 	case IW_POWER_ALL_R:	/* If explicitly state all */
9513 		break;
9514 	default:		/* Otherwise we don't support it */
9515 		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9516 			     wrqu->power.flags);
9517 		mutex_unlock(&priv->mutex);
9518 		return -EOPNOTSUPP;
9519 	}
9520 
9521 	/* If the user hasn't specified a power management mode yet, default
9522 	 * to BATTERY */
9523 	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9524 		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9525 	else
9526 		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9527 
9528 	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9529 	if (err) {
9530 		IPW_DEBUG_WX("failed setting power mode.\n");
9531 		mutex_unlock(&priv->mutex);
9532 		return err;
9533 	}
9534 
9535 	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9536 	mutex_unlock(&priv->mutex);
9537 	return 0;
9538 }
9539 
9540 static int ipw_wx_get_power(struct net_device *dev,
9541 			    struct iw_request_info *info,
9542 			    union iwreq_data *wrqu, char *extra)
9543 {
9544 	struct ipw_priv *priv = libipw_priv(dev);
9545 	mutex_lock(&priv->mutex);
9546 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9547 		wrqu->power.disabled = 1;
9548 	else
9549 		wrqu->power.disabled = 0;
9550 
9551 	mutex_unlock(&priv->mutex);
9552 	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9553 
9554 	return 0;
9555 }
9556 
9557 static int ipw_wx_set_powermode(struct net_device *dev,
9558 				struct iw_request_info *info,
9559 				union iwreq_data *wrqu, char *extra)
9560 {
9561 	struct ipw_priv *priv = libipw_priv(dev);
9562 	int mode = *(int *)extra;
9563 	int err;
9564 
9565 	mutex_lock(&priv->mutex);
9566 	if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9567 		mode = IPW_POWER_AC;
9568 
9569 	if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9570 		err = ipw_send_power_mode(priv, mode);
9571 		if (err) {
9572 			IPW_DEBUG_WX("failed setting power mode.\n");
9573 			mutex_unlock(&priv->mutex);
9574 			return err;
9575 		}
9576 		priv->power_mode = IPW_POWER_ENABLED | mode;
9577 	}
9578 	mutex_unlock(&priv->mutex);
9579 	return 0;
9580 }
9581 
9582 #define MAX_WX_STRING 80
9583 static int ipw_wx_get_powermode(struct net_device *dev,
9584 				struct iw_request_info *info,
9585 				union iwreq_data *wrqu, char *extra)
9586 {
9587 	struct ipw_priv *priv = libipw_priv(dev);
9588 	int level = IPW_POWER_LEVEL(priv->power_mode);
9589 	char *p = extra;
9590 
9591 	p += scnprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9592 
9593 	switch (level) {
9594 	case IPW_POWER_AC:
9595 		p += scnprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9596 		break;
9597 	case IPW_POWER_BATTERY:
9598 		p += scnprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9599 		break;
9600 	default:
9601 		p += scnprintf(p, MAX_WX_STRING - (p - extra),
9602 			      "(Timeout %dms, Period %dms)",
9603 			      timeout_duration[level - 1] / 1000,
9604 			      period_duration[level - 1] / 1000);
9605 	}
9606 
9607 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9608 		p += scnprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9609 
9610 	wrqu->data.length = p - extra + 1;
9611 
9612 	return 0;
9613 }
9614 
/* Private ioctl: select which 802.11 PHY modes (a/b/g bitmask in *extra)
 * the device should operate in, then force a [re]association so the new
 * band/modulation settings take effect. */
static int ipw_wx_set_wireless_mode(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	int mode = *(int *)extra;
	u8 band = 0, modulation = 0;

	/* Reject an empty mode or any bits outside the a/b/g mask. */
	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
		return -EINVAL;
	}
	mutex_lock(&priv->mutex);
	if (priv->adapter == IPW_2915ABG) {
		/* Start optimistic; each mode bit NOT selected below clears
		 * abg_true again, so it stays set only for full a+b+g. */
		priv->ieee->abg_true = 1;
		if (mode & IEEE_A) {
			band |= LIBIPW_52GHZ_BAND;
			modulation |= LIBIPW_OFDM_MODULATION;
		} else
			priv->ieee->abg_true = 0;
	} else {
		/* 2200BG hardware has no 5 GHz radio. */
		if (mode & IEEE_A) {
			IPW_WARNING("Attempt to set 2200BG into "
				    "802.11a mode\n");
			mutex_unlock(&priv->mutex);
			return -EINVAL;
		}

		priv->ieee->abg_true = 0;
	}

	if (mode & IEEE_B) {
		band |= LIBIPW_24GHZ_BAND;
		modulation |= LIBIPW_CCK_MODULATION;
	} else
		priv->ieee->abg_true = 0;

	if (mode & IEEE_G) {
		band |= LIBIPW_24GHZ_BAND;
		modulation |= LIBIPW_OFDM_MODULATION;
	} else
		priv->ieee->abg_true = 0;

	/* Commit the new mode and rebuild the supported-rates set. */
	priv->ieee->mode = mode;
	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;
	init_supported_rates(priv, &priv->rates);

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
	if (!ipw_disassociate(priv)) {
		ipw_send_supported_rates(priv, &priv->rates);
		ipw_associate(priv);
	}

	/* Update the band LEDs */
	ipw_led_band_on(priv);

	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
		     mode & IEEE_A ? 'a' : '.',
		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
	mutex_unlock(&priv->mutex);
	return 0;
}
9679 
9680 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9681 				    struct iw_request_info *info,
9682 				    union iwreq_data *wrqu, char *extra)
9683 {
9684 	struct ipw_priv *priv = libipw_priv(dev);
9685 	mutex_lock(&priv->mutex);
9686 	switch (priv->ieee->mode) {
9687 	case IEEE_A:
9688 		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9689 		break;
9690 	case IEEE_B:
9691 		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9692 		break;
9693 	case IEEE_A | IEEE_B:
9694 		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9695 		break;
9696 	case IEEE_G:
9697 		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9698 		break;
9699 	case IEEE_A | IEEE_G:
9700 		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9701 		break;
9702 	case IEEE_B | IEEE_G:
9703 		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9704 		break;
9705 	case IEEE_A | IEEE_B | IEEE_G:
9706 		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9707 		break;
9708 	default:
9709 		strncpy(extra, "unknown", MAX_WX_STRING);
9710 		break;
9711 	}
9712 	extra[MAX_WX_STRING - 1] = '\0';
9713 
9714 	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9715 
9716 	wrqu->data.length = strlen(extra) + 1;
9717 	mutex_unlock(&priv->mutex);
9718 
9719 	return 0;
9720 }
9721 
9722 static int ipw_wx_set_preamble(struct net_device *dev,
9723 			       struct iw_request_info *info,
9724 			       union iwreq_data *wrqu, char *extra)
9725 {
9726 	struct ipw_priv *priv = libipw_priv(dev);
9727 	int mode = *(int *)extra;
9728 	mutex_lock(&priv->mutex);
9729 	/* Switching from SHORT -> LONG requires a disassociation */
9730 	if (mode == 1) {
9731 		if (!(priv->config & CFG_PREAMBLE_LONG)) {
9732 			priv->config |= CFG_PREAMBLE_LONG;
9733 
9734 			/* Network configuration changed -- force [re]association */
9735 			IPW_DEBUG_ASSOC
9736 			    ("[re]association triggered due to preamble change.\n");
9737 			if (!ipw_disassociate(priv))
9738 				ipw_associate(priv);
9739 		}
9740 		goto done;
9741 	}
9742 
9743 	if (mode == 0) {
9744 		priv->config &= ~CFG_PREAMBLE_LONG;
9745 		goto done;
9746 	}
9747 	mutex_unlock(&priv->mutex);
9748 	return -EINVAL;
9749 
9750       done:
9751 	mutex_unlock(&priv->mutex);
9752 	return 0;
9753 }
9754 
9755 static int ipw_wx_get_preamble(struct net_device *dev,
9756 			       struct iw_request_info *info,
9757 			       union iwreq_data *wrqu, char *extra)
9758 {
9759 	struct ipw_priv *priv = libipw_priv(dev);
9760 	mutex_lock(&priv->mutex);
9761 	if (priv->config & CFG_PREAMBLE_LONG)
9762 		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9763 	else
9764 		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9765 	mutex_unlock(&priv->mutex);
9766 	return 0;
9767 }
9768 
9769 #ifdef CONFIG_IPW2200_MONITOR
/* Private ioctl: enter or leave monitor mode.  parms[0] > 0 enables monitor
 * mode on channel parms[1]; parms[0] <= 0 returns to managed (Ethernet)
 * operation.  Changing the netdev type requires a full adapter restart,
 * which is queued as work rather than done in ioctl context. */
static int ipw_wx_set_monitor(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	int *parms = (int *)extra;
	int enable = (parms[0] > 0);
	mutex_lock(&priv->mutex);
	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
	if (enable) {
		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
			/* Pick the capture link type per build config. */
#ifdef CONFIG_IPW2200_RADIOTAP
			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
			priv->net_dev->type = ARPHRD_IEEE80211;
#endif
			schedule_work(&priv->adapter_restart);
		}

		ipw_set_channel(priv, parms[1]);
	} else {
		/* Already out of monitor mode: nothing to undo. */
		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
			mutex_unlock(&priv->mutex);
			return 0;
		}
		priv->net_dev->type = ARPHRD_ETHER;
		schedule_work(&priv->adapter_restart);
	}
	mutex_unlock(&priv->mutex);
	return 0;
}
9801 
9802 #endif				/* CONFIG_IPW2200_MONITOR */
9803 
9804 static int ipw_wx_reset(struct net_device *dev,
9805 			struct iw_request_info *info,
9806 			union iwreq_data *wrqu, char *extra)
9807 {
9808 	struct ipw_priv *priv = libipw_priv(dev);
9809 	IPW_DEBUG_WX("RESET\n");
9810 	schedule_work(&priv->adapter_restart);
9811 	return 0;
9812 }
9813 
/* Private ioctl: software reset.  Re-reads configuration via ipw_sw_reset(),
 * reloads firmware/restarts the adapter on success, disables encryption,
 * and re-associates if the radio is not killed. */
static int ipw_wx_sw_reset(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = libipw_priv(dev);
	/* Synthesized WEXT request used below to disable encryption. */
	union iwreq_data wrqu_sec = {
		.encoding = {
			     .flags = IW_ENCODE_DISABLED,
			     },
	};
	int ret;

	IPW_DEBUG_WX("SW_RESET\n");

	mutex_lock(&priv->mutex);

	ret = ipw_sw_reset(priv, 2);
	if (!ret) {
		free_firmware();
		ipw_adapter_restart(priv);
	}

	/* The SW reset bit might have been toggled on by the 'disable'
	 * module parameter, so take appropriate action */
	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);

	/* NOTE(review): the mutex is deliberately dropped around the
	 * libipw_wx_set_encode() call — presumably because it re-enters
	 * driver paths that take priv->mutex; confirm before restructuring. */
	mutex_unlock(&priv->mutex);
	libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
	mutex_lock(&priv->mutex);

	if (!(priv->status & STATUS_RF_KILL_MASK)) {
		/* Configuration likely changed -- force [re]association */
		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
				"reset.\n");
		if (!ipw_disassociate(priv))
			ipw_associate(priv);
	}

	mutex_unlock(&priv->mutex);

	return 0;
}
9856 
9857 /* Rebase the WE IOCTLs to zero for the handler array */
/* Standard WEXT ioctl dispatch table; IW_HANDLER() places each callback at
 * the slot corresponding to its SIOC[GS]IW* command number. */
static iw_handler ipw_wx_handlers[] = {
	IW_HANDLER(SIOCGIWNAME, cfg80211_wext_giwname),
	IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
	IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
	IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
	IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
	IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
	IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
	IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
	IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
	IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
	IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
	IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
	IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
	IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
	IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
	IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
	IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
	IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
	IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
	IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
	IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
	IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
	IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
	IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
	IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
	IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
	IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
	IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
	IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
	IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
	/* Spy/threshold-spy support is provided by the WEXT core. */
	IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
	IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
	IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
	IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
	IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
	IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
	IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
	IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
	IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
	IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
	IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
};
9901 
/* Private WEXT ioctl numbers, allocated sequentially from SIOCIWFIRSTPRIV.
 * The order here must stay in sync with ipw_priv_args[] and
 * ipw_priv_handler[] below. */
enum {
	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
	IPW_PRIV_GET_POWER,
	IPW_PRIV_SET_MODE,
	IPW_PRIV_GET_MODE,
	IPW_PRIV_SET_PREAMBLE,
	IPW_PRIV_GET_PREAMBLE,
	IPW_PRIV_RESET,
	IPW_PRIV_SW_RESET,
#ifdef CONFIG_IPW2200_MONITOR
	IPW_PRIV_SET_MONITOR,
#endif
};
9915 
/* Argument descriptions for the private ioctls above, consumed by user
 * space (iwpriv) to marshal arguments. */
static struct iw_priv_args ipw_priv_args[] = {
	{
	 .cmd = IPW_PRIV_SET_POWER,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_power"},
	{
	 .cmd = IPW_PRIV_GET_POWER,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
	 .name = "get_power"},
	{
	 .cmd = IPW_PRIV_SET_MODE,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_mode"},
	{
	 .cmd = IPW_PRIV_GET_MODE,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
	 .name = "get_mode"},
	{
	 .cmd = IPW_PRIV_SET_PREAMBLE,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_preamble"},
	{
	 .cmd = IPW_PRIV_GET_PREAMBLE,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
	 .name = "get_preamble"},
	{
	 IPW_PRIV_RESET,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
	{
	 IPW_PRIV_SW_RESET,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
#ifdef CONFIG_IPW2200_MONITOR
	{
	 IPW_PRIV_SET_MONITOR,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
#endif				/* CONFIG_IPW2200_MONITOR */
};
9953 
/* Handlers for the private ioctls; indexed by cmd - SIOCIWFIRSTPRIV,
 * so the order here must match the IPW_PRIV_* enum exactly. */
static iw_handler ipw_priv_handler[] = {
	ipw_wx_set_powermode,	/* IPW_PRIV_SET_POWER */
	ipw_wx_get_powermode,	/* IPW_PRIV_GET_POWER */
	ipw_wx_set_wireless_mode,	/* IPW_PRIV_SET_MODE */
	ipw_wx_get_wireless_mode,	/* IPW_PRIV_GET_MODE */
	ipw_wx_set_preamble,	/* IPW_PRIV_SET_PREAMBLE */
	ipw_wx_get_preamble,	/* IPW_PRIV_GET_PREAMBLE */
	ipw_wx_reset,		/* IPW_PRIV_RESET */
	ipw_wx_sw_reset,	/* IPW_PRIV_SW_RESET */
#ifdef CONFIG_IPW2200_MONITOR
	ipw_wx_set_monitor,	/* IPW_PRIV_SET_MONITOR */
#endif
};
9967 
/* Wireless-extensions dispatch table registered on the net device:
 * ties together the standard SIOCxIW handlers, the private handlers,
 * their iwpriv argument descriptions, and the statistics callback. */
static const struct iw_handler_def ipw_wx_handler_def = {
	.standard = ipw_wx_handlers,
	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
	.num_private = ARRAY_SIZE(ipw_priv_handler),
	.num_private_args = ARRAY_SIZE(ipw_priv_args),
	.private = ipw_priv_handler,
	.private_args = ipw_priv_args,
	.get_wireless_stats = ipw_get_wireless_stats,
};
9977 
9978 /*
9979  * Get wireless statistics.
9980  * Called by /proc/net/wireless
9981  * Also called by SIOCGIWSTATS
9982  */
9983 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9984 {
9985 	struct ipw_priv *priv = libipw_priv(dev);
9986 	struct iw_statistics *wstats;
9987 
9988 	wstats = &priv->wstats;
9989 
9990 	/* if hw is disabled, then ipw_get_ordinal() can't be called.
9991 	 * netdev->get_wireless_stats seems to be called before fw is
9992 	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
9993 	 * and associated; if not associcated, the values are all meaningless
9994 	 * anyway, so set them all to NULL and INVALID */
9995 	if (!(priv->status & STATUS_ASSOCIATED)) {
9996 		wstats->miss.beacon = 0;
9997 		wstats->discard.retries = 0;
9998 		wstats->qual.qual = 0;
9999 		wstats->qual.level = 0;
10000 		wstats->qual.noise = 0;
10001 		wstats->qual.updated = 7;
10002 		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10003 		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10004 		return wstats;
10005 	}
10006 
10007 	wstats->qual.qual = priv->quality;
10008 	wstats->qual.level = priv->exp_avg_rssi;
10009 	wstats->qual.noise = priv->exp_avg_noise;
10010 	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10011 	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10012 
10013 	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10014 	wstats->discard.retries = priv->last_tx_failures;
10015 	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10016 
10017 /*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10018 	goto fail_get_ordinal;
10019 	wstats->discard.retries += tx_retry; */
10020 
10021 	return wstats;
10022 }
10023 
10024 /* net device stuff */
10025 
/* Fill @sys_config with the driver's default firmware system settings.
 * Also clamps the 'antenna' module parameter (a file-scope global) to a
 * valid value, falling back to antenna diversity when out of range. */
static  void init_sys_config(struct ipw_sys_config *sys_config)
{
	memset(sys_config, 0, sizeof(struct ipw_sys_config));
	sys_config->bt_coexistence = 0;
	sys_config->answer_broadcast_ssid_probe = 0;
	sys_config->accept_all_data_frames = 0;
	sys_config->accept_non_directed_frames = 1;
	sys_config->exclude_unicast_unencrypted = 0;
	sys_config->disable_unicast_decryption = 1;
	sys_config->exclude_multicast_unencrypted = 0;
	sys_config->disable_multicast_decryption = 1;
	/* invalid module-parameter values fall back to diversity */
	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
		antenna = CFG_SYS_ANTENNA_BOTH;
	sys_config->antenna_diversity = antenna;
	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
	sys_config->dot11g_auto_detection = 0;
	sys_config->enable_cts_to_self = 0;
	sys_config->bt_coexist_collision_thr = 0;
	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
	sys_config->silence_threshold = 0x1e;
}
10047 
/* net_device open callback: just enable the transmit queue; the
 * hardware/firmware itself is brought up elsewhere.  Always succeeds. */
static int ipw_net_open(struct net_device *dev)
{
	IPW_DEBUG_INFO("dev->open\n");
	netif_start_queue(dev);
	return 0;
}
10054 
/* net_device close callback: stop the transmit queue.  The hardware is
 * shut down separately.  Always succeeds. */
static int ipw_net_stop(struct net_device *dev)
{
	IPW_DEBUG_INFO("dev->close\n");
	netif_stop_queue(dev);
	return 0;
}
10061 
10062 /*
10063 todo:
10064 
10065 modify to send one tfd per fragment instead of using chunking.  otherwise
10066 we need to heavily modify the libipw_skb_to_txb.
10067 */
10068 
/*
 * Queue one 802.11 fragment chain (@txb) on the device's transmit ring.
 *
 * Builds a TFD (transmit frame descriptor): copies the 802.11 header
 * into the descriptor, sets modulation/preamble and hardware-crypto
 * flags from the current association state, then DMA-maps each payload
 * fragment as a chunk.  If there are more fragments than chunk slots,
 * the tail fragments are coalesced into a single freshly-allocated skb.
 * Called under priv->lock.  Returns NETDEV_TX_OK even when the frame is
 * dropped (the txb is freed in that case).
 */
static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
			     int pri)
{
	struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
	    txb->fragments[0]->data;
	int i = 0;
	struct tfd_frame *tfd;
#ifdef CONFIG_IPW2200_QOS
	int tx_id = ipw_get_tx_queue_number(priv, pri);
	struct clx2_tx_queue *txq = &priv->txq[tx_id];
#else
	struct clx2_tx_queue *txq = &priv->txq[0];
#endif
	struct clx2_queue *q = &txq->q;
	u8 id, hdr_len, unicast;
	int fc;

	if (!(priv->status & STATUS_ASSOCIATED))
		goto drop;

	hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
	/* Determine the destination station index and whether the frame
	 * is unicast (controls the ACK-required flag below). */
	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		unicast = !is_multicast_ether_addr(hdr->addr1);
		id = ipw_find_station(priv, hdr->addr1);
		if (id == IPW_INVALID_STATION) {
			/* unknown peer: try to add it on the fly */
			id = ipw_add_station(priv, hdr->addr1);
			if (id == IPW_INVALID_STATION) {
				IPW_WARNING("Attempt to send data to "
					    "invalid cell: %pM\n",
					    hdr->addr1);
				goto drop;
			}
		}
		break;

	case IW_MODE_INFRA:
	default:
		/* infrastructure: everything goes to station 0 (the AP) */
		unicast = !is_multicast_ether_addr(hdr->addr3);
		id = 0;
		break;
	}

	tfd = &txq->bd[q->first_empty];
	txq->txb[q->first_empty] = txb;
	memset(tfd, 0, sizeof(*tfd));
	tfd->u.data.station_number = id;

	tfd->control_flags.message_type = TX_FRAME_TYPE;
	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;

	tfd->u.data.cmd_id = DINO_CMD_TX;
	tfd->u.data.len = cpu_to_le16(txb->payload_size);

	/* modulation follows the negotiated association mode */
	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
	else
		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;

	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;

	/* clear MOREFRAGS before handing the header to the firmware */
	fc = le16_to_cpu(hdr->frame_ctl);
	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);

	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);

	if (likely(unicast))
		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;

	/* hardware encryption: select key and flags per security level */
	if (txb->encrypted && !priv->ieee->host_encrypt) {
		switch (priv->ieee->sec.level) {
		case SEC_LEVEL_3:	/* CCMP */
			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			/* XXX: ACK flag must be set for CCMP even if it
			 * is a multicast/broadcast packet, because CCMP
			 * group communication encrypted by GTK is
			 * actually done by the AP. */
			if (!unicast)
				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;

			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
			tfd->u.data.key_index = 0;
			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
			break;
		case SEC_LEVEL_2:	/* TKIP */
			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
			break;
		case SEC_LEVEL_1:	/* WEP */
			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
			if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
			    40)
				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
			else
				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
			break;
		case SEC_LEVEL_0:
			break;
		default:
			printk(KERN_ERR "Unknown security level %d\n",
			       priv->ieee->sec.level);
			break;
		}
	} else
		/* No hardware encryption */
		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;

#ifdef CONFIG_IPW2200_QOS
	if (fc & IEEE80211_STYPE_QOS_DATA)
		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
#endif				/* CONFIG_IPW2200_QOS */

	/* payload: map up to NUM_TFD_CHUNKS - 2 fragments directly,
	 * keeping slots free for a possible coalesced tail chunk */
	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
						 txb->nr_frags));
	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
			       i, le32_to_cpu(tfd->u.data.num_chunks),
			       txb->fragments[i]->len - hdr_len);
		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
			     i, tfd->u.data.num_chunks,
			     txb->fragments[i]->len - hdr_len);
		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
			   txb->fragments[i]->len - hdr_len);

		tfd->u.data.chunk_ptr[i] =
		    cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
					       txb->fragments[i]->data + hdr_len,
					       txb->fragments[i]->len - hdr_len,
					       DMA_TO_DEVICE));
		tfd->u.data.chunk_len[i] =
		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
	}

	/* More fragments than chunk slots: coalesce the remainder (minus
	 * their headers) into one new skb occupying the final chunk. */
	if (i != txb->nr_frags) {
		struct sk_buff *skb;
		u16 remaining_bytes = 0;
		int j;

		for (j = i; j < txb->nr_frags; j++)
			remaining_bytes += txb->fragments[j]->len - hdr_len;

		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
		       remaining_bytes);
		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
		if (skb != NULL) {
			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
			for (j = i; j < txb->nr_frags; j++) {
				int size = txb->fragments[j]->len - hdr_len;

				printk(KERN_INFO "Adding frag %d %d...\n",
				       j, size);
				skb_put_data(skb,
					     txb->fragments[j]->data + hdr_len,
					     size);
			}
			dev_kfree_skb_any(txb->fragments[i]);
			txb->fragments[i] = skb;
			tfd->u.data.chunk_ptr[i] =
			    cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
						       skb->data,
						       remaining_bytes,
						       DMA_TO_DEVICE));

			le32_add_cpu(&tfd->u.data.num_chunks, 1);
		}
	}

	/* kick DMA */
	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
	ipw_write32(priv, q->reg_w, q->first_empty);

	/* throttle the stack before the ring actually fills up */
	if (ipw_tx_queue_space(q) < q->high_mark)
		netif_stop_queue(priv->net_dev);

	return NETDEV_TX_OK;

      drop:
	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
	libipw_txb_free(txb);
	return NETDEV_TX_OK;
}
10261 
10262 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10263 {
10264 	struct ipw_priv *priv = libipw_priv(dev);
10265 #ifdef CONFIG_IPW2200_QOS
10266 	int tx_id = ipw_get_tx_queue_number(priv, pri);
10267 	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10268 #else
10269 	struct clx2_tx_queue *txq = &priv->txq[0];
10270 #endif				/* CONFIG_IPW2200_QOS */
10271 
10272 	if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10273 		return 1;
10274 
10275 	return 0;
10276 }
10277 
10278 #ifdef CONFIG_IPW2200_PROMISCUOUS
/*
 * Mirror an outgoing frame to the promiscuous (rtap) interface.
 *
 * For each fragment of @txb that passes the prom_priv->filter checks,
 * build a copy prefixed with a minimal radiotap header carrying only
 * the channel field, and inject it into the rtap interface via
 * libipw_rx().  Header-only filters truncate the copy to the 802.11
 * header.  Frames are silently skipped on allocation failure.
 */
static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
				      struct libipw_txb *txb)
{
	struct libipw_rx_stats dummystats;
	struct ieee80211_hdr *hdr;
	u8 n;
	u16 filter = priv->prom_priv->filter;
	int hdr_only = 0;

	if (filter & IPW_PROM_NO_TX)
		return;

	memset(&dummystats, 0, sizeof(dummystats));

	/* Filtering of fragment chains is done against the first fragment */
	hdr = (void *)txb->fragments[0]->data;
	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
		if (filter & IPW_PROM_NO_MGMT)
			return;
		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
			hdr_only = 1;
	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
		if (filter & IPW_PROM_NO_CTL)
			return;
		if (filter & IPW_PROM_CTL_HEADER_ONLY)
			hdr_only = 1;
	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
		if (filter & IPW_PROM_NO_DATA)
			return;
		if (filter & IPW_PROM_DATA_HEADER_ONLY)
			hdr_only = 1;
	}

	for(n=0; n<txb->nr_frags; ++n) {
		struct sk_buff *src = txb->fragments[n];
		struct sk_buff *dst;
		struct ieee80211_radiotap_header *rt_hdr;
		int len;

		/* header-only filters copy just this fragment's header */
		if (hdr_only) {
			hdr = (void *)src->data;
			len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
		} else
			len = src->len;

		/* radiotap header + channel frequency + channel flags */
		dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
		if (!dst)
			continue;

		rt_hdr = skb_put(dst, sizeof(*rt_hdr));

		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
		rt_hdr->it_pad = 0;
		rt_hdr->it_present = 0; /* after all, it's just an idea */
		rt_hdr->it_present |=  cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);

		/* channel field: frequency in MHz, then band/modulation flags */
		*(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
			ieee80211chan2mhz(priv->channel));
		if (priv->channel > 14) 	/* 802.11a */
			*(__le16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_OFDM |
					     IEEE80211_CHAN_5GHZ);
		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
			*(__le16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_CCK |
					     IEEE80211_CHAN_2GHZ);
		else 		/* 802.11g */
			*(__le16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_OFDM |
				 IEEE80211_CHAN_2GHZ);

		rt_hdr->it_len = cpu_to_le16(dst->len);

		skb_copy_from_linear_data(src, skb_put(dst, len), len);

		/* libipw_rx() consumes dst on success; free it otherwise */
		if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
			dev_kfree_skb_any(dst);
	}
}
10358 #endif
10359 
/* Transmit entry point called by libipw with an assembled fragment
 * chain.  Takes priv->lock for the whole operation, optionally mirrors
 * the frame to the promiscuous rtap interface, queues it via
 * ipw_tx_skb(), and blinks the activity LED on success. */
static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
					   struct net_device *dev, int pri)
{
	struct ipw_priv *priv = libipw_priv(dev);
	unsigned long flags;
	netdev_tx_t ret;

	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
	spin_lock_irqsave(&priv->lock, flags);

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (rtap_iface && netif_running(priv->prom_net_dev))
		ipw_handle_promiscuous_tx(priv, txb);
#endif

	ret = ipw_tx_skb(priv, txb, pri);
	if (ret == NETDEV_TX_OK)
		__ipw_led_activity_on(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
10382 
/* net_device multicast-list callback.  Intentionally a no-op: frame
 * acceptance is controlled through the firmware system configuration
 * (see init_sys_config()) rather than per-address multicast filters. */
static void ipw_net_set_multicast_list(struct net_device *dev)
{

}
10387 
/* net_device MAC-address callback.  Validates the new address, records
 * it as a custom MAC under priv->mutex, and schedules an adapter
 * restart so the address is applied when the firmware comes back up.
 * Returns -EADDRNOTAVAIL for an invalid ethernet address. */
static int ipw_net_set_mac_address(struct net_device *dev, void *p)
{
	struct ipw_priv *priv = libipw_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	mutex_lock(&priv->mutex);
	priv->config |= CFG_CUSTOM_MAC;
	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
	printk(KERN_INFO "%s: Setting MAC to %pM\n",
	       priv->net_dev->name, priv->mac_addr);
	schedule_work(&priv->adapter_restart);
	mutex_unlock(&priv->mutex);
	return 0;
}
10404 
10405 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10406 				    struct ethtool_drvinfo *info)
10407 {
10408 	struct ipw_priv *p = libipw_priv(dev);
10409 	char vers[64];
10410 	char date[32];
10411 	u32 len;
10412 
10413 	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
10414 	strscpy(info->version, DRV_VERSION, sizeof(info->version));
10415 
10416 	len = sizeof(vers);
10417 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10418 	len = sizeof(date);
10419 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10420 
10421 	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10422 		 vers, date);
10423 	strscpy(info->bus_info, pci_name(p->pci_dev),
10424 		sizeof(info->bus_info));
10425 }
10426 
10427 static u32 ipw_ethtool_get_link(struct net_device *dev)
10428 {
10429 	struct ipw_priv *priv = libipw_priv(dev);
10430 	return (priv->status & STATUS_ASSOCIATED) != 0;
10431 }
10432 
/* ethtool: size in bytes of the EEPROM image exposed through the
 * get_eeprom/set_eeprom callbacks below. */
static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
{
	return IPW_EEPROM_IMAGE_SIZE;
}
10437 
10438 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10439 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10440 {
10441 	struct ipw_priv *p = libipw_priv(dev);
10442 
10443 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10444 		return -EINVAL;
10445 	mutex_lock(&p->mutex);
10446 	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10447 	mutex_unlock(&p->mutex);
10448 	return 0;
10449 }
10450 
10451 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10452 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10453 {
10454 	struct ipw_priv *p = libipw_priv(dev);
10455 	int i;
10456 
10457 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10458 		return -EINVAL;
10459 	mutex_lock(&p->mutex);
10460 	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10461 	for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10462 		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10463 	mutex_unlock(&p->mutex);
10464 	return 0;
10465 }
10466 
/* ethtool operations: link status, driver info, and raw EEPROM access. */
static const struct ethtool_ops ipw_ethtool_ops = {
	.get_link = ipw_ethtool_get_link,
	.get_drvinfo = ipw_ethtool_get_drvinfo,
	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
	.get_eeprom = ipw_ethtool_get_eeprom,
	.set_eeprom = ipw_ethtool_set_eeprom,
};
10474 
/*
 * Primary (possibly shared) interrupt handler.
 *
 * Under priv->irq_lock: reads the pending INTA bits, bails out with
 * IRQ_NONE if interrupts are disabled, the hardware has disappeared
 * (all-ones read), or none of the masked bits are ours.  Otherwise it
 * masks further device interrupts, acknowledges the pending bits,
 * caches them in priv->isr_inta and defers the real work to the irq
 * tasklet (which re-enables interrupts when done).
 */
static irqreturn_t ipw_isr(int irq, void *data)
{
	struct ipw_priv *priv = data;
	u32 inta, inta_mask;

	if (!priv)
		return IRQ_NONE;

	spin_lock(&priv->irq_lock);

	if (!(priv->status & STATUS_INT_ENABLED)) {
		/* IRQ is disabled */
		goto none;
	}

	inta = ipw_read32(priv, IPW_INTA_RW);
	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);

	if (inta == 0xFFFFFFFF) {
		/* Hardware disappeared */
		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
		goto none;
	}

	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
		/* Shared interrupt */
		goto none;
	}

	/* tell the device to stop sending interrupts */
	__ipw_disable_interrupts(priv);

	/* ack current interrupts */
	inta &= (IPW_INTA_MASK_ALL & inta_mask);
	ipw_write32(priv, IPW_INTA_RW, inta);

	/* Cache INTA value for our tasklet */
	priv->isr_inta = inta;

	tasklet_schedule(&priv->irq_tasklet);

	spin_unlock(&priv->irq_lock);

	return IRQ_HANDLED;
      none:
	spin_unlock(&priv->irq_lock);
	return IRQ_NONE;
}
10523 
/* Poll the hardware RF-kill switch.  While the switch is still active
 * the check is rescheduled every 2 seconds; once it clears (and no
 * software RF-kill remains) an adapter restart is scheduled to bring
 * the device back up.  Runs with priv->mutex held via ipw_bg_rf_kill. */
static void ipw_rf_kill(void *adapter)
{
	struct ipw_priv *priv = adapter;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (rf_kill_active(priv)) {
		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
		goto exit_unlock;
	}

	/* RF Kill is now disabled, so bring the device back up */

	if (!(priv->status & STATUS_RF_KILL_MASK)) {
		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
				  "device\n");

		/* we can not do an adapter restart while inside an irq lock */
		schedule_work(&priv->adapter_restart);
	} else
		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
				  "enabled\n");

      exit_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
10552 
/* Delayed-work wrapper: run ipw_rf_kill() with priv->mutex held. */
static void ipw_bg_rf_kill(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, rf_kill.work);
	mutex_lock(&priv->mutex);
	ipw_rf_kill(priv);
	mutex_unlock(&priv->mutex);
}
10561 
/* Handle transition to the associated state: reset duplicate-detection
 * bookkeeping, turn the carrier on, cancel all outstanding scan work,
 * refresh statistics and LEDs, and notify user space of the new
 * association.  Optionally kicks off a background scan after 1s. */
static void ipw_link_up(struct ipw_priv *priv)
{
	/* reset duplicate/fragment tracking for the new link */
	priv->last_seq_num = -1;
	priv->last_frag_num = -1;
	priv->last_packet_time = 0;

	netif_carrier_on(priv->net_dev);

	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->request_direct_scan);
	cancel_delayed_work(&priv->request_passive_scan);
	cancel_delayed_work(&priv->scan_event);
	ipw_reset_stats(priv);
	/* Ensure the rate is updated immediately */
	priv->last_rate = ipw_get_current_rate(priv);
	ipw_gather_stats(priv);
	ipw_led_link_up(priv);
	notify_wx_assoc_event(priv);

	if (priv->config & CFG_BACKGROUND_SCAN)
		schedule_delayed_work(&priv->request_scan, HZ);
}
10584 
/* Work wrapper: run ipw_link_up() with priv->mutex held. */
static void ipw_bg_link_up(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, link_up);
	mutex_lock(&priv->mutex);
	ipw_link_up(priv);
	mutex_unlock(&priv->mutex);
}
10593 
/* Handle loss of association: update LEDs/carrier, notify user space,
 * cancel all pending scan and statistics work, and reset statistics.
 * Unless the driver is shutting down, a fresh scan is queued
 * immediately to look for a new network. */
static void ipw_link_down(struct ipw_priv *priv)
{
	ipw_led_link_down(priv);
	netif_carrier_off(priv->net_dev);
	notify_wx_assoc_event(priv);

	/* Cancel any queued work ... */
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->request_direct_scan);
	cancel_delayed_work(&priv->request_passive_scan);
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);

	ipw_reset_stats(priv);

	if (!(priv->status & STATUS_EXIT_PENDING)) {
		/* Queue up another scan... */
		schedule_delayed_work(&priv->request_scan, 0);
	} else
		cancel_delayed_work(&priv->scan_event);
}
10615 
/* Work wrapper: run ipw_link_down() with priv->mutex held. */
static void ipw_bg_link_down(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, link_down);
	mutex_lock(&priv->mutex);
	ipw_link_down(priv);
	mutex_unlock(&priv->mutex);
}
10624 
/* One-time initialization of all deferred-execution machinery: the
 * command/state wait queues, every work and delayed-work item used by
 * the driver, and the interrupt-servicing tasklet. */
static void ipw_setup_deferred_work(struct ipw_priv *priv)
{
	init_waitqueue_head(&priv->wait_command_queue);
	init_waitqueue_head(&priv->wait_state);

	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
	INIT_WORK(&priv->associate, ipw_bg_associate);
	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
	INIT_WORK(&priv->system_config, ipw_system_config);
	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
	INIT_WORK(&priv->up, ipw_bg_up);
	INIT_WORK(&priv->down, ipw_bg_down);
	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
	INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
	INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
	INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
	INIT_WORK(&priv->roam, ipw_bg_roam);
	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
	INIT_WORK(&priv->link_up, ipw_bg_link_up);
	INIT_WORK(&priv->link_down, ipw_bg_link_down);
	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);

#ifdef CONFIG_IPW2200_QOS
	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
#endif				/* CONFIG_IPW2200_QOS */

	tasklet_setup(&priv->irq_tasklet, ipw_irq_tasklet);
}
10660 
/*
 * libipw security callback: merge the security settings in @sec into
 * the driver's copy (priv->ieee->sec), updating only the pieces whose
 * flag bits are set.  Sets STATUS_SECURITY_UPDATED whenever anything
 * changed, mirrors auth-mode/privacy into priv->capability, and pushes
 * keys to hardware when host encryption is not in use.
 */
static void shim__set_security(struct net_device *dev,
			       struct libipw_security *sec)
{
	struct ipw_priv *priv = libipw_priv(dev);
	int i;
	/* per-key update: flags bit i covers WEP key slot i */
	for (i = 0; i < 4; i++) {
		if (sec->flags & (1 << i)) {
			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
			if (sec->key_sizes[i] == 0)
				priv->ieee->sec.flags &= ~(1 << i);
			else {
				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
				       sec->key_sizes[i]);
				priv->ieee->sec.flags |= (1 << i);
			}
			priv->status |= STATUS_SECURITY_UPDATED;
		} else if (sec->level != SEC_LEVEL_1)
			priv->ieee->sec.flags &= ~(1 << i);
	}

	if (sec->flags & SEC_ACTIVE_KEY) {
		priv->ieee->sec.active_key = sec->active_key;
		priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	} else
		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;

	if ((sec->flags & SEC_AUTH_MODE) &&
	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
		priv->ieee->sec.auth_mode = sec->auth_mode;
		priv->ieee->sec.flags |= SEC_AUTH_MODE;
		/* shared-key auth is advertised via the capability field */
		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
			priv->capability |= CAP_SHARED_KEY;
		else
			priv->capability &= ~CAP_SHARED_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
		priv->ieee->sec.flags |= SEC_ENABLED;
		priv->ieee->sec.enabled = sec->enabled;
		priv->status |= STATUS_SECURITY_UPDATED;
		if (sec->enabled)
			priv->capability |= CAP_PRIVACY_ON;
		else
			priv->capability &= ~CAP_PRIVACY_ON;
	}

	if (sec->flags & SEC_ENCRYPT)
		priv->ieee->sec.encrypt = sec->encrypt;

	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
		priv->ieee->sec.level = sec->level;
		priv->ieee->sec.flags |= SEC_LEVEL;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	/* hardware crypto: push new keys down to the firmware */
	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
		ipw_set_hwcrypto_keys(priv);

	/* To match current functionality of ipw2100 (which works well w/
	 * various supplicants, we don't force a disassociate if the
	 * privacy capability changes ... */
#if 0
	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
	    (((priv->assoc_request.capability &
	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
	     (!(priv->assoc_request.capability &
		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
		IPW_DEBUG_ASSOC("Disassociating due to capability "
				"change.\n");
		ipw_disassociate(priv);
	}
#endif
}
10737 
/* Populate @rates with the supported-rates capability element for the
 * current frequency band: OFDM-only for 5.2GHz (A mode), otherwise CCK
 * plus OFDM (when OFDM modulation is enabled) for 2.4GHz/mixed (G mode).
 * Always returns 0. */
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *rates)
{
	/* TODO: Mask out rates based on priv->rates_mask */

	memset(rates, 0, sizeof(*rates));
	/* configure supported rates */
	switch (priv->ieee->freq_band) {
	case LIBIPW_52GHZ_BAND:
		rates->ieee_mode = IPW_A_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		/* NOTE(review): LIBIPW_CCK_MODULATION is passed here even
		 * though the rates are OFDM — looks odd; confirm against
		 * ipw_add_ofdm_scan_rates() semantics */
		ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
					LIBIPW_OFDM_DEFAULT_RATES_MASK);
		break;

	default:		/* Mixed or 2.4Ghz */
		rates->ieee_mode = IPW_G_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
				       LIBIPW_CCK_DEFAULT_RATES_MASK);
		if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
			ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
						LIBIPW_OFDM_DEFAULT_RATES_MASK);
		}
		break;
	}

	return 0;
}
10767 
/*
 * Configure the firmware after a (re)load: tx power, adapter MAC,
 * system config (including BT coexistence and promiscuous tweaks),
 * supported rates, RTS threshold, QoS, and the random seed, finishing
 * with the host-complete command that moves the firmware to the RUN
 * state.  Returns 0 on success, -EIO if any command fails.
 */
static int ipw_config(struct ipw_priv *priv)
{
	/* This is only called from ipw_up, which resets/reloads the firmware
	   so, we don't need to first disable the card before we configure
	   it */
	if (ipw_set_tx_power(priv))
		goto error;

	/* initialize adapter address */
	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
		goto error;

	/* set basic system config settings */
	init_sys_config(&priv->sys_config);

	/* Support Bluetooth if we have BT h/w on board, and user wants to.
	 * Does not support BT priority yet (don't abort or defer our Tx) */
	if (bt_coexist) {
		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];

		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_OOB;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	/* rtap interface active: accept everything the firmware sees */
	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;
	}
#endif

	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->sys_config.answer_broadcast_ssid_probe = 1;
	else
		priv->sys_config.answer_broadcast_ssid_probe = 0;

	if (ipw_send_system_config(priv))
		goto error;

	init_supported_rates(priv, &priv->rates);
	if (ipw_send_supported_rates(priv, &priv->rates))
		goto error;

	/* Set request-to-send threshold */
	if (priv->rts_threshold) {
		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
			goto error;
	}
#ifdef CONFIG_IPW2200_QOS
	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
	ipw_qos_activate(priv, NULL);
#endif				/* CONFIG_IPW2200_QOS */

	if (ipw_set_random_seed(priv))
		goto error;

	/* final state transition to the RUN state */
	if (ipw_send_host_complete(priv))
		goto error;

	priv->status |= STATUS_INIT;

	ipw_led_init(priv);
	ipw_led_radio_on(priv);
	priv->notif_missed_beacons = 0;

	/* Set hardware WEP key if it is configured. */
	if ((priv->capability & CAP_PRIVACY_ON) &&
	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
		ipw_set_hwcrypto_keys(priv);

	return 0;

      error:
	return -EIO;
}
10851 
10852 /*
10853  * NOTE:
10854  *
10855  * These tables have been tested in conjunction with the
10856  * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10857  *
 * Altering these values, using them on other hardware, or using them in
 * geographies not intended for resale of the above mentioned Intel
 * adapters, has not been tested.
10861  *
10862  * Remember to update the table in README.ipw2200 when changing this
10863  * table.
10864  *
10865  */
/*
 * Geography (regulatory) table, selected at run time by matching the
 * 3-character SKU code read from EEPROM (see ipw_set_geo()).  Entry 0
 * ("---") is the restricted fallback used when the SKU is unknown.
 * .bg lists 2.4 GHz channels, .a lists 5 GHz channels; each channel is
 * {center frequency in MHz, channel number, optional LIBIPW_CH_* flags}.
 */
static const struct libipw_geo ipw_geos[] = {
	{			/* Restricted */
	 "---",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 },

	{			/* Custom US/Canada */
	 "ZZF",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 8,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Rest of World */
	 "ZZD",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}},
	 },

	{			/* Custom USA & Europe & High */
	 "ZZA",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149},
	       {5765, 153},
	       {5785, 157},
	       {5805, 161},
	       {5825, 165}},
	 },

	{			/* Custom NA & Europe */
	 "ZZB",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Custom Japan */
	 "ZZC",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 4,
	 .a = {{5170, 34}, {5190, 38},
	       {5210, 42}, {5230, 46}},
	 },

	{			/* Custom */
	 "ZZM",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 },

	{			/* Europe */
	 "ZZE",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}},
	 .a_channels = 19,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Custom Japan */
	 "ZZJ",
	 .bg_channels = 14,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
	 .a_channels = 4,
	 .a = {{5170, 34}, {5190, 38},
	       {5210, 42}, {5230, 46}},
	 },

	{			/* Rest of World */
	 "ZZR",
	 .bg_channels = 14,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
			     LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* High Band */
	 "ZZH",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
	 .a_channels = 4,
	 .a = {{5745, 149}, {5765, 153},
	       {5785, 157}, {5805, 161}},
	 },

	{			/* Custom Europe */
	 "ZZG",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12}, {2472, 13}},
	 .a_channels = 4,
	 .a = {{5180, 36}, {5200, 40},
	       {5220, 44}, {5240, 48}},
	 },

	{			/* Europe */
	 "ZZK",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
	 .a_channels = 24,
	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
	 },

	{			/* Europe */
	 "ZZL",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
	 }
};
11111 
11112 static void ipw_set_geo(struct ipw_priv *priv)
11113 {
11114 	int j;
11115 
11116 	for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11117 		if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11118 			    ipw_geos[j].name, 3))
11119 			break;
11120 	}
11121 
11122 	if (j == ARRAY_SIZE(ipw_geos)) {
11123 		IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11124 			    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11125 			    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11126 			    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11127 		j = 0;
11128 	}
11129 
11130 	libipw_set_geo(priv->ieee, &ipw_geos[j]);
11131 }
11132 
11133 #define MAX_HW_RESTARTS 5
11134 static int ipw_up(struct ipw_priv *priv)
11135 {
11136 	int rc, i;
11137 
11138 	/* Age scan list entries found before suspend */
11139 	if (priv->suspend_time) {
11140 		libipw_networks_age(priv->ieee, priv->suspend_time);
11141 		priv->suspend_time = 0;
11142 	}
11143 
11144 	if (priv->status & STATUS_EXIT_PENDING)
11145 		return -EIO;
11146 
11147 	if (cmdlog && !priv->cmdlog) {
11148 		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11149 				       GFP_KERNEL);
11150 		if (priv->cmdlog == NULL) {
11151 			IPW_ERROR("Error allocating %d command log entries.\n",
11152 				  cmdlog);
11153 			return -ENOMEM;
11154 		} else {
11155 			priv->cmdlog_len = cmdlog;
11156 		}
11157 	}
11158 
11159 	for (i = 0; i < MAX_HW_RESTARTS; i++) {
11160 		/* Load the microcode, firmware, and eeprom.
11161 		 * Also start the clocks. */
11162 		rc = ipw_load(priv);
11163 		if (rc) {
11164 			IPW_ERROR("Unable to load firmware: %d\n", rc);
11165 			return rc;
11166 		}
11167 
11168 		ipw_init_ordinals(priv);
11169 		if (!(priv->config & CFG_CUSTOM_MAC))
11170 			eeprom_parse_mac(priv, priv->mac_addr);
11171 		eth_hw_addr_set(priv->net_dev, priv->mac_addr);
11172 
11173 		ipw_set_geo(priv);
11174 
11175 		if (priv->status & STATUS_RF_KILL_SW) {
11176 			IPW_WARNING("Radio disabled by module parameter.\n");
11177 			return 0;
11178 		} else if (rf_kill_active(priv)) {
11179 			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11180 				    "Kill switch must be turned off for "
11181 				    "wireless networking to work.\n");
11182 			schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11183 			return 0;
11184 		}
11185 
11186 		rc = ipw_config(priv);
11187 		if (!rc) {
11188 			IPW_DEBUG_INFO("Configured device on count %i\n", i);
11189 
11190 			/* If configure to try and auto-associate, kick
11191 			 * off a scan. */
11192 			schedule_delayed_work(&priv->request_scan, 0);
11193 
11194 			return 0;
11195 		}
11196 
11197 		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11198 		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11199 			       i, MAX_HW_RESTARTS);
11200 
11201 		/* We had an error bringing up the hardware, so take it
11202 		 * all the way back down so we can try again */
11203 		ipw_down(priv);
11204 	}
11205 
11206 	/* tried to restart and config the device for as long as our
11207 	 * patience could withstand */
11208 	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11209 
11210 	return -EIO;
11211 }
11212 
11213 static void ipw_bg_up(struct work_struct *work)
11214 {
11215 	struct ipw_priv *priv =
11216 		container_of(work, struct ipw_priv, up);
11217 	mutex_lock(&priv->mutex);
11218 	ipw_up(priv);
11219 	mutex_unlock(&priv->mutex);
11220 }
11221 
/*
 * Shut down firmware-level activity: abort scans, disassociate, stop
 * the LEDs, then poll until the firmware reports idle before sending
 * the final card-disable command and clearing STATUS_INIT.
 */
static void ipw_deinit(struct ipw_priv *priv)
{
	int i;

	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
		ipw_abort_scan(priv);
	}

	if (priv->status & STATUS_ASSOCIATED) {
		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
		ipw_disassociate(priv);
	}

	ipw_led_shutdown(priv);

	/* Wait up to 1s for status to change to not scanning and not
	 * associated (disassociation can take a while for a full 802.11
	 * exchange).
	 * NOTE(review): the loop is 1000 iterations of udelay(10), i.e.
	 * ~10ms, not the 1s claimed here — and the "%dms" message below
	 * makes the same iterations-as-ms assumption.  Verify intent. */
	for (i = 1000; i && (priv->status &
			     (STATUS_DISASSOCIATING |
			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
		udelay(10);

	if (priv->status & (STATUS_DISASSOCIATING |
			    STATUS_ASSOCIATED | STATUS_SCANNING))
		IPW_DEBUG_INFO("Still associated or scanning...\n");
	else
		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);

	/* Attempt to disable the card */
	ipw_send_card_disable(priv, 0);

	priv->status &= ~STATUS_INIT;
}
11257 
/*
 * Take the adapter down: de-init firmware state if initialized, stop
 * interrupts and the NIC, and turn the radio LED off.  The ordering is
 * deliberate — firmware de-init happens before interrupts are masked.
 * STATUS_EXIT_PENDING remains set afterwards only if it was already set
 * on entry (i.e. the module itself is being removed).
 */
static void ipw_down(struct ipw_priv *priv)
{
	int exit_pending = priv->status & STATUS_EXIT_PENDING;

	/* Block bring-up attempts while the teardown runs. */
	priv->status |= STATUS_EXIT_PENDING;

	if (ipw_is_init(priv))
		ipw_deinit(priv);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		priv->status &= ~STATUS_EXIT_PENDING;

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* Clear all bits but the RF Kill */
	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
	netif_carrier_off(priv->net_dev);

	ipw_stop_nic(priv);

	ipw_led_radio_off(priv);
}
11283 
11284 static void ipw_bg_down(struct work_struct *work)
11285 {
11286 	struct ipw_priv *priv =
11287 		container_of(work, struct ipw_priv, down);
11288 	mutex_lock(&priv->mutex);
11289 	ipw_down(priv);
11290 	mutex_unlock(&priv->mutex);
11291 }
11292 
/*
 * Populate this adapter's cfg80211 wireless_dev from the current libipw
 * geography and register the wiphy.  Allocates the 2.4 GHz and 5 GHz
 * channel arrays; on any failure both arrays are freed (kfree(NULL) is
 * a no-op for a band that was never allocated) and the error returned.
 */
static int ipw_wdev_init(struct net_device *dev)
{
	int i, rc = 0;
	struct ipw_priv *priv = libipw_priv(dev);
	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
	struct wireless_dev *wdev = &priv->ieee->wdev;

	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);

	/* fill-out priv->ieee->bg_band */
	if (geo->bg_channels) {
		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;

		bg_band->band = NL80211_BAND_2GHZ;
		bg_band->n_channels = geo->bg_channels;
		bg_band->channels = kcalloc(geo->bg_channels,
					    sizeof(struct ieee80211_channel),
					    GFP_KERNEL);
		if (!bg_band->channels) {
			rc = -ENOMEM;
			goto out;
		}
		/* translate geo->bg to bg_band.channels */
		for (i = 0; i < geo->bg_channels; i++) {
			bg_band->channels[i].band = NL80211_BAND_2GHZ;
			bg_band->channels[i].center_freq = geo->bg[i].freq;
			bg_band->channels[i].hw_value = geo->bg[i].channel;
			bg_band->channels[i].max_power = geo->bg[i].max_power;
			/* Both passive-only and no-IBSS map to the single
			 * cfg80211 NO_IR (no initiating radiation) flag. */
			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
				bg_band->channels[i].flags |=
					IEEE80211_CHAN_NO_IR;
			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
				bg_band->channels[i].flags |=
					IEEE80211_CHAN_NO_IR;
			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
				bg_band->channels[i].flags |=
					IEEE80211_CHAN_RADAR;
			/* No equivalent for LIBIPW_CH_80211H_RULES,
			   LIBIPW_CH_UNIFORM_SPREADING, or
			   LIBIPW_CH_B_ONLY... */
		}
		/* point at bitrate info */
		bg_band->bitrates = ipw2200_bg_rates;
		bg_band->n_bitrates = ipw2200_num_bg_rates;

		wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
	}

	/* fill-out priv->ieee->a_band */
	if (geo->a_channels) {
		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;

		a_band->band = NL80211_BAND_5GHZ;
		a_band->n_channels = geo->a_channels;
		a_band->channels = kcalloc(geo->a_channels,
					   sizeof(struct ieee80211_channel),
					   GFP_KERNEL);
		if (!a_band->channels) {
			rc = -ENOMEM;
			goto out;
		}
		/* translate geo->a to a_band.channels */
		for (i = 0; i < geo->a_channels; i++) {
			a_band->channels[i].band = NL80211_BAND_5GHZ;
			a_band->channels[i].center_freq = geo->a[i].freq;
			a_band->channels[i].hw_value = geo->a[i].channel;
			a_band->channels[i].max_power = geo->a[i].max_power;
			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
				a_band->channels[i].flags |=
					IEEE80211_CHAN_NO_IR;
			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
				a_band->channels[i].flags |=
					IEEE80211_CHAN_NO_IR;
			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
				a_band->channels[i].flags |=
					IEEE80211_CHAN_RADAR;
			/* No equivalent for LIBIPW_CH_80211H_RULES,
			   LIBIPW_CH_UNIFORM_SPREADING, or
			   LIBIPW_CH_B_ONLY... */
		}
		/* point at bitrate info */
		a_band->bitrates = ipw2200_a_rates;
		a_band->n_bitrates = ipw2200_num_a_rates;

		wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
	}

	wdev->wiphy->cipher_suites = ipw_cipher_suites;
	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);

	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);

	/* With that information in place, we can now register the wiphy... */
	rc = wiphy_register(wdev->wiphy);
	if (rc)
		goto out;

	return 0;
out:
	kfree(priv->ieee->a_band.channels);
	kfree(priv->ieee->bg_band.channels);
	return rc;
}
11396 
11397 /* PCI driver stuff */
/* PCI IDs this driver binds to.  The first group pins specific
 * subsystem vendor/device combinations; the PCI_VDEVICE() entries
 * match any subsystem for the listed device IDs (BG and ABG parts,
 * per the inline comments). */
static const struct pci_device_id card_ids[] = {
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
	{PCI_VDEVICE(INTEL, 0x104f), 0},
	{PCI_VDEVICE(INTEL, 0x4220), 0},	/* BG */
	{PCI_VDEVICE(INTEL, 0x4221), 0},	/* BG */
	{PCI_VDEVICE(INTEL, 0x4223), 0},	/* ABG */
	{PCI_VDEVICE(INTEL, 0x4224), 0},	/* ABG */

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, card_ids);
11427 
/* sysfs attributes exposed in the PCI device's directory (see
 * ipw_attribute_group below); the rtap_* entries exist only when the
 * promiscuous interface is built in.  NULL-terminated. */
static struct attribute *ipw_sysfs_entries[] = {
	&dev_attr_rf_kill.attr,
	&dev_attr_direct_dword.attr,
	&dev_attr_indirect_byte.attr,
	&dev_attr_indirect_dword.attr,
	&dev_attr_mem_gpio_reg.attr,
	&dev_attr_command_event_reg.attr,
	&dev_attr_nic_type.attr,
	&dev_attr_status.attr,
	&dev_attr_cfg.attr,
	&dev_attr_error.attr,
	&dev_attr_event_log.attr,
	&dev_attr_cmd_log.attr,
	&dev_attr_eeprom_delay.attr,
	&dev_attr_ucode_version.attr,
	&dev_attr_rtc.attr,
	&dev_attr_scan_age.attr,
	&dev_attr_led.attr,
	&dev_attr_speed_scan.attr,
	&dev_attr_net_stats.attr,
	&dev_attr_channels.attr,
#ifdef CONFIG_IPW2200_PROMISCUOUS
	&dev_attr_rtap_iface.attr,
	&dev_attr_rtap_filter.attr,
#endif
	NULL
};
11455 
/* Group wrapper passed to sysfs_create_group()/sysfs_remove_group(). */
static const struct attribute_group ipw_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = ipw_sysfs_entries,
};
11460 
11461 #ifdef CONFIG_IPW2200_PROMISCUOUS
11462 static int ipw_prom_open(struct net_device *dev)
11463 {
11464 	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11465 	struct ipw_priv *priv = prom_priv->priv;
11466 
11467 	IPW_DEBUG_INFO("prom dev->open\n");
11468 	netif_carrier_off(dev);
11469 
11470 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11471 		priv->sys_config.accept_all_data_frames = 1;
11472 		priv->sys_config.accept_non_directed_frames = 1;
11473 		priv->sys_config.accept_all_mgmt_bcpr = 1;
11474 		priv->sys_config.accept_all_mgmt_frames = 1;
11475 
11476 		ipw_send_system_config(priv);
11477 	}
11478 
11479 	return 0;
11480 }
11481 
11482 static int ipw_prom_stop(struct net_device *dev)
11483 {
11484 	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11485 	struct ipw_priv *priv = prom_priv->priv;
11486 
11487 	IPW_DEBUG_INFO("prom dev->stop\n");
11488 
11489 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11490 		priv->sys_config.accept_all_data_frames = 0;
11491 		priv->sys_config.accept_non_directed_frames = 0;
11492 		priv->sys_config.accept_all_mgmt_bcpr = 0;
11493 		priv->sys_config.accept_all_mgmt_frames = 0;
11494 
11495 		ipw_send_system_config(priv);
11496 	}
11497 
11498 	return 0;
11499 }
11500 
11501 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11502 					    struct net_device *dev)
11503 {
11504 	IPW_DEBUG_INFO("prom dev->xmit\n");
11505 	dev_kfree_skb(skb);
11506 	return NETDEV_TX_OK;
11507 }
11508 
/* net_device callbacks for the promiscuous "rtap" interface; its
 * ndo_start_xmit simply drops frames (receive-only interface). */
static const struct net_device_ops ipw_prom_netdev_ops = {
	.ndo_open 		= ipw_prom_open,
	.ndo_stop		= ipw_prom_stop,
	.ndo_start_xmit		= ipw_prom_hard_start_xmit,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
11516 
11517 static int ipw_prom_alloc(struct ipw_priv *priv)
11518 {
11519 	int rc = 0;
11520 
11521 	if (priv->prom_net_dev)
11522 		return -EPERM;
11523 
11524 	priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11525 	if (priv->prom_net_dev == NULL)
11526 		return -ENOMEM;
11527 
11528 	priv->prom_priv = libipw_priv(priv->prom_net_dev);
11529 	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11530 	priv->prom_priv->priv = priv;
11531 
11532 	strcpy(priv->prom_net_dev->name, "rtap%d");
11533 	eth_hw_addr_set(priv->prom_net_dev, priv->mac_addr);
11534 
11535 	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11536 	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11537 
11538 	priv->prom_net_dev->min_mtu = 68;
11539 	priv->prom_net_dev->max_mtu = LIBIPW_DATA_LEN;
11540 
11541 	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11542 	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11543 
11544 	rc = register_netdev(priv->prom_net_dev);
11545 	if (rc) {
11546 		free_libipw(priv->prom_net_dev, 1);
11547 		priv->prom_net_dev = NULL;
11548 		return rc;
11549 	}
11550 
11551 	return 0;
11552 }
11553 
11554 static void ipw_prom_free(struct ipw_priv *priv)
11555 {
11556 	if (!priv->prom_net_dev)
11557 		return;
11558 
11559 	unregister_netdev(priv->prom_net_dev);
11560 	free_libipw(priv->prom_net_dev, 1);
11561 
11562 	priv->prom_net_dev = NULL;
11563 }
11564 
11565 #endif
11566 
/* net_device callbacks for the primary wireless interface; frame
 * transmission goes through the shared libipw_xmit() path. */
static const struct net_device_ops ipw_netdev_ops = {
	.ndo_open		= ipw_net_open,
	.ndo_stop		= ipw_net_stop,
	.ndo_set_rx_mode	= ipw_net_set_multicast_list,
	.ndo_set_mac_address	= ipw_net_set_mac_address,
	.ndo_start_xmit		= libipw_xmit,
	.ndo_validate_addr	= eth_validate_addr,
};
11575 
/*
 * PCI ->probe() callback: allocate the libipw/net_device pair, enable
 * and map the PCI device, hook the shared interrupt, create the sysfs
 * attribute group, bring the hardware up, then register with cfg80211
 * and the network stack (plus the optional rtap interface).  Error
 * paths unwind in reverse order through the labels at the bottom.
 */
static int ipw_pci_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int i;

	net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = libipw_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);

	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
	/* "debug" module parameter seeds the global debug level. */
	ipw_debug_level = debug;
	spin_lock_init(&priv->irq_lock);
	spin_lock_init(&priv->lock);
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	mutex_init(&priv->mutex);
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_libipw;
	}

	pci_set_master(pdev);

	/* The adapter only does 32-bit DMA. */
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (!err)
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	length = pci_resource_len(pdev, 0);
	priv->hw_len = length;

	base = pci_ioremap_bar(pdev, 0);
	if (!base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	priv->hw_base = base;
	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);

	ipw_setup_deferred_work(priv);

	ipw_sw_reset(priv, 1);

	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
		goto out_iounmap;
	}

	SET_NETDEV_DEV(net_dev, &pdev->dev);

	mutex_lock(&priv->mutex);

	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
	priv->ieee->set_security = shim__set_security;
	priv->ieee->is_queue_full = ipw_net_is_queue_full;

#ifdef CONFIG_IPW2200_QOS
	priv->ieee->is_qos_active = ipw_is_qos_active;
	priv->ieee->handle_probe_response = ipw_handle_beacon;
	priv->ieee->handle_beacon = ipw_handle_probe_response;
	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
#endif				/* CONFIG_IPW2200_QOS */

	priv->ieee->perfect_rssi = -20;
	priv->ieee->worst_rssi = -85;

	net_dev->netdev_ops = &ipw_netdev_ops;
	priv->wireless_data.spy_data = &priv->ieee->spy_data;
	net_dev->wireless_data = &priv->wireless_data;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;

	net_dev->min_mtu = 68;
	net_dev->max_mtu = LIBIPW_DATA_LEN;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		mutex_unlock(&priv->mutex);
		goto out_release_irq;
	}

	if (ipw_up(priv)) {
		mutex_unlock(&priv->mutex);
		err = -EIO;
		goto out_remove_sysfs;
	}

	mutex_unlock(&priv->mutex);

	err = ipw_wdev_init(net_dev);
	if (err) {
		IPW_ERROR("failed to register wireless device\n");
		goto out_remove_sysfs;
	}

	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_unregister_wiphy;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (rtap_iface) {
	        err = ipw_prom_alloc(priv);
		if (err) {
			IPW_ERROR("Failed to register promiscuous network "
				  "device (error %d).\n", err);
			unregister_netdev(priv->net_dev);
			goto out_unregister_wiphy;
		}
	}
#endif

	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
	       "channels, %d 802.11a channels)\n",
	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
	       priv->ieee->geo.a_channels);

	return 0;

	/* NOTE(review): the error paths below do not call ipw_down()
	 * even though ipw_up() may have succeeded — verify whether the
	 * hardware should be taken back down here. */
      out_unregister_wiphy:
	wiphy_unregister(priv->ieee->wdev.wiphy);
	kfree(priv->ieee->a_band.channels);
	kfree(priv->ieee->bg_band.channels);
      out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
      out_release_irq:
	free_irq(pdev->irq, priv);
      out_iounmap:
	iounmap(priv->hw_base);
      out_pci_release_regions:
	pci_release_regions(pdev);
      out_pci_disable_device:
	pci_disable_device(pdev);
      out_free_libipw:
	free_libipw(priv->net_dev, 0);
      out:
	return err;
}
11746 
11747 static void ipw_pci_remove(struct pci_dev *pdev)
11748 {
11749 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11750 	struct list_head *p, *q;
11751 	int i;
11752 
11753 	if (!priv)
11754 		return;
11755 
11756 	mutex_lock(&priv->mutex);
11757 
11758 	priv->status |= STATUS_EXIT_PENDING;
11759 	ipw_down(priv);
11760 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11761 
11762 	mutex_unlock(&priv->mutex);
11763 
11764 	unregister_netdev(priv->net_dev);
11765 
11766 	if (priv->rxq) {
11767 		ipw_rx_queue_free(priv, priv->rxq);
11768 		priv->rxq = NULL;
11769 	}
11770 	ipw_tx_queue_free(priv);
11771 
11772 	if (priv->cmdlog) {
11773 		kfree(priv->cmdlog);
11774 		priv->cmdlog = NULL;
11775 	}
11776 
11777 	/* make sure all works are inactive */
11778 	cancel_delayed_work_sync(&priv->adhoc_check);
11779 	cancel_work_sync(&priv->associate);
11780 	cancel_work_sync(&priv->disassociate);
11781 	cancel_work_sync(&priv->system_config);
11782 	cancel_work_sync(&priv->rx_replenish);
11783 	cancel_work_sync(&priv->adapter_restart);
11784 	cancel_delayed_work_sync(&priv->rf_kill);
11785 	cancel_work_sync(&priv->up);
11786 	cancel_work_sync(&priv->down);
11787 	cancel_delayed_work_sync(&priv->request_scan);
11788 	cancel_delayed_work_sync(&priv->request_direct_scan);
11789 	cancel_delayed_work_sync(&priv->request_passive_scan);
11790 	cancel_delayed_work_sync(&priv->scan_event);
11791 	cancel_delayed_work_sync(&priv->gather_stats);
11792 	cancel_work_sync(&priv->abort_scan);
11793 	cancel_work_sync(&priv->roam);
11794 	cancel_delayed_work_sync(&priv->scan_check);
11795 	cancel_work_sync(&priv->link_up);
11796 	cancel_work_sync(&priv->link_down);
11797 	cancel_delayed_work_sync(&priv->led_link_on);
11798 	cancel_delayed_work_sync(&priv->led_link_off);
11799 	cancel_delayed_work_sync(&priv->led_act_off);
11800 	cancel_work_sync(&priv->merge_networks);
11801 
11802 	/* Free MAC hash list for ADHOC */
11803 	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11804 		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11805 			list_del(p);
11806 			kfree(list_entry(p, struct ipw_ibss_seq, list));
11807 		}
11808 	}
11809 
11810 	kfree(priv->error);
11811 	priv->error = NULL;
11812 
11813 #ifdef CONFIG_IPW2200_PROMISCUOUS
11814 	ipw_prom_free(priv);
11815 #endif
11816 
11817 	free_irq(pdev->irq, priv);
11818 	iounmap(priv->hw_base);
11819 	pci_release_regions(pdev);
11820 	pci_disable_device(pdev);
11821 	/* wiphy_unregister needs to be here, before free_libipw */
11822 	wiphy_unregister(priv->ieee->wdev.wiphy);
11823 	kfree(priv->ieee->a_band.channels);
11824 	kfree(priv->ieee->bg_band.channels);
11825 	free_libipw(priv->net_dev, 0);
11826 	free_firmware();
11827 }
11828 
/*
 * PM callback: put the adapter to sleep.
 *
 * Powers the hardware down, detaches the net device from the stack and
 * records the suspend timestamp so ipw_pci_resume() can compute how long
 * we slept (stored into priv->suspend_time on resume).
 *
 * Always returns 0; the device is torn down unconditionally.
 */
static int __maybe_unused ipw_pci_suspend(struct device *dev_d)
{
	struct ipw_priv *priv = dev_get_drvdata(dev_d);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	/* Remember when we suspended (boottime seconds keep counting in S3) */
	priv->suspend_at = ktime_get_boottime_seconds();

	return 0;
}
11846 
11847 static int __maybe_unused ipw_pci_resume(struct device *dev_d)
11848 {
11849 	struct pci_dev *pdev = to_pci_dev(dev_d);
11850 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11851 	struct net_device *dev = priv->net_dev;
11852 	u32 val;
11853 
11854 	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11855 
11856 	/*
11857 	 * Suspend/Resume resets the PCI configuration space, so we have to
11858 	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11859 	 * from interfering with C3 CPU state. pci_restore_state won't help
11860 	 * here since it only restores the first 64 bytes pci config header.
11861 	 */
11862 	pci_read_config_dword(pdev, 0x40, &val);
11863 	if ((val & 0x0000ff00) != 0)
11864 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11865 
11866 	/* Set the device back into the PRESENT state; this will also wake
11867 	 * the queue of needed */
11868 	netif_device_attach(dev);
11869 
11870 	priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
11871 
11872 	/* Bring the device back up */
11873 	schedule_work(&priv->up);
11874 
11875 	return 0;
11876 }
11877 
/*
 * PCI shutdown hook: quiesce the hardware on reboot/poweroff.
 * Powers the adapter down and disables the PCI device; no state is saved.
 */
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	pci_disable_device(pdev);
}
11887 
/* Suspend/resume hooks for system sleep (runtime PM not supported) */
static SIMPLE_DEV_PM_OPS(ipw_pci_pm_ops, ipw_pci_suspend, ipw_pci_resume);

/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,		/* supported 2200BG/2915ABG PCI IDs */
	.probe = ipw_pci_probe,
	.remove = ipw_pci_remove,
	.driver.pm = &ipw_pci_pm_ops,
	.shutdown = ipw_pci_shutdown,
};
11899 
11900 static int __init ipw_init(void)
11901 {
11902 	int ret;
11903 
11904 	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11905 	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11906 
11907 	ret = pci_register_driver(&ipw_driver);
11908 	if (ret) {
11909 		IPW_ERROR("Unable to initialize PCI module\n");
11910 		return ret;
11911 	}
11912 
11913 	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11914 	if (ret) {
11915 		IPW_ERROR("Unable to create driver sysfs file\n");
11916 		pci_unregister_driver(&ipw_driver);
11917 		return ret;
11918 	}
11919 
11920 	return ret;
11921 }
11922 
/*
 * Module exit point: undo ipw_init() in reverse order — remove the sysfs
 * attribute first, then unregister the PCI driver.
 */
static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}
11928 
11929 module_param(disable, int, 0444);
11930 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11931 
11932 module_param(associate, int, 0444);
11933 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
11934 
11935 module_param(auto_create, int, 0444);
11936 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11937 
11938 module_param_named(led, led_support, int, 0444);
11939 MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
11940 
11941 module_param(debug, int, 0444);
11942 MODULE_PARM_DESC(debug, "debug output mask");
11943 
11944 module_param_named(channel, default_channel, int, 0444);
11945 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11946 
11947 #ifdef CONFIG_IPW2200_PROMISCUOUS
11948 module_param(rtap_iface, int, 0444);
11949 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
11950 #endif
11951 
11952 #ifdef CONFIG_IPW2200_QOS
11953 module_param(qos_enable, int, 0444);
11954 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11955 
11956 module_param(qos_burst_enable, int, 0444);
11957 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11958 
11959 module_param(qos_no_ack_mask, int, 0444);
11960 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11961 
11962 module_param(burst_duration_CCK, int, 0444);
11963 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11964 
11965 module_param(burst_duration_OFDM, int, 0444);
11966 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11967 #endif				/* CONFIG_IPW2200_QOS */
11968 
11969 #ifdef CONFIG_IPW2200_MONITOR
11970 module_param_named(mode, network_mode, int, 0444);
11971 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11972 #else
11973 module_param_named(mode, network_mode, int, 0444);
11974 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11975 #endif
11976 
11977 module_param(bt_coexist, int, 0444);
11978 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11979 
11980 module_param(hwcrypto, int, 0444);
11981 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11982 
11983 module_param(cmdlog, int, 0444);
11984 MODULE_PARM_DESC(cmdlog,
11985 		 "allocate a ring buffer for logging firmware commands");
11986 
11987 module_param(roaming, int, 0444);
11988 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11989 
11990 module_param(antenna, int, 0444);
11991 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
11992 
11993 module_exit(ipw_exit);
11994 module_init(ipw_init);
11995