1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3  *
4  * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
5  *
6  * Portions of this file are derived from the ipw3945 project, as well
7  * as portions of the ieee80211 subsystem header files.
8  *
9  * Contact Information:
10  *  Intel Linux Wireless <ilw@linux.intel.com>
11  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
12  *
13  *****************************************************************************/
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/pci.h>
21 #include <linux/slab.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/delay.h>
24 #include <linux/sched.h>
25 #include <linux/skbuff.h>
26 #include <linux/netdevice.h>
27 #include <linux/firmware.h>
28 #include <linux/etherdevice.h>
29 #include <linux/if_arp.h>
30 
31 #include <net/ieee80211_radiotap.h>
32 #include <net/mac80211.h>
33 
34 #include <asm/div64.h>
35 
36 #define DRV_NAME	"iwl3945"
37 
38 #include "commands.h"
39 #include "common.h"
40 #include "3945.h"
41 #include "iwl-spectrum.h"
42 
43 /*
44  * module name, copyright, version, etc.
45  */
46 
47 #define DRV_DESCRIPTION	\
48 "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
49 
50 #ifdef CONFIG_IWLEGACY_DEBUG
51 #define VD "d"
52 #else
53 #define VD
54 #endif
55 
56 /*
57  * add "s" to indicate spectrum measurement included.
58  * we add it here to be consistent with previous releases in which
59  * this was configurable.
60  */
61 #define DRV_VERSION  IWLWIFI_VERSION VD "s"
62 #define DRV_COPYRIGHT	"Copyright(c) 2003-2011 Intel Corporation"
63 #define DRV_AUTHOR     "<ilw@linux.intel.com>"
64 
65 MODULE_DESCRIPTION(DRV_DESCRIPTION);
66 MODULE_VERSION(DRV_VERSION);
67 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
68 MODULE_LICENSE("GPL");
69 
70  /* module parameters */
71 struct il_mod_params il3945_mod_params = {
72 	.sw_crypto = 1,
73 	.restart_fw = 1,
74 	.disable_hw_scan = 1,
75 	/* the rest are 0 by default */
76 };
77 
78 /**
79  * il3945_get_antenna_flags - Get antenna flags for RXON command
80  * @il: eeprom and antenna fields are used to determine antenna flags
81  *
82  * il->eeprom39  is used to determine if antenna AUX/MAIN are reversed
83  * il3945_mod_params.antenna specifies the antenna diversity mode:
84  *
85  * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
86  * IL_ANTENNA_MAIN      - Force MAIN antenna
87  * IL_ANTENNA_AUX       - Force AUX antenna
88  */
89 __le32
90 il3945_get_antenna_flags(const struct il_priv *il)
91 {
92 	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
93 
94 	switch (il3945_mod_params.antenna) {
95 	case IL_ANTENNA_DIVERSITY:
96 		return 0;
97 
98 	case IL_ANTENNA_MAIN:
99 		if (eeprom->antenna_switch_type)
100 			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
101 		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
102 
103 	case IL_ANTENNA_AUX:
104 		if (eeprom->antenna_switch_type)
105 			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
106 		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
107 	}
108 
109 	/* bad antenna selector value */
110 	IL_ERR("Bad antenna selector value (0x%x)\n",
111 	       il3945_mod_params.antenna);
112 
113 	return 0;		/* "diversity" is default if error */
114 }
115 
116 static int
117 il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
118 				 struct ieee80211_key_conf *keyconf, u8 sta_id)
119 {
120 	unsigned long flags;
121 	__le16 key_flags = 0;
122 	int ret;
123 
124 	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
125 	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
126 
127 	if (sta_id == il->hw_params.bcast_id)
128 		key_flags |= STA_KEY_MULTICAST_MSK;
129 
130 	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
131 	keyconf->hw_key_idx = keyconf->keyidx;
132 	key_flags &= ~STA_KEY_FLG_INVALID;
133 
134 	spin_lock_irqsave(&il->sta_lock, flags);
135 	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
136 	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
137 	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
138 
139 	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
140 
	if ((il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) ==
	    STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocate room
	 * in uCode. */
147 
148 	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
149 	     "no space for a new key");
150 
151 	il->stations[sta_id].sta.key.key_flags = key_flags;
152 	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
153 	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
154 
155 	D_INFO("hwcrypto: modify ucode station key info\n");
156 
157 	ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
158 
159 	spin_unlock_irqrestore(&il->sta_lock, flags);
160 
161 	return ret;
162 }
163 
164 static int
165 il3945_set_tkip_dynamic_key_info(struct il_priv *il,
166 				 struct ieee80211_key_conf *keyconf, u8 sta_id)
167 {
168 	return -EOPNOTSUPP;
169 }
170 
171 static int
172 il3945_set_wep_dynamic_key_info(struct il_priv *il,
173 				struct ieee80211_key_conf *keyconf, u8 sta_id)
174 {
175 	return -EOPNOTSUPP;
176 }
177 
178 static int
179 il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
180 {
181 	unsigned long flags;
182 	struct il_addsta_cmd sta_cmd;
183 
184 	spin_lock_irqsave(&il->sta_lock, flags);
185 	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
186 	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
187 	il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
188 	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
189 	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
190 	memcpy(&sta_cmd, &il->stations[sta_id].sta,
191 	       sizeof(struct il_addsta_cmd));
192 	spin_unlock_irqrestore(&il->sta_lock, flags);
193 
194 	D_INFO("hwcrypto: clear ucode station key info\n");
195 	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
196 }
197 
198 static int
199 il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
200 		       u8 sta_id)
201 {
202 	int ret = 0;
203 
204 	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
205 
206 	switch (keyconf->cipher) {
207 	case WLAN_CIPHER_SUITE_CCMP:
208 		ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
209 		break;
210 	case WLAN_CIPHER_SUITE_TKIP:
211 		ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
212 		break;
213 	case WLAN_CIPHER_SUITE_WEP40:
214 	case WLAN_CIPHER_SUITE_WEP104:
215 		ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
216 		break;
217 	default:
		IL_ERR("%s: unknown cipher %x\n", __func__, keyconf->cipher);
219 		ret = -EINVAL;
220 	}
221 
222 	D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
223 	      keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
224 
225 	return ret;
226 }
227 
228 static int
229 il3945_remove_static_key(struct il_priv *il)
230 {
231 	return -EOPNOTSUPP;
232 }
233 
234 static int
235 il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
236 {
237 	if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
238 	    key->cipher == WLAN_CIPHER_SUITE_WEP104)
239 		return -EOPNOTSUPP;
240 
241 	IL_ERR("Static key invalid: cipher %x\n", key->cipher);
242 	return -EINVAL;
243 }
244 
245 static void
246 il3945_clear_free_frames(struct il_priv *il)
247 {
248 	struct list_head *element;
249 
250 	D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
251 
252 	while (!list_empty(&il->free_frames)) {
253 		element = il->free_frames.next;
254 		list_del(element);
255 		kfree(list_entry(element, struct il3945_frame, list));
256 		il->frames_count--;
257 	}
258 
259 	if (il->frames_count) {
260 		IL_WARN("%d frames still in use.  Did we lose one?\n",
261 			il->frames_count);
262 		il->frames_count = 0;
263 	}
264 }
265 
266 static struct il3945_frame *
267 il3945_get_free_frame(struct il_priv *il)
268 {
269 	struct il3945_frame *frame;
270 	struct list_head *element;
271 	if (list_empty(&il->free_frames)) {
272 		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
273 		if (!frame) {
274 			IL_ERR("Could not allocate frame!\n");
275 			return NULL;
276 		}
277 
278 		il->frames_count++;
279 		return frame;
280 	}
281 
282 	element = il->free_frames.next;
283 	list_del(element);
284 	return list_entry(element, struct il3945_frame, list);
285 }
286 
287 static void
288 il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
289 {
290 	memset(frame, 0, sizeof(*frame));
291 	list_add(&frame->list, &il->free_frames);
292 }
293 
294 unsigned int
295 il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
296 			 int left)
297 {
298 
299 	if (!il_is_associated(il) || !il->beacon_skb)
300 		return 0;
301 
302 	if (il->beacon_skb->len > left)
303 		return 0;
304 
305 	memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
306 
307 	return il->beacon_skb->len;
308 }
309 
310 static int
311 il3945_send_beacon_cmd(struct il_priv *il)
312 {
313 	struct il3945_frame *frame;
314 	unsigned int frame_size;
315 	int rc;
316 	u8 rate;
317 
318 	frame = il3945_get_free_frame(il);
319 
320 	if (!frame) {
		IL_ERR("Could not obtain free frame buffer for beacon command.\n");
323 		return -ENOMEM;
324 	}
325 
326 	rate = il_get_lowest_plcp(il);
327 
328 	frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
329 
330 	rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
331 
332 	il3945_free_frame(il, frame);
333 
334 	return rc;
335 }
336 
337 static void
338 il3945_unset_hw_params(struct il_priv *il)
339 {
340 	if (il->_3945.shared_virt)
341 		dma_free_coherent(&il->pci_dev->dev,
342 				  sizeof(struct il3945_shared),
343 				  il->_3945.shared_virt, il->_3945.shared_phys);
344 }
345 
346 static void
347 il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
348 			     struct il_device_cmd *cmd,
349 			     struct sk_buff *skb_frag, int sta_id)
350 {
351 	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
352 	struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;
353 
354 	tx_cmd->sec_ctl = 0;
355 
356 	switch (keyinfo->cipher) {
357 	case WLAN_CIPHER_SUITE_CCMP:
358 		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
359 		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
360 		D_TX("tx_cmd with AES hwcrypto\n");
361 		break;
362 
363 	case WLAN_CIPHER_SUITE_TKIP:
364 		break;
365 
366 	case WLAN_CIPHER_SUITE_WEP104:
367 		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
368 		fallthrough;
369 	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    TX_CMD_SEC_WEP |
		    (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) <<
		    TX_CMD_SEC_SHIFT;

		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);

		D_TX("Configuring packet for WEP encryption with key %d\n",
		     info->control.hw_key->hw_key_idx);
379 		break;
380 
381 	default:
382 		IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
383 		break;
384 	}
385 }
386 
387 /*
388  * handle build C_TX command notification.
389  */
390 static void
391 il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
392 			  struct ieee80211_tx_info *info,
393 			  struct ieee80211_hdr *hdr, u8 std_id)
394 {
395 	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
396 	__le32 tx_flags = tx_cmd->tx_flags;
397 	__le16 fc = hdr->frame_control;
398 
399 	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
400 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
401 		tx_flags |= TX_CMD_FLG_ACK_MSK;
402 		if (ieee80211_is_mgmt(fc))
403 			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
404 		if (ieee80211_is_probe_resp(fc) &&
405 		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
406 			tx_flags |= TX_CMD_FLG_TSF_MSK;
407 	} else {
408 		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
409 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
410 	}
411 
412 	tx_cmd->sta_id = std_id;
413 	if (ieee80211_has_morefrags(fc))
414 		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
415 
416 	if (ieee80211_is_data_qos(fc)) {
417 		u8 *qc = ieee80211_get_qos_ctl(hdr);
418 		tx_cmd->tid_tspec = qc[0] & 0xf;
419 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
420 	} else {
421 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
422 	}
423 
424 	il_tx_cmd_protection(il, info, fc, &tx_flags);
425 
426 	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
427 	if (ieee80211_is_mgmt(fc)) {
428 		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
429 			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
430 		else
431 			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
432 	} else {
433 		tx_cmd->timeout.pm_frame_timeout = 0;
434 	}
435 
436 	tx_cmd->driver_txop = 0;
437 	tx_cmd->tx_flags = tx_flags;
438 	tx_cmd->next_frame_len = 0;
439 }
440 
441 /*
442  * start C_TX command process
443  */
444 static int
445 il3945_tx_skb(struct il_priv *il,
446 	      struct ieee80211_sta *sta,
447 	      struct sk_buff *skb)
448 {
449 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
450 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
451 	struct il3945_tx_cmd *tx_cmd;
452 	struct il_tx_queue *txq = NULL;
453 	struct il_queue *q = NULL;
454 	struct il_device_cmd *out_cmd;
455 	struct il_cmd_meta *out_meta;
456 	dma_addr_t phys_addr;
457 	dma_addr_t txcmd_phys;
458 	int txq_id = skb_get_queue_mapping(skb);
459 	u16 len, idx, hdr_len;
460 	u16 firstlen, secondlen;
461 	u8 sta_id;
462 	u8 tid = 0;
463 	__le16 fc;
464 	u8 wait_write_ptr = 0;
465 	unsigned long flags;
466 
467 	spin_lock_irqsave(&il->lock, flags);
468 	if (il_is_rfkill(il)) {
469 		D_DROP("Dropping - RF KILL\n");
470 		goto drop_unlock;
471 	}
472 
473 	if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
474 	    IL_INVALID_RATE) {
475 		IL_ERR("ERROR: No TX rate available.\n");
476 		goto drop_unlock;
477 	}
478 
479 	fc = hdr->frame_control;
480 
481 #ifdef CONFIG_IWLEGACY_DEBUG
482 	if (ieee80211_is_auth(fc))
483 		D_TX("Sending AUTH frame\n");
484 	else if (ieee80211_is_assoc_req(fc))
485 		D_TX("Sending ASSOC frame\n");
486 	else if (ieee80211_is_reassoc_req(fc))
487 		D_TX("Sending REASSOC frame\n");
488 #endif
489 
490 	spin_unlock_irqrestore(&il->lock, flags);
491 
492 	hdr_len = ieee80211_hdrlen(fc);
493 
494 	/* Find idx into station table for destination station */
495 	sta_id = il_sta_id_or_broadcast(il, sta);
496 	if (sta_id == IL_INVALID_STATION) {
497 		D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
498 		goto drop;
499 	}
500 
501 	D_RATE("station Id %d\n", sta_id);
502 
503 	if (ieee80211_is_data_qos(fc)) {
504 		u8 *qc = ieee80211_get_qos_ctl(hdr);
505 		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
506 		if (unlikely(tid >= MAX_TID_COUNT))
507 			goto drop;
508 	}
509 
510 	/* Descriptor for chosen Tx queue */
511 	txq = &il->txq[txq_id];
512 	q = &txq->q;
513 
	if (il_queue_space(q) < q->high_mark)
515 		goto drop;
516 
517 	spin_lock_irqsave(&il->lock, flags);
518 
519 	idx = il_get_cmd_idx(q, q->write_ptr, 0);
520 
521 	txq->skbs[q->write_ptr] = skb;
522 
523 	/* Init first empty entry in queue's array of Tx/cmd buffers */
524 	out_cmd = txq->cmd[idx];
525 	out_meta = &txq->meta[idx];
526 	tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
527 	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
528 	memset(tx_cmd, 0, sizeof(*tx_cmd));
529 
530 	/*
531 	 * Set up the Tx-command (not MAC!) header.
532 	 * Store the chosen Tx queue and TFD idx within the sequence field;
533 	 * after Tx, uCode's Tx response will return this value so driver can
534 	 * locate the frame within the tx queue and do post-tx processing.
535 	 */
536 	out_cmd->hdr.cmd = C_TX;
537 	out_cmd->hdr.sequence =
538 	    cpu_to_le16((u16)
539 			(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
540 
541 	/* Copy MAC header from skb into command buffer */
542 	memcpy(tx_cmd->hdr, hdr, hdr_len);
543 
544 	if (info->control.hw_key)
545 		il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
546 
547 	/* TODO need this for burst mode later on */
548 	il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
549 
550 	il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id);
551 
552 	/* Total # bytes to be transmitted */
553 	tx_cmd->len = cpu_to_le16((u16) skb->len);
554 
555 	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
556 	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
557 
558 	/*
559 	 * Use the first empty entry in this queue's command buffer array
560 	 * to contain the Tx command and MAC header concatenated together
561 	 * (payload data will be in another buffer).
562 	 * Size of this varies, due to varying MAC header length.
563 	 * If end is not dword aligned, we'll have 2 extra bytes at the end
564 	 * of the MAC header (device reads on dword boundaries).
565 	 * We'll tell device about this padding later.
566 	 */
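	/*
	 * Illustration (assuming the fixed command portion, il3945_tx_cmd plus
	 * il_cmd_header, is already dword aligned, as the 2-extra-bytes note
	 * above implies): a 24-byte MAC header needs no padding, while a
	 * 26-byte QoS header leaves 2 pad bytes after the "(len + 3) & ~3"
	 * rounding below.
	 */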
567 	len =
568 	    sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
569 	    hdr_len;
570 	firstlen = (len + 3) & ~3;
571 
572 	/* Physical address of this Tx command's header (not MAC header!),
573 	 * within command buffer array. */
574 	txcmd_phys =
575 	    pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
576 			   PCI_DMA_TODEVICE);
577 	if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
578 		goto drop_unlock;
579 
580 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
581 	 * if any (802.11 null frames have no payload). */
582 	secondlen = skb->len - hdr_len;
583 	if (secondlen > 0) {
584 		phys_addr =
585 		    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
586 				   PCI_DMA_TODEVICE);
587 		if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
588 			goto drop_unlock;
589 	}
590 
591 	/* Add buffer containing Tx command and MAC(!) header to TFD's
592 	 * first entry */
593 	il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
594 	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
595 	dma_unmap_len_set(out_meta, len, firstlen);
596 	if (secondlen > 0)
597 		il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0,
598 					       U32_PAD(secondlen));
599 
600 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
601 		txq->need_update = 1;
602 	} else {
603 		wait_write_ptr = 1;
604 		txq->need_update = 0;
605 	}
606 
607 	il_update_stats(il, true, fc, skb->len);
608 
609 	D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
610 	D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
611 	il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
612 	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
613 			  ieee80211_hdrlen(fc));
614 
615 	/* Tell device the write idx *just past* this latest filled TFD */
616 	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
617 	il_txq_update_write_ptr(il, txq);
618 	spin_unlock_irqrestore(&il->lock, flags);
619 
620 	if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
621 		if (wait_write_ptr) {
622 			spin_lock_irqsave(&il->lock, flags);
623 			txq->need_update = 1;
624 			il_txq_update_write_ptr(il, txq);
625 			spin_unlock_irqrestore(&il->lock, flags);
626 		}
627 
628 		il_stop_queue(il, txq);
629 	}
630 
631 	return 0;
632 
633 drop_unlock:
634 	spin_unlock_irqrestore(&il->lock, flags);
635 drop:
636 	return -1;
637 }
638 
639 static int
640 il3945_get_measurement(struct il_priv *il,
641 		       struct ieee80211_measurement_params *params, u8 type)
642 {
643 	struct il_spectrum_cmd spectrum;
644 	struct il_rx_pkt *pkt;
645 	struct il_host_cmd cmd = {
646 		.id = C_SPECTRUM_MEASUREMENT,
647 		.data = (void *)&spectrum,
648 		.flags = CMD_WANT_SKB,
649 	};
650 	u32 add_time = le64_to_cpu(params->start_time);
651 	int rc;
652 	int spectrum_resp_status;
653 	int duration = le16_to_cpu(params->duration);
654 
655 	if (il_is_associated(il))
656 		add_time =
657 		    il_usecs_to_beacons(il,
658 					le64_to_cpu(params->start_time) -
659 					il->_3945.last_tsf,
660 					le16_to_cpu(il->timing.beacon_interval));
661 
662 	memset(&spectrum, 0, sizeof(spectrum));
663 
664 	spectrum.channel_count = cpu_to_le16(1);
665 	spectrum.flags =
666 	    RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
667 	spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
668 	cmd.len = sizeof(spectrum);
669 	spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
670 
671 	if (il_is_associated(il))
672 		spectrum.start_time =
673 		    il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
674 				       le16_to_cpu(il->timing.beacon_interval));
675 	else
676 		spectrum.start_time = 0;
677 
678 	spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
679 	spectrum.channels[0].channel = params->channel;
680 	spectrum.channels[0].type = type;
681 	if (il->active.flags & RXON_FLG_BAND_24G_MSK)
682 		spectrum.flags |=
683 		    RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
684 		    RXON_FLG_TGG_PROTECT_MSK;
685 
686 	rc = il_send_cmd_sync(il, &cmd);
687 	if (rc)
688 		return rc;
689 
690 	pkt = (struct il_rx_pkt *)cmd.reply_page;
691 	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_SPECTRUM_MEASUREMENT command\n");
693 		rc = -EIO;
694 	}
695 
696 	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
697 	switch (spectrum_resp_status) {
698 	case 0:		/* Command will be handled */
699 		if (pkt->u.spectrum.id != 0xff) {
700 			D_INFO("Replaced existing measurement: %d\n",
701 			       pkt->u.spectrum.id);
702 			il->measurement_status &= ~MEASUREMENT_READY;
703 		}
704 		il->measurement_status |= MEASUREMENT_ACTIVE;
705 		rc = 0;
706 		break;
707 
708 	case 1:		/* Command will not be handled */
709 		rc = -EAGAIN;
710 		break;
711 	}
712 
713 	il_free_pages(il, cmd.reply_page);
714 
715 	return rc;
716 }
717 
718 static void
719 il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
720 {
721 	struct il_rx_pkt *pkt = rxb_addr(rxb);
722 	struct il_alive_resp *palive;
723 	struct delayed_work *pwork;
724 
725 	palive = &pkt->u.alive_frame;
726 
	D_INFO("Alive ucode status 0x%08X revision 0x%01X 0x%01X\n",
	       palive->is_valid, palive->ver_type, palive->ver_subtype);
729 
730 	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
731 		D_INFO("Initialization Alive received.\n");
732 		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
733 		       sizeof(struct il_alive_resp));
734 		pwork = &il->init_alive_start;
735 	} else {
736 		D_INFO("Runtime Alive received.\n");
737 		memcpy(&il->card_alive, &pkt->u.alive_frame,
738 		       sizeof(struct il_alive_resp));
739 		pwork = &il->alive_start;
740 		il3945_disable_events(il);
741 	}
742 
743 	/* We delay the ALIVE response by 5ms to
744 	 * give the HW RF Kill time to activate... */
745 	if (palive->is_valid == UCODE_VALID_OK)
746 		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
747 	else
748 		IL_WARN("uCode did not respond OK.\n");
749 }
750 
751 static void
752 il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
753 {
754 #ifdef CONFIG_IWLEGACY_DEBUG
755 	struct il_rx_pkt *pkt = rxb_addr(rxb);
756 #endif
757 
758 	D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
759 }
760 
761 static void
762 il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
763 {
764 	struct il_rx_pkt *pkt = rxb_addr(rxb);
765 	struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
766 #ifdef CONFIG_IWLEGACY_DEBUG
767 	u8 rate = beacon->beacon_notify_hdr.rate;
768 
	D_RX("beacon status %x retries %d iss %d tsf %d %d rate %d\n",
770 	     le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
771 	     beacon->beacon_notify_hdr.failure_frame,
772 	     le32_to_cpu(beacon->ibss_mgr_status),
773 	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
774 #endif
775 
	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
779 
780 /* Handle notification from uCode that card's power state is changing
781  * due to software, hardware, or critical temperature RFKILL */
782 static void
783 il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
784 {
785 	struct il_rx_pkt *pkt = rxb_addr(rxb);
786 	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
787 	unsigned long status = il->status;
788 
789 	IL_WARN("Card state received: HW:%s SW:%s\n",
790 		(flags & HW_CARD_DISABLED) ? "Kill" : "On",
791 		(flags & SW_CARD_DISABLED) ? "Kill" : "On");
792 
793 	_il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
794 
795 	if (flags & HW_CARD_DISABLED)
796 		set_bit(S_RFKILL, &il->status);
797 	else
798 		clear_bit(S_RFKILL, &il->status);
799 
800 	il_scan_cancel(il);
801 
	if (test_bit(S_RFKILL, &status) != test_bit(S_RFKILL, &il->status))
804 		wiphy_rfkill_set_hw_state(il->hw->wiphy,
805 					  test_bit(S_RFKILL, &il->status));
806 	else
807 		wake_up(&il->wait_command_queue);
808 }
809 
810 /*
811  * il3945_setup_handlers - Initialize Rx handler callbacks
812  *
813  * Setup the RX handlers for each of the reply types sent from the uCode
814  * to the host.
815  *
816  * This function chains into the hardware specific files for them to setup
817  * any hardware specific handlers as well.
818  */
819 static void
820 il3945_setup_handlers(struct il_priv *il)
821 {
822 	il->handlers[N_ALIVE] = il3945_hdl_alive;
823 	il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
824 	il->handlers[N_ERROR] = il_hdl_error;
825 	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
826 	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
827 	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
828 	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
829 	il->handlers[N_BEACON] = il3945_hdl_beacon;
830 
831 	/*
832 	 * The same handler is used for both the REPLY to a discrete
833 	 * stats request from the host as well as for the periodic
834 	 * stats notifications (after received beacons) from the uCode.
835 	 */
836 	il->handlers[C_STATS] = il3945_hdl_c_stats;
837 	il->handlers[N_STATS] = il3945_hdl_stats;
838 
839 	il_setup_rx_scan_handlers(il);
840 	il->handlers[N_CARD_STATE] = il3945_hdl_card_state;
841 
842 	/* Set up hardware specific Rx handlers */
843 	il3945_hw_handler_setup(il);
844 }
845 
846 /************************** RX-FUNCTIONS ****************************/
847 /*
848  * Rx theory of operation
849  *
850  * The host allocates 32 DMA target addresses and passes the host address
851  * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
852  * 0 to 31
853  *
854  * Rx Queue Indexes
855  * The host/firmware share two idx registers for managing the Rx buffers.
856  *
857  * The READ idx maps to the first position that the firmware may be writing
858  * to -- the driver can read up to (but not including) this position and get
859  * good data.
860  * The READ idx is managed by the firmware once the card is enabled.
861  *
862  * The WRITE idx maps to the last position the driver has read from -- the
863  * position preceding WRITE is the last slot the firmware can place a packet.
864  *
865  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
866  * WRITE = READ.
867  *
868  * During initialization, the host sets up the READ queue position to the first
869  * IDX position, and WRITE to the last (READ - 1 wrapped)
870  *
871  * When the firmware places a packet in a buffer, it will advance the READ idx
872  * and fire the RX interrupt.  The driver can then query the READ idx and
873  * process as many packets as possible, moving the WRITE idx forward as it
874  * resets the Rx queue buffers with new memory.
875  *
 * The management in the driver is as follows:
 * + A list of pre-allocated receive buffers is stored in il->rxq->rx_free.
 *   When il->rxq->free_count drops to or below RX_LOW_WATERMARK, work is
 *   scheduled to replenish il->rxq->rx_free.
 * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   il->rxq is replenished and the READ IDX is updated (updating the
 *   'processed' and 'read' driver idxes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the il->rxq.  The driver 'processed' idx is updated.
 * + The Host/Firmware il->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in il->rxq->rx_free, the READ
 *   IDX is not incremented and il->status(RX_STALLED) is set.  If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
889  *
890  *
891  * Driver sequence:
892  *
893  * il3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
894  *                            il3945_rx_queue_restock
895  * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
896  *                            queue, updates firmware pointers, and updates
897  *                            the WRITE idx.  If insufficient rx_free buffers
898  *                            are available, schedules il3945_rx_replenish
899  *
900  * -- enable interrupts --
 * tasklet - il3945_rx_handle()  Detaches il_rx_bufs from the queue up to
 *                            the READ IDX, invokes the matching handler for
 *                            each packet, then returns the buffer to rx_free
 *                            (remapped for reuse) or to rx_used if it cannot
 *                            be reused.
 *                            Calls il3945_rx_queue_restock to refill any empty
 *                            slots.
906  * ...
907  *
908  */
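/*
 * Illustrative sketch (an approximation for clarity, not driver code): the
 * restock path keeps filling slots while il_rx_queue_space() reports room,
 * which amounts to roughly
 *
 *	space = (READ - WRITE) modulo RX_QUEUE_SIZE, minus a small reserve
 *
 * so that the WRITE idx never catches up with READ (the "full" case
 * described above).
 */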
909 
910 /*
911  * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
912  */
913 static inline __le32
914 il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
915 {
916 	return cpu_to_le32((u32) dma_addr);
917 }
918 
919 /*
920  * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
921  *
922  * If there are slots in the RX queue that need to be restocked,
923  * and we have free pre-allocated buffers, fill the ranks as much
924  * as we can, pulling from rx_free.
925  *
926  * This moves the 'write' idx forward to catch up with 'processed', and
927  * also updates the memory address in the firmware to reference the new
928  * target buffer.
929  */
930 static void
931 il3945_rx_queue_restock(struct il_priv *il)
932 {
933 	struct il_rx_queue *rxq = &il->rxq;
934 	struct list_head *element;
935 	struct il_rx_buf *rxb;
936 	unsigned long flags;
937 
938 	spin_lock_irqsave(&rxq->lock, flags);
939 	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
940 		/* Get next free Rx buffer, remove from free list */
941 		element = rxq->rx_free.next;
942 		rxb = list_entry(element, struct il_rx_buf, list);
943 		list_del(element);
944 
945 		/* Point to Rx buffer via next RBD in circular buffer */
946 		rxq->bd[rxq->write] =
947 		    il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
948 		rxq->queue[rxq->write] = rxb;
949 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
950 		rxq->free_count--;
951 	}
952 	spin_unlock_irqrestore(&rxq->lock, flags);
953 	/* If the pre-allocated buffer pool is dropping low, schedule to
954 	 * refill it */
955 	if (rxq->free_count <= RX_LOW_WATERMARK)
956 		queue_work(il->workqueue, &il->rx_replenish);
957 
958 	/* If we've added more space for the firmware to place data, tell it.
959 	 * Increment device's write pointer in multiples of 8. */
960 	if (rxq->write_actual != (rxq->write & ~0x7) ||
961 	    abs(rxq->write - rxq->read) > 7) {
962 		spin_lock_irqsave(&rxq->lock, flags);
963 		rxq->need_update = 1;
964 		spin_unlock_irqrestore(&rxq->lock, flags);
965 		il_rx_queue_update_write_ptr(il, rxq);
966 	}
967 }
968 
/*
 * il3945_rx_allocate - Move all used buffers from rx_used to rx_free
 *
 * A receive buffer page is allocated and DMA-mapped for each slot moved
 * to rx_free.
 *
 * The callers (il3945_rx_replenish and il3945_rx_replenish_now) then
 * restock the Rx queue via il3945_rx_queue_restock.
 * il3945_rx_replenish runs as a scheduled work item (except during
 * initialization).
 */
977 static void
978 il3945_rx_allocate(struct il_priv *il, gfp_t priority)
979 {
980 	struct il_rx_queue *rxq = &il->rxq;
981 	struct list_head *element;
982 	struct il_rx_buf *rxb;
983 	struct page *page;
984 	dma_addr_t page_dma;
985 	unsigned long flags;
986 	gfp_t gfp_mask = priority;
987 
988 	while (1) {
989 		spin_lock_irqsave(&rxq->lock, flags);
990 		if (list_empty(&rxq->rx_used)) {
991 			spin_unlock_irqrestore(&rxq->lock, flags);
992 			return;
993 		}
994 		spin_unlock_irqrestore(&rxq->lock, flags);
995 
996 		if (rxq->free_count > RX_LOW_WATERMARK)
997 			gfp_mask |= __GFP_NOWARN;
998 
999 		if (il->hw_params.rx_page_order > 0)
1000 			gfp_mask |= __GFP_COMP;
1001 
1002 		/* Alloc a new receive buffer */
1003 		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
1004 		if (!page) {
1005 			if (net_ratelimit())
				D_INFO("Failed to allocate RX page.\n");
1007 			if (rxq->free_count <= RX_LOW_WATERMARK &&
1008 			    net_ratelimit())
				IL_ERR("Failed to allocate RX page with GFP 0x%x. "
				       "Only %u free buffers remaining.\n",
				       priority, rxq->free_count);
1012 			/* We don't reschedule replenish work here -- we will
1013 			 * call the restock method and if it still needs
1014 			 * more buffers it will schedule replenish */
1015 			break;
1016 		}
1017 
1018 		/* Get physical address of RB/SKB */
1019 		page_dma =
1020 		    pci_map_page(il->pci_dev, page, 0,
1021 				 PAGE_SIZE << il->hw_params.rx_page_order,
1022 				 PCI_DMA_FROMDEVICE);
1023 
1024 		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
1025 			__free_pages(page, il->hw_params.rx_page_order);
1026 			break;
1027 		}
1028 
1029 		spin_lock_irqsave(&rxq->lock, flags);
1030 
1031 		if (list_empty(&rxq->rx_used)) {
1032 			spin_unlock_irqrestore(&rxq->lock, flags);
1033 			pci_unmap_page(il->pci_dev, page_dma,
1034 				       PAGE_SIZE << il->hw_params.rx_page_order,
1035 				       PCI_DMA_FROMDEVICE);
1036 			__free_pages(page, il->hw_params.rx_page_order);
1037 			return;
1038 		}
1039 
1040 		element = rxq->rx_used.next;
1041 		rxb = list_entry(element, struct il_rx_buf, list);
1042 		list_del(element);
1043 
1044 		rxb->page = page;
1045 		rxb->page_dma = page_dma;
1046 		list_add_tail(&rxb->list, &rxq->rx_free);
1047 		rxq->free_count++;
1048 		il->alloc_rxb_page++;
1049 
1050 		spin_unlock_irqrestore(&rxq->lock, flags);
1051 	}
1052 }
1053 
1054 void
1055 il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
1056 {
1057 	unsigned long flags;
1058 	int i;
1059 	spin_lock_irqsave(&rxq->lock, flags);
1060 	INIT_LIST_HEAD(&rxq->rx_free);
1061 	INIT_LIST_HEAD(&rxq->rx_used);
1062 	/* Fill the rx_used queue with _all_ of the Rx buffers */
1063 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1064 		/* In the reset function, these buffers may have been allocated
1065 		 * to an SKB, so we need to unmap and free potential storage */
1066 		if (rxq->pool[i].page != NULL) {
1067 			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
1068 				       PAGE_SIZE << il->hw_params.rx_page_order,
1069 				       PCI_DMA_FROMDEVICE);
1070 			__il_free_pages(il, rxq->pool[i].page);
1071 			rxq->pool[i].page = NULL;
1072 		}
1073 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1074 	}
1075 
1076 	/* Set us so that we have processed and used all buffers, but have
1077 	 * not restocked the Rx queue with fresh buffers */
1078 	rxq->read = rxq->write = 0;
1079 	rxq->write_actual = 0;
1080 	rxq->free_count = 0;
1081 	spin_unlock_irqrestore(&rxq->lock, flags);
1082 }
1083 
1084 void
1085 il3945_rx_replenish(void *data)
1086 {
1087 	struct il_priv *il = data;
1088 	unsigned long flags;
1089 
1090 	il3945_rx_allocate(il, GFP_KERNEL);
1091 
1092 	spin_lock_irqsave(&il->lock, flags);
1093 	il3945_rx_queue_restock(il);
1094 	spin_unlock_irqrestore(&il->lock, flags);
1095 }
1096 
1097 static void
1098 il3945_rx_replenish_now(struct il_priv *il)
1099 {
1100 	il3945_rx_allocate(il, GFP_ATOMIC);
1101 
1102 	il3945_rx_queue_restock(il);
1103 }
1104 
/* Assumes that the page field of the buffers in 'pool' is kept accurate.
 * If a page has been detached, the pool entry needs to have its page set
 * to NULL.  This free routine walks the list of pool entries and, for every
 * non-NULL page, unmaps and frees it.
 */
1110 static void
1111 il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
1112 {
1113 	int i;
1114 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1115 		if (rxq->pool[i].page != NULL) {
1116 			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
1117 				       PAGE_SIZE << il->hw_params.rx_page_order,
1118 				       PCI_DMA_FROMDEVICE);
1119 			__il_free_pages(il, rxq->pool[i].page);
1120 			rxq->pool[i].page = NULL;
1121 		}
1122 	}
1123 
1124 	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1125 			  rxq->bd_dma);
1126 	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
1127 			  rxq->rb_stts, rxq->rb_stts_dma);
1128 	rxq->bd = NULL;
1129 	rxq->rb_stts = NULL;
1130 }
1131 
1132 /* Convert linear signal-to-noise ratio into dB */
static const u8 ratio2dB[100] = {
1134 /*	 0   1   2   3   4   5   6   7   8   9 */
1135 	0, 0, 6, 10, 12, 14, 16, 17, 18, 19,	/* 00 - 09 */
1136 	20, 21, 22, 22, 23, 23, 24, 25, 26, 26,	/* 10 - 19 */
1137 	26, 26, 26, 27, 27, 28, 28, 28, 29, 29,	/* 20 - 29 */
1138 	29, 30, 30, 30, 31, 31, 31, 31, 32, 32,	/* 30 - 39 */
1139 	32, 32, 32, 33, 33, 33, 33, 33, 34, 34,	/* 40 - 49 */
1140 	34, 34, 34, 34, 35, 35, 35, 35, 35, 35,	/* 50 - 59 */
1141 	36, 36, 36, 36, 36, 36, 36, 37, 37, 37,	/* 60 - 69 */
1142 	37, 37, 37, 37, 37, 38, 38, 38, 38, 38,	/* 70 - 79 */
1143 	38, 38, 38, 38, 38, 39, 39, 39, 39, 39,	/* 80 - 89 */
1144 	39, 39, 39, 39, 39, 40, 40, 40, 40, 40	/* 90 - 99 */
1145 };
1146 
1147 /* Calculates a relative dB value from a ratio of linear
1148  *   (i.e. not dB) signal levels.
1149  * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
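/* Worked example (illustration only): a 250:1 signal ratio takes the
 *   ">= 100" branch below, giving 20 + ratio2dB[25] = 20 + 28 = 48 dB,
 *   i.e. roughly 20*log10(250) for a voltage ratio. */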
1150 int
1151 il3945_calc_db_from_ratio(int sig_ratio)
1152 {
1153 	/* 1000:1 or higher just report as 60 dB */
1154 	if (sig_ratio >= 1000)
1155 		return 60;
1156 
1157 	/* 100:1 or higher, divide by 10 and use table,
1158 	 *   add 20 dB to make up for divide by 10 */
1159 	if (sig_ratio >= 100)
1160 		return 20 + (int)ratio2dB[sig_ratio / 10];
1161 
1162 	/* We shouldn't see this */
1163 	if (sig_ratio < 1)
1164 		return 0;
1165 
1166 	/* Use table for ratios 1:1 - 99:1 */
1167 	return (int)ratio2dB[sig_ratio];
1168 }
1169 
1170 /*
1171  * il3945_rx_handle - Main entry function for receiving responses from uCode
1172  *
1173  * Uses the il->handlers callback function array to invoke
1174  * the appropriate handlers, including command responses,
1175  * frame-received notifications, and other notifications.
1176  */
1177 static void
1178 il3945_rx_handle(struct il_priv *il)
1179 {
1180 	struct il_rx_buf *rxb;
1181 	struct il_rx_pkt *pkt;
1182 	struct il_rx_queue *rxq = &il->rxq;
1183 	u32 r, i;
1184 	int reclaim;
1185 	unsigned long flags;
1186 	u8 fill_rx = 0;
1187 	u32 count = 8;
1188 	int total_empty = 0;
1189 
1190 	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
1191 	 * buffer that the driver may process (last buffer filled by ucode). */
1192 	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1193 	i = rxq->read;
1194 
1195 	/* calculate total frames need to be restock after handling RX */
1196 	total_empty = r - rxq->write_actual;
1197 	if (total_empty < 0)
1198 		total_empty += RX_QUEUE_SIZE;
1199 
1200 	if (total_empty > (RX_QUEUE_SIZE / 2))
1201 		fill_rx = 1;
1202 	/* Rx interrupt, but nothing sent from uCode */
1203 	if (i == r)
1204 		D_RX("r = %d, i = %d\n", r, i);
1205 
1206 	while (i != r) {
1207 		int len;
1208 
1209 		rxb = rxq->queue[i];
1210 
1211 		/* If an RXB doesn't have a Rx queue slot associated with it,
1212 		 * then a bug has been introduced in the queue refilling
1213 		 * routines -- catch it here */
1214 		BUG_ON(rxb == NULL);
1215 
1216 		rxq->queue[i] = NULL;
1217 
1218 		pci_unmap_page(il->pci_dev, rxb->page_dma,
1219 			       PAGE_SIZE << il->hw_params.rx_page_order,
1220 			       PCI_DMA_FROMDEVICE);
1221 		pkt = rxb_addr(rxb);
1222 
1223 		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
1224 		len += sizeof(u32);	/* account for status word */
1225 
1226 		reclaim = il_need_reclaim(il, pkt);
1227 
1228 		/* Based on type of command response or notification,
1229 		 *   handle those that need handling via function in
1230 		 *   handlers table.  See il3945_setup_handlers() */
1231 		if (il->handlers[pkt->hdr.cmd]) {
1232 			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
1233 			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1234 			il->isr_stats.handlers[pkt->hdr.cmd]++;
1235 			il->handlers[pkt->hdr.cmd] (il, rxb);
1236 		} else {
1237 			/* No handling needed */
1238 			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
1239 			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1240 		}
1241 
1242 		/*
1243 		 * XXX: After here, we should always check rxb->page
1244 		 * against NULL before touching it or its virtual
1245 		 * memory (pkt). Because some handler might have
1246 		 * already taken or freed the pages.
1247 		 */
1248 
1249 		if (reclaim) {
1250 			/* Invoke any callbacks, transfer the buffer to caller,
1251 			 * and fire off the (possibly) blocking il_send_cmd()
1252 			 * as we reclaim the driver command queue */
1253 			if (rxb->page)
1254 				il_tx_cmd_complete(il, rxb);
1255 			else
1256 				IL_WARN("Claim null rxb?\n");
1257 		}
1258 
1259 		/* Reuse the page if possible. For notification packets and
1260 		 * SKBs that fail to Rx correctly, add them back into the
1261 		 * rx_free list for reuse later. */
1262 		spin_lock_irqsave(&rxq->lock, flags);
1263 		if (rxb->page != NULL) {
1264 			rxb->page_dma =
1265 			    pci_map_page(il->pci_dev, rxb->page, 0,
1266 					 PAGE_SIZE << il->hw_params.
1267 					 rx_page_order, PCI_DMA_FROMDEVICE);
1268 			if (unlikely(pci_dma_mapping_error(il->pci_dev,
1269 							   rxb->page_dma))) {
1270 				__il_free_pages(il, rxb->page);
1271 				rxb->page = NULL;
1272 				list_add_tail(&rxb->list, &rxq->rx_used);
1273 			} else {
1274 				list_add_tail(&rxb->list, &rxq->rx_free);
1275 				rxq->free_count++;
1276 			}
1277 		} else
1278 			list_add_tail(&rxb->list, &rxq->rx_used);
1279 
1280 		spin_unlock_irqrestore(&rxq->lock, flags);
1281 
1282 		i = (i + 1) & RX_QUEUE_MASK;
1283 		/* If there are a lot of unused frames,
1284 		 * restock the Rx queue so ucode won't assert. */
1285 		if (fill_rx) {
1286 			count++;
1287 			if (count >= 8) {
1288 				rxq->read = i;
1289 				il3945_rx_replenish_now(il);
1290 				count = 0;
1291 			}
1292 		}
1293 	}
1294 
	/* Save the idx of the next buffer to be processed */
1296 	rxq->read = i;
1297 	if (fill_rx)
1298 		il3945_rx_replenish_now(il);
1299 	else
1300 		il3945_rx_queue_restock(il);
1301 }
1302 
1303 /* call this function to flush any scheduled tasklet */
1304 static inline void
1305 il3945_synchronize_irq(struct il_priv *il)
1306 {
1307 	/* wait to make sure we flush pending tasklet */
1308 	synchronize_irq(il->pci_dev->irq);
1309 	tasklet_kill(&il->irq_tasklet);
1310 }
1311 
1312 static const char *
1313 il3945_desc_lookup(int i)
1314 {
1315 	switch (i) {
1316 	case 1:
1317 		return "FAIL";
1318 	case 2:
1319 		return "BAD_PARAM";
1320 	case 3:
1321 		return "BAD_CHECKSUM";
1322 	case 4:
1323 		return "NMI_INTERRUPT";
1324 	case 5:
1325 		return "SYSASSERT";
1326 	case 6:
1327 		return "FATAL_ERROR";
1328 	}
1329 
1330 	return "UNKNOWN";
1331 }
1332 
1333 #define ERROR_START_OFFSET  (1 * sizeof(u32))
1334 #define ERROR_ELEM_SIZE     (7 * sizeof(u32))
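/*
 * Layout of the on-device error log (as read back below): the word at
 * 'base' holds the entry count, hence ERROR_START_OFFSET of one u32, and
 * each ERROR_ELEM_SIZE entry is seven consecutive u32s: desc, time,
 * blink1, blink2, ilink1, ilink2 and data1.
 */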
1335 
1336 void
1337 il3945_dump_nic_error_log(struct il_priv *il)
1338 {
1339 	u32 i;
1340 	u32 desc, time, count, base, data1;
1341 	u32 blink1, blink2, ilink1, ilink2;
1342 
1343 	base = le32_to_cpu(il->card_alive.error_event_table_ptr);
1344 
1345 	if (!il3945_hw_valid_rtc_data_addr(base)) {
1346 		IL_ERR("Not valid error log pointer 0x%08X\n", base);
1347 		return;
1348 	}
1349 
1350 	count = il_read_targ_mem(il, base);
1351 
1352 	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1353 		IL_ERR("Start IWL Error Log Dump:\n");
1354 		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
1355 	}
1356 
1357 	IL_ERR("Desc       Time       asrtPC  blink2 "
1358 	       "ilink1  nmiPC   Line\n");
1359 	for (i = ERROR_START_OFFSET;
1360 	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
1361 	     i += ERROR_ELEM_SIZE) {
1362 		desc = il_read_targ_mem(il, base + i);
1363 		time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
1364 		blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
1365 		blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
1366 		ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
1367 		ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
1368 		data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));
1369 
1370 		IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1371 		       il3945_desc_lookup(desc), desc, time, blink1, blink2,
1372 		       ilink1, ilink2, data1);
1373 	}
1374 }
1375 
1376 static void
1377 il3945_irq_tasklet(struct tasklet_struct *t)
1378 {
1379 	struct il_priv *il = from_tasklet(il, t, irq_tasklet);
1380 	u32 inta, handled = 0;
1381 	u32 inta_fh;
1382 	unsigned long flags;
1383 #ifdef CONFIG_IWLEGACY_DEBUG
1384 	u32 inta_mask;
1385 #endif
1386 
1387 	spin_lock_irqsave(&il->lock, flags);
1388 
1389 	/* Ack/clear/reset pending uCode interrupts.
1390 	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1391 	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
1392 	inta = _il_rd(il, CSR_INT);
1393 	_il_wr(il, CSR_INT, inta);
1394 
1395 	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
1396 	 * Any new interrupts that happen after this, either while we're
1397 	 * in this tasklet, or later, will show up in next ISR/tasklet. */
1398 	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
1399 	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);
1400 
1401 #ifdef CONFIG_IWLEGACY_DEBUG
1402 	if (il_get_debug_level(il) & IL_DL_ISR) {
1403 		/* just for debug */
1404 		inta_mask = _il_rd(il, CSR_INT_MASK);
1405 		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
1406 		      inta_mask, inta_fh);
1407 	}
1408 #endif
1409 
1410 	spin_unlock_irqrestore(&il->lock, flags);
1411 
1412 	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
1413 	 * atomic, make sure that inta covers all the interrupts that
1414 	 * we've discovered, even if FH interrupt came in just after
1415 	 * reading CSR_INT. */
1416 	if (inta_fh & CSR39_FH_INT_RX_MASK)
1417 		inta |= CSR_INT_BIT_FH_RX;
1418 	if (inta_fh & CSR39_FH_INT_TX_MASK)
1419 		inta |= CSR_INT_BIT_FH_TX;
1420 
1421 	/* Now service all interrupt bits discovered above. */
1422 	if (inta & CSR_INT_BIT_HW_ERR) {
1423 		IL_ERR("Hardware error detected.  Restarting.\n");
1424 
1425 		/* Tell the device to stop sending interrupts */
1426 		il_disable_interrupts(il);
1427 
1428 		il->isr_stats.hw++;
1429 		il_irq_handle_error(il);
1430 
1431 		handled |= CSR_INT_BIT_HW_ERR;
1432 
1433 		return;
1434 	}
1435 #ifdef CONFIG_IWLEGACY_DEBUG
1436 	if (il_get_debug_level(il) & (IL_DL_ISR)) {
1437 		/* NIC fires this, but we don't use it, redundant with WAKEUP */
1438 		if (inta & CSR_INT_BIT_SCD) {
1439 			D_ISR("Scheduler finished to transmit "
1440 			      "the frame/frames.\n");
1441 			il->isr_stats.sch++;
1442 		}
1443 
1444 		/* Alive notification via Rx interrupt will do the real work */
1445 		if (inta & CSR_INT_BIT_ALIVE) {
1446 			D_ISR("Alive interrupt\n");
1447 			il->isr_stats.alive++;
1448 		}
1449 	}
1450 #endif
1451 	/* Safely ignore these bits for debug checks below */
1452 	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1453 
1454 	/* Error detected by uCode */
1455 	if (inta & CSR_INT_BIT_SW_ERR) {
1456 		IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n",
1457 		       inta);
1458 		il->isr_stats.sw++;
1459 		il_irq_handle_error(il);
1460 		handled |= CSR_INT_BIT_SW_ERR;
1461 	}
1462 
1463 	/* uCode wakes up after power-down sleep */
1464 	if (inta & CSR_INT_BIT_WAKEUP) {
1465 		D_ISR("Wakeup interrupt\n");
1466 		il_rx_queue_update_write_ptr(il, &il->rxq);
1467 
1468 		spin_lock_irqsave(&il->lock, flags);
1469 		il_txq_update_write_ptr(il, &il->txq[0]);
1470 		il_txq_update_write_ptr(il, &il->txq[1]);
1471 		il_txq_update_write_ptr(il, &il->txq[2]);
1472 		il_txq_update_write_ptr(il, &il->txq[3]);
1473 		il_txq_update_write_ptr(il, &il->txq[4]);
1474 		spin_unlock_irqrestore(&il->lock, flags);
1475 
1476 		il->isr_stats.wakeup++;
1477 		handled |= CSR_INT_BIT_WAKEUP;
1478 	}
1479 
1480 	/* All uCode command responses, including Tx command responses,
1481 	 * Rx "responses" (frame-received notification), and other
1482 	 * notifications from uCode come through here*/
1483 	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1484 		il3945_rx_handle(il);
1485 		il->isr_stats.rx++;
1486 		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1487 	}
1488 
1489 	if (inta & CSR_INT_BIT_FH_TX) {
1490 		D_ISR("Tx interrupt\n");
1491 		il->isr_stats.tx++;
1492 
1493 		_il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
1494 		il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
1495 		handled |= CSR_INT_BIT_FH_TX;
1496 	}
1497 
1498 	if (inta & ~handled) {
1499 		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1500 		il->isr_stats.unhandled++;
1501 	}
1502 
1503 	if (inta & ~il->inta_mask) {
1504 		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
1505 			inta & ~il->inta_mask);
1506 		IL_WARN("   with inta_fh = 0x%08x\n", inta_fh);
1507 	}
1508 
1509 	/* Re-enable all interrupts */
1510 	/* only Re-enable if disabled by irq */
1511 	if (test_bit(S_INT_ENABLED, &il->status))
1512 		il_enable_interrupts(il);
1513 
1514 #ifdef CONFIG_IWLEGACY_DEBUG
1515 	if (il_get_debug_level(il) & (IL_DL_ISR)) {
1516 		inta = _il_rd(il, CSR_INT);
1517 		inta_mask = _il_rd(il, CSR_INT_MASK);
1518 		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
1519 		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1520 		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1521 	}
1522 #endif
1523 }
1524 
1525 static int
1526 il3945_get_channels_for_scan(struct il_priv *il, enum nl80211_band band,
1527 			     u8 is_active, u8 n_probes,
1528 			     struct il3945_scan_channel *scan_ch,
1529 			     struct ieee80211_vif *vif)
1530 {
1531 	struct ieee80211_channel *chan;
1532 	const struct ieee80211_supported_band *sband;
1533 	const struct il_channel_info *ch_info;
1534 	u16 passive_dwell = 0;
1535 	u16 active_dwell = 0;
1536 	int added, i;
1537 
1538 	sband = il_get_hw_mode(il, band);
1539 	if (!sband)
1540 		return 0;
1541 
1542 	active_dwell = il_get_active_dwell_time(il, band, n_probes);
1543 	passive_dwell = il_get_passive_dwell_time(il, band, vif);
1544 
1545 	if (passive_dwell <= active_dwell)
1546 		passive_dwell = active_dwell + 1;
1547 
1548 	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
1549 		chan = il->scan_request->channels[i];
1550 
1551 		if (chan->band != band)
1552 			continue;
1553 
1554 		scan_ch->channel = chan->hw_value;
1555 
1556 		ch_info = il_get_channel_info(il, band, scan_ch->channel);
1557 		if (!il_is_channel_valid(ch_info)) {
1558 			D_SCAN("Channel %d is INVALID for this band.\n",
1559 			       scan_ch->channel);
1560 			continue;
1561 		}
1562 
1563 		scan_ch->active_dwell = cpu_to_le16(active_dwell);
1564 		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* If passive, set up for auto-switch
		 * and use long active_dwell time.
		 */
1568 		if (!is_active || il_is_channel_passive(ch_info) ||
1569 		    (chan->flags & IEEE80211_CHAN_NO_IR)) {
1570 			scan_ch->type = 0;	/* passive */
1571 			if (IL_UCODE_API(il->ucode_ver) == 1)
1572 				scan_ch->active_dwell =
1573 				    cpu_to_le16(passive_dwell - 1);
1574 		} else {
1575 			scan_ch->type = 1;	/* active */
1576 		}
1577 
		/* Set direct probe bits. These may be used both for active
		 * scan channels (probes get sent right away),
		 * or for passive channels (probes get sent only after
		 * hearing clear Rx packet). */
1582 		if (IL_UCODE_API(il->ucode_ver) >= 2) {
1583 			if (n_probes)
1584 				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
1585 		} else {
1586 			/* uCode v1 does not allow setting direct probe bits on
1587 			 * passive channel. */
1588 			if ((scan_ch->type & 1) && n_probes)
1589 				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
1590 		}
1591 
1592 		/* Set txpower levels to defaults */
1593 		scan_ch->tpc.dsp_atten = 110;
1594 		/* scan_pwr_info->tpc.dsp_atten; */
1595 
1596 		/*scan_pwr_info->tpc.tx_gain; */
		if (band == NL80211_BAND_5GHZ) {
			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
		} else {
			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
1601 			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
1602 			 * power level:
1603 			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
1604 			 */
1605 		}
1606 
1607 		D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
1608 		       (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
1609 		       (scan_ch->type & 1) ? active_dwell : passive_dwell);
1610 
1611 		scan_ch++;
1612 		added++;
1613 	}
1614 
1615 	D_SCAN("total channels to scan %d\n", added);
1616 	return added;
1617 }
1618 
1619 static void
1620 il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
1621 {
1622 	int i;
1623 
1624 	for (i = 0; i < RATE_COUNT_LEGACY; i++) {
1625 		rates[i].bitrate = il3945_rates[i].ieee * 5;
1626 		rates[i].hw_value = i;	/* Rate scaling will work on idxes */
1627 		rates[i].hw_value_short = i;
1628 		rates[i].flags = 0;
1629 		if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
			/*
			 * If the CCK rate is not 1M (PLCP value 10), set the
			 * short preamble rate flag.
			 */
1633 			rates[i].flags |=
1634 			    (il3945_rates[i].plcp ==
1635 			     10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1636 		}
1637 	}
1638 }
1639 
1640 /******************************************************************************
1641  *
1642  * uCode download functions
1643  *
1644  ******************************************************************************/
1645 
1646 static void
1647 il3945_dealloc_ucode_pci(struct il_priv *il)
1648 {
1649 	il_free_fw_desc(il->pci_dev, &il->ucode_code);
1650 	il_free_fw_desc(il->pci_dev, &il->ucode_data);
1651 	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
1652 	il_free_fw_desc(il->pci_dev, &il->ucode_init);
1653 	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
1654 	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
1655 }
1656 
1657 /*
1658  * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
1659  *     looking at all data.
1660  */
1661 static int
il3945_verify_inst_full(struct il_priv *il, __le32 *image, u32 len)
1663 {
1664 	u32 val;
1665 	u32 save_len = len;
1666 	int rc = 0;
1667 	u32 errcnt;
1668 
1669 	D_INFO("ucode inst image size is %u\n", len);
1670 
1671 	il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
1672 
1673 	errcnt = 0;
1674 	for (; len > 0; len -= sizeof(u32), image++) {
1675 		/* read data comes through single port, auto-incr addr */
1676 		/* NOTE: Use the debugless read so we don't flood kernel log
1677 		 * if IL_DL_IO is set */
1678 		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
1679 		if (val != le32_to_cpu(*image)) {
1680 			IL_ERR("uCode INST section is invalid at "
1681 			       "offset 0x%x, is 0x%x, s/b 0x%x\n",
1682 			       save_len - len, val, le32_to_cpu(*image));
1683 			rc = -EIO;
1684 			errcnt++;
1685 			if (errcnt >= 20)
1686 				break;
1687 		}
1688 	}
1689 
1690 	if (!errcnt)
1691 		D_INFO("ucode image in INSTRUCTION memory is good\n");
1692 
1693 	return rc;
1694 }
1695 
1696 /*
1697  * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
1698  *   using sample data 100 bytes apart.  If these sample points are good,
1699  *   it's a pretty good bet that everything between them is good, too.
1700  */
1701 static int
il3945_verify_inst_sparse(struct il_priv *il, __le32 *image, u32 len)
1703 {
1704 	u32 val;
1705 	int rc = 0;
1706 	u32 errcnt = 0;
1707 	u32 i;
1708 
1709 	D_INFO("ucode inst image size is %u\n", len);
1710 
1711 	for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
1712 		/* read data comes through single port, auto-incr addr */
1713 		/* NOTE: Use the debugless read so we don't flood kernel log
1714 		 * if IL_DL_IO is set */
1715 		il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
1716 		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
1717 		if (val != le32_to_cpu(*image)) {
1718 #if 0				/* Enable this if you want to see details */
1719 			IL_ERR("uCode INST section is invalid at "
1720 			       "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
1721 			       *image);
1722 #endif
1723 			rc = -EIO;
1724 			errcnt++;
1725 			if (errcnt >= 3)
1726 				break;
1727 		}
1728 	}
1729 
1730 	return rc;
1731 }
1732 
1733 /*
1734  * il3945_verify_ucode - determine which instruction image is in SRAM,
1735  *    and verify its contents
1736  */
1737 static int
1738 il3945_verify_ucode(struct il_priv *il)
1739 {
1740 	__le32 *image;
1741 	u32 len;
1742 	int rc = 0;
1743 
1744 	/* Try bootstrap */
1745 	image = (__le32 *) il->ucode_boot.v_addr;
1746 	len = il->ucode_boot.len;
1747 	rc = il3945_verify_inst_sparse(il, image, len);
1748 	if (rc == 0) {
1749 		D_INFO("Bootstrap uCode is good in inst SRAM\n");
1750 		return 0;
1751 	}
1752 
1753 	/* Try initialize */
1754 	image = (__le32 *) il->ucode_init.v_addr;
1755 	len = il->ucode_init.len;
1756 	rc = il3945_verify_inst_sparse(il, image, len);
1757 	if (rc == 0) {
1758 		D_INFO("Initialize uCode is good in inst SRAM\n");
1759 		return 0;
1760 	}
1761 
1762 	/* Try runtime/protocol */
1763 	image = (__le32 *) il->ucode_code.v_addr;
1764 	len = il->ucode_code.len;
1765 	rc = il3945_verify_inst_sparse(il, image, len);
1766 	if (rc == 0) {
1767 		D_INFO("Runtime uCode is good in inst SRAM\n");
1768 		return 0;
1769 	}
1770 
1771 	IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1772 
1773 	/* Since nothing seems to match, show first several data entries in
1774 	 * instruction SRAM, so maybe visual inspection will give a clue.
1775 	 * Selection of bootstrap image (vs. other images) is arbitrary. */
1776 	image = (__le32 *) il->ucode_boot.v_addr;
1777 	len = il->ucode_boot.len;
1778 	rc = il3945_verify_inst_full(il, image, len);
1779 
1780 	return rc;
1781 }
1782 
1783 static void
1784 il3945_nic_start(struct il_priv *il)
1785 {
1786 	/* Remove all resets to allow NIC to operate */
1787 	_il_wr(il, CSR_RESET, 0);
1788 }
1789 
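/* Generate trivial accessors that return the given v1 uCode header field
 * in CPU byte order. */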
1790 #define IL3945_UCODE_GET(item)						\
1791 static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
1792 {									\
1793 	return le32_to_cpu(ucode->v1.item);				\
1794 }
1795 
1796 static u32
1797 il3945_ucode_get_header_size(u32 api_ver)
1798 {
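	/* API v1 header is ver plus five image sizes: 6 * sizeof(__le32) = 24 */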
1799 	return 24;
1800 }
1801 
1802 static u8 *
1803 il3945_ucode_get_data(const struct il_ucode_header *ucode)
1804 {
1805 	return (u8 *) ucode->v1.data;
1806 }
1807 
1808 IL3945_UCODE_GET(inst_size);
1809 IL3945_UCODE_GET(data_size);
1810 IL3945_UCODE_GET(init_size);
1811 IL3945_UCODE_GET(init_data_size);
1812 IL3945_UCODE_GET(boot_size);
1813 
1814 /*
1815  * il3945_read_ucode - Read uCode images from disk file.
1816  *
1817  * Copy into buffers for card to fetch via bus-mastering
1818  */
1819 static int
1820 il3945_read_ucode(struct il_priv *il)
1821 {
1822 	const struct il_ucode_header *ucode;
1823 	int ret = -EINVAL, idx;
1824 	const struct firmware *ucode_raw;
1825 	/* firmware file name contains uCode/driver compatibility version */
1826 	const char *name_pre = il->cfg->fw_name_pre;
1827 	const unsigned int api_max = il->cfg->ucode_api_max;
1828 	const unsigned int api_min = il->cfg->ucode_api_min;
1829 	char buf[25];
1830 	u8 *src;
1831 	size_t len;
1832 	u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
1833 
1834 	/* Ask kernel firmware_class module to get the boot firmware off disk.
1835 	 * request_firmware() is synchronous, file is in memory on return. */
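	/* Walk API versions from api_max down to api_min, requesting
	 * "<fw_name_pre><api>.ucode" until a matching file is found. */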
1836 	for (idx = api_max; idx >= api_min; idx--) {
1837 		sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
1838 		ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
1839 		if (ret < 0) {
1840 			IL_ERR("%s firmware file req failed: %d\n", buf, ret);
1841 			if (ret == -ENOENT)
1842 				continue;
1843 			else
1844 				goto error;
1845 		} else {
1846 			if (idx < api_max)
1847 				IL_ERR("Loaded firmware %s, "
1848 				       "which is deprecated. "
1849 				       "Please use API v%u instead.\n", buf,
1850 				       api_max);
1851 			D_INFO("Got firmware '%s' file "
1852 			       "(%zd bytes) from disk\n", buf, ucode_raw->size);
1853 			break;
1854 		}
1855 	}
1856 
1857 	if (ret < 0)
1858 		goto error;
1859 
1860 	/* Make sure that we got at least our header! */
1861 	if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
1862 		IL_ERR("File size way too small!\n");
1863 		ret = -EINVAL;
1864 		goto err_release;
1865 	}
1866 
1867 	/* Data from ucode file:  header followed by uCode images */
1868 	ucode = (struct il_ucode_header *)ucode_raw->data;
1869 
1870 	il->ucode_ver = le32_to_cpu(ucode->ver);
1871 	api_ver = IL_UCODE_API(il->ucode_ver);
1872 	inst_size = il3945_ucode_get_inst_size(ucode);
1873 	data_size = il3945_ucode_get_data_size(ucode);
1874 	init_size = il3945_ucode_get_init_size(ucode);
1875 	init_data_size = il3945_ucode_get_init_data_size(ucode);
1876 	boot_size = il3945_ucode_get_boot_size(ucode);
1877 	src = il3945_ucode_get_data(ucode);
1878 
1879 	/* api_ver should match the API version encoded in the firmware
1880 	 * filename, but we don't check that here; from this point on we
1881 	 * rely only on the API version read from the firmware header */
1882 
1883 	if (api_ver < api_min || api_ver > api_max) {
1884 		IL_ERR("Driver unable to support your firmware API. "
1885 		       "Driver supports v%u, firmware is v%u.\n", api_max,
1886 		       api_ver);
1887 		il->ucode_ver = 0;
1888 		ret = -EINVAL;
1889 		goto err_release;
1890 	}
1891 	if (api_ver != api_max)
1892 		IL_ERR("Firmware has old API version. Expected %u, "
1893 		       "got %u. New firmware can be obtained "
1894 		       "from http://www.intellinuxwireless.org.\n", api_max,
1895 		       api_ver);
1896 
1897 	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
1898 		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
1899 		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
1900 
1901 	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
1902 		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
1903 		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
1904 		 IL_UCODE_SERIAL(il->ucode_ver));
1905 
1906 	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
1907 	D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
1908 	D_INFO("f/w package hdr runtime data size = %u\n", data_size);
1909 	D_INFO("f/w package hdr init inst size = %u\n", init_size);
1910 	D_INFO("f/w package hdr init data size = %u\n", init_data_size);
1911 	D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
1912 
1913 	/* Verify size of file vs. image size info in file's header */
1914 	if (ucode_raw->size !=
1915 	    il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
1916 	    init_size + init_data_size + boot_size) {
1917 
1918 		D_INFO("uCode file size %zd does not match expected size\n",
1919 		       ucode_raw->size);
1920 		ret = -EINVAL;
1921 		goto err_release;
1922 	}
1923 
1924 	/* Verify that uCode images will fit in card's SRAM */
1925 	if (inst_size > IL39_MAX_INST_SIZE) {
1926 		D_INFO("uCode instr len %u too large to fit in SRAM\n", inst_size);
1927 		ret = -EINVAL;
1928 		goto err_release;
1929 	}
1930 
1931 	if (data_size > IL39_MAX_DATA_SIZE) {
1932 		D_INFO("uCode data len %u too large to fit in SRAM\n", data_size);
1933 		ret = -EINVAL;
1934 		goto err_release;
1935 	}
1936 	if (init_size > IL39_MAX_INST_SIZE) {
1937 		D_INFO("uCode init instr len %u too large to fit in SRAM\n",
1938 		       init_size);
1939 		ret = -EINVAL;
1940 		goto err_release;
1941 	}
1942 	if (init_data_size > IL39_MAX_DATA_SIZE) {
1943 		D_INFO("uCode init data len %u too large to fit in SRAM\n",
1944 		       init_data_size);
1945 		ret = -EINVAL;
1946 		goto err_release;
1947 	}
1948 	if (boot_size > IL39_MAX_BSM_SIZE) {
1949 		D_INFO("uCode boot instr len %u too large to fit in BSM\n",
1950 		       boot_size);
1951 		ret = -EINVAL;
1952 		goto err_release;
1953 	}
1954 
1955 	/* Allocate ucode buffers for card's bus-master loading ... */
1956 
1957 	/* Runtime instructions and 2 copies of data:
1958 	 * 1) unmodified from disk
1959 	 * 2) backup cache for save/restore during power-downs */
1960 	il->ucode_code.len = inst_size;
1961 	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
1962 
1963 	il->ucode_data.len = data_size;
1964 	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
1965 
1966 	il->ucode_data_backup.len = data_size;
1967 	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
1968 
1969 	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
1970 	    !il->ucode_data_backup.v_addr)
1971 		goto err_pci_alloc;
1972 
1973 	/* Initialization instructions and data */
1974 	if (init_size && init_data_size) {
1975 		il->ucode_init.len = init_size;
1976 		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
1977 
1978 		il->ucode_init_data.len = init_data_size;
1979 		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
1980 
1981 		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
1982 			goto err_pci_alloc;
1983 	}
1984 
1985 	/* Bootstrap (instructions only, no data) */
1986 	if (boot_size) {
1987 		il->ucode_boot.len = boot_size;
1988 		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
1989 
1990 		if (!il->ucode_boot.v_addr)
1991 			goto err_pci_alloc;
1992 	}
1993 
1994 	/* Copy images into buffers for card's bus-master reads ... */
1995 
1996 	/* Runtime instructions (first block of data in file) */
1997 	len = inst_size;
1998 	D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
1999 	memcpy(il->ucode_code.v_addr, src, len);
2000 	src += len;
2001 
2002 	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2003 	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
2004 
2005 	/* Runtime data (2nd block).
2006 	 * NOTE: also copied to the backup buffer, and again in __il3945_up() */
2007 	len = data_size;
2008 	D_INFO("Copying (but not loading) uCode data len %zd\n", len);
2009 	memcpy(il->ucode_data.v_addr, src, len);
2010 	memcpy(il->ucode_data_backup.v_addr, src, len);
2011 	src += len;
2012 
2013 	/* Initialization instructions (3rd block) */
2014 	if (init_size) {
2015 		len = init_size;
2016 		D_INFO("Copying (but not loading) init instr len %zd\n", len);
2017 		memcpy(il->ucode_init.v_addr, src, len);
2018 		src += len;
2019 	}
2020 
2021 	/* Initialization data (4th block) */
2022 	if (init_data_size) {
2023 		len = init_data_size;
2024 		D_INFO("Copying (but not loading) init data len %zd\n", len);
2025 		memcpy(il->ucode_init_data.v_addr, src, len);
2026 		src += len;
2027 	}
2028 
2029 	/* Bootstrap instructions (5th block) */
2030 	len = boot_size;
2031 	D_INFO("Copying (but not loading) boot instr len %zd\n", len);
2032 	memcpy(il->ucode_boot.v_addr, src, len);
2033 
2034 	/* We have our copies now; let the OS release its copy */
2035 	release_firmware(ucode_raw);
2036 	return 0;
2037 
2038 err_pci_alloc:
2039 	IL_ERR("failed to allocate pci memory\n");
2040 	ret = -ENOMEM;
2041 	il3945_dealloc_ucode_pci(il);
2042 
2043 err_release:
2044 	release_firmware(ucode_raw);
2045 
2046 error:
2047 	return ret;
2048 }
2049 
2050 /*
2051  * il3945_set_ucode_ptrs - Set uCode address location
2052  *
2053  * Tell initialization uCode where to find runtime uCode.
2054  *
2055  * BSM registers initially contain pointers to initialization uCode.
2056  * We need to replace them to load runtime uCode inst and data,
2057  * and to save runtime data when powering down.
2058  */
2059 static int
2060 il3945_set_ucode_ptrs(struct il_priv *il)
2061 {
2062 	dma_addr_t pinst;
2063 	dma_addr_t pdata;
2064 
2065 	/* bits 31:0 for 3945 */
2066 	pinst = il->ucode_code.p_addr;
2067 	pdata = il->ucode_data_backup.p_addr;
2068 
2069 	/* Tell bootstrap uCode where to find image to load */
2070 	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
2071 	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
2072 	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
2073 
2074 	/* Inst byte count must be set up last; bit 31 signals uCode
2075 	 *   that all new ptr/size info is in place */
2076 	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
2077 		   il->ucode_code.len | BSM_DRAM_INST_LOAD);
2078 
2079 	D_INFO("Runtime uCode pointers are set.\n");
2080 
2081 	return 0;
2082 }
2083 
2084 /*
2085  * il3945_init_alive_start - Called after N_ALIVE notification received
2086  *
2087  * Called after N_ALIVE notification received from "initialize" uCode.
2088  *
2089  * Tell "initialize" uCode to go ahead and load the runtime uCode.
2090  */
2091 static void
2092 il3945_init_alive_start(struct il_priv *il)
2093 {
2094 	/* Check alive response for "valid" sign from uCode */
2095 	if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
2096 		/* We had an error bringing up the hardware, so take it
2097 		 * all the way back down so we can try again */
2098 		D_INFO("Initialize Alive failed.\n");
2099 		goto restart;
2100 	}
2101 
2102 	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
2103 	 * This is a paranoid check, because we would not have gotten the
2104 	 * "initialize" alive if code weren't properly loaded.  */
2105 	if (il3945_verify_ucode(il)) {
2106 		/* Runtime instruction load was bad;
2107 		 * take it all the way back down so we can try again */
2108 		D_INFO("Bad \"initialize\" uCode load.\n");
2109 		goto restart;
2110 	}
2111 
2112 	/* Send pointers to protocol/runtime uCode image ... init code will
2113 	 * load and launch runtime uCode, which will send us another "Alive"
2114 	 * notification. */
2115 	D_INFO("Initialization Alive received.\n");
2116 	if (il3945_set_ucode_ptrs(il)) {
2117 		/* Runtime instruction load won't happen;
2118 		 * take it all the way back down so we can try again */
2119 		D_INFO("Couldn't set up uCode pointers.\n");
2120 		goto restart;
2121 	}
2122 	return;
2123 
2124 restart:
2125 	queue_work(il->workqueue, &il->restart);
2126 }
2127 
2128 /*
2129  * il3945_alive_start - called after N_ALIVE notification received
2130  *                   from protocol/runtime uCode (initialization uCode's
2131  *                   Alive gets handled by il3945_init_alive_start()).
2132  */
2133 static void
2134 il3945_alive_start(struct il_priv *il)
2135 {
2136 	int thermal_spin = 0;
2137 	u32 rfkill;
2138 
2139 	D_INFO("Runtime Alive received.\n");
2140 
2141 	if (il->card_alive.is_valid != UCODE_VALID_OK) {
2142 		/* We had an error bringing up the hardware, so take it
2143 		 * all the way back down so we can try again */
2144 		D_INFO("Alive failed.\n");
2145 		goto restart;
2146 	}
2147 
2148 	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
2149 	 * This is a paranoid check, because we would not have gotten the
2150 	 * "runtime" alive if code weren't properly loaded.  */
2151 	if (il3945_verify_ucode(il)) {
2152 		/* Runtime instruction load was bad;
2153 		 * take it all the way back down so we can try again */
2154 		D_INFO("Bad runtime uCode load.\n");
2155 		goto restart;
2156 	}
2157 
2158 	rfkill = il_rd_prph(il, APMG_RFKILL_REG);
2159 	D_INFO("RFKILL status: 0x%x\n", rfkill);
2160 
2161 	if (rfkill & 0x1) {
2162 		clear_bit(S_RFKILL, &il->status);
2163 		/* if RFKILL is not on, then wait for thermal
2164 		 * sensor in adapter to kick in */
2165 		while (il3945_hw_get_temperature(il) == 0) {
2166 			thermal_spin++;
2167 			udelay(10);
2168 		}
2169 
2170 		if (thermal_spin)
2171 			D_INFO("Thermal calibration took %dus\n",
2172 			       thermal_spin * 10);
2173 	} else
2174 		set_bit(S_RFKILL, &il->status);
2175 
2176 	/* After the ALIVE response, we can send commands to 3945 uCode */
2177 	set_bit(S_ALIVE, &il->status);
2178 
2179 	/* Enable watchdog to monitor the driver tx queues */
2180 	il_setup_watchdog(il);
2181 
2182 	if (il_is_rfkill(il))
2183 		return;
2184 
2185 	ieee80211_wake_queues(il->hw);
2186 
2187 	il->active_rate = RATES_MASK_3945;
2188 
2189 	il_power_update_mode(il, true);
2190 
2191 	if (il_is_associated(il)) {
2192 		struct il3945_rxon_cmd *active_rxon =
2193 		    (struct il3945_rxon_cmd *)(&il->active);
2194 
2195 		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2196 		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2197 	} else {
2198 		/* Initialize our rx_config data */
2199 		il_connection_init_rx_config(il);
2200 	}
2201 
2202 	/* Configure Bluetooth device coexistence support */
2203 	il_send_bt_config(il);
2204 
2205 	set_bit(S_READY, &il->status);
2206 
2207 	/* Configure the adapter for unassociated operation */
2208 	il3945_commit_rxon(il);
2209 
2210 	il3945_reg_txpower_periodic(il);
2211 
2212 	D_INFO("ALIVE processing complete.\n");
2213 	wake_up(&il->wait_command_queue);
2214 
2215 	return;
2216 
2217 restart:
2218 	queue_work(il->workqueue, &il->restart);
2219 }
2220 
2221 static void il3945_cancel_deferred_work(struct il_priv *il);
2222 
2223 static void
2224 __il3945_down(struct il_priv *il)
2225 {
2226 	unsigned long flags;
2227 	int exit_pending;
2228 
2229 	D_INFO(DRV_NAME " is going down\n");
2230 
2231 	il_scan_cancel_timeout(il, 200);
2232 
2233 	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
2234 
2235 	/* Stop TX queue watchdog. The S_EXIT_PENDING bit must be set
2236 	 * to keep the timer from being rearmed */
2237 	del_timer_sync(&il->watchdog);
2238 
2239 	/* Station information will now be cleared in device */
2240 	il_clear_ucode_stations(il);
2241 	il_dealloc_bcast_stations(il);
2242 	il_clear_driver_stations(il);
2243 
2244 	/* Unblock any waiting calls */
2245 	wake_up_all(&il->wait_command_queue);
2246 
2247 	/* Wipe out the EXIT_PENDING status bit if we are not actually
2248 	 * exiting the module */
2249 	if (!exit_pending)
2250 		clear_bit(S_EXIT_PENDING, &il->status);
2251 
2252 	/* stop and reset the on-board processor */
2253 	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2254 
2255 	/* tell the device to stop sending interrupts */
2256 	spin_lock_irqsave(&il->lock, flags);
2257 	il_disable_interrupts(il);
2258 	spin_unlock_irqrestore(&il->lock, flags);
2259 	il3945_synchronize_irq(il);
2260 
2261 	if (il->mac80211_registered)
2262 		ieee80211_stop_queues(il->hw);
2263 
2264 	/* If we have not previously called il3945_init() then
2265 	 * clear all bits but the RF Kill bits and return */
2266 	if (!il_is_init(il)) {
2267 		il->status =
2268 		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
2269 		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
2270 		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
2271 		goto exit;
2272 	}
2273 
2274 	/* ...otherwise clear out all the status bits but the RF Kill
2275 	 * bit and continue taking the NIC down. */
2276 	il->status &=
2277 	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
2278 	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
2279 	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
2280 	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
2281 
2282 	/*
2283 	 * Interrupts are disabled and synchronized, and il->mutex is held, so
2284 	 * this is the only thread that will program device registers; the
2285 	 * register helpers still have lockdep assertions, so take reg_lock.
2286 	 */
2287 	spin_lock_irq(&il->reg_lock);
2288 	/* FIXME: il_grab_nic_access if rfkill is off ? */
2289 
2290 	il3945_hw_txq_ctx_stop(il);
2291 	il3945_hw_rxq_stop(il);
2292 	/* Power-down device's busmaster DMA clocks */
2293 	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2294 	udelay(5);
2295 	/* Stop the device, and put it in low power state */
2296 	_il_apm_stop(il);
2297 
2298 	spin_unlock_irq(&il->reg_lock);
2299 
2300 	il3945_hw_txq_ctx_free(il);
2301 exit:
2302 	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
2303 	dev_kfree_skb(il->beacon_skb);
2304 	il->beacon_skb = NULL;
2305 
2306 	/* clear out any free frames */
2307 	il3945_clear_free_frames(il);
2308 }
2309 
2310 static void
2311 il3945_down(struct il_priv *il)
2312 {
2313 	mutex_lock(&il->mutex);
2314 	__il3945_down(il);
2315 	mutex_unlock(&il->mutex);
2316 
2317 	il3945_cancel_deferred_work(il);
2318 }
2319 
2320 #define MAX_HW_RESTARTS 5
2321 
2322 static int
2323 il3945_alloc_bcast_station(struct il_priv *il)
2324 {
2325 	unsigned long flags;
2326 	u8 sta_id;
2327 
2328 	spin_lock_irqsave(&il->sta_lock, flags);
2329 	sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
2330 	if (sta_id == IL_INVALID_STATION) {
2331 		IL_ERR("Unable to prepare broadcast station\n");
2332 		spin_unlock_irqrestore(&il->sta_lock, flags);
2333 
2334 		return -EINVAL;
2335 	}
2336 
2337 	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
2338 	il->stations[sta_id].used |= IL_STA_BCAST;
2339 	spin_unlock_irqrestore(&il->sta_lock, flags);
2340 
2341 	return 0;
2342 }
2343 
2344 static int
2345 __il3945_up(struct il_priv *il)
2346 {
2347 	int rc, i;
2348 
2349 	rc = il3945_alloc_bcast_station(il);
2350 	if (rc)
2351 		return rc;
2352 
2353 	if (test_bit(S_EXIT_PENDING, &il->status)) {
2354 		IL_WARN("Exit pending; will not bring the NIC up\n");
2355 		return -EIO;
2356 	}
2357 
2358 	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
2359 		IL_ERR("ucode not available for device bring up\n");
2360 		return -EIO;
2361 	}
2362 
2363 	/* If platform's RF_KILL switch is NOT set to KILL */
2364 	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2365 		clear_bit(S_RFKILL, &il->status);
2366 	else {
2367 		set_bit(S_RFKILL, &il->status);
2368 		return -ERFKILL;
2369 	}
2370 
2371 	_il_wr(il, CSR_INT, 0xFFFFFFFF);
2372 
2373 	rc = il3945_hw_nic_init(il);
2374 	if (rc) {
2375 		IL_ERR("Unable to init NIC: %d\n", rc);
2376 		return rc;
2377 	}
2378 
2379 	/* make sure rfkill handshake bits are cleared */
2380 	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2381 	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2382 
2383 	/* clear (again), then enable host interrupts */
2384 	_il_wr(il, CSR_INT, 0xFFFFFFFF);
2385 	il_enable_interrupts(il);
2386 
2387 	/* really make sure rfkill handshake bits are cleared */
2388 	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2389 	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2390 
2391 	/* Copy original ucode data image from disk into backup cache.
2392 	 * This will be used to initialize the on-board processor's
2393 	 * data SRAM for a clean start when the runtime program first loads. */
2394 	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
2395 	       il->ucode_data.len);
2396 
2397 	/* We return success when we resume from suspend and rf_kill is on. */
2398 	if (test_bit(S_RFKILL, &il->status))
2399 		return 0;
2400 
2401 	for (i = 0; i < MAX_HW_RESTARTS; i++) {
2402 
2403 		/* load bootstrap state machine,
2404 		 * load bootstrap program into processor's memory,
2405 		 * prepare to load the "initialize" uCode */
2406 		rc = il->ops->load_ucode(il);
2407 
2408 		if (rc) {
2409 			IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
2410 			continue;
2411 		}
2412 
2413 		/* start card; "initialize" will load runtime ucode */
2414 		il3945_nic_start(il);
2415 
2416 		D_INFO(DRV_NAME " is coming up\n");
2417 
2418 		return 0;
2419 	}
2420 
2421 	set_bit(S_EXIT_PENDING, &il->status);
2422 	__il3945_down(il);
2423 	clear_bit(S_EXIT_PENDING, &il->status);
2424 
2425 	/* tried to restart and config the device for as long as our
2426 	 * patience could withstand */
2427 	IL_ERR("Unable to initialize device after %d attempts.\n", i);
2428 	return -EIO;
2429 }
2430 
2431 /*****************************************************************************
2432  *
2433  * Workqueue callbacks
2434  *
2435  *****************************************************************************/
2436 
2437 static void
2438 il3945_bg_init_alive_start(struct work_struct *data)
2439 {
2440 	struct il_priv *il =
2441 	    container_of(data, struct il_priv, init_alive_start.work);
2442 
2443 	mutex_lock(&il->mutex);
2444 	if (test_bit(S_EXIT_PENDING, &il->status))
2445 		goto out;
2446 
2447 	il3945_init_alive_start(il);
2448 out:
2449 	mutex_unlock(&il->mutex);
2450 }
2451 
2452 static void
2453 il3945_bg_alive_start(struct work_struct *data)
2454 {
2455 	struct il_priv *il =
2456 	    container_of(data, struct il_priv, alive_start.work);
2457 
2458 	mutex_lock(&il->mutex);
2459 	if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL)
2460 		goto out;
2461 
2462 	il3945_alive_start(il);
2463 out:
2464 	mutex_unlock(&il->mutex);
2465 }
2466 
2467 /*
2468  * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2469  * driver must poll CSR_GP_CNTRL_REG register for change.  This register
2470  * *is* readable even when device has been SW_RESET into low power mode
2471  * (e.g. during RF KILL).
2472  */
2473 static void
2474 il3945_rfkill_poll(struct work_struct *data)
2475 {
2476 	struct il_priv *il =
2477 	    container_of(data, struct il_priv, _3945.rfkill_poll.work);
2478 	bool old_rfkill = test_bit(S_RFKILL, &il->status);
2479 	bool new_rfkill =
2480 	    !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2481 
2482 	if (new_rfkill != old_rfkill) {
2483 		if (new_rfkill)
2484 			set_bit(S_RFKILL, &il->status);
2485 		else
2486 			clear_bit(S_RFKILL, &il->status);
2487 
2488 		wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
2489 
2490 		D_RF_KILL("RF_KILL bit toggled to %s.\n",
2491 			  new_rfkill ? "disable radio" : "enable radio");
2492 	}
2493 
2494 	/* Keep polling even if the radio is now enabled; this work is
2495 	 * cancelled in mac_start() when the interface is brought up again */
2496 	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
2497 			   round_jiffies_relative(2 * HZ));
2498 
2499 }
2500 
2501 int
2502 il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
2503 {
2504 	struct il_host_cmd cmd = {
2505 		.id = C_SCAN,
2506 		.len = sizeof(struct il3945_scan_cmd),
2507 		.flags = CMD_SIZE_HUGE,
2508 	};
2509 	struct il3945_scan_cmd *scan;
2510 	u8 n_probes = 0;
2511 	enum nl80211_band band;
2512 	bool is_active = false;
2513 	int ret;
2514 	u16 len;
2515 
2516 	lockdep_assert_held(&il->mutex);
2517 
2518 	if (!il->scan_cmd) {
2519 		il->scan_cmd =
2520 		    kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
2521 			    GFP_KERNEL);
2522 		if (!il->scan_cmd) {
2523 			D_SCAN("Failed to allocate scan memory\n");
2524 			return -ENOMEM;
2525 		}
2526 	}
2527 	scan = il->scan_cmd;
2528 	memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);
2529 
2530 	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
2531 	scan->quiet_time = IL_ACTIVE_QUIET_TIME;
2532 
2533 	if (il_is_associated(il)) {
2534 		u16 interval;
2535 		u32 extra;
2536 		u32 suspend_time = 100;
2537 		u32 scan_suspend_time = 100;
2538 
2539 		D_INFO("Scanning while associated...\n");
2540 
2541 		interval = vif->bss_conf.beacon_int;
2542 
2543 		scan->suspend_time = 0;
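		/* max_out_time is in usec: allow at most 200 * 1024 usec
		 * (~200 TU) away from the associated channel */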
2544 		scan->max_out_time = cpu_to_le32(200 * 1024);
2545 		if (!interval)
2546 			interval = suspend_time;
2547 		/*
2548 		 * suspend time format:
2549 		 *  0-19: beacon interval in usec (time before exec.)
2550 		 * 20-23: 0
2551 		 * 24-31: number of beacons (suspend between channels)
2552 		 */
2553 
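		/* Example: with interval = 100 and suspend_time = 100, extra is
		 * 1 << 24 and the remainder term is 0, so scan_suspend_time
		 * becomes 0x01000000, i.e. suspend for one beacon. */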
2554 		extra = (suspend_time / interval) << 24;
2555 		scan_suspend_time =
2556 		    0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));
2557 
2558 		scan->suspend_time = cpu_to_le32(scan_suspend_time);
2559 		D_SCAN("suspend_time 0x%X beacon interval %d\n",
2560 		       scan_suspend_time, interval);
2561 	}
2562 
2563 	if (il->scan_request->n_ssids) {
2564 		int i, p = 0;
2565 		D_SCAN("Kicking off active scan\n");
2566 		for (i = 0; i < il->scan_request->n_ssids; i++) {
2567 			/* always does wildcard anyway */
2568 			if (!il->scan_request->ssids[i].ssid_len)
2569 				continue;
2570 			scan->direct_scan[p].id = WLAN_EID_SSID;
2571 			scan->direct_scan[p].len =
2572 			    il->scan_request->ssids[i].ssid_len;
2573 			memcpy(scan->direct_scan[p].ssid,
2574 			       il->scan_request->ssids[i].ssid,
2575 			       il->scan_request->ssids[i].ssid_len);
2576 			n_probes++;
2577 			p++;
2578 		}
2579 		is_active = true;
2580 	} else
2581 		D_SCAN("Kicking off passive scan.\n");
2582 
2583 	/* We don't build a direct scan probe request; the uCode will do
2584 	 * that based on the direct_mask added to each channel entry */
2585 	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
2586 	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
2587 	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2588 
2589 	/* flags + rate selection */
2590 
2591 	switch (il->scan_band) {
2592 	case NL80211_BAND_2GHZ:
2593 		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
2594 		scan->tx_cmd.rate = RATE_1M_PLCP;
2595 		band = NL80211_BAND_2GHZ;
2596 		break;
2597 	case NL80211_BAND_5GHZ:
2598 		scan->tx_cmd.rate = RATE_6M_PLCP;
2599 		band = NL80211_BAND_5GHZ;
2600 		break;
2601 	default:
2602 		IL_WARN("Invalid scan band\n");
2603 		return -EIO;
2604 	}
2605 
2606 	/*
2607 	 * If active scanning is requested but a certain channel is marked
2608 	 * passive, we can do active scanning on it once we detect
2609 	 * transmissions. For passive-only scans, never switch to active.
2610 	 */
2611 	scan->good_CRC_th =
2612 	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
2613 
2614 	len =
2615 	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
2616 			      vif->addr, il->scan_request->ie,
2617 			      il->scan_request->ie_len,
2618 			      IL_MAX_SCAN_SIZE - sizeof(*scan));
2619 	scan->tx_cmd.len = cpu_to_le16(len);
2620 
2621 	/* select Rx antennas */
2622 	scan->flags |= il3945_get_antenna_flags(il);
2623 
2624 	scan->channel_count =
2625 	    il3945_get_channels_for_scan(il, band, is_active, n_probes,
2626 					 (void *)&scan->data[len], vif);
2627 	if (scan->channel_count == 0) {
2628 		D_SCAN("channel count %d\n", scan->channel_count);
2629 		return -EIO;
2630 	}
2631 
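	/* Total C_SCAN payload: fixed il3945_scan_cmd header, the probe
	 * request template, and one il3945_scan_channel entry per channel. */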
2632 	cmd.len +=
2633 	    le16_to_cpu(scan->tx_cmd.len) +
2634 	    scan->channel_count * sizeof(struct il3945_scan_channel);
2635 	cmd.data = scan;
2636 	scan->len = cpu_to_le16(cmd.len);
2637 
2638 	set_bit(S_SCAN_HW, &il->status);
2639 	ret = il_send_cmd_sync(il, &cmd);
2640 	if (ret)
2641 		clear_bit(S_SCAN_HW, &il->status);
2642 	return ret;
2643 }
2644 
2645 void
2646 il3945_post_scan(struct il_priv *il)
2647 {
2648 	/*
2649 	 * Since setting the RXON may have been deferred while
2650 	 * performing the scan, fire one off if needed
2651 	 */
2652 	if (memcmp(&il->staging, &il->active, sizeof(il->staging)))
2653 		il3945_commit_rxon(il);
2654 }
2655 
2656 static void
2657 il3945_bg_restart(struct work_struct *data)
2658 {
2659 	struct il_priv *il = container_of(data, struct il_priv, restart);
2660 
2661 	if (test_bit(S_EXIT_PENDING, &il->status))
2662 		return;
2663 
2664 	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
2665 		mutex_lock(&il->mutex);
2666 		il->is_open = 0;
2667 		mutex_unlock(&il->mutex);
2668 		il3945_down(il);
2669 		ieee80211_restart_hw(il->hw);
2670 	} else {
2671 		il3945_down(il);
2672 
2673 		mutex_lock(&il->mutex);
2674 		if (test_bit(S_EXIT_PENDING, &il->status)) {
2675 			mutex_unlock(&il->mutex);
2676 			return;
2677 		}
2678 
2679 		__il3945_up(il);
2680 		mutex_unlock(&il->mutex);
2681 	}
2682 }
2683 
2684 static void
2685 il3945_bg_rx_replenish(struct work_struct *data)
2686 {
2687 	struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
2688 
2689 	mutex_lock(&il->mutex);
2690 	if (test_bit(S_EXIT_PENDING, &il->status))
2691 		goto out;
2692 
2693 	il3945_rx_replenish(il);
2694 out:
2695 	mutex_unlock(&il->mutex);
2696 }
2697 
2698 void
2699 il3945_post_associate(struct il_priv *il)
2700 {
2701 	int rc = 0;
2702 
2703 	if (!il->vif || !il->is_open)
2704 		return;
2705 
2706 	D_ASSOC("Associated as %d to: %pM\n", il->vif->bss_conf.aid,
2707 		il->active.bssid_addr);
2708 
2709 	if (test_bit(S_EXIT_PENDING, &il->status))
2710 		return;
2711 
2712 	il_scan_cancel_timeout(il, 200);
2713 
2714 	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2715 	il3945_commit_rxon(il);
2716 
2717 	rc = il_send_rxon_timing(il);
2718 	if (rc)
2719 		IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");
2720 
2721 	il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2722 
2723 	il->staging.assoc_id = cpu_to_le16(il->vif->bss_conf.aid);
2724 
2725 	D_ASSOC("assoc id %d beacon interval %d\n", il->vif->bss_conf.aid,
2726 		il->vif->bss_conf.beacon_int);
2727 
2728 	if (il->vif->bss_conf.use_short_preamble)
2729 		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2730 	else
2731 		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2732 
2733 	if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
2734 		if (il->vif->bss_conf.use_short_slot)
2735 			il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
2736 		else
2737 			il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2738 	}
2739 
2740 	il3945_commit_rxon(il);
2741 
2742 	switch (il->vif->type) {
2743 	case NL80211_IFTYPE_STATION:
2744 		il3945_rate_scale_init(il->hw, IL_AP_ID);
2745 		break;
2746 	case NL80211_IFTYPE_ADHOC:
2747 		il3945_send_beacon_cmd(il);
2748 		break;
2749 	default:
2750 		IL_ERR("%s Should not be called in %d mode\n", __func__,
2751 		      il->vif->type);
2752 		break;
2753 	}
2754 }
2755 
2756 /*****************************************************************************
2757  *
2758  * mac80211 entry point functions
2759  *
2760  *****************************************************************************/
2761 
2762 #define UCODE_READY_TIMEOUT	(2 * HZ)
2763 
2764 static int
2765 il3945_mac_start(struct ieee80211_hw *hw)
2766 {
2767 	struct il_priv *il = hw->priv;
2768 	int ret;
2769 
2770 	/* we should be verifying the device is ready to be opened */
2771 	mutex_lock(&il->mutex);
2772 	D_MAC80211("enter\n");
2773 
2774 	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
2775 	 * ucode filename and max sizes are card-specific. */
2776 
2777 	if (!il->ucode_code.len) {
2778 		ret = il3945_read_ucode(il);
2779 		if (ret) {
2780 			IL_ERR("Could not read microcode: %d\n", ret);
2781 			mutex_unlock(&il->mutex);
2782 			goto out_release_irq;
2783 		}
2784 	}
2785 
2786 	ret = __il3945_up(il);
2787 
2788 	mutex_unlock(&il->mutex);
2789 
2790 	if (ret)
2791 		goto out_release_irq;
2792 
2793 	D_INFO("Start UP work.\n");
2794 
2795 	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
2796 	 * mac80211 will not be run successfully. */
2797 	ret = wait_event_timeout(il->wait_command_queue,
2798 				 test_bit(S_READY, &il->status),
2799 				 UCODE_READY_TIMEOUT);
2800 	if (!ret) {
2801 		if (!test_bit(S_READY, &il->status)) {
2802 			IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
2803 			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
2804 			ret = -ETIMEDOUT;
2805 			goto out_release_irq;
2806 		}
2807 	}
2808 
2809 	/* ucode is running and will send rfkill notifications,
2810 	 * no need to poll the killswitch state anymore */
2811 	cancel_delayed_work(&il->_3945.rfkill_poll);
2812 
2813 	il->is_open = 1;
2814 	D_MAC80211("leave\n");
2815 	return 0;
2816 
2817 out_release_irq:
2818 	il->is_open = 0;
2819 	D_MAC80211("leave - failed\n");
2820 	return ret;
2821 }
2822 
2823 static void
2824 il3945_mac_stop(struct ieee80211_hw *hw)
2825 {
2826 	struct il_priv *il = hw->priv;
2827 
2828 	D_MAC80211("enter\n");
2829 
2830 	if (!il->is_open) {
2831 		D_MAC80211("leave - skip\n");
2832 		return;
2833 	}
2834 
2835 	il->is_open = 0;
2836 
2837 	il3945_down(il);
2838 
2839 	flush_workqueue(il->workqueue);
2840 
2841 	/* start polling the killswitch state again */
2842 	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
2843 			   round_jiffies_relative(2 * HZ));
2844 
2845 	D_MAC80211("leave\n");
2846 }
2847 
2848 static void
2849 il3945_mac_tx(struct ieee80211_hw *hw,
2850 	       struct ieee80211_tx_control *control,
2851 	       struct sk_buff *skb)
2852 {
2853 	struct il_priv *il = hw->priv;
2854 
2855 	D_MAC80211("enter\n");
2856 
2857 	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2858 	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2859 
2860 	if (il3945_tx_skb(il, control->sta, skb))
2861 		dev_kfree_skb_any(skb);
2862 
2863 	D_MAC80211("leave\n");
2864 }
2865 
2866 void
2867 il3945_config_ap(struct il_priv *il)
2868 {
2869 	struct ieee80211_vif *vif = il->vif;
2870 	int rc = 0;
2871 
2872 	if (test_bit(S_EXIT_PENDING, &il->status))
2873 		return;
2874 
2875 	/* The following should be done only at AP bring up */
2876 	if (!(il_is_associated(il))) {
2877 
2878 		/* RXON - unassoc (to set timing command) */
2879 		il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2880 		il3945_commit_rxon(il);
2881 
2882 		/* RXON Timing */
2883 		rc = il_send_rxon_timing(il);
2884 		if (rc)
2885 			IL_WARN("C_RXON_TIMING failed - "
2886 				"Attempting to continue.\n");
2887 
2888 		il->staging.assoc_id = 0;
2889 
2890 		if (vif->bss_conf.use_short_preamble)
2891 			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2892 		else
2893 			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2894 
2895 		if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
2896 			if (vif->bss_conf.use_short_slot)
2897 				il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
2898 			else
2899 				il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2900 		}
2901 		/* restore RXON assoc */
2902 		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2903 		il3945_commit_rxon(il);
2904 	}
2905 	il3945_send_beacon_cmd(il);
2906 }
2907 
2908 static int
2909 il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2910 		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2911 		   struct ieee80211_key_conf *key)
2912 {
2913 	struct il_priv *il = hw->priv;
2914 	int ret = 0;
2915 	u8 sta_id = IL_INVALID_STATION;
2916 	u8 static_key;
2917 
2918 	D_MAC80211("enter\n");
2919 
2920 	if (il3945_mod_params.sw_crypto) {
2921 		D_MAC80211("leave - hwcrypto disabled\n");
2922 		return -EOPNOTSUPP;
2923 	}
2924 
2925 	/*
2926 	 * To support IBSS RSN, don't program group keys in IBSS, the
2927 	 * hardware will then not attempt to decrypt the frames.
2928 	 */
2929 	if (vif->type == NL80211_IFTYPE_ADHOC &&
2930 	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
2931 		D_MAC80211("leave - IBSS RSN\n");
2932 		return -EOPNOTSUPP;
2933 	}
2934 
2935 	static_key = !il_is_associated(il);
2936 
2937 	if (!static_key) {
2938 		sta_id = il_sta_id_or_broadcast(il, sta);
2939 		if (sta_id == IL_INVALID_STATION) {
2940 			D_MAC80211("leave - station not found\n");
2941 			return -EINVAL;
2942 		}
2943 	}
2944 
2945 	mutex_lock(&il->mutex);
2946 	il_scan_cancel_timeout(il, 100);
2947 
2948 	switch (cmd) {
2949 	case SET_KEY:
2950 		if (static_key)
2951 			ret = il3945_set_static_key(il, key);
2952 		else
2953 			ret = il3945_set_dynamic_key(il, key, sta_id);
2954 		D_MAC80211("enable hwcrypto key\n");
2955 		break;
2956 	case DISABLE_KEY:
2957 		if (static_key)
2958 			ret = il3945_remove_static_key(il);
2959 		else
2960 			ret = il3945_clear_sta_key_info(il, sta_id);
2961 		D_MAC80211("disable hwcrypto key\n");
2962 		break;
2963 	default:
2964 		ret = -EINVAL;
2965 	}
2966 
2967 	D_MAC80211("leave ret %d\n", ret);
2968 	mutex_unlock(&il->mutex);
2969 
2970 	return ret;
2971 }
2972 
2973 static int
2974 il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2975 		   struct ieee80211_sta *sta)
2976 {
2977 	struct il_priv *il = hw->priv;
2978 	struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
2979 	int ret;
2980 	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
2981 	u8 sta_id;
2982 
2983 	mutex_lock(&il->mutex);
2984 	D_INFO("station %pM\n", sta->addr);
2985 	sta_priv->common.sta_id = IL_INVALID_STATION;
2986 
2987 	ret = il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
2988 	if (ret) {
2989 		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
2990 		/* Should we return success if return code is EEXIST? */
2991 		mutex_unlock(&il->mutex);
2992 		return ret;
2993 	}
2994 
2995 	sta_priv->common.sta_id = sta_id;
2996 
2997 	/* Initialize rate scaling */
2998 	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
2999 	il3945_rs_rate_init(il, sta, sta_id);
3000 	mutex_unlock(&il->mutex);
3001 
3002 	return 0;
3003 }
3004 
3005 static void
3006 il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
3007 			unsigned int *total_flags, u64 multicast)
3008 {
3009 	struct il_priv *il = hw->priv;
3010 	__le32 filter_or = 0, filter_nand = 0;
3011 
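/* For each mac80211 FIF_* flag, set the corresponding RXON filter bit if
 * the flag is requested, otherwise mark that bit for clearing. */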
3012 #define CHK(test, flag)	do { \
3013 	if (*total_flags & (test))		\
3014 		filter_or |= (flag);		\
3015 	else					\
3016 		filter_nand |= (flag);		\
3017 	} while (0)
3018 
3019 	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
3020 		   *total_flags);
3021 
3022 	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
3023 	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
3024 	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3025 
3026 #undef CHK
3027 
3028 	mutex_lock(&il->mutex);
3029 
3030 	il->staging.filter_flags &= ~filter_nand;
3031 	il->staging.filter_flags |= filter_or;
3032 
3033 	/*
3034 	 * Don't commit directly: the hardware may be scanning, and even
3035 	 * when it is ready, committing here is known to break; the filter
3036 	 * flags change will be committed eventually anyway.
3037 	 */
3038 
3039 	mutex_unlock(&il->mutex);
3040 
3041 	/*
3042 	 * Receiving all multicast frames is always enabled by the
3043 	 * default flags setup in il_connection_init_rx_config()
3044 	 * since we currently do not support programming multicast
3045 	 * filters into the device.
3046 	 */
3047 	*total_flags &=
3048 	    FIF_OTHER_BSS | FIF_ALLMULTI |
3049 	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3050 }
3051 
3052 /*****************************************************************************
3053  *
3054  * sysfs attributes
3055  *
3056  *****************************************************************************/
3057 
3058 #ifdef CONFIG_IWLEGACY_DEBUG
3059 
3060 /*
3061  * The following adds a new attribute to the sysfs representation
3062  * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
3063  * used for controlling the debug level.
3064  *
3065  * See the level definitions in iwl for details.
3066  *
3067  * The debug_level managed through sysfs below is a per-device debug
3068  * level that overrides the global debug level whenever the per-device
3069  * level is set.
3070  */
3071 static ssize_t
3072 il3945_show_debug_level(struct device *d, struct device_attribute *attr,
3073 			char *buf)
3074 {
3075 	struct il_priv *il = dev_get_drvdata(d);
3076 	return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
3077 }
3078 
3079 static ssize_t
3080 il3945_store_debug_level(struct device *d, struct device_attribute *attr,
3081 			 const char *buf, size_t count)
3082 {
3083 	struct il_priv *il = dev_get_drvdata(d);
3084 	unsigned long val;
3085 	int ret;
3086 
3087 	ret = kstrtoul(buf, 0, &val);
3088 	if (ret)
3089 		IL_INFO("%s is not in hex or decimal form.\n", buf);
3090 	else
3091 		il->debug_level = val;
3092 
3093 	return strnlen(buf, count);
3094 }
3095 
3096 static DEVICE_ATTR(debug_level, 0644, il3945_show_debug_level,
3097 		   il3945_store_debug_level);
3098 
3099 #endif /* CONFIG_IWLEGACY_DEBUG */
3100 
3101 static ssize_t
3102 il3945_show_temperature(struct device *d, struct device_attribute *attr,
3103 			char *buf)
3104 {
3105 	struct il_priv *il = dev_get_drvdata(d);
3106 
3107 	if (!il_is_alive(il))
3108 		return -EAGAIN;
3109 
3110 	return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
3111 }
3112 
3113 static DEVICE_ATTR(temperature, 0444, il3945_show_temperature, NULL);
3114 
3115 static ssize_t
3116 il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
3117 {
3118 	struct il_priv *il = dev_get_drvdata(d);
3119 	return sprintf(buf, "%d\n", il->tx_power_user_lmt);
3120 }
3121 
3122 static ssize_t
3123 il3945_store_tx_power(struct device *d, struct device_attribute *attr,
3124 		      const char *buf, size_t count)
3125 {
3126 	struct il_priv *il = dev_get_drvdata(d);
3127 	char *p = (char *)buf;
3128 	u32 val;
3129 
3130 	val = simple_strtoul(p, &p, 10);
3131 	if (p == buf)
3132 		IL_INFO("%s is not in decimal form.\n", buf);
3133 	else
3134 		il3945_hw_reg_set_txpower(il, val);
3135 
3136 	return count;
3137 }
3138 
3139 static DEVICE_ATTR(tx_power, 0644, il3945_show_tx_power, il3945_store_tx_power);
3140 
3141 static ssize_t
3142 il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
3143 {
3144 	struct il_priv *il = dev_get_drvdata(d);
3145 
3146 	return sprintf(buf, "0x%04X\n", il->active.flags);
3147 }
3148 
3149 static ssize_t
3150 il3945_store_flags(struct device *d, struct device_attribute *attr,
3151 		   const char *buf, size_t count)
3152 {
3153 	struct il_priv *il = dev_get_drvdata(d);
3154 	u32 flags = simple_strtoul(buf, NULL, 0);
3155 
3156 	mutex_lock(&il->mutex);
3157 	if (le32_to_cpu(il->staging.flags) != flags) {
3158 		/* Cancel any currently running scans... */
3159 		if (il_scan_cancel_timeout(il, 100))
3160 			IL_WARN("Could not cancel scan.\n");
3161 		else {
3162 			D_INFO("Committing rxon.flags = 0x%04X\n", flags);
3163 			il->staging.flags = cpu_to_le32(flags);
3164 			il3945_commit_rxon(il);
3165 		}
3166 	}
3167 	mutex_unlock(&il->mutex);
3168 
3169 	return count;
3170 }
3171 
3172 static DEVICE_ATTR(flags, 0644, il3945_show_flags, il3945_store_flags);
3173 
3174 static ssize_t
3175 il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
3176 			 char *buf)
3177 {
3178 	struct il_priv *il = dev_get_drvdata(d);
3179 
3180 	return sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags));
3181 }
3182 
3183 static ssize_t
3184 il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
3185 			  const char *buf, size_t count)
3186 {
3187 	struct il_priv *il = dev_get_drvdata(d);
3188 	u32 filter_flags = simple_strtoul(buf, NULL, 0);
3189 
3190 	mutex_lock(&il->mutex);
3191 	if (le32_to_cpu(il->staging.filter_flags) != filter_flags) {
3192 		/* Cancel any currently running scans... */
3193 		if (il_scan_cancel_timeout(il, 100))
3194 			IL_WARN("Could not cancel scan.\n");
3195 		else {
3196 			D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
3197 			       filter_flags);
3198 			il->staging.filter_flags = cpu_to_le32(filter_flags);
3199 			il3945_commit_rxon(il);
3200 		}
3201 	}
3202 	mutex_unlock(&il->mutex);
3203 
3204 	return count;
3205 }
3206 
3207 static DEVICE_ATTR(filter_flags, 0644, il3945_show_filter_flags,
3208 		   il3945_store_filter_flags);
3209 
3210 static ssize_t
3211 il3945_show_measurement(struct device *d, struct device_attribute *attr,
3212 			char *buf)
3213 {
3214 	struct il_priv *il = dev_get_drvdata(d);
3215 	struct il_spectrum_notification measure_report;
3216 	u32 size = sizeof(measure_report), len = 0, ofs = 0;
3217 	u8 *data = (u8 *) &measure_report;
3218 	unsigned long flags;
3219 
3220 	spin_lock_irqsave(&il->lock, flags);
3221 	if (!(il->measurement_status & MEASUREMENT_READY)) {
3222 		spin_unlock_irqrestore(&il->lock, flags);
3223 		return 0;
3224 	}
3225 	memcpy(&measure_report, &il->measure_report, size);
3226 	il->measurement_status = 0;
3227 	spin_unlock_irqrestore(&il->lock, flags);
3228 
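	/* Hex-dump the report 16 bytes per line until either the report or
	 * the sysfs page buffer is exhausted. */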
3229 	while (size && PAGE_SIZE - len) {
3230 		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3231 				   PAGE_SIZE - len, true);
3232 		len = strlen(buf);
3233 		if (PAGE_SIZE - len)
3234 			buf[len++] = '\n';
3235 
3236 		ofs += 16;
3237 		size -= min(size, 16U);
3238 	}
3239 
3240 	return len;
3241 }
3242 
3243 static ssize_t
3244 il3945_store_measurement(struct device *d, struct device_attribute *attr,
3245 			 const char *buf, size_t count)
3246 {
3247 	struct il_priv *il = dev_get_drvdata(d);
3248 	struct ieee80211_measurement_params params = {
3249 		.channel = le16_to_cpu(il->active.channel),
3250 		.start_time = cpu_to_le64(il->_3945.last_tsf),
3251 		.duration = cpu_to_le16(1),
3252 	};
3253 	u8 type = IL_MEASURE_BASIC;
3254 	u8 buffer[32];
3255 	u8 channel;
3256 
3257 	if (count) {
3258 		char *p = buffer;
3259 		strlcpy(buffer, buf, sizeof(buffer));
3260 		channel = simple_strtoul(p, NULL, 0);
3261 		if (channel)
3262 			params.channel = channel;
3263 
3264 		p = buffer;
3265 		while (*p && *p != ' ')
3266 			p++;
3267 		if (*p)
3268 			type = simple_strtoul(p + 1, NULL, 0);
3269 	}
3270 
3271 	D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n",
3272 	       type, params.channel, buf);
3273 	il3945_get_measurement(il, &params, type);
3274 
3275 	return count;
3276 }
3277 
3278 static DEVICE_ATTR(measurement, 0600, il3945_show_measurement,
3279 		   il3945_store_measurement);
3280 
3281 static ssize_t
3282 il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
3283 			const char *buf, size_t count)
3284 {
3285 	struct il_priv *il = dev_get_drvdata(d);
3286 
3287 	il->retry_rate = simple_strtoul(buf, NULL, 0);
3288 	if (il->retry_rate <= 0)
3289 		il->retry_rate = 1;
3290 
3291 	return count;
3292 }
3293 
3294 static ssize_t
3295 il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
3296 		       char *buf)
3297 {
3298 	struct il_priv *il = dev_get_drvdata(d);
3299 	return sprintf(buf, "%d", il->retry_rate);
3300 }
3301 
3302 static DEVICE_ATTR(retry_rate, 0600, il3945_show_retry_rate,
3303 		   il3945_store_retry_rate);
3304 
3305 static ssize_t
3306 il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
3307 {
3308 	/* none of this belongs in sysfs anyway */
3309 	return 0;
3310 }
3311 
3312 static DEVICE_ATTR(channels, 0400, il3945_show_channels, NULL);
3313 
3314 static ssize_t
3315 il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
3316 {
3317 	struct il_priv *il = dev_get_drvdata(d);
3318 
3319 	if (!il_is_alive(il))
3320 		return -EAGAIN;
3321 
3322 	return sprintf(buf, "%d\n", il3945_mod_params.antenna);
3323 }
3324 
3325 static ssize_t
3326 il3945_store_antenna(struct device *d, struct device_attribute *attr,
3327 		     const char *buf, size_t count)
3328 {
3329 	struct il_priv *il __maybe_unused = dev_get_drvdata(d);
3330 	int ant;
3331 
3332 	if (count == 0)
3333 		return 0;
3334 
3335 	if (sscanf(buf, "%1i", &ant) != 1) {
3336 		D_INFO("not in hex or decimal form.\n");
3337 		return count;
3338 	}
3339 
3340 	if (ant >= 0 && ant <= 2) {
3341 		D_INFO("Setting antenna select to %d.\n", ant);
3342 		il3945_mod_params.antenna = (enum il3945_antenna)ant;
3343 	} else
3344 		D_INFO("Bad antenna select value %d.\n", ant);
3345 
3346 	return count;
3347 }
3348 
3349 static DEVICE_ATTR(antenna, 0644, il3945_show_antenna, il3945_store_antenna);
3350 
3351 static ssize_t
3352 il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
3353 {
3354 	struct il_priv *il = dev_get_drvdata(d);
3355 	if (!il_is_alive(il))
3356 		return -EAGAIN;
3357 	return sprintf(buf, "0x%08x\n", (int)il->status);
3358 }
3359 
3360 static DEVICE_ATTR(status, 0444, il3945_show_status, NULL);
3361 
3362 static ssize_t
3363 il3945_dump_error_log(struct device *d, struct device_attribute *attr,
3364 		      const char *buf, size_t count)
3365 {
3366 	struct il_priv *il = dev_get_drvdata(d);
3367 	char *p = (char *)buf;
3368 
3369 	if (p[0] == '1')
3370 		il3945_dump_nic_error_log(il);
3371 
3372 	return strnlen(buf, count);
3373 }
3374 
3375 static DEVICE_ATTR(dump_errors, 0200, NULL, il3945_dump_error_log);
3376 
3377 /*****************************************************************************
3378  *
3379  * driver setup and tear down
3380  *
3381  *****************************************************************************/
3382 
3383 static void
3384 il3945_setup_deferred_work(struct il_priv *il)
3385 {
3386 	il->workqueue = create_singlethread_workqueue(DRV_NAME);
3387 
3388 	init_waitqueue_head(&il->wait_command_queue);
3389 
3390 	INIT_WORK(&il->restart, il3945_bg_restart);
3391 	INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
3392 	INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
3393 	INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
3394 	INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);
3395 
3396 	il_setup_scan_deferred_work(il);
3397 
3398 	il3945_hw_setup_deferred_work(il);
3399 
3400 	timer_setup(&il->watchdog, il_bg_watchdog, 0);
3401 
3402 	tasklet_setup(&il->irq_tasklet, il3945_irq_tasklet);
3403 }
3404 
3405 static void
3406 il3945_cancel_deferred_work(struct il_priv *il)
3407 {
3408 	il3945_hw_cancel_deferred_work(il);
3409 
3410 	cancel_delayed_work_sync(&il->init_alive_start);
3411 	cancel_delayed_work(&il->alive_start);
3412 
3413 	il_cancel_scan_deferred_work(il);
3414 }
3415 
3416 static struct attribute *il3945_sysfs_entries[] = {
3417 	&dev_attr_antenna.attr,
3418 	&dev_attr_channels.attr,
3419 	&dev_attr_dump_errors.attr,
3420 	&dev_attr_flags.attr,
3421 	&dev_attr_filter_flags.attr,
3422 	&dev_attr_measurement.attr,
3423 	&dev_attr_retry_rate.attr,
3424 	&dev_attr_status.attr,
3425 	&dev_attr_temperature.attr,
3426 	&dev_attr_tx_power.attr,
3427 #ifdef CONFIG_IWLEGACY_DEBUG
3428 	&dev_attr_debug_level.attr,
3429 #endif
3430 	NULL
3431 };
3432 
3433 static const struct attribute_group il3945_attribute_group = {
3434 	.name = NULL,		/* put in device directory */
3435 	.attrs = il3945_sysfs_entries,
3436 };
3437 
3438 static struct ieee80211_ops il3945_mac_ops __ro_after_init = {
3439 	.tx = il3945_mac_tx,
3440 	.start = il3945_mac_start,
3441 	.stop = il3945_mac_stop,
3442 	.add_interface = il_mac_add_interface,
3443 	.remove_interface = il_mac_remove_interface,
3444 	.change_interface = il_mac_change_interface,
3445 	.config = il_mac_config,
3446 	.configure_filter = il3945_configure_filter,
3447 	.set_key = il3945_mac_set_key,
3448 	.conf_tx = il_mac_conf_tx,
3449 	.reset_tsf = il_mac_reset_tsf,
3450 	.bss_info_changed = il_mac_bss_info_changed,
3451 	.hw_scan = il_mac_hw_scan,
3452 	.sta_add = il3945_mac_sta_add,
3453 	.sta_remove = il_mac_sta_remove,
3454 	.tx_last_beacon = il_mac_tx_last_beacon,
3455 	.flush = il_mac_flush,
3456 };
3457 
3458 static int
3459 il3945_init_drv(struct il_priv *il)
3460 {
3461 	int ret;
3462 	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
3463 
3464 	il->retry_rate = 1;
3465 	il->beacon_skb = NULL;
3466 
3467 	spin_lock_init(&il->sta_lock);
3468 	spin_lock_init(&il->hcmd_lock);
3469 
3470 	INIT_LIST_HEAD(&il->free_frames);
3471 
3472 	mutex_init(&il->mutex);
3473 
3474 	il->ieee_channels = NULL;
3475 	il->ieee_rates = NULL;
3476 	il->band = NL80211_BAND_2GHZ;
3477 
3478 	il->iw_mode = NL80211_IFTYPE_STATION;
3479 	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
3480 
3481 	/* initialize force reset */
3482 	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
3483 
3484 	if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
3485 		IL_WARN("Unsupported EEPROM version: 0x%04X\n",
3486 			eeprom->version);
3487 		ret = -EINVAL;
3488 		goto err;
3489 	}
3490 	ret = il_init_channel_map(il);
3491 	if (ret) {
3492 		IL_ERR("initializing regulatory failed: %d\n", ret);
3493 		goto err;
3494 	}
3495 
3496 	/* Set up txpower settings in driver for all channels */
3497 	if (il3945_txpower_set_from_eeprom(il)) {
3498 		ret = -EIO;
3499 		goto err_free_channel_map;
3500 	}
3501 
3502 	ret = il_init_geos(il);
3503 	if (ret) {
3504 		IL_ERR("initializing geos failed: %d\n", ret);
3505 		goto err_free_channel_map;
3506 	}
3507 	il3945_init_hw_rates(il, il->ieee_rates);
3508 
3509 	return 0;
3510 
3511 err_free_channel_map:
3512 	il_free_channel_map(il);
3513 err:
3514 	return ret;
3515 }

#define IL3945_MAX_PROBE_REQUEST	200

static int
il3945_setup_mac(struct il_priv *il)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-3945-rs";
	hw->sta_data_size = sizeof(struct il3945_sta_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	/* Tell mac80211 our characteristics */
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	hw->wiphy->interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
				       REGULATORY_DISABLE_BEACON_HINTS;

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	if (il->bands[NL80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
		    &il->bands[NL80211_BAND_2GHZ];

	if (il->bands[NL80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
		    &il->bands[NL80211_BAND_5GHZ];

	il_leds_init(il);

	wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}

static int
il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct il_priv *il;
	struct ieee80211_hw *hw;
	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
	struct il3945_eeprom *eeprom;
	unsigned long flags;

	/***********************
	 * 1. Allocating HW data
	 * ********************/

	hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il3945_mac_ops);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}
	il = hw->priv;
	il->hw = hw;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	il->cmd_queue = IL39_CMD_QUEUE_NUM;

	D_INFO("*** LOAD DRIVER ***\n");
	il->cfg = cfg;
	il->ops = &il3945_ops;
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il->debugfs_ops = &il3945_debugfs_ops;
#endif
	il->pci_dev = pdev;
	il->inta_mask = CSR_INI_SET_MASK;

	/***************************
	 * 2. Initializing PCI bus
	 * *************************/
	pci_disable_link_state(pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

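	/*
	 * Both the streaming and coherent DMA masks are limited to
	 * 32 bits here; probing fails if the platform cannot satisfy
	 * that.
	 */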
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		IL_WARN("No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, il);
	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/***********************
	 * 3. Read REV Register
	 * ********************/
	il->hw_base = pci_ioremap_bar(pdev, 0);
	if (!il->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	D_INFO("pci_resource_len = 0x%08llx\n",
	       (unsigned long long)pci_resource_len(pdev, 0));
	D_INFO("pci_resource_base = %p\n", il->hw_base);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, 0x41, 0x00);

	/* These spin locks are used by apm_init and EEPROM access,
	 * so initialize them now.
	 */
	spin_lock_init(&il->reg_lock);
	spin_lock_init(&il->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/***********************
	 * 4. Read EEPROM
	 * ********************/

	/* Read the EEPROM */
	err = il_eeprom_init(il);
	if (err) {
		IL_ERR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	/* MAC Address location in EEPROM same for 3945/4965 */
	eeprom = (struct il3945_eeprom *)il->eeprom;
	D_INFO("MAC address: %pM\n", eeprom->mac_address);
	SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);

	/***********************
	 * 5. Setup HW Constants
	 * ********************/
	/* Device-specific setup */
	err = il3945_hw_set_hw_params(il);
	if (err) {
		IL_ERR("failed to set hw settings\n");
		goto out_eeprom_free;
	}

	/***********************
	 * 6. Setup il
	 * ********************/

	err = il3945_init_drv(il);
	if (err) {
		IL_ERR("initializing driver failed\n");
		goto out_unset_hw_params;
	}

	IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);

	/***********************
	 * 7. Setup Services
	 * ********************/

	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	pci_enable_msi(il->pci_dev);

	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
	if (err) {
		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
		goto out_disable_msi;
	}

	err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
	if (err) {
		IL_ERR("failed to create sysfs device attributes\n");
		goto out_release_irq;
	}

	il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
	il3945_setup_deferred_work(il);
	il3945_setup_handlers(il);
	il_power_initialize(il);

	/*********************************
	 * 8. Setup and Register mac80211
	 * *******************************/

	il_enable_interrupts(il);

	err = il3945_setup_mac(il);
	if (err)
		goto out_remove_sysfs;

	il_dbgfs_register(il, DRV_NAME);

	/* Start monitoring the killswitch */
	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);

	return 0;

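	/* Error unwind: release everything acquired in the steps above. */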
out_remove_sysfs:
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
out_release_irq:
	free_irq(il->pci_dev->irq, il);
out_disable_msi:
	pci_disable_msi(il->pci_dev);
	il_free_geos(il);
	il_free_channel_map(il);
out_unset_hw_params:
	il3945_unset_hw_params(il);
out_eeprom_free:
	il_eeprom_free(il);
out_iounmap:
	iounmap(il->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_ieee80211_free_hw:
	ieee80211_free_hw(il->hw);
out:
	return err;
}

static void
il3945_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);

	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il3945_down(il);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with il_down(), but there are paths to
	 * run il_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running il_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il3945_synchronize_irq(il);

	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);

	cancel_delayed_work_sync(&il->_3945.rfkill_poll);

	il3945_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il3945_rx_queue_free(il, &il->rxq);
	il3945_hw_txq_ctx_free(il);

	il3945_unset_hw_params(il);

	/*netif_stop_queue(dev); */
	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw() calls il3945_mac_stop(), which flushes
	 * il->workqueue, so the workqueue cannot be destroyed any earlier
	 * than this. */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;

	free_irq(pdev->irq, il);
	pci_disable_msi(pdev);

	iounmap(il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	il_free_channel_map(il);
	il_free_geos(il);
	kfree(il->scan_cmd);
	dev_kfree_skb(il->beacon_skb);
	ieee80211_free_hw(il->hw);
}

/*****************************************************************************
 *
 * driver and module entry point
 *
 *****************************************************************************/

static struct pci_driver il3945_driver = {
	.name = DRV_NAME,
	.id_table = il3945_hw_card_ids,
	.probe = il3945_pci_probe,
	.remove = il3945_pci_remove,
	.driver.pm = IL_LEGACY_PM_OPS,
};

static int __init
il3945_init(void)
{
	int ret;

	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
	pr_info(DRV_COPYRIGHT "\n");

	/*
	 * Disabling hardware scan means that mac80211 performs scans
	 * "the hard way" instead of using the device's hardware scan.
	 */
	if (il3945_mod_params.disable_hw_scan) {
		pr_info("hw_scan is disabled\n");
		il3945_mac_ops.hw_scan = NULL;
	}

	ret = il3945_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = pci_register_driver(&il3945_driver);
	if (ret) {
		pr_err("Unable to initialize PCI module\n");
		goto error_register;
	}

	return ret;

error_register:
	il3945_rate_control_unregister();
	return ret;
}

static void __exit
il3945_exit(void)
{
	pci_unregister_driver(&il3945_driver);
	il3945_rate_control_unregister();
}

MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));

module_param_named(antenna, il3945_mod_params.antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, 0444);
MODULE_PARM_DESC(swcrypto, "use software crypto (default 1 [software])");
module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
		   0444);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
#ifdef CONFIG_IWLEGACY_DEBUG
module_param_named(debug, il_debug_level, uint, 0644);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
module_param_named(fw_restart, il3945_mod_params.restart_fw, int, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
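
/*
 * Example (illustrative only, not from the original source): the
 * parameters above can be overridden when the module is loaded, e.g.
 *
 *   modprobe iwl3945 antenna=1 swcrypto=0 disable_hw_scan=0 fw_restart=1
 *
 * which forces the MAIN antenna, enables hardware crypto, lets the
 * device perform scans itself, and keeps firmware auto-restart on.
 */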

module_exit(il3945_exit);
module_init(il3945_init);