/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

#include "common.h"

int
_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
{
	const int interval = 10;	/* microseconds */
	int t = 0;

	do {
		if ((_il_rd(il, addr) & mask) == (bits & mask))
			return t;
		udelay(interval);
		t += interval;
	} while (t < timeout);

	return -ETIMEDOUT;
}
EXPORT_SYMBOL(_il_poll_bit);

void
il_set_bit(struct il_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_il_set_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_set_bit);

void
il_clear_bit(struct il_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_il_clear_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_clear_bit);

bool
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQ if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		val = _il_rd(il, CSR_GP_CNTRL);
		WARN_ONCE(1, "Timeout waiting for ucode processor access "
			     "(CSR_GP_CNTRL 0x%08x)\n", val);
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(_il_grab_nic_access);

int
il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
{
	const int interval = 10;	/* microseconds */
	int t = 0;

	do {
		if ((il_rd(il, addr) & mask) == mask)
			return t;
		udelay(interval);
		t += interval;
	} while (t < timeout);

	return -ETIMEDOUT;
}
EXPORT_SYMBOL(il_poll_bit);

u32
il_rd_prph(struct il_priv *il, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	val = _il_rd_prph(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return val;
}
EXPORT_SYMBOL(il_rd_prph);

void
il_wr_prph(struct il_priv *il, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (likely(_il_grab_nic_access(il))) {
		_il_wr_prph(il, addr, val);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_wr_prph);

u32
il_read_targ_mem(struct il_priv *il, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);

	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
	value = _il_rd(il, HBUS_TARG_MEM_RDAT);

	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;
}
EXPORT_SYMBOL(il_read_targ_mem);

void
il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (likely(_il_grab_nic_access(il))) {
		_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
		_il_wr(il, HBUS_TARG_MEM_WDAT, val);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_write_targ_mem);
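/*
 * A minimal sketch (not part of the original file) of the pattern the
 * comment in _il_grab_nic_access() recommends: grab NIC access once
 * around a burst of register reads instead of waking the device for
 * every access.  The helper name and its callers are hypothetical.
 */
static inline void
il_rd_burst(struct il_priv *il, const u32 *addrs, u32 *vals, int n)
{
	unsigned long reg_flags;
	int i;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (likely(_il_grab_nic_access(il))) {
		/* MAC_ACCESS_REQ is held, so the NIC stays awake here */
		for (i = 0; i < n; i++)
			vals[i] = _il_rd(il, addrs[i]);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}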
const char *
il_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IL_CMD(N_ALIVE);
		IL_CMD(N_ERROR);
		IL_CMD(C_RXON);
		IL_CMD(C_RXON_ASSOC);
		IL_CMD(C_QOS_PARAM);
		IL_CMD(C_RXON_TIMING);
		IL_CMD(C_ADD_STA);
		IL_CMD(C_REM_STA);
		IL_CMD(C_WEPKEY);
		IL_CMD(N_3945_RX);
		IL_CMD(C_TX);
		IL_CMD(C_RATE_SCALE);
		IL_CMD(C_LEDS);
		IL_CMD(C_TX_LINK_QUALITY_CMD);
		IL_CMD(C_CHANNEL_SWITCH);
		IL_CMD(N_CHANNEL_SWITCH);
		IL_CMD(C_SPECTRUM_MEASUREMENT);
		IL_CMD(N_SPECTRUM_MEASUREMENT);
		IL_CMD(C_POWER_TBL);
		IL_CMD(N_PM_SLEEP);
		IL_CMD(N_PM_DEBUG_STATS);
		IL_CMD(C_SCAN);
		IL_CMD(C_SCAN_ABORT);
		IL_CMD(N_SCAN_START);
		IL_CMD(N_SCAN_RESULTS);
		IL_CMD(N_SCAN_COMPLETE);
		IL_CMD(N_BEACON);
		IL_CMD(C_TX_BEACON);
		IL_CMD(C_TX_PWR_TBL);
		IL_CMD(C_BT_CONFIG);
		IL_CMD(C_STATS);
		IL_CMD(N_STATS);
		IL_CMD(N_CARD_STATE);
		IL_CMD(N_MISSED_BEACONS);
		IL_CMD(C_CT_KILL_CONFIG);
		IL_CMD(C_SENSITIVITY);
		IL_CMD(C_PHY_CALIBRATION);
		IL_CMD(N_RX_PHY);
		IL_CMD(N_RX_MPDU);
		IL_CMD(N_RX);
		IL_CMD(N_COMPRESSED_BA);
	default:
		return "UNKNOWN";
	}
}
EXPORT_SYMBOL(il_get_cmd_string);

#define HOST_COMPLETE_TIMEOUT (HZ / 2)

static void
il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
			struct il_rx_pkt *pkt)
{
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from %s (0x%08X)\n",
		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	switch (cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		D_HC_DUMP("back from %s (0x%08X)\n",
			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
		     pkt->hdr.flags);
	}
#endif
}

static int
il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
{
	int ret;

	BUG_ON(!(cmd->flags & CMD_ASYNC));

	/* An asynchronous command can not expect an SKB to be set. */
	BUG_ON(cmd->flags & CMD_WANT_SKB);

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = il_generic_cmd_callback;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EBUSY;

	ret = il_enqueue_hcmd(il, cmd);
	if (ret < 0) {
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

int
il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&il->mutex);

	BUG_ON(cmd->flags & CMD_ASYNC);

	/* A synchronous command can not have a callback set. */
	BUG_ON(cmd->callback);

	D_INFO("Attempting to send sync command %s\n",
	       il_get_cmd_string(cmd->id));

	set_bit(S_HCMD_ACTIVE, &il->status);
	D_INFO("Setting HCMD_ACTIVE for command %s\n",
	       il_get_cmd_string(cmd->id));

	cmd_idx = il_enqueue_hcmd(il, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		goto out;
	}

	ret = wait_event_timeout(il->wait_command_queue,
				 !test_bit(S_HCMD_ACTIVE, &il->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
			IL_ERR("Error sending %s: time out after %dms.\n",
			       il_get_cmd_string(cmd->id),
			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(S_HCMD_ACTIVE, &il->status);
			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
			       il_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(S_RFKILL, &il->status)) {
		IL_ERR("Command %s aborted: RF KILL Switch\n",
		       il_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(S_FW_ERROR, &il->status)) {
		IL_ERR("Command %s failed: FW Error\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IL_ERR("Error: Response NULL in '%s'\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue.  Otherwise, in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		il_free_pages(il, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(il_send_cmd_sync);

int
il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return il_send_cmd_async(il, cmd);

	return il_send_cmd_sync(il, cmd);
}
EXPORT_SYMBOL(il_send_cmd);

int
il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	return il_send_cmd_sync(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu);

int
il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
		      void (*callback) (struct il_priv *il,
					struct il_device_cmd *cmd,
					struct il_rx_pkt *pkt))
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	cmd.flags |= CMD_ASYNC;
	cmd.callback = callback;

	return il_send_cmd_async(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu_async);
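/*
 * Usage sketch (illustrative, not part of the original file): sending a
 * fire-and-forget command synchronously with il_send_cmd_pdu().  The
 * caller must hold il->mutex and be able to sleep, since the helper
 * blocks for up to HOST_COMPLETE_TIMEOUT waiting for the response.
 * C_BT_CONFIG and struct il_bt_cmd are used here only as an example
 * payload.
 *
 *	struct il_bt_cmd bt_cmd = { .flags = 0 };
 *
 *	mutex_lock(&il->mutex);
 *	ret = il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(bt_cmd), &bt_cmd);
 *	mutex_unlock(&il->mutex);
 */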
/* default: IL_LED_BLINK(0) using blinking idx table */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode,
		 "0=system default, 1=On(RF On)/Off(RF Off), 2=blinking");

/* Throughput		OFF time(ms)	ON time (ms)
 *	>300			25		25
 *	>200 to 300		40		40
 *	>100 to 200		55		55
 *	>70 to 100		65		65
 *	>50 to 70		75		75
 *	>20 to 50		85		85
 *	>10 to 20		95		95
 *	>5 to 10		110		110
 *	>1 to 5			130		130
 *	>0 to 1			167		167
 *	<=0					SOLID ON
 */
static const struct ieee80211_tpt_blink il_blink[] = {
	{.throughput = 0,		.blink_time = 334},
	{.throughput = 1 * 1024 - 1,	.blink_time = 260},
	{.throughput = 5 * 1024 - 1,	.blink_time = 220},
	{.throughput = 10 * 1024 - 1,	.blink_time = 190},
	{.throughput = 20 * 1024 - 1,	.blink_time = 170},
	{.throughput = 50 * 1024 - 1,	.blink_time = 150},
	{.throughput = 70 * 1024 - 1,	.blink_time = 130},
	{.throughput = 100 * 1024 - 1,	.blink_time = 110},
	{.throughput = 200 * 1024 - 1,	.blink_time = 80},
	{.throughput = 300 * 1024 - 1,	.blink_time = 50},
};

/*
 * Adjust led blink rate to compensate for the MAC clock difference on
 * each HW type.  Led blink rate analysis showed an average deviation of
 * 0% on 3945 and 5% on 4965 HW.
 * We need to compensate the led on/off time per HW according to the
 * deviation to achieve the desired led frequency.
 * The calculation is: (100 - averageDeviation) / 100 * blinkTime
 * For code efficiency the calculation will be:
 *	compensation = (100 - averageDeviation) * 64 / 100
 *	NewBlinkTime = (compensation * BlinkTime) / 64
 */
static inline u8
il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
{
	if (!compensation) {
		IL_ERR("undefined blink compensation: "
		       "use pre-defined blinking time\n");
		return time;
	}

	return (u8) ((time * compensation) >> 6);
}
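/*
 * Worked example (illustrative): with the 4965's ~5% average deviation,
 * compensation = (100 - 5) * 64 / 100 = 60, so a nominal 110 ms blink
 * time becomes (110 * 60) >> 6 = 103 ms, pulling the observed blink
 * frequency back to the requested one.
 */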
/* Set led pattern command */
static int
il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
{
	struct il_led_cmd led_cmd = {
		.id = IL_LED_LINK,
		.interval = IL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(S_READY, &il->status))
		return -EBUSY;

	if (il->blink_on == on && il->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IL_LED_SOLID;
	}

	D_LED("Led blink time compensation=%u\n",
	      il->cfg->led_compensation);
	led_cmd.on =
	    il_blink_compensation(il, on, il->cfg->led_compensation);
	led_cmd.off =
	    il_blink_compensation(il, off, il->cfg->led_compensation);

	ret = il->ops->send_led_cmd(il, &led_cmd);
	if (!ret) {
		il->blink_on = on;
		il->blink_off = off;
	}
	return ret;
}

static void
il_led_brightness_set(struct led_classdev *led_cdev,
		      enum led_brightness brightness)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);
	unsigned long on = 0;

	if (brightness > 0)
		on = IL_LED_SOLID;

	il_led_cmd(il, on, 0);
}

static int
il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
		 unsigned long *delay_off)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);

	return il_led_cmd(il, *delay_on, *delay_off);
}

void
il_leds_init(struct il_priv *il)
{
	int mode = led_mode;
	int ret;

	if (mode == IL_LED_DEFAULT)
		mode = il->cfg->led_mode;

	il->led.name =
	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
	il->led.brightness_set = il_led_brightness_set;
	il->led.blink_set = il_led_blink_set;
	il->led.max_brightness = 1;

	switch (mode) {
	case IL_LED_DEFAULT:
		WARN_ON(1);
		break;
	case IL_LED_BLINK:
		il->led.default_trigger =
		    ieee80211_create_tpt_led_trigger(il->hw,
						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
						     il_blink,
						     ARRAY_SIZE(il_blink));
		break;
	case IL_LED_RF_STATE:
		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
		break;
	}

	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
	if (ret) {
		kfree(il->led.name);
		return;
	}

	il->led_registered = true;
}
EXPORT_SYMBOL(il_leds_init);

void
il_leds_exit(struct il_priv *il)
{
	if (!il->led_registered)
		return;

	led_classdev_unregister(&il->led);
	kfree(il->led.name);
}
EXPORT_SYMBOL(il_leds_exit);

/************************** EEPROM BANDS ****************************
 *
 * The il_eeprom_band definitions below provide the mapping from the
 * EEPROM contents to the specific channel number supported for each
 * band.
 *
 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
 * The specific geography and calibration information for that channel
 * is contained in the eeprom map itself.
 *
 * During init, we copy the eeprom information and channel map
 * information into il->channel_info_24/52 and il->channel_map_24/52
 *
 * channel_map_24/52 provides the idx in the channel_info array for a
 * given channel.  We have to have two separate maps as there is channel
 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
 * band_2
 *
 * A value of 0xff stored in the channel_map indicates that the channel
 * is not supported by the hardware at all.
 *
 * A value of 0xfe in the channel_map indicates that the channel is not
 * valid for Tx with the current hardware.  This means that
 * while the system can tune and receive on a given channel, it may not
 * be able to associate or transmit any frames on that
 * channel.  There is no corresponding channel information for that
 * entry.
 *
 *********************************************************************/

/* 2.4 GHz */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
static const u8 il_eeprom_band_2[] = {	/* 4915-5080MHz */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 il_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 il_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 il_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

static const u8 il_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 il_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};

/******************************************************************************
 *
 * EEPROM related functions
 *
 ******************************************************************************/

static int
il_eeprom_verify_signature(struct il_priv *il)
{
	u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
	int ret = 0;

	D_EEPROM("EEPROM signature=0x%08x\n", gp);
	switch (gp) {
	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
		break;
	default:
		IL_ERR("bad EEPROM signature, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		break;
	}
	return ret;
}

const u8 *
il_eeprom_query_addr(const struct il_priv *il, size_t offset)
{
	BUG_ON(offset >= il->cfg->eeprom_size);
	return &il->eeprom[offset];
}
EXPORT_SYMBOL(il_eeprom_query_addr);

u16
il_eeprom_query16(const struct il_priv *il, size_t offset)
{
	if (!il->eeprom)
		return 0;
	return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
}
EXPORT_SYMBOL(il_eeprom_query16);

/**
 * il_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into il->eeprom
 *
 * NOTE: This routine uses the non-debug IO access functions.
 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* allocate eeprom */
	sz = il->cfg->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom)
		return -ENOMEM;

	e = (__le16 *) il->eeprom;

	il->ops->apm_init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = il->ops->eeprom_acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* eeprom is an array of 16-bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		ret =
		    _il_poll_bit(il, CSR_EEPROM_REG,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		r = _il_rd(il, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->ops->eeprom_release_semaphore(il);

err:
	if (ret)
		il_eeprom_free(il);
	/* Reset chip to save power until we load uCode during "up". */
	il_apm_stop(il);
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);

void
il_eeprom_free(struct il_priv *il)
{
	kfree(il->eeprom);
	il->eeprom = NULL;
}
EXPORT_SYMBOL(il_eeprom_free);
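/*
 * Illustrative trace of one word read in il_eeprom_init() above (the
 * offset is hypothetical): for byte offset addr = 0x2C the driver
 * writes 0x58 (addr << 1) to CSR_EEPROM_REG, polls until the chip sets
 * CSR_EEPROM_REG_READ_VALID_MSK, and then finds the 16-bit word in the
 * upper half of the same register, which is stored to e[0x16].
 */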
static void
il_init_band_reference(const struct il_priv *il, int eep_band,
		       int *eeprom_ch_count,
		       const struct il_eeprom_channel **eeprom_ch_info,
		       const u8 **eeprom_ch_idx)
{
	u32 offset = il->cfg->regulatory_bands[eep_band - 1];

	switch (eep_band) {
	case 1:		/* 2.4GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_1;
		break;
	case 2:		/* 4.9GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_2;
		break;
	case 3:		/* 5.2GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_3;
		break;
	case 4:		/* 5.5GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_4;
		break;
	case 5:		/* 5.7GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_5;
		break;
	case 6:		/* 2.4GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_6;
		break;
	case 7:		/* 5 GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_7;
		break;
	default:
		BUG();
	}
}

#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")

/**
 * il_mod_ht40_chan_info - Copy HT40 channel info into driver's il structure.
 *
 * Does not set up a command, or touch hardware.
 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}

#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			      ? # x " " : "")

/**
 * il_init_channel_map - Set up driver's info for all possible channels
 */
int
il_init_channel_map(struct il_priv *il)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_idx = NULL;
	const struct il_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct il_channel_info *ch_info;

	if (il->channel_count) {
		D_EEPROM("Channel map already initialized.\n");
		return 0;
	}

	D_EEPROM("Initializing regulatory info from EEPROM\n");

	il->channel_count =
	    ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
	    ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
	    ARRAY_SIZE(il_eeprom_band_5);

	D_EEPROM("Parsing data for %d channels.\n", il->channel_count);

	il->channel_info =
	    kzalloc(sizeof(struct il_channel_info) * il->channel_count,
		    GFP_KERNEL);
	if (!il->channel_info) {
		IL_ERR("Could not allocate channel_info\n");
		il->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = il->channel_info;

	/* Loop through the 5 EEPROM bands, adding them in order to the
	 * channel map we maintain (which contains additional information
	 * beyond what is in the EEPROM) */
	for (band = 1; band <= 5; band++) {

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_idx[ch];
			ch_info->band =
			    (band == 1) ? NL80211_BAND_2GHZ :
			    NL80211_BAND_5GHZ;

			/* permanently store EEPROM's channel regulatory flags
			 * and max power in channel info database. */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Copy the run-time flags so they are there even on
			 * invalid channels */
			ch_info->flags = eeprom_ch_info[ch].flags;
			/* First write that ht40 is not enabled, and then
			 * enable one by one */
			ch_info->ht40_extension_channel =
			    IEEE80211_CHAN_NO_HT40;

			if (!(il_is_channel_valid(ch_info))) {
				D_EEPROM("Ch. %d Flags %x [%sGHz] - "
					 "No traffic\n", ch_info->channel,
					 ch_info->flags,
					 il_is_channel_a_band(ch_info) ? "5.2" :
					 "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			D_EEPROM("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm):"
				 " Ad-Hoc %ssupported\n", ch_info->channel,
				 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
				 CHECK_AND_PRINT_I(VALID),
				 CHECK_AND_PRINT_I(IBSS),
				 CHECK_AND_PRINT_I(ACTIVE),
				 CHECK_AND_PRINT_I(RADAR),
				 CHECK_AND_PRINT_I(WIDE),
				 CHECK_AND_PRINT_I(DFS),
				 eeprom_ch_info[ch].flags,
				 eeprom_ch_info[ch].max_power_avg,
				 ((eeprom_ch_info[ch].flags &
				   EEPROM_CHANNEL_IBSS) &&
				  !(eeprom_ch_info[ch].flags &
				    EEPROM_CHANNEL_RADAR)) ? "" : "not ");

			ch_info++;
		}
	}

	/* Check if we do have HT40 channels */
	if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
	    il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum nl80211_band ieeeband;

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband =
		    (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			/* Set up driver's info for lower half */
			il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			il_mod_ht40_chan_info(il, ieeeband,
					      eeprom_ch_idx[ch] + 4,
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return 0;
}
EXPORT_SYMBOL(il_init_channel_map);

/*
 * il_free_channel_map - undo allocations in il_init_channel_map
 */
void
il_free_channel_map(struct il_priv *il)
{
	kfree(il->channel_info);
	il->channel_count = 0;
}
EXPORT_SYMBOL(il_free_channel_map);
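/*
 * Illustrative HT40 pairing (not part of the original file): EEPROM
 * band 7 lists channel 36, so the HT40 loop in il_init_channel_map()
 * above marks channel 36 as a valid lower (HT40+) half by clearing
 * IEEE80211_CHAN_NO_HT40PLUS, and channel 40 (36 + 4) as a valid upper
 * (HT40-) half by clearing IEEE80211_CHAN_NO_HT40MINUS.
 */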
/**
 * il_get_channel_info - Find driver's private channel info
 *
 * Based on band and channel number.
 */
const struct il_channel_info *
il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
		    u16 channel)
{
	int i;

	switch (band) {
	case NL80211_BAND_5GHZ:
		for (i = 14; i < il->channel_count; i++) {
			if (il->channel_info[i].channel == channel)
				return &il->channel_info[i];
		}
		break;
	case NL80211_BAND_2GHZ:
		if (channel >= 1 && channel <= 14)
			return &il->channel_info[channel - 1];
		break;
	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL(il_get_channel_info);

/*
 * Setting power level allows the card to go to sleep when not busy.
 *
 * We calculate a sleep command based on the required latency, which
 * we get from mac80211.
 */

#define SLP_VEC(X0, X1, X2, X3, X4) {		\
	cpu_to_le32(X0),			\
	cpu_to_le32(X1),			\
	cpu_to_le32(X2),			\
	cpu_to_le32(X3),			\
	cpu_to_le32(X4)				\
}

static void
il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	const __le32 interval[3][IL_POWER_VEC_SIZE] = {
		SLP_VEC(2, 2, 4, 6, 0xFF),
		SLP_VEC(2, 4, 7, 10, 10),
		SLP_VEC(4, 7, 10, 10, 0xFF)
	};
	int i, dtim_period, no_dtim;
	u32 max_sleep;
	bool skip;

	memset(cmd, 0, sizeof(*cmd));

	if (il->power_data.pci_pm)
		cmd->flags |= IL_POWER_PCI_PM_MSK;

	/* if no Power Save, we are done */
	if (il->power_data.ps_disabled)
		return;

	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	cmd->keep_alive_seconds = 0;
	cmd->debug_flags = 0;
	cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->keep_alive_beacons = 0;

	dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;
	if (dtim_period <= 2) {
		memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
		no_dtim = 2;
	} else if (dtim_period <= 10) {
		memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
		no_dtim = 2;
	} else {
		memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
		no_dtim = 0;
	}

	if (dtim_period == 0) {
		dtim_period = 1;
		skip = false;
	} else {
		skip = !!no_dtim;
	}

	if (skip) {
		__le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];

		max_sleep = le32_to_cpu(tmp);
		if (max_sleep == 0xFF)
			max_sleep = dtim_period * (skip + 1);
		else if (max_sleep > dtim_period)
			max_sleep = (max_sleep / dtim_period) * dtim_period;
		cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
	} else {
		max_sleep = dtim_period;
		cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
	}

	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
}

static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}

static int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* On scan completion sleep_cmd_next is used, so keep it updated */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->ops->update_chain_flags && update_chains)
			il->ops->update_chain_flags(il);
		else if (il->ops->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d\n", ret);

	return ret;
}

int
il_power_update_mode(struct il_priv *il, bool force)
{
	struct il_powertable_cmd cmd;

	il_build_powertable_cmd(il, &cmd);

	return il_power_set_mode(il, &cmd, force);
}
EXPORT_SYMBOL(il_power_update_mode);
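/*
 * Worked example (illustrative) for il_build_powertable_cmd() above:
 * with dtim_period = 3 the middle vector {2, 4, 7, 10, 10} is chosen
 * and DTIM skipping is allowed, so max_sleep = (10 / 3) * 3 = 9 beacon
 * intervals and the vector sent to the uCode is clamped to
 * {2, 4, 7, 9, 9}, with IL_POWER_SLEEP_OVER_DTIM_MSK set.
 */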
/* initialize to default */
void
il_power_initialize(struct il_priv *il)
{
	u16 lctl;

	pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
	il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	il->power_data.debug_sleep_level_override = -1;

	memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
}
EXPORT_SYMBOL(il_power_initialize);

/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
 * sending probe req.  This should be set long enough to hear probe responses
 * from more than one AP. */
#define IL_ACTIVE_DWELL_TIME_24    (30)	/* all times in msec */
#define IL_ACTIVE_DWELL_TIME_52    (20)

#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)

/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
 * Must be set longer than active dwell time.
 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
#define IL_PASSIVE_DWELL_TIME_24   (20)	/* all times in msec */
#define IL_PASSIVE_DWELL_TIME_52   (10)
#define IL_PASSIVE_DWELL_BASE      (100)
#define IL_CHANNEL_TUNE_TIME       5

static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with error when device is not ready
	 * to receive scan abort command or it does not perform
	 * hardware scan currently */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	il_free_pages(il, cmd.reply_page);
	return ret;
}

static void
il_complete_scan(struct il_priv *il, bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};

	/* check if scan was requested from mac80211 */
	if (il->scan_request) {
		D_SCAN("Complete scan in mac80211\n");
		ieee80211_scan_completed(il->hw, &info);
	}

	il->scan_vif = NULL;
	il->scan_request = NULL;
}

void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
}

static void
il_do_scan_abort(struct il_priv *il)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Not performing scan to abort\n");
		return;
	}

	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan abort in progress\n");
		return;
	}

	ret = il_send_scan_abort(il);
	if (ret) {
		D_SCAN("Send scan abort failed %d\n", ret);
		il_force_scan_end(il);
	} else
		D_SCAN("Successfully sent scan abort\n");
}

/**
 * il_scan_cancel - Cancel any currently executing HW scan
 */
int
il_scan_cancel(struct il_priv *il)
{
	D_SCAN("Queuing abort scan\n");
	queue_work(il->workqueue, &il->abort_scan);
	return 0;
}
EXPORT_SYMBOL(il_scan_cancel);

/**
 * il_scan_cancel_timeout - Cancel any currently executing HW scan
 * @ms: amount of time to wait (in milliseconds) for scan to abort
 */
int
il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&il->mutex);

	D_SCAN("Scan cancel timeout\n");

	il_do_scan_abort(il);

	while (time_before_eq(jiffies, timeout)) {
		if (!test_bit(S_SCAN_HW, &il->status))
			break;
		msleep(20);
	}

	return test_bit(S_SCAN_HW, &il->status);
}
EXPORT_SYMBOL(il_scan_cancel_timeout);

/* Service response to C_SCAN (0x80) */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}

/* Service N_SCAN_START (0x82) */
static void
il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;

	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: %d [802.11%s] (TSF: 0x%08X:%08X) - %d "
	       "(beacon timer %u)\n", notif->channel,
	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
}
"bg" : "a", le32_to_cpu(notif->tsf_high), 1426 le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer); 1427 } 1428 1429 /* Service N_SCAN_RESULTS (0x83) */ 1430 static void 1431 il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb) 1432 { 1433 #ifdef CONFIG_IWLEGACY_DEBUG 1434 struct il_rx_pkt *pkt = rxb_addr(rxb); 1435 struct il_scanresults_notification *notif = 1436 (struct il_scanresults_notification *)pkt->u.raw; 1437 1438 D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d " 1439 "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a", 1440 le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low), 1441 le32_to_cpu(notif->stats[0]), 1442 le32_to_cpu(notif->tsf_low) - il->scan_start_tsf); 1443 #endif 1444 } 1445 1446 /* Service N_SCAN_COMPLETE (0x84) */ 1447 static void 1448 il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb) 1449 { 1450 1451 #ifdef CONFIG_IWLEGACY_DEBUG 1452 struct il_rx_pkt *pkt = rxb_addr(rxb); 1453 struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 1454 #endif 1455 1456 D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", 1457 scan_notif->scanned_channels, scan_notif->tsf_low, 1458 scan_notif->tsf_high, scan_notif->status); 1459 1460 /* The HW is no longer scanning */ 1461 clear_bit(S_SCAN_HW, &il->status); 1462 1463 D_SCAN("Scan on %sGHz took %dms\n", 1464 (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2", 1465 jiffies_to_msecs(jiffies - il->scan_start)); 1466 1467 queue_work(il->workqueue, &il->scan_completed); 1468 } 1469 1470 void 1471 il_setup_rx_scan_handlers(struct il_priv *il) 1472 { 1473 /* scan handlers */ 1474 il->handlers[C_SCAN] = il_hdl_scan; 1475 il->handlers[N_SCAN_START] = il_hdl_scan_start; 1476 il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results; 1477 il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete; 1478 } 1479 EXPORT_SYMBOL(il_setup_rx_scan_handlers); 1480 1481 u16 1482 il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band, 1483 u8 n_probes) 1484 { 1485 if (band == NL80211_BAND_5GHZ) 1486 return IL_ACTIVE_DWELL_TIME_52 + 1487 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); 1488 else 1489 return IL_ACTIVE_DWELL_TIME_24 + 1490 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); 1491 } 1492 EXPORT_SYMBOL(il_get_active_dwell_time); 1493 1494 u16 1495 il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band, 1496 struct ieee80211_vif *vif) 1497 { 1498 u16 value; 1499 1500 u16 passive = 1501 (band == 1502 NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE + 1503 IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE + 1504 IL_PASSIVE_DWELL_TIME_52; 1505 1506 if (il_is_any_associated(il)) { 1507 /* 1508 * If we're associated, we clamp the maximum passive 1509 * dwell time to be 98% of the smallest beacon interval 1510 * (minus 2 * channel tune time) 1511 */ 1512 value = il->vif ? 
u16
il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
			  struct ieee80211_vif *vif)
{
	u16 value;

	u16 passive =
	    (band == NL80211_BAND_2GHZ) ?
	    IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_24 :
	    IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_52;

	if (il_is_any_associated(il)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		value = il->vif ? il->vif->bss_conf.beacon_int : 0;
		if (value > IL_PASSIVE_DWELL_BASE || !value)
			value = IL_PASSIVE_DWELL_BASE;
		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
		passive = min(value, passive);
	}

	return passive;
}
EXPORT_SYMBOL(il_get_passive_dwell_time);

void
il_init_scan_params(struct il_priv *il)
{
	u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;

	if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
		il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
	if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
		il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
}
EXPORT_SYMBOL(il_init_scan_params);

static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->ops->request_scan(il, vif);
	if (ret) {
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}

int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct il_priv *il = hw->priv;
	int ret;

	if (req->n_channels == 0) {
		IL_ERR("Cannot scan with no channels.\n");
		return -EINVAL;
	}

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

out_unlock:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);

static void
il_bg_scan_check(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, scan_check.work);

	D_SCAN("Scan check work\n");

	/* If we are here, the firmware has not finished the scan and
	 * most likely is in a bad shape, so we don't bother sending an
	 * abort command; just force scan complete to mac80211 */
	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
}
/**
 * il_fill_probe_req - fill in all required fields and IE for probe request
 */
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our wildcard (zero-length) SSID IE */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16) len;
}
EXPORT_SYMBOL(il_fill_probe_req);
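/*
 * Illustrative layout of the buffer il_fill_probe_req() builds, for a
 * hypothetical ie_len = 10: bytes 0-23 hold the 802.11 management
 * header (broadcast DA and BSSID, our TA), bytes 24-25 the wildcard
 * SSID IE (id 0, length 0), and bytes 26-35 the caller-supplied IEs,
 * for a returned length of 36.
 */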
static void
il_bg_abort_scan(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, abort_scan);

	D_SCAN("Abort scan work\n");

	/* We keep scan_check work queued in case the firmware does not
	 * report back the scan completed notification */
	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 200);
	mutex_unlock(&il->mutex);
}

static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->ops->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}

void
il_setup_scan_deferred_work(struct il_priv *il)
{
	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
}
EXPORT_SYMBOL(il_setup_scan_deferred_work);

void
il_cancel_scan_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);

	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
	}
}
EXPORT_SYMBOL(il_cancel_scan_deferred_work);

/* il->sta_lock must be held */
static void
il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
{
	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
		IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
		       sta_id, il->stations[sta_id].sta.sta.addr);

	if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
		D_ASSOC("STA id %u addr %pM already present "
			"in uCode (according to driver)\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	} else {
		il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
		D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	}
}

static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device.  That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns.  This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
"Modified" : "Added", addsta->sta.addr); 1830 spin_unlock_irqrestore(&il->sta_lock, flags); 1831 1832 return ret; 1833 } 1834 1835 static void 1836 il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd, 1837 struct il_rx_pkt *pkt) 1838 { 1839 struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload; 1840 1841 il_process_add_sta_resp(il, addsta, pkt, false); 1842 1843 } 1844 1845 int 1846 il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags) 1847 { 1848 struct il_rx_pkt *pkt = NULL; 1849 int ret = 0; 1850 u8 data[sizeof(*sta)]; 1851 struct il_host_cmd cmd = { 1852 .id = C_ADD_STA, 1853 .flags = flags, 1854 .data = data, 1855 }; 1856 u8 sta_id __maybe_unused = sta->sta.sta_id; 1857 1858 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr, 1859 flags & CMD_ASYNC ? "a" : ""); 1860 1861 if (flags & CMD_ASYNC) 1862 cmd.callback = il_add_sta_callback; 1863 else { 1864 cmd.flags |= CMD_WANT_SKB; 1865 might_sleep(); 1866 } 1867 1868 cmd.len = il->ops->build_addsta_hcmd(sta, data); 1869 ret = il_send_cmd(il, &cmd); 1870 if (ret) 1871 return ret; 1872 if (flags & CMD_ASYNC) 1873 return 0; 1874 1875 pkt = (struct il_rx_pkt *)cmd.reply_page; 1876 ret = il_process_add_sta_resp(il, sta, pkt, true); 1877 1878 il_free_pages(il, cmd.reply_page); 1879 1880 return ret; 1881 } 1882 EXPORT_SYMBOL(il_send_add_sta); 1883 1884 static void 1885 il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta) 1886 { 1887 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; 1888 __le32 sta_flags; 1889 1890 if (!sta || !sta_ht_inf->ht_supported) 1891 goto done; 1892 1893 D_ASSOC("spatial multiplexing power save mode: %s\n", 1894 (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" : 1895 (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? 
"dynamic" : 1896 "disabled"); 1897 1898 sta_flags = il->stations[idx].sta.station_flags; 1899 1900 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); 1901 1902 switch (sta->smps_mode) { 1903 case IEEE80211_SMPS_STATIC: 1904 sta_flags |= STA_FLG_MIMO_DIS_MSK; 1905 break; 1906 case IEEE80211_SMPS_DYNAMIC: 1907 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; 1908 break; 1909 case IEEE80211_SMPS_OFF: 1910 break; 1911 default: 1912 IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode); 1913 break; 1914 } 1915 1916 sta_flags |= 1917 cpu_to_le32((u32) sta_ht_inf-> 1918 ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); 1919 1920 sta_flags |= 1921 cpu_to_le32((u32) sta_ht_inf-> 1922 ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); 1923 1924 if (il_is_ht40_tx_allowed(il, &sta->ht_cap)) 1925 sta_flags |= STA_FLG_HT40_EN_MSK; 1926 else 1927 sta_flags &= ~STA_FLG_HT40_EN_MSK; 1928 1929 il->stations[idx].sta.station_flags = sta_flags; 1930 done: 1931 return; 1932 } 1933 1934 /** 1935 * il_prep_station - Prepare station information for addition 1936 * 1937 * should be called with sta_lock held 1938 */ 1939 u8 1940 il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap, 1941 struct ieee80211_sta *sta) 1942 { 1943 struct il_station_entry *station; 1944 int i; 1945 u8 sta_id = IL_INVALID_STATION; 1946 u16 rate; 1947 1948 if (is_ap) 1949 sta_id = IL_AP_ID; 1950 else if (is_broadcast_ether_addr(addr)) 1951 sta_id = il->hw_params.bcast_id; 1952 else 1953 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) { 1954 if (ether_addr_equal(il->stations[i].sta.sta.addr, 1955 addr)) { 1956 sta_id = i; 1957 break; 1958 } 1959 1960 if (!il->stations[i].used && 1961 sta_id == IL_INVALID_STATION) 1962 sta_id = i; 1963 } 1964 1965 /* 1966 * These two conditions have the same outcome, but keep them 1967 * separate 1968 */ 1969 if (unlikely(sta_id == IL_INVALID_STATION)) 1970 return sta_id; 1971 1972 /* 1973 * uCode is not able to deal with multiple requests to add a 1974 * station. Keep track if one is in progress so that we do not send 1975 * another. 1976 */ 1977 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { 1978 D_INFO("STA %d already in process of being added.\n", sta_id); 1979 return sta_id; 1980 } 1981 1982 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && 1983 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) && 1984 ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) { 1985 D_ASSOC("STA %d (%pM) already added, not adding again.\n", 1986 sta_id, addr); 1987 return sta_id; 1988 } 1989 1990 station = &il->stations[sta_id]; 1991 station->used = IL_STA_DRIVER_ACTIVE; 1992 D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr); 1993 il->num_stations++; 1994 1995 /* Set up the C_ADD_STA command to send to device */ 1996 memset(&station->sta, 0, sizeof(struct il_addsta_cmd)); 1997 memcpy(station->sta.sta.addr, addr, ETH_ALEN); 1998 station->sta.mode = 0; 1999 station->sta.sta.sta_id = sta_id; 2000 station->sta.station_flags = 0; 2001 2002 /* 2003 * OK to call unconditionally, since local stations (IBSS BSSID 2004 * STA and broadcast STA) pass in a NULL sta, and mac80211 2005 * doesn't allow HT IBSS. 2006 */ 2007 il_set_ht_add_station(il, sta_id, sta); 2008 2009 /* 3945 only */ 2010 rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP; 2011 /* Turn on both antennas for the station... 
*/ 2012 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); 2013 2014 return sta_id; 2015 2016 } 2017 EXPORT_SYMBOL_GPL(il_prep_station); 2018 2019 #define STA_WAIT_TIMEOUT (HZ/2) 2020 2021 /** 2022 * il_add_station_common - prepare a station and add it to the uCode's station table 2023 */ 2024 int 2025 il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap, 2026 struct ieee80211_sta *sta, u8 *sta_id_r) 2027 { 2028 unsigned long flags_spin; 2029 int ret = 0; 2030 u8 sta_id; 2031 struct il_addsta_cmd sta_cmd; 2032 2033 *sta_id_r = 0; 2034 spin_lock_irqsave(&il->sta_lock, flags_spin); 2035 sta_id = il_prep_station(il, addr, is_ap, sta); 2036 if (sta_id == IL_INVALID_STATION) { 2037 IL_ERR("Unable to prepare station %pM for addition\n", addr); 2038 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2039 return -EINVAL; 2040 } 2041 2042 /* 2043 * uCode is not able to deal with multiple requests to add a 2044 * station. Keep track if one is in progress so that we do not send 2045 * another. 2046 */ 2047 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { 2048 D_INFO("STA %d already in process of being added.\n", sta_id); 2049 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2050 return -EEXIST; 2051 } 2052 2053 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && 2054 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { 2055 D_ASSOC("STA %d (%pM) already added, not adding again.\n", 2056 sta_id, addr); 2057 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2058 return -EEXIST; 2059 } 2060 2061 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS; 2062 memcpy(&sta_cmd, &il->stations[sta_id].sta, 2063 sizeof(struct il_addsta_cmd)); 2064 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2065 2066 /* Add station to device's station table */ 2067 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); 2068 if (ret) { 2069 spin_lock_irqsave(&il->sta_lock, flags_spin); 2070 IL_ERR("Adding station %pM failed.\n", 2071 il->stations[sta_id].sta.sta.addr); 2072 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; 2073 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; 2074 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2075 } 2076 *sta_id_r = sta_id; 2077 return ret; 2078 } 2079 EXPORT_SYMBOL(il_add_station_common); 2080 2081 /** 2082 * il_sta_ucode_deactivate - deactivate ucode status for a station 2083 * 2084 * il->sta_lock must be held 2085 */ 2086 static void 2087 il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id) 2088 { 2089 /* uCode must be active and driver must be inactive */ 2090 if ((il->stations[sta_id].
2091 used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) != 2092 IL_STA_UCODE_ACTIVE) 2093 IL_ERR("removed non active STA %u\n", sta_id); 2094 2095 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE; 2096 2097 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry)); 2098 D_ASSOC("Removed STA %u\n", sta_id); 2099 } 2100 2101 static int 2102 il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id, 2103 bool temporary) 2104 { 2105 struct il_rx_pkt *pkt; 2106 int ret; 2107 2108 unsigned long flags_spin; 2109 struct il_rem_sta_cmd rm_sta_cmd; 2110 2111 struct il_host_cmd cmd = { 2112 .id = C_REM_STA, 2113 .len = sizeof(struct il_rem_sta_cmd), 2114 .flags = CMD_SYNC, 2115 .data = &rm_sta_cmd, 2116 }; 2117 2118 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); 2119 rm_sta_cmd.num_sta = 1; 2120 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); 2121 2122 cmd.flags |= CMD_WANT_SKB; 2123 2124 ret = il_send_cmd(il, &cmd); 2125 2126 if (ret) 2127 return ret; 2128 2129 pkt = (struct il_rx_pkt *)cmd.reply_page; 2130 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { 2131 IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags); 2132 ret = -EIO; 2133 } 2134 2135 if (!ret) { 2136 switch (pkt->u.rem_sta.status) { 2137 case REM_STA_SUCCESS_MSK: 2138 if (!temporary) { 2139 spin_lock_irqsave(&il->sta_lock, flags_spin); 2140 il_sta_ucode_deactivate(il, sta_id); 2141 spin_unlock_irqrestore(&il->sta_lock, 2142 flags_spin); 2143 } 2144 D_ASSOC("C_REM_STA PASSED\n"); 2145 break; 2146 default: 2147 ret = -EIO; 2148 IL_ERR("C_REM_STA failed\n"); 2149 break; 2150 } 2151 } 2152 il_free_pages(il, cmd.reply_page); 2153 2154 return ret; 2155 } 2156 2157 /** 2158 * il_remove_station - Remove driver's knowledge of station. 2159 */ 2160 int 2161 il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr) 2162 { 2163 unsigned long flags; 2164 2165 if (!il_is_ready(il)) { 2166 D_INFO("Unable to remove station %pM, device not ready.\n", 2167 addr); 2168 /* 2169 * It is typical for stations to be removed when we are 2170 * going down. Return success since device will be down 2171 * soon anyway 2172 */ 2173 return 0; 2174 } 2175 2176 D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr); 2177 2178 if (WARN_ON(sta_id == IL_INVALID_STATION)) 2179 return -EINVAL; 2180 2181 spin_lock_irqsave(&il->sta_lock, flags); 2182 2183 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) { 2184 D_INFO("Removing %pM but non DRIVER active\n", addr); 2185 goto out_err; 2186 } 2187 2188 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { 2189 D_INFO("Removing %pM but non UCODE active\n", addr); 2190 goto out_err; 2191 } 2192 2193 if (il->stations[sta_id].used & IL_STA_LOCAL) { 2194 kfree(il->stations[sta_id].lq); 2195 il->stations[sta_id].lq = NULL; 2196 } 2197 2198 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; 2199 2200 il->num_stations--; 2201 2202 BUG_ON(il->num_stations < 0); 2203 2204 spin_unlock_irqrestore(&il->sta_lock, flags); 2205 2206 return il_send_remove_station(il, addr, sta_id, false); 2207 out_err: 2208 spin_unlock_irqrestore(&il->sta_lock, flags); 2209 return -EINVAL; 2210 } 2211 EXPORT_SYMBOL_GPL(il_remove_station); 2212 2213 /** 2214 * il_clear_ucode_stations - clear ucode station table bits 2215 * 2216 * This function clears all the bits in the driver indicating 2217 * which stations are active in the ucode. Call when something 2218 * other than explicit station management would cause this in 2219 * the ucode, e.g. unassociated RXON. 
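 *
 * The counterpart that re-adds stations still marked as driver-active
 * to the uCode afterwards is il_restore_stations() below.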
2220 */ 2221 void 2222 il_clear_ucode_stations(struct il_priv *il) 2223 { 2224 int i; 2225 unsigned long flags_spin; 2226 bool cleared = false; 2227 2228 D_INFO("Clearing ucode stations in driver\n"); 2229 2230 spin_lock_irqsave(&il->sta_lock, flags_spin); 2231 for (i = 0; i < il->hw_params.max_stations; i++) { 2232 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) { 2233 D_INFO("Clearing ucode active for station %d\n", i); 2234 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; 2235 cleared = true; 2236 } 2237 } 2238 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2239 2240 if (!cleared) 2241 D_INFO("No active stations found to be cleared\n"); 2242 } 2243 EXPORT_SYMBOL(il_clear_ucode_stations); 2244 2245 /** 2246 * il_restore_stations() - Restore driver known stations to device 2247 * 2248 * All stations considered active by the driver, but not present in the 2249 * ucode, are restored. 2250 * 2251 * Function sleeps. 2252 */ 2253 void 2254 il_restore_stations(struct il_priv *il) 2255 { 2256 struct il_addsta_cmd sta_cmd; 2257 struct il_link_quality_cmd lq; 2258 unsigned long flags_spin; 2259 int i; 2260 bool found = false; 2261 int ret; 2262 bool send_lq; 2263 2264 if (!il_is_ready(il)) { 2265 D_INFO("Not ready yet, not restoring any stations.\n"); 2266 return; 2267 } 2268 2269 D_ASSOC("Restoring all known stations ... start.\n"); 2270 spin_lock_irqsave(&il->sta_lock, flags_spin); 2271 for (i = 0; i < il->hw_params.max_stations; i++) { 2272 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) && 2273 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) { 2274 D_ASSOC("Restoring sta %pM\n", 2275 il->stations[i].sta.sta.addr); 2276 il->stations[i].sta.mode = 0; 2277 il->stations[i].used |= IL_STA_UCODE_INPROGRESS; 2278 found = true; 2279 } 2280 } 2281 2282 for (i = 0; i < il->hw_params.max_stations; i++) { 2283 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) { 2284 memcpy(&sta_cmd, &il->stations[i].sta, 2285 sizeof(struct il_addsta_cmd)); 2286 send_lq = false; 2287 if (il->stations[i].lq) { 2288 memcpy(&lq, il->stations[i].lq, 2289 sizeof(struct il_link_quality_cmd)); 2290 send_lq = true; 2291 } 2292 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2293 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); 2294 if (ret) { 2295 spin_lock_irqsave(&il->sta_lock, flags_spin); 2296 IL_ERR("Adding station %pM failed.\n", 2297 il->stations[i].sta.sta.addr); 2298 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE; 2299 il->stations[i].used &= 2300 ~IL_STA_UCODE_INPROGRESS; 2301 spin_unlock_irqrestore(&il->sta_lock, 2302 flags_spin); 2303 } 2304 /* 2305 * Rate scaling has already been initialized, send 2306 * current LQ command 2307 */ 2308 if (send_lq) 2309 il_send_lq_cmd(il, &lq, CMD_SYNC, true); 2310 spin_lock_irqsave(&il->sta_lock, flags_spin); 2311 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS; 2312 } 2313 } 2314 2315 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2316 if (!found) 2317 D_INFO("Restoring all known stations" 2318 " .... no stations to be restored.\n"); 2319 else 2320 D_INFO("Restoring all known stations" " ....
complete.\n"); 2321 } 2322 EXPORT_SYMBOL(il_restore_stations); 2323 2324 int 2325 il_get_free_ucode_key_idx(struct il_priv *il) 2326 { 2327 int i; 2328 2329 for (i = 0; i < il->sta_key_max_num; i++) 2330 if (!test_and_set_bit(i, &il->ucode_key_table)) 2331 return i; 2332 2333 return WEP_INVALID_OFFSET; 2334 } 2335 EXPORT_SYMBOL(il_get_free_ucode_key_idx); 2336 2337 void 2338 il_dealloc_bcast_stations(struct il_priv *il) 2339 { 2340 unsigned long flags; 2341 int i; 2342 2343 spin_lock_irqsave(&il->sta_lock, flags); 2344 for (i = 0; i < il->hw_params.max_stations; i++) { 2345 if (!(il->stations[i].used & IL_STA_BCAST)) 2346 continue; 2347 2348 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; 2349 il->num_stations--; 2350 BUG_ON(il->num_stations < 0); 2351 kfree(il->stations[i].lq); 2352 il->stations[i].lq = NULL; 2353 } 2354 spin_unlock_irqrestore(&il->sta_lock, flags); 2355 } 2356 EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations); 2357 2358 #ifdef CONFIG_IWLEGACY_DEBUG 2359 static void 2360 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) 2361 { 2362 int i; 2363 D_RATE("lq station id 0x%x\n", lq->sta_id); 2364 D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk, 2365 lq->general_params.dual_stream_ant_msk); 2366 2367 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 2368 D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags); 2369 } 2370 #else 2371 static inline void 2372 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) 2373 { 2374 } 2375 #endif 2376 2377 /** 2378 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity 2379 * 2380 * It sometimes happens when a HT rate has been in use and we 2381 * loose connectivity with AP then mac80211 will first tell us that the 2382 * current channel is not HT anymore before removing the station. In such a 2383 * scenario the RXON flags will be updated to indicate we are not 2384 * communicating HT anymore, but the LQ command may still contain HT rates. 2385 * Test for this to prevent driver from sending LQ command between the time 2386 * RXON flags are updated and when LQ command is updated. 2387 */ 2388 static bool 2389 il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq) 2390 { 2391 int i; 2392 2393 if (il->ht.enabled) 2394 return true; 2395 2396 D_INFO("Channel %u is not an HT channel\n", il->active.channel); 2397 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 2398 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) { 2399 D_INFO("idx %d of LQ expects HT channel\n", i); 2400 return false; 2401 } 2402 } 2403 return true; 2404 } 2405 2406 /** 2407 * il_send_lq_cmd() - Send link quality command 2408 * @init: This command is sent as part of station initialization right 2409 * after station has been added. 2410 * 2411 * The link quality command is sent as the last step of station creation. 2412 * This is the special case in which init is set and we call a callback in 2413 * this case to clear the state indicating that station creation is in 2414 * progress. 
2415 */ 2416 int 2417 il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq, 2418 u8 flags, bool init) 2419 { 2420 int ret = 0; 2421 unsigned long flags_spin; 2422 2423 struct il_host_cmd cmd = { 2424 .id = C_TX_LINK_QUALITY_CMD, 2425 .len = sizeof(struct il_link_quality_cmd), 2426 .flags = flags, 2427 .data = lq, 2428 }; 2429 2430 if (WARN_ON(lq->sta_id == IL_INVALID_STATION)) 2431 return -EINVAL; 2432 2433 spin_lock_irqsave(&il->sta_lock, flags_spin); 2434 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) { 2435 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2436 return -EINVAL; 2437 } 2438 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2439 2440 il_dump_lq_cmd(il, lq); 2441 BUG_ON(init && (cmd.flags & CMD_ASYNC)); 2442 2443 if (il_is_lq_table_valid(il, lq)) 2444 ret = il_send_cmd(il, &cmd); 2445 else 2446 ret = -EINVAL; 2447 2448 if (cmd.flags & CMD_ASYNC) 2449 return ret; 2450 2451 if (init) { 2452 D_INFO("init LQ command complete," 2453 " clearing sta addition status for sta %d\n", 2454 lq->sta_id); 2455 spin_lock_irqsave(&il->sta_lock, flags_spin); 2456 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS; 2457 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2458 } 2459 return ret; 2460 } 2461 EXPORT_SYMBOL(il_send_lq_cmd); 2462 2463 int 2464 il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2465 struct ieee80211_sta *sta) 2466 { 2467 struct il_priv *il = hw->priv; 2468 struct il_station_priv_common *sta_common = (void *)sta->drv_priv; 2469 int ret; 2470 2471 mutex_lock(&il->mutex); 2472 D_MAC80211("enter station %pM\n", sta->addr); 2473 2474 ret = il_remove_station(il, sta_common->sta_id, sta->addr); 2475 if (ret) 2476 IL_ERR("Error removing station %pM\n", sta->addr); 2477 2478 D_MAC80211("leave ret %d\n", ret); 2479 mutex_unlock(&il->mutex); 2480 2481 return ret; 2482 } 2483 EXPORT_SYMBOL(il_mac_sta_remove); 2484 2485 /************************** RX-FUNCTIONS ****************************/ 2486 /* 2487 * Rx theory of operation 2488 * 2489 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), 2490 * each of which point to Receive Buffers to be filled by the NIC. These get 2491 * used not only for Rx frames, but for any command response or notification 2492 * from the NIC. The driver and NIC manage the Rx buffers by means 2493 * of idxes into the circular buffer. 2494 * 2495 * Rx Queue Indexes 2496 * The host/firmware share two idx registers for managing the Rx buffers. 2497 * 2498 * The READ idx maps to the first position that the firmware may be writing 2499 * to -- the driver can read up to (but not including) this position and get 2500 * good data. 2501 * The READ idx is managed by the firmware once the card is enabled. 2502 * 2503 * The WRITE idx maps to the last position the driver has read from -- the 2504 * position preceding WRITE is the last slot the firmware can place a packet. 2505 * 2506 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 2507 * WRITE = READ. 2508 * 2509 * During initialization, the host sets up the READ queue position to the first 2510 * IDX position, and WRITE to the last (READ - 1 wrapped) 2511 * 2512 * When the firmware places a packet in a buffer, it will advance the READ idx 2513 * and fire the RX interrupt. The driver can then query the READ idx and 2514 * process as many packets as possible, moving the WRITE idx forward as it 2515 * resets the Rx queue buffers with new memory. 
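 *
 * A worked example of these rules (a sketch, assuming RX_QUEUE_SIZE is
 * 256): right after initialization READ = 0 and WRITE = 255, i.e. the
 * queue is empty (WRITE = READ - 1, wrapped). Once the firmware has
 * placed three packets, READ = 3; the driver may process buffers 0..2
 * and advance WRITE as it restocks them with fresh memory, and WRITE
 * catching up to READ would mean the queue is full.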
2516 * 2517 * The management in the driver is as follows: 2518 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When 2519 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 2520 * to replenish the iwl->rxq->rx_free. 2521 * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the 2522 * iwl->rxq is replenished and the READ IDX is updated (updating the 2523 * 'processed' and 'read' driver idxes as well) 2524 * + A received packet is processed and handed to the kernel network stack, 2525 * detached from the iwl->rxq. The driver 'processed' idx is updated. 2526 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free 2527 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ 2528 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there 2529 * were enough free buffers and RX_STALLED is set it is cleared. 2530 * 2531 * 2532 * Driver sequence: 2533 * 2534 * il_rx_queue_alloc() Allocates rx_free 2535 * il_rx_replenish() Replenishes rx_free list from rx_used, and calls 2536 * il_rx_queue_restock 2537 * il_rx_queue_restock() Moves available buffers from rx_free into Rx 2538 * queue, updates firmware pointers, and updates 2539 * the WRITE idx. If insufficient rx_free buffers 2540 * are available, schedules il_rx_replenish 2541 * 2542 * -- enable interrupts -- 2543 * ISR - il_rx() Detach il_rx_bufs from pool up to the 2544 * READ IDX, detaching the SKB from the pool. 2545 * Moves the packet buffer from queue to rx_used. 2546 * Calls il_rx_queue_restock to refill any empty 2547 * slots. 2548 * ... 2549 * 2550 */ 2551 2552 /** 2553 * il_rx_queue_space - Return number of free slots available in queue. 2554 */ 2555 int 2556 il_rx_queue_space(const struct il_rx_queue *q) 2557 { 2558 int s = q->read - q->write; 2559 if (s <= 0) 2560 s += RX_QUEUE_SIZE; 2561 /* keep some buffer to not confuse full and empty queue */ 2562 s -= 2; 2563 if (s < 0) 2564 s = 0; 2565 return s; 2566 } 2567 EXPORT_SYMBOL(il_rx_queue_space); 2568 2569 /** 2570 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue 2571 */ 2572 void 2573 il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q) 2574 { 2575 unsigned long flags; 2576 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg; 2577 u32 reg; 2578 2579 spin_lock_irqsave(&q->lock, flags); 2580 2581 if (q->need_update == 0) 2582 goto exit_unlock; 2583 2584 /* If power-saving is in use, make sure device is awake */ 2585 if (test_bit(S_POWER_PMI, &il->status)) { 2586 reg = _il_rd(il, CSR_UCODE_DRV_GP1); 2587 2588 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 2589 D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n", 2590 reg); 2591 il_set_bit(il, CSR_GP_CNTRL, 2592 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2593 goto exit_unlock; 2594 } 2595 2596 q->write_actual = (q->write & ~0x7); 2597 il_wr(il, rx_wrt_ptr_reg, q->write_actual); 2598 2599 /* Else device is assumed to be awake */ 2600 } else { 2601 /* Device expects a multiple of 8 */ 2602 q->write_actual = (q->write & ~0x7); 2603 il_wr(il, rx_wrt_ptr_reg, q->write_actual); 2604 } 2605 2606 q->need_update = 0; 2607 2608 exit_unlock: 2609 spin_unlock_irqrestore(&q->lock, flags); 2610 } 2611 EXPORT_SYMBOL(il_rx_queue_update_write_ptr); 2612 2613 int 2614 il_rx_queue_alloc(struct il_priv *il) 2615 { 2616 struct il_rx_queue *rxq = &il->rxq; 2617 struct device *dev = &il->pci_dev->dev; 2618 int i; 2619 2620 spin_lock_init(&rxq->lock); 2621 INIT_LIST_HEAD(&rxq->rx_free); 2622 INIT_LIST_HEAD(&rxq->rx_used); 
2623 2624 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ 2625 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, 2626 GFP_KERNEL); 2627 if (!rxq->bd) 2628 goto err_bd; 2629 2630 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status), 2631 &rxq->rb_stts_dma, GFP_KERNEL); 2632 if (!rxq->rb_stts) 2633 goto err_rb; 2634 2635 /* Fill the rx_used queue with _all_ of the Rx buffers */ 2636 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 2637 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 2638 2639 /* Set us so that we have processed and used all buffers, but have 2640 * not restocked the Rx queue with fresh buffers */ 2641 rxq->read = rxq->write = 0; 2642 rxq->write_actual = 0; 2643 rxq->free_count = 0; 2644 rxq->need_update = 0; 2645 return 0; 2646 2647 err_rb: 2648 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, 2649 rxq->bd_dma); 2650 err_bd: 2651 return -ENOMEM; 2652 } 2653 EXPORT_SYMBOL(il_rx_queue_alloc); 2654 2655 void 2656 il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb) 2657 { 2658 struct il_rx_pkt *pkt = rxb_addr(rxb); 2659 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif); 2660 2661 if (!report->state) { 2662 D_11H("Spectrum Measure Notification: Start\n"); 2663 return; 2664 } 2665 2666 memcpy(&il->measure_report, report, sizeof(*report)); 2667 il->measurement_status |= MEASUREMENT_READY; 2668 } 2669 EXPORT_SYMBOL(il_hdl_spectrum_measurement); 2670 2671 /* 2672 * returns non-zero if packet should be dropped 2673 */ 2674 int 2675 il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, 2676 u32 decrypt_res, struct ieee80211_rx_status *stats) 2677 { 2678 u16 fc = le16_to_cpu(hdr->frame_control); 2679 2680 /* 2681 * All contexts have the same setting here due to it being 2682 * a module parameter, so OK to check any context. 2683 */ 2684 if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) 2685 return 0; 2686 2687 if (!(fc & IEEE80211_FCTL_PROTECTED)) 2688 return 0; 2689 2690 D_RX("decrypt_res:0x%x\n", decrypt_res); 2691 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { 2692 case RX_RES_STATUS_SEC_TYPE_TKIP: 2693 /* If the uCode has got a bad phase 1 key, it pushes the packet 2694 * up; decryption will then be done in SW. */ 2695 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2696 RX_RES_STATUS_BAD_KEY_TTAK) 2697 break; 2698 /* fall through */ 2699 case RX_RES_STATUS_SEC_TYPE_WEP: 2700 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2701 RX_RES_STATUS_BAD_ICV_MIC) { 2702 /* bad ICV, the packet is destroyed since the 2703 * decryption is done in-place, drop it */ 2704 D_RX("Packet destroyed\n"); 2705 return -1; 2706 } /* fall through */ 2707 case RX_RES_STATUS_SEC_TYPE_CCMP: 2708 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2709 RX_RES_STATUS_DECRYPT_OK) { 2710 D_RX("hw decrypt successful\n"); 2711 stats->flag |= RX_FLAG_DECRYPTED; 2712 } 2713 break; 2714 2715 default: 2716 break; 2717 } 2718 return 0; 2719 } 2720 EXPORT_SYMBOL(il_set_decrypted_flag); 2721 2722 /** 2723 * il_txq_update_write_ptr - Send new write idx to hardware 2724 */ 2725 void 2726 il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) 2727 { 2728 u32 reg = 0; 2729 int txq_id = txq->q.id; 2730 2731 if (txq->need_update == 0) 2732 return; 2733 2734 /* if we're trying to save power */ 2735 if (test_bit(S_POWER_PMI, &il->status)) { 2736 /* wake up nic if it's powered down ... 2737 * uCode will wake up, and interrupt us again, so next 2738 * time we'll skip this part.
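 * (Note that txq->need_update is deliberately left set on this early
 * return, so the pending write pointer update is retried on the next
 * call.)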
*/ 2739 reg = _il_rd(il, CSR_UCODE_DRV_GP1); 2740 2741 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 2742 D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n", 2743 txq_id, reg); 2744 il_set_bit(il, CSR_GP_CNTRL, 2745 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2746 return; 2747 } 2748 2749 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 2750 2751 /* 2752 * else not in power-save mode, 2753 * uCode will never sleep when we're 2754 * trying to tx (during RFKILL, we're not trying to tx). 2755 */ 2756 } else 2757 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 2758 txq->need_update = 0; 2759 } 2760 EXPORT_SYMBOL(il_txq_update_write_ptr); 2761 2762 /** 2763 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's 2764 */ 2765 void 2766 il_tx_queue_unmap(struct il_priv *il, int txq_id) 2767 { 2768 struct il_tx_queue *txq = &il->txq[txq_id]; 2769 struct il_queue *q = &txq->q; 2770 2771 if (q->n_bd == 0) 2772 return; 2773 2774 while (q->write_ptr != q->read_ptr) { 2775 il->ops->txq_free_tfd(il, txq); 2776 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); 2777 } 2778 } 2779 EXPORT_SYMBOL(il_tx_queue_unmap); 2780 2781 /** 2782 * il_tx_queue_free - Deallocate DMA queue. 2783 * @txq: Transmit queue to deallocate. 2784 * 2785 * Empty queue by removing and destroying all BD's. 2786 * Free all buffers. 2787 * 0-fill, but do not free "txq" descriptor structure. 2788 */ 2789 void 2790 il_tx_queue_free(struct il_priv *il, int txq_id) 2791 { 2792 struct il_tx_queue *txq = &il->txq[txq_id]; 2793 struct device *dev = &il->pci_dev->dev; 2794 int i; 2795 2796 il_tx_queue_unmap(il, txq_id); 2797 2798 /* De-alloc array of command/tx buffers */ 2799 if (txq->cmd) { 2800 for (i = 0; i < TFD_TX_CMD_SLOTS; i++) 2801 kfree(txq->cmd[i]); 2802 } 2803 2804 /* De-alloc circular buffer of TFDs */ 2805 if (txq->q.n_bd) 2806 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, 2807 txq->tfds, txq->q.dma_addr); 2808 2809 /* De-alloc array of per-TFD driver data */ 2810 kfree(txq->skbs); 2811 txq->skbs = NULL; 2812 2813 /* deallocate arrays */ 2814 kfree(txq->cmd); 2815 kfree(txq->meta); 2816 txq->cmd = NULL; 2817 txq->meta = NULL; 2818 2819 /* 0-fill queue descriptor structure */ 2820 memset(txq, 0, sizeof(*txq)); 2821 } 2822 EXPORT_SYMBOL(il_tx_queue_free); 2823 2824 /** 2825 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue 2826 */ 2827 void 2828 il_cmd_queue_unmap(struct il_priv *il) 2829 { 2830 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 2831 struct il_queue *q = &txq->q; 2832 int i; 2833 2834 if (q->n_bd == 0) 2835 return; 2836 2837 while (q->read_ptr != q->write_ptr) { 2838 i = il_get_cmd_idx(q, q->read_ptr, 0); 2839 2840 if (txq->meta[i].flags & CMD_MAPPED) { 2841 pci_unmap_single(il->pci_dev, 2842 dma_unmap_addr(&txq->meta[i], mapping), 2843 dma_unmap_len(&txq->meta[i], len), 2844 PCI_DMA_BIDIRECTIONAL); 2845 txq->meta[i].flags = 0; 2846 } 2847 2848 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); 2849 } 2850 2851 i = q->n_win; 2852 if (txq->meta[i].flags & CMD_MAPPED) { 2853 pci_unmap_single(il->pci_dev, 2854 dma_unmap_addr(&txq->meta[i], mapping), 2855 dma_unmap_len(&txq->meta[i], len), 2856 PCI_DMA_BIDIRECTIONAL); 2857 txq->meta[i].flags = 0; 2858 } 2859 } 2860 EXPORT_SYMBOL(il_cmd_queue_unmap); 2861 2862 /** 2863 * il_cmd_queue_free - Deallocate DMA queue. 2864 * @txq: Transmit queue to deallocate. 2865 * 2866 * Empty queue by removing and destroying all BD's. 2867 * Free all buffers. 
2868 * 0-fill, but do not free "txq" descriptor structure. 2869 */ 2870 void 2871 il_cmd_queue_free(struct il_priv *il) 2872 { 2873 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 2874 struct device *dev = &il->pci_dev->dev; 2875 int i; 2876 2877 il_cmd_queue_unmap(il); 2878 2879 /* De-alloc array of command/tx buffers */ 2880 if (txq->cmd) { 2881 for (i = 0; i <= TFD_CMD_SLOTS; i++) 2882 kfree(txq->cmd[i]); 2883 } 2884 2885 /* De-alloc circular buffer of TFDs */ 2886 if (txq->q.n_bd) 2887 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, 2888 txq->tfds, txq->q.dma_addr); 2889 2890 /* deallocate arrays */ 2891 kfree(txq->cmd); 2892 kfree(txq->meta); 2893 txq->cmd = NULL; 2894 txq->meta = NULL; 2895 2896 /* 0-fill queue descriptor structure */ 2897 memset(txq, 0, sizeof(*txq)); 2898 } 2899 EXPORT_SYMBOL(il_cmd_queue_free); 2900 2901 /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 2902 * DMA services 2903 * 2904 * Theory of operation 2905 * 2906 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer 2907 * of buffer descriptors, each of which points to one or more data buffers for 2908 * the device to read from or fill. Driver and device exchange status of each 2909 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty 2910 * entries in each circular buffer, to protect against confusing empty and full 2911 * queue states. 2912 * 2913 * The device reads or writes the data in the queues via the device's several 2914 * DMA/FIFO channels. Each queue is mapped to a single DMA channel. 2915 * 2916 * For Tx queue, there are low mark and high mark limits. If, after queuing 2917 * the packet for Tx, free space become < low mark, Tx queue stopped. When 2918 * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 2919 * Tx queue resumed. 2920 * 2921 * See more detailed info in 4965.h. 2922 ***************************************************/ 2923 2924 int 2925 il_queue_space(const struct il_queue *q) 2926 { 2927 int s = q->read_ptr - q->write_ptr; 2928 2929 if (q->read_ptr > q->write_ptr) 2930 s -= q->n_bd; 2931 2932 if (s <= 0) 2933 s += q->n_win; 2934 /* keep some reserve to not confuse empty and full situations */ 2935 s -= 2; 2936 if (s < 0) 2937 s = 0; 2938 return s; 2939 } 2940 EXPORT_SYMBOL(il_queue_space); 2941 2942 2943 /** 2944 * il_queue_init - Initialize queue's high/low-water and read/write idxes 2945 */ 2946 static int 2947 il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id) 2948 { 2949 /* 2950 * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 2951 * il_queue_inc_wrap and il_queue_dec_wrap are broken. 2952 */ 2953 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 2954 /* FIXME: remove q->n_bd */ 2955 q->n_bd = TFD_QUEUE_SIZE_MAX; 2956 2957 q->n_win = slots; 2958 q->id = id; 2959 2960 /* slots_must be power-of-two size, otherwise 2961 * il_get_cmd_idx is broken. 
*/ 2962 BUG_ON(!is_power_of_2(slots)); 2963 2964 q->low_mark = q->n_win / 4; 2965 if (q->low_mark < 4) 2966 q->low_mark = 4; 2967 2968 q->high_mark = q->n_win / 8; 2969 if (q->high_mark < 2) 2970 q->high_mark = 2; 2971 2972 q->write_ptr = q->read_ptr = 0; 2973 2974 return 0; 2975 } 2976 2977 /** 2978 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue 2979 */ 2980 static int 2981 il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id) 2982 { 2983 struct device *dev = &il->pci_dev->dev; 2984 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; 2985 2986 /* Driver private data, only for Tx (not command) queues, 2987 * not shared with device. */ 2988 if (id != il->cmd_queue) { 2989 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, 2990 sizeof(struct sk_buff *), 2991 GFP_KERNEL); 2992 if (!txq->skbs) { 2993 IL_ERR("Failed to alloc skbs\n"); 2994 goto error; 2995 } 2996 } else 2997 txq->skbs = NULL; 2998 2999 /* Circular buffer of transmit frame descriptors (TFDs), 3000 * shared with device */ 3001 txq->tfds = 3002 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); 3003 if (!txq->tfds) 3004 goto error; 3005 3006 txq->q.id = id; 3007 3008 return 0; 3009 3010 error: 3011 kfree(txq->skbs); 3012 txq->skbs = NULL; 3013 3014 return -ENOMEM; 3015 } 3016 3017 /** 3018 * il_tx_queue_init - Allocate and initialize one tx/cmd queue 3019 */ 3020 int 3021 il_tx_queue_init(struct il_priv *il, u32 txq_id) 3022 { 3023 int i, len, ret; 3024 int slots, actual_slots; 3025 struct il_tx_queue *txq = &il->txq[txq_id]; 3026 3027 /* 3028 * Alloc buffer array for commands (Tx or other types of commands). 3029 * For the command queue (#4/#9), allocate command space + one big 3030 * command for scan, since the scan command is very large; the system will 3031 * not have two scans at the same time, so only one is needed. 3032 * For normal Tx queues (all other queues), no super-size command 3033 * space is needed. 3034 */ 3035 if (txq_id == il->cmd_queue) { 3036 slots = TFD_CMD_SLOTS; 3037 actual_slots = slots + 1; 3038 } else { 3039 slots = TFD_TX_CMD_SLOTS; 3040 actual_slots = slots; 3041 } 3042 3043 txq->meta = 3044 kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL); 3045 txq->cmd = 3046 kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL); 3047 3048 if (!txq->meta || !txq->cmd) 3049 goto out_free_arrays; 3050 3051 len = sizeof(struct il_device_cmd); 3052 for (i = 0; i < actual_slots; i++) { 3053 /* only happens for cmd queue */ 3054 if (i == slots) 3055 len = IL_MAX_CMD_SIZE; 3056 3057 txq->cmd[i] = kmalloc(len, GFP_KERNEL); 3058 if (!txq->cmd[i]) 3059 goto err; 3060 } 3061 3062 /* Alloc driver data array and TFD circular buffer */ 3063 ret = il_tx_queue_alloc(il, txq, txq_id); 3064 if (ret) 3065 goto err; 3066 3067 txq->need_update = 0; 3068 3069 /* 3070 * For the default queues 0-3, set up the swq_id 3071 * already -- all others need to get one later 3072 * (if they need one at all).
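 * The four default queues are taken here to correspond one-to-one to the
 * four mac80211 TX queues (one per AC) -- hence il_set_swq_id() below is
 * passed txq_id for both the software and the hardware queue number.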
3073 */ 3074 if (txq_id < 4) 3075 il_set_swq_id(txq, txq_id, txq_id); 3076 3077 /* Initialize queue's high/low-water marks, and head/tail idxes */ 3078 il_queue_init(il, &txq->q, slots, txq_id); 3079 3080 /* Tell device where to find queue */ 3081 il->ops->txq_init(il, txq); 3082 3083 return 0; 3084 err: 3085 for (i = 0; i < actual_slots; i++) 3086 kfree(txq->cmd[i]); 3087 out_free_arrays: 3088 kfree(txq->meta); 3089 txq->meta = NULL; 3090 kfree(txq->cmd); 3091 txq->cmd = NULL; 3092 3093 return -ENOMEM; 3094 } 3095 EXPORT_SYMBOL(il_tx_queue_init); 3096 3097 void 3098 il_tx_queue_reset(struct il_priv *il, u32 txq_id) 3099 { 3100 int slots, actual_slots; 3101 struct il_tx_queue *txq = &il->txq[txq_id]; 3102 3103 if (txq_id == il->cmd_queue) { 3104 slots = TFD_CMD_SLOTS; 3105 actual_slots = TFD_CMD_SLOTS + 1; 3106 } else { 3107 slots = TFD_TX_CMD_SLOTS; 3108 actual_slots = TFD_TX_CMD_SLOTS; 3109 } 3110 3111 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots); 3112 txq->need_update = 0; 3113 3114 /* Initialize queue's high/low-water marks, and head/tail idxes */ 3115 il_queue_init(il, &txq->q, slots, txq_id); 3116 3117 /* Tell device where to find queue */ 3118 il->ops->txq_init(il, txq); 3119 } 3120 EXPORT_SYMBOL(il_tx_queue_reset); 3121 3122 /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 3123 3124 /** 3125 * il_enqueue_hcmd - enqueue a uCode command 3126 * @il: device ilate data point 3127 * @cmd: a point to the ucode command structure 3128 * 3129 * The function returns < 0 values to indicate the operation is 3130 * failed. On success, it turns the idx (> 0) of command in the 3131 * command queue. 3132 */ 3133 int 3134 il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) 3135 { 3136 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 3137 struct il_queue *q = &txq->q; 3138 struct il_device_cmd *out_cmd; 3139 struct il_cmd_meta *out_meta; 3140 dma_addr_t phys_addr; 3141 unsigned long flags; 3142 int len; 3143 u32 idx; 3144 u16 fix_size; 3145 3146 cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len); 3147 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr)); 3148 3149 /* If any of the command structures end up being larger than 3150 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then 3151 * we will need to increase the size of the TFD entries 3152 * Also, check to see if command buffer should not exceed the size 3153 * of device_cmd and max_cmd_size. */ 3154 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 3155 !(cmd->flags & CMD_SIZE_HUGE)); 3156 BUG_ON(fix_size > IL_MAX_CMD_SIZE); 3157 3158 if (il_is_rfkill(il) || il_is_ctkill(il)) { 3159 IL_WARN("Not sending command - %s KILL\n", 3160 il_is_rfkill(il) ? "RF" : "CT"); 3161 return -EIO; 3162 } 3163 3164 spin_lock_irqsave(&il->hcmd_lock, flags); 3165 3166 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { 3167 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3168 3169 IL_ERR("Restarting adapter due to command queue full\n"); 3170 queue_work(il->workqueue, &il->restart); 3171 return -ENOSPC; 3172 } 3173 3174 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); 3175 out_cmd = txq->cmd[idx]; 3176 out_meta = &txq->meta[idx]; 3177 3178 if (WARN_ON(out_meta->flags & CMD_MAPPED)) { 3179 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3180 return -ENOSPC; 3181 } 3182 3183 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to 0 */ 3184 out_meta->flags = cmd->flags | CMD_MAPPED; 3185 if (cmd->flags & CMD_WANT_SKB) 3186 out_meta->source = cmd; 3187 if (cmd->flags & CMD_ASYNC) 3188 out_meta->callback = cmd->callback; 3189 3190 out_cmd->hdr.cmd = cmd->id; 3191 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); 3192 3193 /* At this point, out_cmd now has all of the incoming cmd 3194 * information */ 3195 3196 out_cmd->hdr.flags = 0; 3197 out_cmd->hdr.sequence = 3198 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr)); 3199 if (cmd->flags & CMD_SIZE_HUGE) 3200 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; 3201 len = sizeof(struct il_device_cmd); 3202 if (idx == TFD_CMD_SLOTS) 3203 len = IL_MAX_CMD_SIZE; 3204 3205 #ifdef CONFIG_IWLEGACY_DEBUG 3206 switch (out_cmd->hdr.cmd) { 3207 case C_TX_LINK_QUALITY_CMD: 3208 case C_SENSITIVITY: 3209 D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, " 3210 "%d bytes at %d[%d]:%d\n", 3211 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, 3212 le16_to_cpu(out_cmd->hdr.sequence), fix_size, 3213 q->write_ptr, idx, il->cmd_queue); 3214 break; 3215 default: 3216 D_HC("Sending command %s (#%x), seq: 0x%04X, " 3217 "%d bytes at %d[%d]:%d\n", 3218 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, 3219 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr, 3220 idx, il->cmd_queue); 3221 } 3222 #endif 3223 3224 phys_addr = 3225 pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size, 3226 PCI_DMA_BIDIRECTIONAL); 3227 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) { 3228 idx = -ENOMEM; 3229 goto out; 3230 } 3231 dma_unmap_addr_set(out_meta, mapping, phys_addr); 3232 dma_unmap_len_set(out_meta, len, fix_size); 3233 3234 txq->need_update = 1; 3235 3236 if (il->ops->txq_update_byte_cnt_tbl) 3237 /* Set up entry in queue's byte count circular buffer */ 3238 il->ops->txq_update_byte_cnt_tbl(il, txq, 0); 3239 3240 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1, 3241 U32_PAD(cmd->len)); 3242 3243 /* Increment and update queue's write idx */ 3244 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); 3245 il_txq_update_write_ptr(il, txq); 3246 3247 out: 3248 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3249 return idx; 3250 } 3251 3252 /** 3253 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd 3254 * 3255 * When FW advances 'R' idx, all entries between old and new 'R' idx 3256 * need to be reclaimed. As a result, some free space forms. If there is 3257 * enough free space (> low mark), wake the stack that feeds us.
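 *
 * In the normal case exactly one entry -- the command that just
 * completed -- is reclaimed; reclaiming more than one means an entry was
 * skipped, which is reported and triggers an adapter restart (see the
 * nfreed check below).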
3258 */ 3259 static void 3260 il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx) 3261 { 3262 struct il_tx_queue *txq = &il->txq[txq_id]; 3263 struct il_queue *q = &txq->q; 3264 int nfreed = 0; 3265 3266 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { 3267 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, " 3268 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd, 3269 q->write_ptr, q->read_ptr); 3270 return; 3271 } 3272 3273 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; 3274 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { 3275 3276 if (nfreed++ > 0) { 3277 IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx, 3278 q->write_ptr, q->read_ptr); 3279 queue_work(il->workqueue, &il->restart); 3280 } 3281 3282 } 3283 } 3284 3285 /** 3286 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them 3287 * @rxb: Rx buffer to reclaim 3288 * 3289 * If an Rx buffer has an async callback associated with it the callback 3290 * will be executed. The attached skb (if present) will only be freed 3291 * if the callback returns 1 3292 */ 3293 void 3294 il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb) 3295 { 3296 struct il_rx_pkt *pkt = rxb_addr(rxb); 3297 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 3298 int txq_id = SEQ_TO_QUEUE(sequence); 3299 int idx = SEQ_TO_IDX(sequence); 3300 int cmd_idx; 3301 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); 3302 struct il_device_cmd *cmd; 3303 struct il_cmd_meta *meta; 3304 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 3305 unsigned long flags; 3306 3307 /* If a Tx command is being handled and it isn't in the actual 3308 * command queue then there a command routing bug has been introduced 3309 * in the queue management code. */ 3310 if (WARN 3311 (txq_id != il->cmd_queue, 3312 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 3313 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr, 3314 il->txq[il->cmd_queue].q.write_ptr)) { 3315 il_print_hex_error(il, pkt, 32); 3316 return; 3317 } 3318 3319 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge); 3320 cmd = txq->cmd[cmd_idx]; 3321 meta = &txq->meta[cmd_idx]; 3322 3323 txq->time_stamp = jiffies; 3324 3325 pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping), 3326 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL); 3327 3328 /* Input error checking is done when commands are added to queue. */ 3329 if (meta->flags & CMD_WANT_SKB) { 3330 meta->source->reply_page = (unsigned long)rxb_addr(rxb); 3331 rxb->page = NULL; 3332 } else if (meta->callback) 3333 meta->callback(il, cmd, pkt); 3334 3335 spin_lock_irqsave(&il->hcmd_lock, flags); 3336 3337 il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx); 3338 3339 if (!(meta->flags & CMD_ASYNC)) { 3340 clear_bit(S_HCMD_ACTIVE, &il->status); 3341 D_INFO("Clearing HCMD_ACTIVE for command %s\n", 3342 il_get_cmd_string(cmd->hdr.cmd)); 3343 wake_up(&il->wait_command_queue); 3344 } 3345 3346 /* Mark as unmapped */ 3347 meta->flags = 0; 3348 3349 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3350 } 3351 EXPORT_SYMBOL(il_tx_cmd_complete); 3352 3353 MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965"); 3354 MODULE_VERSION(IWLWIFI_VERSION); 3355 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 3356 MODULE_LICENSE("GPL"); 3357 3358 /* 3359 * set bt_coex_active to true, uCode will do kill/defer 3360 * every time the priority line is asserted (BT is sending signals on the 3361 * priority line in the PCIx). 
3362 * set bt_coex_active to false, uCode will ignore the BT activity and 3363 * perform the normal operation 3364 * 3365 * User might experience transmit issue on some platform due to WiFi/BT 3366 * co-exist problem. The possible behaviors are: 3367 * Able to scan and finding all the available AP 3368 * Not able to associate with any AP 3369 * On those platforms, WiFi communication can be restored by set 3370 * "bt_coex_active" module parameter to "false" 3371 * 3372 * default: bt_coex_active = true (BT_COEX_ENABLE) 3373 */ 3374 static bool bt_coex_active = true; 3375 module_param(bt_coex_active, bool, S_IRUGO); 3376 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); 3377 3378 u32 il_debug_level; 3379 EXPORT_SYMBOL(il_debug_level); 3380 3381 const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3382 EXPORT_SYMBOL(il_bcast_addr); 3383 3384 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 3385 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 3386 static void 3387 il_init_ht_hw_capab(const struct il_priv *il, 3388 struct ieee80211_sta_ht_cap *ht_info, 3389 enum nl80211_band band) 3390 { 3391 u16 max_bit_rate = 0; 3392 u8 rx_chains_num = il->hw_params.rx_chains_num; 3393 u8 tx_chains_num = il->hw_params.tx_chains_num; 3394 3395 ht_info->cap = 0; 3396 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 3397 3398 ht_info->ht_supported = true; 3399 3400 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 3401 max_bit_rate = MAX_BIT_RATE_20_MHZ; 3402 if (il->hw_params.ht40_channel & BIT(band)) { 3403 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 3404 ht_info->cap |= IEEE80211_HT_CAP_SGI_40; 3405 ht_info->mcs.rx_mask[4] = 0x01; 3406 max_bit_rate = MAX_BIT_RATE_40_MHZ; 3407 } 3408 3409 if (il->cfg->mod_params->amsdu_size_8K) 3410 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; 3411 3412 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; 3413 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; 3414 3415 ht_info->mcs.rx_mask[0] = 0xFF; 3416 if (rx_chains_num >= 2) 3417 ht_info->mcs.rx_mask[1] = 0xFF; 3418 if (rx_chains_num >= 3) 3419 ht_info->mcs.rx_mask[2] = 0xFF; 3420 3421 /* Highest supported Rx data rate */ 3422 max_bit_rate *= rx_chains_num; 3423 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); 3424 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); 3425 3426 /* Tx MCS capabilities */ 3427 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 3428 if (tx_chains_num != rx_chains_num) { 3429 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 3430 ht_info->mcs.tx_params |= 3431 ((tx_chains_num - 3432 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); 3433 } 3434 } 3435 3436 /** 3437 * il_init_geos - Initialize mac80211's geo/channel info based from eeprom 3438 */ 3439 int 3440 il_init_geos(struct il_priv *il) 3441 { 3442 struct il_channel_info *ch; 3443 struct ieee80211_supported_band *sband; 3444 struct ieee80211_channel *channels; 3445 struct ieee80211_channel *geo_ch; 3446 struct ieee80211_rate *rates; 3447 int i = 0; 3448 s8 max_tx_power = 0; 3449 3450 if (il->bands[NL80211_BAND_2GHZ].n_bitrates || 3451 il->bands[NL80211_BAND_5GHZ].n_bitrates) { 3452 D_INFO("Geography modes already initialized.\n"); 3453 set_bit(S_GEO_CONFIGURED, &il->status); 3454 return 0; 3455 } 3456 3457 channels = 3458 kzalloc(sizeof(struct ieee80211_channel) * il->channel_count, 3459 GFP_KERNEL); 3460 if (!channels) 3461 return -ENOMEM; 3462 3463 rates = 3464 kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY), 3465 GFP_KERNEL); 3466 if (!rates) { 3467 kfree(channels); 3468 return -ENOMEM; 3469 
} 3470 3471 /* 5.2GHz channels start after the 2.4GHz channels */ 3472 sband = &il->bands[NL80211_BAND_5GHZ]; 3473 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)]; 3474 /* just OFDM */ 3475 sband->bitrates = &rates[IL_FIRST_OFDM_RATE]; 3476 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE; 3477 3478 if (il->cfg->sku & IL_SKU_N) 3479 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ); 3480 3481 sband = &il->bands[NL80211_BAND_2GHZ]; 3482 sband->channels = channels; 3483 /* OFDM & CCK */ 3484 sband->bitrates = rates; 3485 sband->n_bitrates = RATE_COUNT_LEGACY; 3486 3487 if (il->cfg->sku & IL_SKU_N) 3488 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ); 3489 3490 il->ieee_channels = channels; 3491 il->ieee_rates = rates; 3492 3493 for (i = 0; i < il->channel_count; i++) { 3494 ch = &il->channel_info[i]; 3495 3496 if (!il_is_channel_valid(ch)) 3497 continue; 3498 3499 sband = &il->bands[ch->band]; 3500 3501 geo_ch = &sband->channels[sband->n_channels++]; 3502 3503 geo_ch->center_freq = 3504 ieee80211_channel_to_frequency(ch->channel, ch->band); 3505 geo_ch->max_power = ch->max_power_avg; 3506 geo_ch->max_antenna_gain = 0xff; 3507 geo_ch->hw_value = ch->channel; 3508 3509 if (il_is_channel_valid(ch)) { 3510 if (!(ch->flags & EEPROM_CHANNEL_IBSS)) 3511 geo_ch->flags |= IEEE80211_CHAN_NO_IR; 3512 3513 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) 3514 geo_ch->flags |= IEEE80211_CHAN_NO_IR; 3515 3516 if (ch->flags & EEPROM_CHANNEL_RADAR) 3517 geo_ch->flags |= IEEE80211_CHAN_RADAR; 3518 3519 geo_ch->flags |= ch->ht40_extension_channel; 3520 3521 if (ch->max_power_avg > max_tx_power) 3522 max_tx_power = ch->max_power_avg; 3523 } else { 3524 geo_ch->flags |= IEEE80211_CHAN_DISABLED; 3525 } 3526 3527 D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel, 3528 geo_ch->center_freq, 3529 il_is_channel_a_band(ch) ? "5.2" : "2.4", 3530 geo_ch-> 3531 flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid", 3532 geo_ch->flags); 3533 } 3534 3535 il->tx_power_device_lmt = max_tx_power; 3536 il->tx_power_user_lmt = max_tx_power; 3537 il->tx_power_next = max_tx_power; 3538 3539 if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 && 3540 (il->cfg->sku & IL_SKU_A)) { 3541 IL_INFO("Incorrectly detected BG card as ABG. 
" 3542 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", 3543 il->pci_dev->device, il->pci_dev->subsystem_device); 3544 il->cfg->sku &= ~IL_SKU_A; 3545 } 3546 3547 IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n", 3548 il->bands[NL80211_BAND_2GHZ].n_channels, 3549 il->bands[NL80211_BAND_5GHZ].n_channels); 3550 3551 set_bit(S_GEO_CONFIGURED, &il->status); 3552 3553 return 0; 3554 } 3555 EXPORT_SYMBOL(il_init_geos); 3556 3557 /* 3558 * il_free_geos - undo allocations in il_init_geos 3559 */ 3560 void 3561 il_free_geos(struct il_priv *il) 3562 { 3563 kfree(il->ieee_channels); 3564 kfree(il->ieee_rates); 3565 clear_bit(S_GEO_CONFIGURED, &il->status); 3566 } 3567 EXPORT_SYMBOL(il_free_geos); 3568 3569 static bool 3570 il_is_channel_extension(struct il_priv *il, enum nl80211_band band, 3571 u16 channel, u8 extension_chan_offset) 3572 { 3573 const struct il_channel_info *ch_info; 3574 3575 ch_info = il_get_channel_info(il, band, channel); 3576 if (!il_is_channel_valid(ch_info)) 3577 return false; 3578 3579 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) 3580 return !(ch_info-> 3581 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS); 3582 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) 3583 return !(ch_info-> 3584 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS); 3585 3586 return false; 3587 } 3588 3589 bool 3590 il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap) 3591 { 3592 if (!il->ht.enabled || !il->ht.is_40mhz) 3593 return false; 3594 3595 /* 3596 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 3597 * the bit will not set if it is pure 40MHz case 3598 */ 3599 if (ht_cap && !ht_cap->ht_supported) 3600 return false; 3601 3602 #ifdef CONFIG_IWLEGACY_DEBUGFS 3603 if (il->disable_ht40) 3604 return false; 3605 #endif 3606 3607 return il_is_channel_extension(il, il->band, 3608 le16_to_cpu(il->staging.channel), 3609 il->ht.extension_chan_offset); 3610 } 3611 EXPORT_SYMBOL(il_is_ht40_tx_allowed); 3612 3613 static u16 noinline 3614 il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) 3615 { 3616 u16 new_val; 3617 u16 beacon_factor; 3618 3619 /* 3620 * If mac80211 hasn't given us a beacon interval, program 3621 * the default into the device. 3622 */ 3623 if (!beacon_val) 3624 return DEFAULT_BEACON_INTERVAL; 3625 3626 /* 3627 * If the beacon interval we obtained from the peer 3628 * is too large, we'll have to wake up more often 3629 * (and in IBSS case, we'll beacon too much) 3630 * 3631 * For example, if max_beacon_val is 4096, and the 3632 * requested beacon interval is 7000, we'll have to 3633 * use 3500 to be able to wake up on the beacons. 3634 * 3635 * This could badly influence beacon detection stats. 3636 */ 3637 3638 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; 3639 new_val = beacon_val / beacon_factor; 3640 3641 if (!new_val) 3642 new_val = max_beacon_val; 3643 3644 return new_val; 3645 } 3646 3647 int 3648 il_send_rxon_timing(struct il_priv *il) 3649 { 3650 u64 tsf; 3651 s32 interval_tm, rem; 3652 struct ieee80211_conf *conf = NULL; 3653 u16 beacon_int; 3654 struct ieee80211_vif *vif = il->vif; 3655 3656 conf = &il->hw->conf; 3657 3658 lockdep_assert_held(&il->mutex); 3659 3660 memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd)); 3661 3662 il->timing.timestamp = cpu_to_le64(il->timestamp); 3663 il->timing.listen_interval = cpu_to_le16(conf->listen_interval); 3664 3665 beacon_int = vif ? 
vif->bss_conf.beacon_int : 0; 3666 3667 /* 3668 * TODO: For IBSS we need to get atim_win from mac80211, 3669 * for now just always use 0 3670 */ 3671 il->timing.atim_win = 0; 3672 3673 beacon_int = 3674 il_adjust_beacon_interval(beacon_int, 3675 il->hw_params.max_beacon_itrvl * 3676 TIME_UNIT); 3677 il->timing.beacon_interval = cpu_to_le16(beacon_int); 3678 3679 tsf = il->timestamp; /* tsf is modified by do_div: copy it */ 3680 interval_tm = beacon_int * TIME_UNIT; 3681 rem = do_div(tsf, interval_tm); 3682 il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); 3683 3684 il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1; 3685 3686 D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n", 3687 le16_to_cpu(il->timing.beacon_interval), 3688 le32_to_cpu(il->timing.beacon_init_val), 3689 le16_to_cpu(il->timing.atim_win)); 3690 3691 return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing), 3692 &il->timing); 3693 } 3694 EXPORT_SYMBOL(il_send_rxon_timing); 3695 3696 void 3697 il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt) 3698 { 3699 struct il_rxon_cmd *rxon = &il->staging; 3700 3701 if (hw_decrypt) 3702 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; 3703 else 3704 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; 3705 3706 } 3707 EXPORT_SYMBOL(il_set_rxon_hwcrypto); 3708 3709 /* check that the RXON structure is valid */ 3710 int 3711 il_check_rxon_cmd(struct il_priv *il) 3712 { 3713 struct il_rxon_cmd *rxon = &il->staging; 3714 bool error = false; 3715 3716 if (rxon->flags & RXON_FLG_BAND_24G_MSK) { 3717 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { 3718 IL_WARN("check 2.4G: wrong narrow\n"); 3719 error = true; 3720 } 3721 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { 3722 IL_WARN("check 2.4G: wrong radar\n"); 3723 error = true; 3724 } 3725 } else { 3726 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { 3727 IL_WARN("check 5.2G: not short slot!\n"); 3728 error = true; 3729 } 3730 if (rxon->flags & RXON_FLG_CCK_MSK) { 3731 IL_WARN("check 5.2G: CCK!\n"); 3732 error = true; 3733 } 3734 } 3735 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { 3736 IL_WARN("mac/bssid mcast!\n"); 3737 error = true; 3738 } 3739 3740 /* make sure basic rates 6Mbps and 1Mbps are supported */ 3741 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 && 3742 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) { 3743 IL_WARN("neither 1 nor 6 are basic\n"); 3744 error = true; 3745 } 3746 3747 if (le16_to_cpu(rxon->assoc_id) > 2007) { 3748 IL_WARN("aid > 2007\n"); 3749 error = true; 3750 } 3751 3752 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) == 3753 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { 3754 IL_WARN("CCK and short slot\n"); 3755 error = true; 3756 } 3757 3758 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) == 3759 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { 3760 IL_WARN("CCK and auto detect\n"); 3761 error = true; 3762 } 3763 3764 if ((rxon-> 3765 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) == 3766 RXON_FLG_TGG_PROTECT_MSK) { 3767 IL_WARN("TGg but no auto-detect\n"); 3768 error = true; 3769 } 3770 3771 if (error) 3772 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel)); 3773 3774 if (error) { 3775 IL_ERR("Invalid RXON\n"); 3776 return -EINVAL; 3777 } 3778 return 0; 3779 } 3780 EXPORT_SYMBOL(il_check_rxon_cmd); 3781 3782 /** 3783 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed 3784 * @il: staging_rxon is compared to active_rxon 3785 * 3786 * If the RXON structure is changing enough
to require a new tune, 3787 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that 3788 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 3789 */ 3790 int 3791 il_full_rxon_required(struct il_priv *il) 3792 { 3793 const struct il_rxon_cmd *staging = &il->staging; 3794 const struct il_rxon_cmd *active = &il->active; 3795 3796 #define CHK(cond) \ 3797 if ((cond)) { \ 3798 D_INFO("need full RXON - " #cond "\n"); \ 3799 return 1; \ 3800 } 3801 3802 #define CHK_NEQ(c1, c2) \ 3803 if ((c1) != (c2)) { \ 3804 D_INFO("need full RXON - " \ 3805 #c1 " != " #c2 " - %d != %d\n", \ 3806 (c1), (c2)); \ 3807 return 1; \ 3808 } 3809 3810 /* These items are only settable from the full RXON command */ 3811 CHK(!il_is_associated(il)); 3812 CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr)); 3813 CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr)); 3814 CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr, 3815 active->wlap_bssid_addr)); 3816 CHK_NEQ(staging->dev_type, active->dev_type); 3817 CHK_NEQ(staging->channel, active->channel); 3818 CHK_NEQ(staging->air_propagation, active->air_propagation); 3819 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, 3820 active->ofdm_ht_single_stream_basic_rates); 3821 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, 3822 active->ofdm_ht_dual_stream_basic_rates); 3823 CHK_NEQ(staging->assoc_id, active->assoc_id); 3824 3825 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can 3826 * be updated with the RXON_ASSOC command -- however only some 3827 * flag transitions are allowed using RXON_ASSOC */ 3828 3829 /* Check if we are not switching bands */ 3830 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, 3831 active->flags & RXON_FLG_BAND_24G_MSK); 3832 3833 /* Check if we are switching association toggle */ 3834 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, 3835 active->filter_flags & RXON_FILTER_ASSOC_MSK); 3836 3837 #undef CHK 3838 #undef CHK_NEQ 3839 3840 return 0; 3841 } 3842 EXPORT_SYMBOL(il_full_rxon_required); 3843 3844 u8 3845 il_get_lowest_plcp(struct il_priv *il) 3846 { 3847 /* 3848 * Assign the lowest rate -- should really get this from 3849 * the beacon skb from mac80211. 
3850 */ 3851 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) 3852 return RATE_1M_PLCP; 3853 else 3854 return RATE_6M_PLCP; 3855 } 3856 EXPORT_SYMBOL(il_get_lowest_plcp); 3857 3858 static void 3859 _il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) 3860 { 3861 struct il_rxon_cmd *rxon = &il->staging; 3862 3863 if (!il->ht.enabled) { 3864 rxon->flags &= 3865 ~(RXON_FLG_CHANNEL_MODE_MSK | 3866 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK 3867 | RXON_FLG_HT_PROT_MSK); 3868 return; 3869 } 3870 3871 rxon->flags |= 3872 cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS); 3873 3874 /* Set up channel bandwidth: 3875 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ 3876 /* clear the HT channel mode before setting the mode */ 3877 rxon->flags &= 3878 ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 3879 if (il_is_ht40_tx_allowed(il, NULL)) { 3880 /* pure ht40 */ 3881 if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { 3882 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; 3883 /* Note: control channel is opposite of extension channel */ 3884 switch (il->ht.extension_chan_offset) { 3885 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 3886 rxon->flags &= 3887 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3888 break; 3889 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 3890 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3891 break; 3892 } 3893 } else { 3894 /* Note: control channel is opposite of extension channel */ 3895 switch (il->ht.extension_chan_offset) { 3896 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 3897 rxon->flags &= 3898 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 3899 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 3900 break; 3901 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 3902 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3903 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 3904 break; 3905 case IEEE80211_HT_PARAM_CHA_SEC_NONE: 3906 default: 3907 /* channel location only valid if in Mixed mode */ 3908 IL_ERR("invalid extension channel offset\n"); 3909 break; 3910 } 3911 } 3912 } else { 3913 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; 3914 } 3915 3916 if (il->ops->set_rxon_chain) 3917 il->ops->set_rxon_chain(il); 3918 3919 D_ASSOC("rxon flags 0x%X operation mode: 0x%X " 3920 "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags), 3921 il->ht.protection, il->ht.extension_chan_offset); 3922 } 3923 3924 void 3925 il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) 3926 { 3927 _il_set_rxon_ht(il, ht_conf); 3928 } 3929 EXPORT_SYMBOL(il_set_rxon_ht); 3930 3931 /* Return valid, unused, channel for a passive scan to reset the RF */ 3932 u8 3933 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band) 3934 { 3935 const struct il_channel_info *ch_info; 3936 int i; 3937 u8 channel = 0; 3938 u8 min, max; 3939 3940 if (band == NL80211_BAND_5GHZ) { 3941 min = 14; 3942 max = il->channel_count; 3943 } else { 3944 min = 0; 3945 max = 14; 3946 } 3947 3948 for (i = min; i < max; i++) { 3949 channel = il->channel_info[i].channel; 3950 if (channel == le16_to_cpu(il->staging.channel)) 3951 continue; 3952 3953 ch_info = il_get_channel_info(il, band, channel); 3954 if (il_is_channel_valid(ch_info)) 3955 break; 3956 } 3957 3958 return channel; 3959 } 3960 EXPORT_SYMBOL(il_get_single_channel_number); 3961 3962 /** 3963 * il_set_rxon_channel - Set the band and channel values in staging RXON 3964 * @ch: requested channel as a pointer to struct ieee80211_channel 3965 * 3966 * NOTE: Does not commit to the hardware; it sets appropriate bit fields 3967 * in the staging
RXON flag structure based on the ch->band 3968 */ 3969 int 3970 il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch) 3971 { 3972 enum nl80211_band band = ch->band; 3973 u16 channel = ch->hw_value; 3974 3975 if (le16_to_cpu(il->staging.channel) == channel && il->band == band) 3976 return 0; 3977 3978 il->staging.channel = cpu_to_le16(channel); 3979 if (band == NL80211_BAND_5GHZ) 3980 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK; 3981 else 3982 il->staging.flags |= RXON_FLG_BAND_24G_MSK; 3983 3984 il->band = band; 3985 3986 D_INFO("Staging channel set to %d [%d]\n", channel, band); 3987 3988 return 0; 3989 } 3990 EXPORT_SYMBOL(il_set_rxon_channel); 3991 3992 void 3993 il_set_flags_for_band(struct il_priv *il, enum nl80211_band band, 3994 struct ieee80211_vif *vif) 3995 { 3996 if (band == NL80211_BAND_5GHZ) { 3997 il->staging.flags &= 3998 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | 3999 RXON_FLG_CCK_MSK); 4000 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 4001 } else { 4002 /* Copied from il_post_associate() */ 4003 if (vif && vif->bss_conf.use_short_slot) 4004 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 4005 else 4006 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 4007 4008 il->staging.flags |= RXON_FLG_BAND_24G_MSK; 4009 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; 4010 il->staging.flags &= ~RXON_FLG_CCK_MSK; 4011 } 4012 } 4013 EXPORT_SYMBOL(il_set_flags_for_band); 4014 4015 /* 4016 * initialize rxon structure with default values from eeprom 4017 */ 4018 void 4019 il_connection_init_rx_config(struct il_priv *il) 4020 { 4021 const struct il_channel_info *ch_info; 4022 4023 memset(&il->staging, 0, sizeof(il->staging)); 4024 4025 switch (il->iw_mode) { 4026 case NL80211_IFTYPE_UNSPECIFIED: 4027 il->staging.dev_type = RXON_DEV_TYPE_ESS; 4028 break; 4029 case NL80211_IFTYPE_STATION: 4030 il->staging.dev_type = RXON_DEV_TYPE_ESS; 4031 il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; 4032 break; 4033 case NL80211_IFTYPE_ADHOC: 4034 il->staging.dev_type = RXON_DEV_TYPE_IBSS; 4035 il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; 4036 il->staging.filter_flags = 4037 RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 4038 break; 4039 default: 4040 IL_ERR("Unsupported interface type %d\n", il->vif->type); 4041 return; 4042 } 4043 4044 #if 0 4045 /* TODO: Figure out when short_preamble would be set and cache from 4046 * that */ 4047 if (!hw_to_local(il->hw)->short_preamble) 4048 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 4049 else 4050 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 4051 #endif 4052 4053 ch_info = 4054 il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel)); 4055 4056 if (!ch_info) 4057 ch_info = &il->channel_info[0]; 4058 4059 il->staging.channel = cpu_to_le16(ch_info->channel); 4060 il->band = ch_info->band; 4061 4062 il_set_flags_for_band(il, il->band, il->vif); 4063 4064 il->staging.ofdm_basic_rates = 4065 (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; 4066 il->staging.cck_basic_rates = 4067 (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF; 4068 4069 /* clear both MIX and PURE40 mode flag */ 4070 il->staging.flags &= 4071 ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40); 4072 if (il->vif) 4073 memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN); 4074 4075 il->staging.ofdm_ht_single_stream_basic_rates = 0xff; 4076 il->staging.ofdm_ht_dual_stream_basic_rates = 0xff; 4077 } 4078 EXPORT_SYMBOL(il_connection_init_rx_config); 4079 4080 void 4081 il_set_rate(struct il_priv *il) 4082 { 4083 const struct 
ieee80211_supported_band *hw = NULL; 4084 struct ieee80211_rate *rate; 4085 int i; 4086 4087 hw = il_get_hw_mode(il, il->band); 4088 if (!hw) { 4089 IL_ERR("Failed to set rate: unable to get hw mode\n"); 4090 return; 4091 } 4092 4093 il->active_rate = 0; 4094 4095 for (i = 0; i < hw->n_bitrates; i++) { 4096 rate = &(hw->bitrates[i]); 4097 if (rate->hw_value < RATE_COUNT_LEGACY) 4098 il->active_rate |= (1 << rate->hw_value); 4099 } 4100 4101 D_RATE("Set active_rate = %0x\n", il->active_rate); 4102 4103 il->staging.cck_basic_rates = 4104 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF; 4105 4106 il->staging.ofdm_basic_rates = 4107 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; 4108 } 4109 EXPORT_SYMBOL(il_set_rate); 4110 4111 void 4112 il_chswitch_done(struct il_priv *il, bool is_success) 4113 { 4114 if (test_bit(S_EXIT_PENDING, &il->status)) 4115 return; 4116 4117 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) 4118 ieee80211_chswitch_done(il->vif, is_success); 4119 } 4120 EXPORT_SYMBOL(il_chswitch_done); 4121 4122 void 4123 il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb) 4124 { 4125 struct il_rx_pkt *pkt = rxb_addr(rxb); 4126 struct il_csa_notification *csa = &(pkt->u.csa_notif); 4127 struct il_rxon_cmd *rxon = (void *)&il->active; 4128 4129 if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) 4130 return; 4131 4132 if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) { 4133 rxon->channel = csa->channel; 4134 il->staging.channel = csa->channel; 4135 D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel)); 4136 il_chswitch_done(il, true); 4137 } else { 4138 IL_ERR("CSA notif (fail) : channel %d\n", 4139 le16_to_cpu(csa->channel)); 4140 il_chswitch_done(il, false); 4141 } 4142 } 4143 EXPORT_SYMBOL(il_hdl_csa); 4144 4145 #ifdef CONFIG_IWLEGACY_DEBUG 4146 void 4147 il_print_rx_config_cmd(struct il_priv *il) 4148 { 4149 struct il_rxon_cmd *rxon = &il->staging; 4150 4151 D_RADIO("RX CONFIG:\n"); 4152 il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); 4153 D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); 4154 D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); 4155 D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags)); 4156 D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); 4157 D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates); 4158 D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); 4159 D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr); 4160 D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr); 4161 D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 4162 } 4163 EXPORT_SYMBOL(il_print_rx_config_cmd); 4164 #endif 4165 /** 4166 * il_irq_handle_error - called for HW or SW error interrupt from card 4167 */ 4168 void 4169 il_irq_handle_error(struct il_priv *il) 4170 { 4171 /* Set the FW error flag -- cleared on il_down */ 4172 set_bit(S_FW_ERROR, &il->status); 4173 4174 /* Cancel currently queued command. 
*/ 4175 clear_bit(S_HCMD_ACTIVE, &il->status); 4176 4177 IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version); 4178 4179 il->ops->dump_nic_error_log(il); 4180 if (il->ops->dump_fh) 4181 il->ops->dump_fh(il, NULL, false); 4182 #ifdef CONFIG_IWLEGACY_DEBUG 4183 if (il_get_debug_level(il) & IL_DL_FW_ERRORS) 4184 il_print_rx_config_cmd(il); 4185 #endif 4186 4187 wake_up(&il->wait_command_queue); 4188 4189 /* Keep the restart process from trying to send host 4190 * commands by clearing the INIT status bit */ 4191 clear_bit(S_READY, &il->status); 4192 4193 if (!test_bit(S_EXIT_PENDING, &il->status)) { 4194 IL_DBG(IL_DL_FW_ERRORS, 4195 "Restarting adapter due to uCode error.\n"); 4196 4197 if (il->cfg->mod_params->restart_fw) 4198 queue_work(il->workqueue, &il->restart); 4199 } 4200 } 4201 EXPORT_SYMBOL(il_irq_handle_error); 4202 4203 static int 4204 _il_apm_stop_master(struct il_priv *il) 4205 { 4206 int ret = 0; 4207 4208 /* stop device's busmaster DMA activity */ 4209 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 4210 4211 ret = 4212 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, 4213 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 4214 if (ret < 0) 4215 IL_WARN("Master Disable Timed Out, 100 usec\n"); 4216 4217 D_INFO("stop master\n"); 4218 4219 return ret; 4220 } 4221 4222 void 4223 _il_apm_stop(struct il_priv *il) 4224 { 4225 lockdep_assert_held(&il->reg_lock); 4226 4227 D_INFO("Stop card, put in low power state\n"); 4228 4229 /* Stop device's DMA activity */ 4230 _il_apm_stop_master(il); 4231 4232 /* Reset the entire device */ 4233 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 4234 4235 udelay(10); 4236 4237 /* 4238 * Clear "initialization complete" bit to move adapter from 4239 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 4240 */ 4241 _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 4242 } 4243 EXPORT_SYMBOL(_il_apm_stop); 4244 4245 void 4246 il_apm_stop(struct il_priv *il) 4247 { 4248 unsigned long flags; 4249 4250 spin_lock_irqsave(&il->reg_lock, flags); 4251 _il_apm_stop(il); 4252 spin_unlock_irqrestore(&il->reg_lock, flags); 4253 } 4254 EXPORT_SYMBOL(il_apm_stop); 4255 4256 /* 4257 * Start up NIC's basic functionality after it has been reset 4258 * (e.g. after platform boot, or shutdown via il_apm_stop()) 4259 * NOTE: This does not load uCode nor start the embedded processor 4260 */ 4261 int 4262 il_apm_init(struct il_priv *il) 4263 { 4264 int ret = 0; 4265 u16 lctl; 4266 4267 D_INFO("Init card's basic functions\n"); 4268 4269 /* 4270 * Use "set_bit" below rather than "write", to preserve any hardware 4271 * bits already set by default after reset. 4272 */ 4273 4274 /* Disable L0S exit timer (platform NMI Work/Around) */ 4275 il_set_bit(il, CSR_GIO_CHICKEN_BITS, 4276 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 4277 4278 /* 4279 * Disable L0s without affecting L1; 4280 * don't wait for ICH L0s (ICH bug W/A) 4281 */ 4282 il_set_bit(il, CSR_GIO_CHICKEN_BITS, 4283 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 4284 4285 /* Set FH wait threshold to maximum (HW error during stress W/A) */ 4286 il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); 4287 4288 /* 4289 * Enable HAP INTA (interrupt from management bus) to 4290 * wake device's PCI Express link L1a -> L0s 4291 * NOTE: This is no-op for 3945 (non-existent bit) 4292 */ 4293 il_set_bit(il, CSR_HW_IF_CONFIG_REG, 4294 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 4295 4296 /* 4297 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. 
 * Check if BIOS (or OS) enabled L1-ASPM on this device.
 * If so (likely), disable L0S, so device moves directly L0->L1;
 * costs negligible amount of power savings.
 * If not (unlikely), enable L0S, so there is at least some
 * power savings, even without L1.
 */
	if (il->cfg->set_l0s) {
		pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
		if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
			/* L1-ASPM enabled; disable(!) L0S */
			il_set_bit(il, CSR_GIO_REG,
				   CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			il_clear_bit(il, CSR_GIO_REG,
				     CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (il->cfg->pll_cfg_val)
		il_set_bit(il, CSR_ANA_PLL_CFG, il->cfg->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. il_wr_prph()
	 * and accesses to uCode SRAM.
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		D_INFO("Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (il->cfg->use_bsm)
		il_wr_prph(il, APMG_CLK_EN_REG,
			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(il_apm_init);
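/*
 * Usage note (illustrative sketch only, kept out of the build): a
 * hypothetical device-init path showing where il_apm_init() fits in the
 * bring-up order. The name il_example_nic_init() and the surrounding
 * steps are assumptions for illustration; the real callers are the
 * 3945/4965-specific init routines.
 */
#if 0
static int
il_example_nic_init(struct il_priv *il)
{
	int ret;

	/* 1) Basic clocks/power up -- no uCode loaded, processor idle */
	ret = il_apm_init(il);
	if (ret)
		return ret;

	/* 2) Device-specific setup would follow: EEPROM read, uCode
	 *    load, starting the embedded processor, calibration, etc. */
	return 0;
}
#endif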
int
il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;

	lockdep_assert_held(&il->mutex);

	if (il->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!il->ops->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm means 1 milliwatt */
	if (tx_power < 0) {
		IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
		return -EINVAL;
	}

	if (tx_power > il->tx_power_device_lmt) {
		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
			tx_power, il->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!il_is_ready_rf(il))
		return -EIO;

	/* Scan complete and commit_rxon use the tx_power_next value;
	 * it always needs to be updated with the newest request. */
	il->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(S_SCANNING, &il->status) ||
	    memcmp(&il->active, &il->staging, sizeof(il->staging));
	if (defer && !force) {
		D_INFO("Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = il->tx_power_user_lmt;
	il->tx_power_user_lmt = tx_power;

	ret = il->ops->send_tx_power(il);

	/* if we fail to set tx_power, restore the original tx power */
	if (ret) {
		il->tx_power_user_lmt = prev_tx_power;
		il->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(il_set_tx_power);

void
il_send_bt_config(struct il_priv *il)
{
	struct il_bt_cmd bt_cmd = {
		.lead_time = BT_LEAD_TIME_DEF,
		.max_kill = BT_MAX_KILL_DEF,
		.kill_ack_mask = 0,
		.kill_cts_mask = 0,
	};

	if (!bt_coex_active)
		bt_cmd.flags = BT_COEX_DISABLE;
	else
		bt_cmd.flags = BT_COEX_ENABLE;

	D_INFO("BT coex %s\n",
	       (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");

	if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
		IL_ERR("failed to send BT Coex Config\n");
}
EXPORT_SYMBOL(il_send_bt_config);

int
il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
{
	struct il_stats_cmd stats_cmd = {
		.configuration_flags = clear ?
IL_STATS_CONF_CLEAR_STATS : 0, 4452 }; 4453 4454 if (flags & CMD_ASYNC) 4455 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd), 4456 &stats_cmd, NULL); 4457 else 4458 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd), 4459 &stats_cmd); 4460 } 4461 EXPORT_SYMBOL(il_send_stats_request); 4462 4463 void 4464 il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb) 4465 { 4466 #ifdef CONFIG_IWLEGACY_DEBUG 4467 struct il_rx_pkt *pkt = rxb_addr(rxb); 4468 struct il_sleep_notification *sleep = &(pkt->u.sleep_notif); 4469 D_RX("sleep mode: %d, src: %d\n", 4470 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 4471 #endif 4472 } 4473 EXPORT_SYMBOL(il_hdl_pm_sleep); 4474 4475 void 4476 il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb) 4477 { 4478 struct il_rx_pkt *pkt = rxb_addr(rxb); 4479 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; 4480 D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len, 4481 il_get_cmd_string(pkt->hdr.cmd)); 4482 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len); 4483 } 4484 EXPORT_SYMBOL(il_hdl_pm_debug_stats); 4485 4486 void 4487 il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb) 4488 { 4489 struct il_rx_pkt *pkt = rxb_addr(rxb); 4490 4491 IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) " 4492 "seq 0x%04X ser 0x%08X\n", 4493 le32_to_cpu(pkt->u.err_resp.error_type), 4494 il_get_cmd_string(pkt->u.err_resp.cmd_id), 4495 pkt->u.err_resp.cmd_id, 4496 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), 4497 le32_to_cpu(pkt->u.err_resp.error_info)); 4498 } 4499 EXPORT_SYMBOL(il_hdl_error); 4500 4501 void 4502 il_clear_isr_stats(struct il_priv *il) 4503 { 4504 memset(&il->isr_stats, 0, sizeof(il->isr_stats)); 4505 } 4506 4507 int 4508 il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, 4509 const struct ieee80211_tx_queue_params *params) 4510 { 4511 struct il_priv *il = hw->priv; 4512 unsigned long flags; 4513 int q; 4514 4515 D_MAC80211("enter\n"); 4516 4517 if (!il_is_ready_rf(il)) { 4518 D_MAC80211("leave - RF not ready\n"); 4519 return -EIO; 4520 } 4521 4522 if (queue >= AC_NUM) { 4523 D_MAC80211("leave - queue >= AC_NUM %d\n", queue); 4524 return 0; 4525 } 4526 4527 q = AC_NUM - 1 - queue; 4528 4529 spin_lock_irqsave(&il->lock, flags); 4530 4531 il->qos_data.def_qos_parm.ac[q].cw_min = 4532 cpu_to_le16(params->cw_min); 4533 il->qos_data.def_qos_parm.ac[q].cw_max = 4534 cpu_to_le16(params->cw_max); 4535 il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; 4536 il->qos_data.def_qos_parm.ac[q].edca_txop = 4537 cpu_to_le16((params->txop * 32)); 4538 4539 il->qos_data.def_qos_parm.ac[q].reserved1 = 0; 4540 4541 spin_unlock_irqrestore(&il->lock, flags); 4542 4543 D_MAC80211("leave\n"); 4544 return 0; 4545 } 4546 EXPORT_SYMBOL(il_mac_conf_tx); 4547 4548 int 4549 il_mac_tx_last_beacon(struct ieee80211_hw *hw) 4550 { 4551 struct il_priv *il = hw->priv; 4552 int ret; 4553 4554 D_MAC80211("enter\n"); 4555 4556 ret = (il->ibss_manager == IL_IBSS_MANAGER); 4557 4558 D_MAC80211("leave ret %d\n", ret); 4559 return ret; 4560 } 4561 EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon); 4562 4563 static int 4564 il_set_mode(struct il_priv *il) 4565 { 4566 il_connection_init_rx_config(il); 4567 4568 if (il->ops->set_rxon_chain) 4569 il->ops->set_rxon_chain(il); 4570 4571 return il_commit_rxon(il); 4572 } 4573 4574 int 4575 il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 4576 { 4577 struct il_priv *il = hw->priv; 4578 int err; 4579 bool reset; 4580 4581 mutex_lock(&il->mutex); 
4582 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr); 4583 4584 if (!il_is_ready_rf(il)) { 4585 IL_WARN("Try to add interface when device not ready\n"); 4586 err = -EINVAL; 4587 goto out; 4588 } 4589 4590 /* 4591 * We do not support multiple virtual interfaces, but on hardware reset 4592 * we have to add the same interface again. 4593 */ 4594 reset = (il->vif == vif); 4595 if (il->vif && !reset) { 4596 err = -EOPNOTSUPP; 4597 goto out; 4598 } 4599 4600 il->vif = vif; 4601 il->iw_mode = vif->type; 4602 4603 err = il_set_mode(il); 4604 if (err) { 4605 IL_WARN("Fail to set mode %d\n", vif->type); 4606 if (!reset) { 4607 il->vif = NULL; 4608 il->iw_mode = NL80211_IFTYPE_STATION; 4609 } 4610 } 4611 4612 out: 4613 D_MAC80211("leave err %d\n", err); 4614 mutex_unlock(&il->mutex); 4615 4616 return err; 4617 } 4618 EXPORT_SYMBOL(il_mac_add_interface); 4619 4620 static void 4621 il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif) 4622 { 4623 lockdep_assert_held(&il->mutex); 4624 4625 if (il->scan_vif == vif) { 4626 il_scan_cancel_timeout(il, 200); 4627 il_force_scan_end(il); 4628 } 4629 4630 il_set_mode(il); 4631 } 4632 4633 void 4634 il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 4635 { 4636 struct il_priv *il = hw->priv; 4637 4638 mutex_lock(&il->mutex); 4639 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr); 4640 4641 WARN_ON(il->vif != vif); 4642 il->vif = NULL; 4643 il->iw_mode = NL80211_IFTYPE_UNSPECIFIED; 4644 il_teardown_interface(il, vif); 4645 eth_zero_addr(il->bssid); 4646 4647 D_MAC80211("leave\n"); 4648 mutex_unlock(&il->mutex); 4649 } 4650 EXPORT_SYMBOL(il_mac_remove_interface); 4651 4652 int 4653 il_alloc_txq_mem(struct il_priv *il) 4654 { 4655 if (!il->txq) 4656 il->txq = 4657 kzalloc(sizeof(struct il_tx_queue) * 4658 il->cfg->num_of_queues, GFP_KERNEL); 4659 if (!il->txq) { 4660 IL_ERR("Not enough memory for txq\n"); 4661 return -ENOMEM; 4662 } 4663 return 0; 4664 } 4665 EXPORT_SYMBOL(il_alloc_txq_mem); 4666 4667 void 4668 il_free_txq_mem(struct il_priv *il) 4669 { 4670 kfree(il->txq); 4671 il->txq = NULL; 4672 } 4673 EXPORT_SYMBOL(il_free_txq_mem); 4674 4675 int 4676 il_force_reset(struct il_priv *il, bool external) 4677 { 4678 struct il_force_reset *force_reset; 4679 4680 if (test_bit(S_EXIT_PENDING, &il->status)) 4681 return -EINVAL; 4682 4683 force_reset = &il->force_reset; 4684 force_reset->reset_request_count++; 4685 if (!external) { 4686 if (force_reset->last_force_reset_jiffies && 4687 time_after(force_reset->last_force_reset_jiffies + 4688 force_reset->reset_duration, jiffies)) { 4689 D_INFO("force reset rejected\n"); 4690 force_reset->reset_reject_count++; 4691 return -EAGAIN; 4692 } 4693 } 4694 force_reset->reset_success_count++; 4695 force_reset->last_force_reset_jiffies = jiffies; 4696 4697 /* 4698 * if the request is from external(ex: debugfs), 4699 * then always perform the request in regardless the module 4700 * parameter setting 4701 * if the request is from internal (uCode error or driver 4702 * detect failure), then fw_restart module parameter 4703 * need to be check before performing firmware reload 4704 */ 4705 4706 if (!external && !il->cfg->mod_params->restart_fw) { 4707 D_INFO("Cancel firmware reload based on " 4708 "module parameter setting\n"); 4709 return 0; 4710 } 4711 4712 IL_ERR("On demand firmware reload\n"); 4713 4714 /* Set the FW error flag -- cleared on il_down */ 4715 set_bit(S_FW_ERROR, &il->status); 4716 wake_up(&il->wait_command_queue); 4717 /* 4718 * Keep the restart 
	 * process from trying to send host
	 * commands by clearing the INIT status bit
	 */
	clear_bit(S_READY, &il->status);
	queue_work(il->workqueue, &il->restart);

	return 0;
}
EXPORT_SYMBOL(il_force_reset);

int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct il_priv *il = hw->priv;
	int err;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
		   vif->type, vif->addr, newtype, newp2p);

	if (newp2p) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!il->vif || !il_is_ready_rf(il)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	/* success */
	vif->type = newtype;
	vif->p2p = false;
	il->iw_mode = newtype;
	il_teardown_interface(il, vif);
	err = 0;

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_change_interface);

void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  u32 queues, bool drop)
{
	struct il_priv *il = hw->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	int i;

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (il->txq == NULL)
		goto out;

	for (i = 0; i < il->hw_params.max_txq_num; i++) {
		struct il_queue *q;

		if (i == il->cmd_queue)
			continue;

		q = &il->txq[i].q;
		if (q->read_ptr == q->write_ptr)
			continue;

		if (time_after(jiffies, timeout)) {
			IL_ERR("Failed to flush queue %d\n", q->id);
			break;
		}

		msleep(20);
	}
out:
	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_flush);

/*
 * On every watchdog tick we check the (latest) time stamp. If it has not
 * changed during the timeout period and the queue is not empty, we reset
 * the firmware.
 */
static int
il_check_stuck_queue(struct il_priv *il, int cnt)
{
	struct il_tx_queue *txq = &il->txq[cnt];
	struct il_queue *q = &txq->q;
	unsigned long timeout;
	unsigned long now = jiffies;
	int ret;

	if (q->read_ptr == q->write_ptr) {
		txq->time_stamp = now;
		return 0;
	}

	timeout =
	    txq->time_stamp +
	    msecs_to_jiffies(il->cfg->wd_timeout);

	if (time_after(now, timeout)) {
		IL_ERR("Queue %d stuck for %u ms.\n", q->id,
		       jiffies_to_msecs(now - txq->time_stamp));
		ret = il_force_reset(il, false);
		return (ret == -EAGAIN) ? 0 : 1;
	}

	return 0;
}

/*
 * Making the watchdog tick one quarter of the timeout ensures we will
 * discover a hung queue between timeout and 1.25 * timeout.
 */
#define IL_WD_TICK(timeout) ((timeout) / 4)
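/*
 * Worked example for the tick arithmetic above (values assumed for
 * illustration): with wd_timeout = 2000 ms the watchdog ticks every
 * IL_WD_TICK(2000) = 500 ms. A queue that stalls right after a check is
 * declared stuck on the first tick where "now" exceeds
 * time_stamp + timeout, i.e. after at most 2000 + 500 = 2500 ms,
 * which is the 1.25 * timeout bound mentioned above.
 */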
/*
 * Watchdog timer callback: we check each tx queue for a stuck condition;
 * if a queue is hung, we reset the firmware. If everything is fine, we
 * just rearm the timer.
 */
void
il_bg_watchdog(unsigned long data)
{
	struct il_priv *il = (struct il_priv *)data;
	int cnt;
	unsigned long timeout;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	timeout = il->cfg->wd_timeout;
	if (timeout == 0)
		return;

	/* monitor and check for stuck cmd queue */
	if (il_check_stuck_queue(il, il->cmd_queue))
		return;

	/* monitor and check for other stuck queues */
	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
		/* skip as we already checked the command queue */
		if (cnt == il->cmd_queue)
			continue;
		if (il_check_stuck_queue(il, cnt))
			return;
	}

	mod_timer(&il->watchdog,
		  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
}
EXPORT_SYMBOL(il_bg_watchdog);

void
il_setup_watchdog(struct il_priv *il)
{
	unsigned int timeout = il->cfg->wd_timeout;

	if (timeout)
		mod_timer(&il->watchdog,
			  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
	else
		del_timer(&il->watchdog);
}
EXPORT_SYMBOL(il_setup_watchdog);

/*
 * Extended beacon time format:
 * time in usec will be changed into a 32-bit value in extended:internal
 * format; the extended part is the beacon count and the internal part is
 * the time in usec within one beacon interval.
 */
u32
il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	quot =
	    (usec /
	     interval) & (il_beacon_time_mask_high(il,
						   il->hw_params.
						   beacon_time_tsf_bits) >>
			  il->hw_params.beacon_time_tsf_bits);
	rem =
	    (usec % interval) & il_beacon_time_mask_low(il,
							il->hw_params.
							beacon_time_tsf_bits);

	return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(il_usecs_to_beacons);
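/*
 * Worked example for the extended:internal encoding above (numbers
 * assumed for illustration; beacon_time_tsf_bits taken as 22 here and
 * the range-limiting masks ignored). With TIME_UNIT = 1024 usec and
 * beacon_interval = 100 TU, interval = 102400 usec. For usec = 256000:
 *
 *   quot = 256000 / 102400 = 2      (beacon count, extended part)
 *   rem  = 256000 % 102400 = 51200  (usec within interval, internal part)
 *   result = (2 << 22) + 51200
 */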
/* base is usually what we get from uCode with each received frame,
 * the same as the HW timer counter counting down.
 */
__le32
il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
		   u32 beacon_interval)
{
	u32 base_low = base & il_beacon_time_mask_low(il,
						      il->hw_params.
						      beacon_time_tsf_bits);
	u32 addon_low = addon & il_beacon_time_mask_low(il,
							il->hw_params.
							beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & il_beacon_time_mask_high(il,
						   il->hw_params.
						   beacon_time_tsf_bits)) +
	    (addon & il_beacon_time_mask_high(il,
					      il->hw_params.
					      beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << il->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << il->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(il_add_beacon_time);

#ifdef CONFIG_PM_SLEEP

static int
il_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);

	/*
	 * This function is called when the system goes into suspend state.
	 * mac80211 will call il_mac_stop() from its suspend function first,
	 * but since il_mac_stop() has no knowledge of who the caller is, it
	 * will not call apm_ops.stop() to stop the DMA operation.
	 * Call apm_ops.stop() here to make sure the DMA is stopped.
	 */
	il_apm_stop(il);

	return 0;
}

static int
il_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);
	bool hw_rfkill = false;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il_enable_interrupts(il);

	if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);

	return 0;
}

SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
EXPORT_SYMBOL(il_pm_ops);

#endif /* CONFIG_PM_SLEEP */
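/*
 * Usage note (illustrative sketch only, kept out of the build): how a
 * PCI driver would typically hook il_pm_ops. The il_example_pci_driver
 * structure below is an assumption for illustration; the real drivers
 * wire this up in their device-specific files.
 */
#if 0
static struct pci_driver il_example_pci_driver = {
	.name = "il_example",
	/* probe/remove/id_table omitted in this sketch */
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &il_pm_ops,
#endif
};
#endif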
static void
il_update_qos(struct il_priv *il)
{
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	il->qos_data.def_qos_parm.qos_flags = 0;

	if (il->qos_data.qos_active)
		il->qos_data.def_qos_parm.qos_flags |=
		    QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (il->ht.enabled)
		il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	D_QOS("send QoS cmd with QoS active=%d FLAGS=0x%X\n",
	      il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);

	il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
			      &il->qos_data.def_qos_parm, NULL);
}

/**
 * il_mac_config - mac80211 config callback
 */
int
il_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	bool ht_changed = false;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
		   changed);

	if (unlikely(test_bit(S_SCANNING, &il->status))) {
		scan_active = 1;
		D_MAC80211("scan active\n");
	}

	if (changed &
	    (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT, which is what we want */
		il->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* During scanning, mac80211 delays the channel setting until the
	 * scan finishes, then calls us with changed = 0.
	 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {

		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = il_get_channel_info(il, channel->band, ch);
		if (!il_is_channel_valid(ch_info)) {
			D_MAC80211("leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !il_is_channel_ibss(ch_info)) {
			D_MAC80211("leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&il->lock, flags);

		/* Configure HT40 channels */
		if (il->ht.enabled != conf_is_ht(conf)) {
			il->ht.enabled = conf_is_ht(conf);
			ht_changed = true;
		}
		if (il->ht.enabled) {
			if (conf_is_ht40_minus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
				il->ht.is_40mhz = true;
			} else if (conf_is_ht40_plus(conf)) {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
				il->ht.is_40mhz = true;
			} else {
				il->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_NONE;
				il->ht.is_40mhz = false;
			}
		} else
			il->ht.is_40mhz = false;

		/*
		 * Default to no protection. Protection mode will
		 * later be set from BSS config in il_ht_conf.
		 */
		il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

		/* If we are switching from HT to 2.4 GHz, clear any
		 * HT-related flags, since 2.4 GHz does not support HT. */
		if ((le16_to_cpu(il->staging.channel) != ch))
			il->staging.flags = 0;

		il_set_rxon_channel(il, channel);
		il_set_rxon_ht(il, ht_conf);

		il_set_flags_for_band(il, channel->band, il->vif);

		spin_unlock_irqrestore(&il->lock, flags);

		if (il->ops->update_bcast_stations)
			ret = il->ops->update_bcast_stations(il);

set_ch_out:
		/* The list of supported rates and the rate mask can be
		 * different for each band; since the band may have changed,
		 * reset the rate mask to what mac80211 lists. */
		il_set_rate(il);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
		il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
		if (!il->power_data.ps_disabled)
			IL_WARN_ONCE("Enabling power save might cause firmware crashes\n");
		ret = il_power_update_mode(il, false);
		if (ret)
			D_MAC80211("Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
			   conf->power_level);

		il_set_tx_power(il, conf->power_level, false);
	}

	if (!il_is_ready(il)) {
		D_MAC80211("leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
		il_commit_rxon(il);
	else
		D_INFO("Not re-sending same RXON configuration.\n");
	if (ht_changed)
		il_update_qos(il);

out:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_config);
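/*
 * Sketch of the staging/active RXON pattern used by il_mac_config()
 * above (illustrative only, kept out of the build; il_example_retune()
 * and new_ch are hypothetical): callers mutate il->staging and push it
 * to the device only when it differs from the last committed il->active.
 */
#if 0
static void
il_example_retune(struct il_priv *il, u16 new_ch)
{
	il->staging.channel = cpu_to_le16(new_ch);	/* mutate staging */
	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
		il_commit_rxon(il);	/* push only if it differs */
}
#endif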
void
il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	spin_lock_irqsave(&il->lock, flags);

	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));

	/* On a new association, get rid of the IBSS beacon skb. */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;
	il->timestamp = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	il_scan_cancel_timeout(il, 100);
	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - not ready\n");
		mutex_unlock(&il->mutex);
		return;
	}

	/* we are restarting the association process */
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il);

	il_set_rate(il);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_reset_tsf);
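/*
 * Worked example for the chain logic in il_ht_conf() below (values
 * assumed for illustration): a peer that advertises only MCS 0-7 has
 * ht_cap->mcs.rx_mask[1] == 0 and rx_mask[2] == 0, i.e. it receives a
 * single spatial stream, so a single RX chain is sufficient and
 * single_chain_sufficient is set to save power.
 */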
static void
il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_ht_config *ht_conf = &il->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;

	D_ASSOC("enter\n");

	if (!il->ht.enabled)
		return;

	il->ht.protection =
	    bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	il->ht.non_gf_sta_present =
	    !!(bss_conf->
	       ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			maxstreams =
			    (ht_cap->mcs.
			     tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
			    >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if (ht_cap->mcs.rx_mask[1] == 0 &&
			    ht_cap->mcs.rx_mask[2] == 0)
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection; in that case mac80211
			 * will soon tell us about it.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	D_ASSOC("leave\n");
}

static inline void
il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
{
	/*
	 * Inform the uCode that there is no longer an association and
	 * that no more packets should be sent.
	 */
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il->staging.assoc_id = 0;
	il_commit_rxon(il);
}

static void
il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	D_MAC80211("enter\n");

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("update beacon with no beaconing enabled\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&il->lock, flags);

	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	il->timestamp = le64_to_cpu(timestamp);

	D_MAC80211("leave\n");
	spin_unlock_irqrestore(&il->lock, flags);

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return;
	}

	il->ops->post_associate(il);
}

void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *bss_conf, u32 changes)
{
	struct il_priv *il = hw->priv;
	int ret;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: changes 0x%x\n", changes);

	if (!il_is_alive(il)) {
		D_MAC80211("leave - not alive\n");
		mutex_unlock(&il->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&il->lock, flags);
		il->qos_data.qos_active = bss_conf->qos;
		il_update_qos(il);
		spin_unlock_irqrestore(&il->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/* FIXME: can we remove beacon_enabled ? */
		if (vif->bss_conf.enable_beacon)
			il->beacon_enabled = true;
		else
			il->beacon_enabled = false;
	}

	if (changes & BSS_CHANGED_BSSID) {
		D_MAC80211("BSSID %pM\n", bss_conf->bssid);

		/*
		 * On a passive channel we wait with blocked queues to see if
		 * there is traffic on that channel. If no frame is received
		 * (very unlikely, since the scan detected an AP on the
		 * channel, but theoretically possible), the mac80211
		 * association procedure will time out and mac80211 will call
		 * us with a NULL bssid. We have to unblock the queues in
		 * that case.
		 */
		if (is_zero_ether_addr(bss_conf->bssid))
			il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);

		/*
		 * If there is currently a HW scan going on in the background,
		 * then we need to cancel it, otherwise sometimes we are not
		 * able to authenticate (FIXME: why?)
5392 */ 5393 if (il_scan_cancel_timeout(il, 100)) { 5394 D_MAC80211("leave - scan abort failed\n"); 5395 mutex_unlock(&il->mutex); 5396 return; 5397 } 5398 5399 /* mac80211 only sets assoc when in STATION mode */ 5400 memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); 5401 5402 /* FIXME: currently needed in a few places */ 5403 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); 5404 } 5405 5406 /* 5407 * This needs to be after setting the BSSID in case 5408 * mac80211 decides to do both changes at once because 5409 * it will invoke post_associate. 5410 */ 5411 if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON)) 5412 il_beacon_update(hw, vif); 5413 5414 if (changes & BSS_CHANGED_ERP_PREAMBLE) { 5415 D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble); 5416 if (bss_conf->use_short_preamble) 5417 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 5418 else 5419 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 5420 } 5421 5422 if (changes & BSS_CHANGED_ERP_CTS_PROT) { 5423 D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot); 5424 if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ) 5425 il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; 5426 else 5427 il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; 5428 if (bss_conf->use_cts_prot) 5429 il->staging.flags |= RXON_FLG_SELF_CTS_EN; 5430 else 5431 il->staging.flags &= ~RXON_FLG_SELF_CTS_EN; 5432 } 5433 5434 if (changes & BSS_CHANGED_BASIC_RATES) { 5435 /* XXX use this information 5436 * 5437 * To do that, remove code from il_set_rate() and put something 5438 * like this here: 5439 * 5440 if (A-band) 5441 il->staging.ofdm_basic_rates = 5442 bss_conf->basic_rates; 5443 else 5444 il->staging.ofdm_basic_rates = 5445 bss_conf->basic_rates >> 4; 5446 il->staging.cck_basic_rates = 5447 bss_conf->basic_rates & 0xF; 5448 */ 5449 } 5450 5451 if (changes & BSS_CHANGED_HT) { 5452 il_ht_conf(il, vif); 5453 5454 if (il->ops->set_rxon_chain) 5455 il->ops->set_rxon_chain(il); 5456 } 5457 5458 if (changes & BSS_CHANGED_ASSOC) { 5459 D_MAC80211("ASSOC %d\n", bss_conf->assoc); 5460 if (bss_conf->assoc) { 5461 il->timestamp = bss_conf->sync_tsf; 5462 5463 if (!il_is_rfkill(il)) 5464 il->ops->post_associate(il); 5465 } else 5466 il_set_no_assoc(il, vif); 5467 } 5468 5469 if (changes && il_is_associated(il) && bss_conf->aid) { 5470 D_MAC80211("Changes (%#x) while associated\n", changes); 5471 ret = il_send_rxon_assoc(il); 5472 if (!ret) { 5473 /* Sync active_rxon with latest change. */ 5474 memcpy((void *)&il->active, &il->staging, 5475 sizeof(struct il_rxon_cmd)); 5476 } 5477 } 5478 5479 if (changes & BSS_CHANGED_BEACON_ENABLED) { 5480 if (vif->bss_conf.enable_beacon) { 5481 memcpy(il->staging.bssid_addr, bss_conf->bssid, 5482 ETH_ALEN); 5483 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); 5484 il->ops->config_ap(il); 5485 } else 5486 il_set_no_assoc(il, vif); 5487 } 5488 5489 if (changes & BSS_CHANGED_IBSS) { 5490 ret = il->ops->manage_ibss_station(il, vif, 5491 bss_conf->ibss_joined); 5492 if (ret) 5493 IL_ERR("failed to %s IBSS station %pM\n", 5494 bss_conf->ibss_joined ? "add" : "remove", 5495 bss_conf->bssid); 5496 } 5497 5498 D_MAC80211("leave\n"); 5499 mutex_unlock(&il->mutex); 5500 } 5501 EXPORT_SYMBOL(il_mac_bss_info_changed); 5502 5503 irqreturn_t 5504 il_isr(int irq, void *data) 5505 { 5506 struct il_priv *il = data; 5507 u32 inta, inta_mask; 5508 u32 inta_fh; 5509 unsigned long flags; 5510 if (!il) 5511 return IRQ_NONE; 5512 5513 spin_lock_irqsave(&il->lock, flags); 5514 5515 /* Disable (but don't clear!) 
 interrupts here to avoid
 * back-to-back ISRs and sporadic interrupts from our NIC.
 * If we have something to service, the tasklet will re-enable interrupts.
 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
	      inta_fh);

	inta &= ~CSR_INT_BIT_SCD;

	/* il_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/* Re-enable interrupts here since we don't have anything to
	 * service, but only if they were disabled by the irq handler. */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(il_isr);

/*
 * il_tx_cmd_protection: Set RTS/CTS. Only 3945 and 4965 share this
 * function.
 */
void
il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
		     __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		*tx_flags |= TX_CMD_FLG_RTS_MSK;
		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

		if (!ieee80211_is_mgmt(fc))
			return;

		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			*tx_flags |= TX_CMD_FLG_CTS_MSK;
			break;
		}
	} else if (info->control.rates[0].
		   flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		*tx_flags |= TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
	}
}
EXPORT_SYMBOL(il_tx_cmd_protection);
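/*
 * Usage note (illustrative sketch only, kept out of the build): how a
 * device tx-command builder would use il_tx_cmd_protection(). The
 * function il_example_build_tx_flags() is an assumption for
 * illustration; the real callers are in the 3945/4965 tx paths.
 */
#if 0
static void
il_example_build_tx_flags(struct il_priv *il, struct ieee80211_tx_info *info,
			  struct ieee80211_hdr *hdr, __le32 *tx_flags)
{
	__le16 fc = hdr->frame_control;

	/* Choose RTS/CTS vs CTS-to-self based on rate-control flags */
	il_tx_cmd_protection(il, info, fc, tx_flags);
}
#endif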