1 /****************************************************************************** 2 * 3 * GPL LICENSE SUMMARY 4 * 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of version 2 of the GNU General Public License as 9 * published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope that it will be useful, but 12 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 19 * USA 20 * 21 * The full GNU General Public License is included in this distribution 22 * in the file called LICENSE.GPL. 23 * 24 * Contact Information: 25 * Intel Linux Wireless <ilw@linux.intel.com> 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 27 *****************************************************************************/ 28 29 #include <linux/kernel.h> 30 #include <linux/module.h> 31 #include <linux/etherdevice.h> 32 #include <linux/sched.h> 33 #include <linux/slab.h> 34 #include <linux/types.h> 35 #include <linux/lockdep.h> 36 #include <linux/pci.h> 37 #include <linux/dma-mapping.h> 38 #include <linux/delay.h> 39 #include <linux/skbuff.h> 40 #include <net/mac80211.h> 41 42 #include "common.h" 43 44 int 45 _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout) 46 { 47 const int interval = 10; /* microseconds */ 48 int t = 0; 49 50 do { 51 if ((_il_rd(il, addr) & mask) == (bits & mask)) 52 return t; 53 udelay(interval); 54 t += interval; 55 } while (t < timeout); 56 57 return -ETIMEDOUT; 58 } 59 EXPORT_SYMBOL(_il_poll_bit); 60 61 void 62 il_set_bit(struct il_priv *p, u32 r, u32 m) 63 { 64 unsigned long reg_flags; 65 66 spin_lock_irqsave(&p->reg_lock, reg_flags); 67 _il_set_bit(p, r, m); 68 spin_unlock_irqrestore(&p->reg_lock, reg_flags); 69 } 70 EXPORT_SYMBOL(il_set_bit); 71 72 void 73 il_clear_bit(struct il_priv *p, u32 r, u32 m) 74 { 75 unsigned long reg_flags; 76 77 spin_lock_irqsave(&p->reg_lock, reg_flags); 78 _il_clear_bit(p, r, m); 79 spin_unlock_irqrestore(&p->reg_lock, reg_flags); 80 } 81 EXPORT_SYMBOL(il_clear_bit); 82 83 bool 84 _il_grab_nic_access(struct il_priv *il) 85 { 86 int ret; 87 u32 val; 88 89 /* this bit wakes up the NIC */ 90 _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 91 92 /* 93 * These bits say the device is running, and should keep running for 94 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), 95 * but they do not indicate that embedded SRAM is restored yet; 96 * 3945 and 4965 have volatile SRAM, and must save/restore contents 97 * to/from host DRAM when sleeping/waking for power-saving. 98 * Each direction takes approximately 1/4 millisecond; with this 99 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a 100 * series of register accesses are expected (e.g. reading Event Log), 101 * to keep device from sleeping. 102 * 103 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that 104 * SRAM is okay/restored. We don't check that here because this call 105 * is just for hardware register access; but GP1 MAC_SLEEP check is a 106 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). 107 * 108 */ 109 ret = 110 _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 111 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 112 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); 113 if (unlikely(ret < 0)) { 114 val = _il_rd(il, CSR_GP_CNTRL); 115 WARN_ONCE(1, "Timeout waiting for ucode processor access " 116 "(CSR_GP_CNTRL 0x%08x)\n", val); 117 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); 118 return false; 119 } 120 121 return true; 122 } 123 EXPORT_SYMBOL_GPL(_il_grab_nic_access); 124 125 int 126 il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout) 127 { 128 const int interval = 10; /* microseconds */ 129 int t = 0; 130 131 do { 132 if ((il_rd(il, addr) & mask) == mask) 133 return t; 134 udelay(interval); 135 t += interval; 136 } while (t < timeout); 137 138 return -ETIMEDOUT; 139 } 140 EXPORT_SYMBOL(il_poll_bit); 141 142 u32 143 il_rd_prph(struct il_priv *il, u32 reg) 144 { 145 unsigned long reg_flags; 146 u32 val; 147 148 spin_lock_irqsave(&il->reg_lock, reg_flags); 149 _il_grab_nic_access(il); 150 val = _il_rd_prph(il, reg); 151 _il_release_nic_access(il); 152 spin_unlock_irqrestore(&il->reg_lock, reg_flags); 153 return val; 154 } 155 EXPORT_SYMBOL(il_rd_prph); 156 157 void 158 il_wr_prph(struct il_priv *il, u32 addr, u32 val) 159 { 160 unsigned long reg_flags; 161 162 spin_lock_irqsave(&il->reg_lock, reg_flags); 163 if (likely(_il_grab_nic_access(il))) { 164 _il_wr_prph(il, addr, val); 165 _il_release_nic_access(il); 166 } 167 spin_unlock_irqrestore(&il->reg_lock, reg_flags); 168 } 169 EXPORT_SYMBOL(il_wr_prph); 170 171 u32 172 il_read_targ_mem(struct il_priv *il, u32 addr) 173 { 174 unsigned long reg_flags; 175 u32 value; 176 177 spin_lock_irqsave(&il->reg_lock, reg_flags); 178 _il_grab_nic_access(il); 179 180 _il_wr(il, HBUS_TARG_MEM_RADDR, addr); 181 value = _il_rd(il, HBUS_TARG_MEM_RDAT); 182 183 _il_release_nic_access(il); 184 spin_unlock_irqrestore(&il->reg_lock, reg_flags); 185 return value; 186 } 187 EXPORT_SYMBOL(il_read_targ_mem); 188 189 void 190 il_write_targ_mem(struct il_priv *il, u32 addr, u32 val) 191 { 192 unsigned long reg_flags; 193 194 spin_lock_irqsave(&il->reg_lock, reg_flags); 195 if (likely(_il_grab_nic_access(il))) { 196 _il_wr(il, HBUS_TARG_MEM_WADDR, addr); 197 _il_wr(il, HBUS_TARG_MEM_WDAT, val); 198 _il_release_nic_access(il); 199 } 200 spin_unlock_irqrestore(&il->reg_lock, reg_flags); 201 } 202 EXPORT_SYMBOL(il_write_targ_mem); 203 204 const char * 205 il_get_cmd_string(u8 cmd) 206 { 207 switch (cmd) { 208 IL_CMD(N_ALIVE); 209 IL_CMD(N_ERROR); 210 IL_CMD(C_RXON); 211 IL_CMD(C_RXON_ASSOC); 212 IL_CMD(C_QOS_PARAM); 213 IL_CMD(C_RXON_TIMING); 214 IL_CMD(C_ADD_STA); 215 IL_CMD(C_REM_STA); 216 IL_CMD(C_WEPKEY); 217 IL_CMD(N_3945_RX); 218 IL_CMD(C_TX); 219 IL_CMD(C_RATE_SCALE); 220 IL_CMD(C_LEDS); 221 IL_CMD(C_TX_LINK_QUALITY_CMD); 222 IL_CMD(C_CHANNEL_SWITCH); 223 IL_CMD(N_CHANNEL_SWITCH); 224 IL_CMD(C_SPECTRUM_MEASUREMENT); 225 IL_CMD(N_SPECTRUM_MEASUREMENT); 226 IL_CMD(C_POWER_TBL); 227 IL_CMD(N_PM_SLEEP); 228 IL_CMD(N_PM_DEBUG_STATS); 229 IL_CMD(C_SCAN); 230 IL_CMD(C_SCAN_ABORT); 231 IL_CMD(N_SCAN_START); 232 IL_CMD(N_SCAN_RESULTS); 233 IL_CMD(N_SCAN_COMPLETE); 234 IL_CMD(N_BEACON); 235 IL_CMD(C_TX_BEACON); 236 IL_CMD(C_TX_PWR_TBL); 237 IL_CMD(C_BT_CONFIG); 238 IL_CMD(C_STATS); 239 IL_CMD(N_STATS); 240 IL_CMD(N_CARD_STATE); 241 IL_CMD(N_MISSED_BEACONS); 242 IL_CMD(C_CT_KILL_CONFIG); 243 IL_CMD(C_SENSITIVITY); 244 IL_CMD(C_PHY_CALIBRATION); 245 IL_CMD(N_RX_PHY); 246 IL_CMD(N_RX_MPDU); 247 IL_CMD(N_RX); 248 IL_CMD(N_COMPRESSED_BA); 249 default: 250 return "UNKNOWN"; 251 252 } 253 } 254 EXPORT_SYMBOL(il_get_cmd_string); 255 256 #define HOST_COMPLETE_TIMEOUT (HZ / 2) 257 258 static void 259 il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd, 260 struct il_rx_pkt *pkt) 261 { 262 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { 263 IL_ERR("Bad return from %s (0x%08X)\n", 264 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 265 return; 266 } 267 #ifdef CONFIG_IWLEGACY_DEBUG 268 switch (cmd->hdr.cmd) { 269 case C_TX_LINK_QUALITY_CMD: 270 case C_SENSITIVITY: 271 D_HC_DUMP("back from %s (0x%08X)\n", 272 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 273 break; 274 default: 275 D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd), 276 pkt->hdr.flags); 277 } 278 #endif 279 } 280 281 static int 282 il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd) 283 { 284 int ret; 285 286 BUG_ON(!(cmd->flags & CMD_ASYNC)); 287 288 /* An asynchronous command can not expect an SKB to be set. */ 289 BUG_ON(cmd->flags & CMD_WANT_SKB); 290 291 /* Assign a generic callback if one is not provided */ 292 if (!cmd->callback) 293 cmd->callback = il_generic_cmd_callback; 294 295 if (test_bit(S_EXIT_PENDING, &il->status)) 296 return -EBUSY; 297 298 ret = il_enqueue_hcmd(il, cmd); 299 if (ret < 0) { 300 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n", 301 il_get_cmd_string(cmd->id), ret); 302 return ret; 303 } 304 return 0; 305 } 306 307 int 308 il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd) 309 { 310 int cmd_idx; 311 int ret; 312 313 lockdep_assert_held(&il->mutex); 314 315 BUG_ON(cmd->flags & CMD_ASYNC); 316 317 /* A synchronous command can not have a callback set. */ 318 BUG_ON(cmd->callback); 319 320 D_INFO("Attempting to send sync command %s\n", 321 il_get_cmd_string(cmd->id)); 322 323 set_bit(S_HCMD_ACTIVE, &il->status); 324 D_INFO("Setting HCMD_ACTIVE for command %s\n", 325 il_get_cmd_string(cmd->id)); 326 327 cmd_idx = il_enqueue_hcmd(il, cmd); 328 if (cmd_idx < 0) { 329 ret = cmd_idx; 330 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n", 331 il_get_cmd_string(cmd->id), ret); 332 goto out; 333 } 334 335 ret = wait_event_timeout(il->wait_command_queue, 336 !test_bit(S_HCMD_ACTIVE, &il->status), 337 HOST_COMPLETE_TIMEOUT); 338 if (!ret) { 339 if (test_bit(S_HCMD_ACTIVE, &il->status)) { 340 IL_ERR("Error sending %s: time out after %dms.\n", 341 il_get_cmd_string(cmd->id), 342 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 343 344 clear_bit(S_HCMD_ACTIVE, &il->status); 345 D_INFO("Clearing HCMD_ACTIVE for command %s\n", 346 il_get_cmd_string(cmd->id)); 347 ret = -ETIMEDOUT; 348 goto cancel; 349 } 350 } 351 352 if (test_bit(S_RFKILL, &il->status)) { 353 IL_ERR("Command %s aborted: RF KILL Switch\n", 354 il_get_cmd_string(cmd->id)); 355 ret = -ECANCELED; 356 goto fail; 357 } 358 if (test_bit(S_FW_ERROR, &il->status)) { 359 IL_ERR("Command %s failed: FW Error\n", 360 il_get_cmd_string(cmd->id)); 361 ret = -EIO; 362 goto fail; 363 } 364 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { 365 IL_ERR("Error: Response NULL in '%s'\n", 366 il_get_cmd_string(cmd->id)); 367 ret = -EIO; 368 goto cancel; 369 } 370 371 ret = 0; 372 goto out; 373 374 cancel: 375 if (cmd->flags & CMD_WANT_SKB) { 376 /* 377 * Cancel the CMD_WANT_SKB flag for the cmd in the 378 * TX cmd queue. Otherwise in case the cmd comes 379 * in later, it will possibly set an invalid 380 * address (cmd->meta.source). 381 */ 382 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; 383 } 384 fail: 385 if (cmd->reply_page) { 386 il_free_pages(il, cmd->reply_page); 387 cmd->reply_page = 0; 388 } 389 out: 390 return ret; 391 } 392 EXPORT_SYMBOL(il_send_cmd_sync); 393 394 int 395 il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd) 396 { 397 if (cmd->flags & CMD_ASYNC) 398 return il_send_cmd_async(il, cmd); 399 400 return il_send_cmd_sync(il, cmd); 401 } 402 EXPORT_SYMBOL(il_send_cmd); 403 404 int 405 il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data) 406 { 407 struct il_host_cmd cmd = { 408 .id = id, 409 .len = len, 410 .data = data, 411 }; 412 413 return il_send_cmd_sync(il, &cmd); 414 } 415 EXPORT_SYMBOL(il_send_cmd_pdu); 416 417 int 418 il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data, 419 void (*callback) (struct il_priv *il, 420 struct il_device_cmd *cmd, 421 struct il_rx_pkt *pkt)) 422 { 423 struct il_host_cmd cmd = { 424 .id = id, 425 .len = len, 426 .data = data, 427 }; 428 429 cmd.flags |= CMD_ASYNC; 430 cmd.callback = callback; 431 432 return il_send_cmd_async(il, &cmd); 433 } 434 EXPORT_SYMBOL(il_send_cmd_pdu_async); 435 436 /* default: IL_LED_BLINK(0) using blinking idx table */ 437 static int led_mode; 438 module_param(led_mode, int, 0444); 439 MODULE_PARM_DESC(led_mode, 440 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking"); 441 442 /* Throughput OFF time(ms) ON time (ms) 443 * >300 25 25 444 * >200 to 300 40 40 445 * >100 to 200 55 55 446 * >70 to 100 65 65 447 * >50 to 70 75 75 448 * >20 to 50 85 85 449 * >10 to 20 95 95 450 * >5 to 10 110 110 451 * >1 to 5 130 130 452 * >0 to 1 167 167 453 * <=0 SOLID ON 454 */ 455 static const struct ieee80211_tpt_blink il_blink[] = { 456 {.throughput = 0, .blink_time = 334}, 457 {.throughput = 1 * 1024 - 1, .blink_time = 260}, 458 {.throughput = 5 * 1024 - 1, .blink_time = 220}, 459 {.throughput = 10 * 1024 - 1, .blink_time = 190}, 460 {.throughput = 20 * 1024 - 1, .blink_time = 170}, 461 {.throughput = 50 * 1024 - 1, .blink_time = 150}, 462 {.throughput = 70 * 1024 - 1, .blink_time = 130}, 463 {.throughput = 100 * 1024 - 1, .blink_time = 110}, 464 {.throughput = 200 * 1024 - 1, .blink_time = 80}, 465 {.throughput = 300 * 1024 - 1, .blink_time = 50}, 466 }; 467 468 /* 469 * Adjust led blink rate to compensate on a MAC Clock difference on every HW 470 * Led blink rate analysis showed an average deviation of 0% on 3945, 471 * 5% on 4965 HW. 472 * Need to compensate on the led on/off time per HW according to the deviation 473 * to achieve the desired led frequency 474 * The calculation is: (100-averageDeviation)/100 * blinkTime 475 * For code efficiency the calculation will be: 476 * compensation = (100 - averageDeviation) * 64 / 100 477 * NewBlinkTime = (compensation * BlinkTime) / 64 478 */ 479 static inline u8 480 il_blink_compensation(struct il_priv *il, u8 time, u16 compensation) 481 { 482 if (!compensation) { 483 IL_ERR("undefined blink compensation: " 484 "use pre-defined blinking time\n"); 485 return time; 486 } 487 488 return (u8) ((time * compensation) >> 6); 489 } 490 491 /* Set led pattern command */ 492 static int 493 il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off) 494 { 495 struct il_led_cmd led_cmd = { 496 .id = IL_LED_LINK, 497 .interval = IL_DEF_LED_INTRVL 498 }; 499 int ret; 500 501 if (!test_bit(S_READY, &il->status)) 502 return -EBUSY; 503 504 if (il->blink_on == on && il->blink_off == off) 505 return 0; 506 507 if (off == 0) { 508 /* led is SOLID_ON */ 509 on = IL_LED_SOLID; 510 } 511 512 D_LED("Led blink time compensation=%u\n", 513 il->cfg->led_compensation); 514 led_cmd.on = 515 il_blink_compensation(il, on, 516 il->cfg->led_compensation); 517 led_cmd.off = 518 il_blink_compensation(il, off, 519 il->cfg->led_compensation); 520 521 ret = il->ops->send_led_cmd(il, &led_cmd); 522 if (!ret) { 523 il->blink_on = on; 524 il->blink_off = off; 525 } 526 return ret; 527 } 528 529 static void 530 il_led_brightness_set(struct led_classdev *led_cdev, 531 enum led_brightness brightness) 532 { 533 struct il_priv *il = container_of(led_cdev, struct il_priv, led); 534 unsigned long on = 0; 535 536 if (brightness > 0) 537 on = IL_LED_SOLID; 538 539 il_led_cmd(il, on, 0); 540 } 541 542 static int 543 il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, 544 unsigned long *delay_off) 545 { 546 struct il_priv *il = container_of(led_cdev, struct il_priv, led); 547 548 return il_led_cmd(il, *delay_on, *delay_off); 549 } 550 551 void 552 il_leds_init(struct il_priv *il) 553 { 554 int mode = led_mode; 555 int ret; 556 557 if (mode == IL_LED_DEFAULT) 558 mode = il->cfg->led_mode; 559 560 il->led.name = 561 kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy)); 562 il->led.brightness_set = il_led_brightness_set; 563 il->led.blink_set = il_led_blink_set; 564 il->led.max_brightness = 1; 565 566 switch (mode) { 567 case IL_LED_DEFAULT: 568 WARN_ON(1); 569 break; 570 case IL_LED_BLINK: 571 il->led.default_trigger = 572 ieee80211_create_tpt_led_trigger(il->hw, 573 IEEE80211_TPT_LEDTRIG_FL_CONNECTED, 574 il_blink, 575 ARRAY_SIZE(il_blink)); 576 break; 577 case IL_LED_RF_STATE: 578 il->led.default_trigger = ieee80211_get_radio_led_name(il->hw); 579 break; 580 } 581 582 ret = led_classdev_register(&il->pci_dev->dev, &il->led); 583 if (ret) { 584 kfree(il->led.name); 585 return; 586 } 587 588 il->led_registered = true; 589 } 590 EXPORT_SYMBOL(il_leds_init); 591 592 void 593 il_leds_exit(struct il_priv *il) 594 { 595 if (!il->led_registered) 596 return; 597 598 led_classdev_unregister(&il->led); 599 kfree(il->led.name); 600 } 601 EXPORT_SYMBOL(il_leds_exit); 602 603 /************************** EEPROM BANDS **************************** 604 * 605 * The il_eeprom_band definitions below provide the mapping from the 606 * EEPROM contents to the specific channel number supported for each 607 * band. 608 * 609 * For example, il_priv->eeprom.band_3_channels[4] from the band_3 610 * definition below maps to physical channel 42 in the 5.2GHz spectrum. 611 * The specific geography and calibration information for that channel 612 * is contained in the eeprom map itself. 613 * 614 * During init, we copy the eeprom information and channel map 615 * information into il->channel_info_24/52 and il->channel_map_24/52 616 * 617 * channel_map_24/52 provides the idx in the channel_info array for a 618 * given channel. We have to have two separate maps as there is channel 619 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and 620 * band_2 621 * 622 * A value of 0xff stored in the channel_map indicates that the channel 623 * is not supported by the hardware at all. 624 * 625 * A value of 0xfe in the channel_map indicates that the channel is not 626 * valid for Tx with the current hardware. This means that 627 * while the system can tune and receive on a given channel, it may not 628 * be able to associate or transmit any frames on that 629 * channel. There is no corresponding channel information for that 630 * entry. 631 * 632 *********************************************************************/ 633 634 /* 2.4 GHz */ 635 const u8 il_eeprom_band_1[14] = { 636 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 637 }; 638 639 /* 5.2 GHz bands */ 640 static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */ 641 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 642 }; 643 644 static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */ 645 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 646 }; 647 648 static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */ 649 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 650 }; 651 652 static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */ 653 145, 149, 153, 157, 161, 165 654 }; 655 656 static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */ 657 1, 2, 3, 4, 5, 6, 7 658 }; 659 660 static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */ 661 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 662 }; 663 664 /****************************************************************************** 665 * 666 * EEPROM related functions 667 * 668 ******************************************************************************/ 669 670 static int 671 il_eeprom_verify_signature(struct il_priv *il) 672 { 673 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; 674 int ret = 0; 675 676 D_EEPROM("EEPROM signature=0x%08x\n", gp); 677 switch (gp) { 678 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: 679 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: 680 break; 681 default: 682 IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp); 683 ret = -ENOENT; 684 break; 685 } 686 return ret; 687 } 688 689 const u8 * 690 il_eeprom_query_addr(const struct il_priv *il, size_t offset) 691 { 692 BUG_ON(offset >= il->cfg->eeprom_size); 693 return &il->eeprom[offset]; 694 } 695 EXPORT_SYMBOL(il_eeprom_query_addr); 696 697 u16 698 il_eeprom_query16(const struct il_priv *il, size_t offset) 699 { 700 if (!il->eeprom) 701 return 0; 702 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8); 703 } 704 EXPORT_SYMBOL(il_eeprom_query16); 705 706 /** 707 * il_eeprom_init - read EEPROM contents 708 * 709 * Load the EEPROM contents from adapter into il->eeprom 710 * 711 * NOTE: This routine uses the non-debug IO access functions. 712 */ 713 int 714 il_eeprom_init(struct il_priv *il) 715 { 716 __le16 *e; 717 u32 gp = _il_rd(il, CSR_EEPROM_GP); 718 int sz; 719 int ret; 720 u16 addr; 721 722 /* allocate eeprom */ 723 sz = il->cfg->eeprom_size; 724 D_EEPROM("NVM size = %d\n", sz); 725 il->eeprom = kzalloc(sz, GFP_KERNEL); 726 if (!il->eeprom) 727 return -ENOMEM; 728 729 e = (__le16 *) il->eeprom; 730 731 il->ops->apm_init(il); 732 733 ret = il_eeprom_verify_signature(il); 734 if (ret < 0) { 735 IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp); 736 ret = -ENOENT; 737 goto err; 738 } 739 740 /* Make sure driver (instead of uCode) is allowed to read EEPROM */ 741 ret = il->ops->eeprom_acquire_semaphore(il); 742 if (ret < 0) { 743 IL_ERR("Failed to acquire EEPROM semaphore.\n"); 744 ret = -ENOENT; 745 goto err; 746 } 747 748 /* eeprom is an array of 16bit values */ 749 for (addr = 0; addr < sz; addr += sizeof(u16)) { 750 u32 r; 751 752 _il_wr(il, CSR_EEPROM_REG, 753 CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); 754 755 ret = 756 _il_poll_bit(il, CSR_EEPROM_REG, 757 CSR_EEPROM_REG_READ_VALID_MSK, 758 CSR_EEPROM_REG_READ_VALID_MSK, 759 IL_EEPROM_ACCESS_TIMEOUT); 760 if (ret < 0) { 761 IL_ERR("Time out reading EEPROM[%d]\n", addr); 762 goto done; 763 } 764 r = _il_rd(il, CSR_EEPROM_REG); 765 e[addr / 2] = cpu_to_le16(r >> 16); 766 } 767 768 D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM", 769 il_eeprom_query16(il, EEPROM_VERSION)); 770 771 ret = 0; 772 done: 773 il->ops->eeprom_release_semaphore(il); 774 775 err: 776 if (ret) 777 il_eeprom_free(il); 778 /* Reset chip to save power until we load uCode during "up". */ 779 il_apm_stop(il); 780 return ret; 781 } 782 EXPORT_SYMBOL(il_eeprom_init); 783 784 void 785 il_eeprom_free(struct il_priv *il) 786 { 787 kfree(il->eeprom); 788 il->eeprom = NULL; 789 } 790 EXPORT_SYMBOL(il_eeprom_free); 791 792 static void 793 il_init_band_reference(const struct il_priv *il, int eep_band, 794 int *eeprom_ch_count, 795 const struct il_eeprom_channel **eeprom_ch_info, 796 const u8 **eeprom_ch_idx) 797 { 798 u32 offset = il->cfg->regulatory_bands[eep_band - 1]; 799 800 switch (eep_band) { 801 case 1: /* 2.4GHz band */ 802 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1); 803 *eeprom_ch_info = 804 (struct il_eeprom_channel *)il_eeprom_query_addr(il, 805 offset); 806 *eeprom_ch_idx = il_eeprom_band_1; 807 break; 808 case 2: /* 4.9GHz band */ 809 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2); 810 *eeprom_ch_info = 811 (struct il_eeprom_channel *)il_eeprom_query_addr(il, 812 offset); 813 *eeprom_ch_idx = il_eeprom_band_2; 814 break; 815 case 3: /* 5.2GHz band */ 816 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3); 817 *eeprom_ch_info = 818 (struct il_eeprom_channel *)il_eeprom_query_addr(il, 819 offset); 820 *eeprom_ch_idx = il_eeprom_band_3; 821 break; 822 case 4: /* 5.5GHz band */ 823 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4); 824 *eeprom_ch_info = 825 (struct il_eeprom_channel *)il_eeprom_query_addr(il, 826 offset); 827 *eeprom_ch_idx = il_eeprom_band_4; 828 break; 829 case 5: /* 5.7GHz band */ 830 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5); 831 *eeprom_ch_info = 832 (struct il_eeprom_channel *)il_eeprom_query_addr(il, 833 offset); 834 *eeprom_ch_idx = il_eeprom_band_5; 835 break; 836 case 6: /* 2.4GHz ht40 channels */ 837 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6); 838 *eeprom_ch_info = 839 (struct il_eeprom_channel *)il_eeprom_query_addr(il, 840 offset); 841 *eeprom_ch_idx = il_eeprom_band_6; 842 break; 843 case 7: /* 5 GHz ht40 channels */ 844 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7); 845 *eeprom_ch_info = 846 (struct il_eeprom_channel *)il_eeprom_query_addr(il, 847 offset); 848 *eeprom_ch_idx = il_eeprom_band_7; 849 break; 850 default: 851 BUG(); 852 } 853 } 854 855 #define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ 856 ? # x " " : "") 857 /** 858 * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il. 859 * 860 * Does not set up a command, or touch hardware. 861 */ 862 static int 863 il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel, 864 const struct il_eeprom_channel *eeprom_ch, 865 u8 clear_ht40_extension_channel) 866 { 867 struct il_channel_info *ch_info; 868 869 ch_info = 870 (struct il_channel_info *)il_get_channel_info(il, band, channel); 871 872 if (!il_is_channel_valid(ch_info)) 873 return -1; 874 875 D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):" 876 " Ad-Hoc %ssupported\n", ch_info->channel, 877 il_is_channel_a_band(ch_info) ? "5.2" : "2.4", 878 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE), 879 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE), 880 CHECK_AND_PRINT(DFS), eeprom_ch->flags, 881 eeprom_ch->max_power_avg, 882 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) && 883 !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not "); 884 885 ch_info->ht40_eeprom = *eeprom_ch; 886 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; 887 ch_info->ht40_flags = eeprom_ch->flags; 888 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) 889 ch_info->ht40_extension_channel &= 890 ~clear_ht40_extension_channel; 891 892 return 0; 893 } 894 895 #define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ 896 ? # x " " : "") 897 898 /** 899 * il_init_channel_map - Set up driver's info for all possible channels 900 */ 901 int 902 il_init_channel_map(struct il_priv *il) 903 { 904 int eeprom_ch_count = 0; 905 const u8 *eeprom_ch_idx = NULL; 906 const struct il_eeprom_channel *eeprom_ch_info = NULL; 907 int band, ch; 908 struct il_channel_info *ch_info; 909 910 if (il->channel_count) { 911 D_EEPROM("Channel map already initialized.\n"); 912 return 0; 913 } 914 915 D_EEPROM("Initializing regulatory info from EEPROM\n"); 916 917 il->channel_count = 918 ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) + 919 ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) + 920 ARRAY_SIZE(il_eeprom_band_5); 921 922 D_EEPROM("Parsing data for %d channels.\n", il->channel_count); 923 924 il->channel_info = 925 kcalloc(il->channel_count, sizeof(struct il_channel_info), 926 GFP_KERNEL); 927 if (!il->channel_info) { 928 IL_ERR("Could not allocate channel_info\n"); 929 il->channel_count = 0; 930 return -ENOMEM; 931 } 932 933 ch_info = il->channel_info; 934 935 /* Loop through the 5 EEPROM bands adding them in order to the 936 * channel map we maintain (that contains additional information than 937 * what just in the EEPROM) */ 938 for (band = 1; band <= 5; band++) { 939 940 il_init_band_reference(il, band, &eeprom_ch_count, 941 &eeprom_ch_info, &eeprom_ch_idx); 942 943 /* Loop through each band adding each of the channels */ 944 for (ch = 0; ch < eeprom_ch_count; ch++) { 945 ch_info->channel = eeprom_ch_idx[ch]; 946 ch_info->band = 947 (band == 948 1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; 949 950 /* permanently store EEPROM's channel regulatory flags 951 * and max power in channel info database. */ 952 ch_info->eeprom = eeprom_ch_info[ch]; 953 954 /* Copy the run-time flags so they are there even on 955 * invalid channels */ 956 ch_info->flags = eeprom_ch_info[ch].flags; 957 /* First write that ht40 is not enabled, and then enable 958 * one by one */ 959 ch_info->ht40_extension_channel = 960 IEEE80211_CHAN_NO_HT40; 961 962 if (!(il_is_channel_valid(ch_info))) { 963 D_EEPROM("Ch. %d Flags %x [%sGHz] - " 964 "No traffic\n", ch_info->channel, 965 ch_info->flags, 966 il_is_channel_a_band(ch_info) ? "5.2" : 967 "2.4"); 968 ch_info++; 969 continue; 970 } 971 972 /* Initialize regulatory-based run-time data */ 973 ch_info->max_power_avg = ch_info->curr_txpow = 974 eeprom_ch_info[ch].max_power_avg; 975 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; 976 ch_info->min_power = 0; 977 978 D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):" 979 " Ad-Hoc %ssupported\n", ch_info->channel, 980 il_is_channel_a_band(ch_info) ? "5.2" : "2.4", 981 CHECK_AND_PRINT_I(VALID), 982 CHECK_AND_PRINT_I(IBSS), 983 CHECK_AND_PRINT_I(ACTIVE), 984 CHECK_AND_PRINT_I(RADAR), 985 CHECK_AND_PRINT_I(WIDE), 986 CHECK_AND_PRINT_I(DFS), 987 eeprom_ch_info[ch].flags, 988 eeprom_ch_info[ch].max_power_avg, 989 ((eeprom_ch_info[ch]. 990 flags & EEPROM_CHANNEL_IBSS) && 991 !(eeprom_ch_info[ch]. 992 flags & EEPROM_CHANNEL_RADAR)) ? "" : 993 "not "); 994 995 ch_info++; 996 } 997 } 998 999 /* Check if we do have HT40 channels */ 1000 if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 && 1001 il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40) 1002 return 0; 1003 1004 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ 1005 for (band = 6; band <= 7; band++) { 1006 enum nl80211_band ieeeband; 1007 1008 il_init_band_reference(il, band, &eeprom_ch_count, 1009 &eeprom_ch_info, &eeprom_ch_idx); 1010 1011 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ 1012 ieeeband = 1013 (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; 1014 1015 /* Loop through each band adding each of the channels */ 1016 for (ch = 0; ch < eeprom_ch_count; ch++) { 1017 /* Set up driver's info for lower half */ 1018 il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch], 1019 &eeprom_ch_info[ch], 1020 IEEE80211_CHAN_NO_HT40PLUS); 1021 1022 /* Set up driver's info for upper half */ 1023 il_mod_ht40_chan_info(il, ieeeband, 1024 eeprom_ch_idx[ch] + 4, 1025 &eeprom_ch_info[ch], 1026 IEEE80211_CHAN_NO_HT40MINUS); 1027 } 1028 } 1029 1030 return 0; 1031 } 1032 EXPORT_SYMBOL(il_init_channel_map); 1033 1034 /* 1035 * il_free_channel_map - undo allocations in il_init_channel_map 1036 */ 1037 void 1038 il_free_channel_map(struct il_priv *il) 1039 { 1040 kfree(il->channel_info); 1041 il->channel_count = 0; 1042 } 1043 EXPORT_SYMBOL(il_free_channel_map); 1044 1045 /** 1046 * il_get_channel_info - Find driver's ilate channel info 1047 * 1048 * Based on band and channel number. 1049 */ 1050 const struct il_channel_info * 1051 il_get_channel_info(const struct il_priv *il, enum nl80211_band band, 1052 u16 channel) 1053 { 1054 int i; 1055 1056 switch (band) { 1057 case NL80211_BAND_5GHZ: 1058 for (i = 14; i < il->channel_count; i++) { 1059 if (il->channel_info[i].channel == channel) 1060 return &il->channel_info[i]; 1061 } 1062 break; 1063 case NL80211_BAND_2GHZ: 1064 if (channel >= 1 && channel <= 14) 1065 return &il->channel_info[channel - 1]; 1066 break; 1067 default: 1068 BUG(); 1069 } 1070 1071 return NULL; 1072 } 1073 EXPORT_SYMBOL(il_get_channel_info); 1074 1075 /* 1076 * Setting power level allows the card to go to sleep when not busy. 1077 * 1078 * We calculate a sleep command based on the required latency, which 1079 * we get from mac80211. 1080 */ 1081 1082 #define SLP_VEC(X0, X1, X2, X3, X4) { \ 1083 cpu_to_le32(X0), \ 1084 cpu_to_le32(X1), \ 1085 cpu_to_le32(X2), \ 1086 cpu_to_le32(X3), \ 1087 cpu_to_le32(X4) \ 1088 } 1089 1090 static void 1091 il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd) 1092 { 1093 const __le32 interval[3][IL_POWER_VEC_SIZE] = { 1094 SLP_VEC(2, 2, 4, 6, 0xFF), 1095 SLP_VEC(2, 4, 7, 10, 10), 1096 SLP_VEC(4, 7, 10, 10, 0xFF) 1097 }; 1098 int i, dtim_period, no_dtim; 1099 u32 max_sleep; 1100 bool skip; 1101 1102 memset(cmd, 0, sizeof(*cmd)); 1103 1104 if (il->power_data.pci_pm) 1105 cmd->flags |= IL_POWER_PCI_PM_MSK; 1106 1107 /* if no Power Save, we are done */ 1108 if (il->power_data.ps_disabled) 1109 return; 1110 1111 cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK; 1112 cmd->keep_alive_seconds = 0; 1113 cmd->debug_flags = 0; 1114 cmd->rx_data_timeout = cpu_to_le32(25 * 1024); 1115 cmd->tx_data_timeout = cpu_to_le32(25 * 1024); 1116 cmd->keep_alive_beacons = 0; 1117 1118 dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0; 1119 1120 if (dtim_period <= 2) { 1121 memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0])); 1122 no_dtim = 2; 1123 } else if (dtim_period <= 10) { 1124 memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1])); 1125 no_dtim = 2; 1126 } else { 1127 memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2])); 1128 no_dtim = 0; 1129 } 1130 1131 if (dtim_period == 0) { 1132 dtim_period = 1; 1133 skip = false; 1134 } else { 1135 skip = !!no_dtim; 1136 } 1137 1138 if (skip) { 1139 __le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1]; 1140 1141 max_sleep = le32_to_cpu(tmp); 1142 if (max_sleep == 0xFF) 1143 max_sleep = dtim_period * (skip + 1); 1144 else if (max_sleep > dtim_period) 1145 max_sleep = (max_sleep / dtim_period) * dtim_period; 1146 cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK; 1147 } else { 1148 max_sleep = dtim_period; 1149 cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK; 1150 } 1151 1152 for (i = 0; i < IL_POWER_VEC_SIZE; i++) 1153 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) 1154 cmd->sleep_interval[i] = cpu_to_le32(max_sleep); 1155 } 1156 1157 static int 1158 il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd) 1159 { 1160 D_POWER("Sending power/sleep command\n"); 1161 D_POWER("Flags value = 0x%08X\n", cmd->flags); 1162 D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); 1163 D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); 1164 D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n", 1165 le32_to_cpu(cmd->sleep_interval[0]), 1166 le32_to_cpu(cmd->sleep_interval[1]), 1167 le32_to_cpu(cmd->sleep_interval[2]), 1168 le32_to_cpu(cmd->sleep_interval[3]), 1169 le32_to_cpu(cmd->sleep_interval[4])); 1170 1171 return il_send_cmd_pdu(il, C_POWER_TBL, 1172 sizeof(struct il_powertable_cmd), cmd); 1173 } 1174 1175 static int 1176 il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force) 1177 { 1178 int ret; 1179 bool update_chains; 1180 1181 lockdep_assert_held(&il->mutex); 1182 1183 /* Don't update the RX chain when chain noise calibration is running */ 1184 update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE || 1185 il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE; 1186 1187 if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) 1188 return 0; 1189 1190 if (!il_is_ready_rf(il)) 1191 return -EIO; 1192 1193 /* scan complete use sleep_power_next, need to be updated */ 1194 memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); 1195 if (test_bit(S_SCANNING, &il->status) && !force) { 1196 D_INFO("Defer power set mode while scanning\n"); 1197 return 0; 1198 } 1199 1200 if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK) 1201 set_bit(S_POWER_PMI, &il->status); 1202 1203 ret = il_set_power(il, cmd); 1204 if (!ret) { 1205 if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)) 1206 clear_bit(S_POWER_PMI, &il->status); 1207 1208 if (il->ops->update_chain_flags && update_chains) 1209 il->ops->update_chain_flags(il); 1210 else if (il->ops->update_chain_flags) 1211 D_POWER("Cannot update the power, chain noise " 1212 "calibration running: %d\n", 1213 il->chain_noise_data.state); 1214 1215 memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)); 1216 } else 1217 IL_ERR("set power fail, ret = %d", ret); 1218 1219 return ret; 1220 } 1221 1222 int 1223 il_power_update_mode(struct il_priv *il, bool force) 1224 { 1225 struct il_powertable_cmd cmd; 1226 1227 il_build_powertable_cmd(il, &cmd); 1228 1229 return il_power_set_mode(il, &cmd, force); 1230 } 1231 EXPORT_SYMBOL(il_power_update_mode); 1232 1233 /* initialize to default */ 1234 void 1235 il_power_initialize(struct il_priv *il) 1236 { 1237 u16 lctl; 1238 1239 pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); 1240 il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); 1241 1242 il->power_data.debug_sleep_level_override = -1; 1243 1244 memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd)); 1245 } 1246 EXPORT_SYMBOL(il_power_initialize); 1247 1248 /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after 1249 * sending probe req. This should be set long enough to hear probe responses 1250 * from more than one AP. */ 1251 #define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ 1252 #define IL_ACTIVE_DWELL_TIME_52 (20) 1253 1254 #define IL_ACTIVE_DWELL_FACTOR_24GHZ (3) 1255 #define IL_ACTIVE_DWELL_FACTOR_52GHZ (2) 1256 1257 /* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. 1258 * Must be set longer than active dwell time. 1259 * For the most reliable scan, set > AP beacon interval (typically 100msec). */ 1260 #define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ 1261 #define IL_PASSIVE_DWELL_TIME_52 (10) 1262 #define IL_PASSIVE_DWELL_BASE (100) 1263 #define IL_CHANNEL_TUNE_TIME 5 1264 1265 static int 1266 il_send_scan_abort(struct il_priv *il) 1267 { 1268 int ret; 1269 struct il_rx_pkt *pkt; 1270 struct il_host_cmd cmd = { 1271 .id = C_SCAN_ABORT, 1272 .flags = CMD_WANT_SKB, 1273 }; 1274 1275 /* Exit instantly with error when device is not ready 1276 * to receive scan abort command or it does not perform 1277 * hardware scan currently */ 1278 if (!test_bit(S_READY, &il->status) || 1279 !test_bit(S_GEO_CONFIGURED, &il->status) || 1280 !test_bit(S_SCAN_HW, &il->status) || 1281 test_bit(S_FW_ERROR, &il->status) || 1282 test_bit(S_EXIT_PENDING, &il->status)) 1283 return -EIO; 1284 1285 ret = il_send_cmd_sync(il, &cmd); 1286 if (ret) 1287 return ret; 1288 1289 pkt = (struct il_rx_pkt *)cmd.reply_page; 1290 if (pkt->u.status != CAN_ABORT_STATUS) { 1291 /* The scan abort will return 1 for success or 1292 * 2 for "failure". A failure condition can be 1293 * due to simply not being in an active scan which 1294 * can occur if we send the scan abort before we 1295 * the microcode has notified us that a scan is 1296 * completed. */ 1297 D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status); 1298 ret = -EIO; 1299 } 1300 1301 il_free_pages(il, cmd.reply_page); 1302 return ret; 1303 } 1304 1305 static void 1306 il_complete_scan(struct il_priv *il, bool aborted) 1307 { 1308 struct cfg80211_scan_info info = { 1309 .aborted = aborted, 1310 }; 1311 1312 /* check if scan was requested from mac80211 */ 1313 if (il->scan_request) { 1314 D_SCAN("Complete scan in mac80211\n"); 1315 ieee80211_scan_completed(il->hw, &info); 1316 } 1317 1318 il->scan_vif = NULL; 1319 il->scan_request = NULL; 1320 } 1321 1322 void 1323 il_force_scan_end(struct il_priv *il) 1324 { 1325 lockdep_assert_held(&il->mutex); 1326 1327 if (!test_bit(S_SCANNING, &il->status)) { 1328 D_SCAN("Forcing scan end while not scanning\n"); 1329 return; 1330 } 1331 1332 D_SCAN("Forcing scan end\n"); 1333 clear_bit(S_SCANNING, &il->status); 1334 clear_bit(S_SCAN_HW, &il->status); 1335 clear_bit(S_SCAN_ABORTING, &il->status); 1336 il_complete_scan(il, true); 1337 } 1338 1339 static void 1340 il_do_scan_abort(struct il_priv *il) 1341 { 1342 int ret; 1343 1344 lockdep_assert_held(&il->mutex); 1345 1346 if (!test_bit(S_SCANNING, &il->status)) { 1347 D_SCAN("Not performing scan to abort\n"); 1348 return; 1349 } 1350 1351 if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) { 1352 D_SCAN("Scan abort in progress\n"); 1353 return; 1354 } 1355 1356 ret = il_send_scan_abort(il); 1357 if (ret) { 1358 D_SCAN("Send scan abort failed %d\n", ret); 1359 il_force_scan_end(il); 1360 } else 1361 D_SCAN("Successfully send scan abort\n"); 1362 } 1363 1364 /** 1365 * il_scan_cancel - Cancel any currently executing HW scan 1366 */ 1367 int 1368 il_scan_cancel(struct il_priv *il) 1369 { 1370 D_SCAN("Queuing abort scan\n"); 1371 queue_work(il->workqueue, &il->abort_scan); 1372 return 0; 1373 } 1374 EXPORT_SYMBOL(il_scan_cancel); 1375 1376 /** 1377 * il_scan_cancel_timeout - Cancel any currently executing HW scan 1378 * @ms: amount of time to wait (in milliseconds) for scan to abort 1379 * 1380 */ 1381 int 1382 il_scan_cancel_timeout(struct il_priv *il, unsigned long ms) 1383 { 1384 unsigned long timeout = jiffies + msecs_to_jiffies(ms); 1385 1386 lockdep_assert_held(&il->mutex); 1387 1388 D_SCAN("Scan cancel timeout\n"); 1389 1390 il_do_scan_abort(il); 1391 1392 while (time_before_eq(jiffies, timeout)) { 1393 if (!test_bit(S_SCAN_HW, &il->status)) 1394 break; 1395 msleep(20); 1396 } 1397 1398 return test_bit(S_SCAN_HW, &il->status); 1399 } 1400 EXPORT_SYMBOL(il_scan_cancel_timeout); 1401 1402 /* Service response to C_SCAN (0x80) */ 1403 static void 1404 il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb) 1405 { 1406 #ifdef CONFIG_IWLEGACY_DEBUG 1407 struct il_rx_pkt *pkt = rxb_addr(rxb); 1408 struct il_scanreq_notification *notif = 1409 (struct il_scanreq_notification *)pkt->u.raw; 1410 1411 D_SCAN("Scan request status = 0x%x\n", notif->status); 1412 #endif 1413 } 1414 1415 /* Service N_SCAN_START (0x82) */ 1416 static void 1417 il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb) 1418 { 1419 struct il_rx_pkt *pkt = rxb_addr(rxb); 1420 struct il_scanstart_notification *notif = 1421 (struct il_scanstart_notification *)pkt->u.raw; 1422 il->scan_start_tsf = le32_to_cpu(notif->tsf_low); 1423 D_SCAN("Scan start: " "%d [802.11%s] " 1424 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel, 1425 notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high), 1426 le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer); 1427 } 1428 1429 /* Service N_SCAN_RESULTS (0x83) */ 1430 static void 1431 il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb) 1432 { 1433 #ifdef CONFIG_IWLEGACY_DEBUG 1434 struct il_rx_pkt *pkt = rxb_addr(rxb); 1435 struct il_scanresults_notification *notif = 1436 (struct il_scanresults_notification *)pkt->u.raw; 1437 1438 D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d " 1439 "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a", 1440 le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low), 1441 le32_to_cpu(notif->stats[0]), 1442 le32_to_cpu(notif->tsf_low) - il->scan_start_tsf); 1443 #endif 1444 } 1445 1446 /* Service N_SCAN_COMPLETE (0x84) */ 1447 static void 1448 il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb) 1449 { 1450 1451 #ifdef CONFIG_IWLEGACY_DEBUG 1452 struct il_rx_pkt *pkt = rxb_addr(rxb); 1453 struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 1454 #endif 1455 1456 D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", 1457 scan_notif->scanned_channels, scan_notif->tsf_low, 1458 scan_notif->tsf_high, scan_notif->status); 1459 1460 /* The HW is no longer scanning */ 1461 clear_bit(S_SCAN_HW, &il->status); 1462 1463 D_SCAN("Scan on %sGHz took %dms\n", 1464 (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2", 1465 jiffies_to_msecs(jiffies - il->scan_start)); 1466 1467 queue_work(il->workqueue, &il->scan_completed); 1468 } 1469 1470 void 1471 il_setup_rx_scan_handlers(struct il_priv *il) 1472 { 1473 /* scan handlers */ 1474 il->handlers[C_SCAN] = il_hdl_scan; 1475 il->handlers[N_SCAN_START] = il_hdl_scan_start; 1476 il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results; 1477 il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete; 1478 } 1479 EXPORT_SYMBOL(il_setup_rx_scan_handlers); 1480 1481 u16 1482 il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band, 1483 u8 n_probes) 1484 { 1485 if (band == NL80211_BAND_5GHZ) 1486 return IL_ACTIVE_DWELL_TIME_52 + 1487 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); 1488 else 1489 return IL_ACTIVE_DWELL_TIME_24 + 1490 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); 1491 } 1492 EXPORT_SYMBOL(il_get_active_dwell_time); 1493 1494 u16 1495 il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band, 1496 struct ieee80211_vif *vif) 1497 { 1498 u16 value; 1499 1500 u16 passive = 1501 (band == 1502 NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE + 1503 IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE + 1504 IL_PASSIVE_DWELL_TIME_52; 1505 1506 if (il_is_any_associated(il)) { 1507 /* 1508 * If we're associated, we clamp the maximum passive 1509 * dwell time to be 98% of the smallest beacon interval 1510 * (minus 2 * channel tune time) 1511 */ 1512 value = il->vif ? il->vif->bss_conf.beacon_int : 0; 1513 if (value > IL_PASSIVE_DWELL_BASE || !value) 1514 value = IL_PASSIVE_DWELL_BASE; 1515 value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2; 1516 passive = min(value, passive); 1517 } 1518 1519 return passive; 1520 } 1521 EXPORT_SYMBOL(il_get_passive_dwell_time); 1522 1523 void 1524 il_init_scan_params(struct il_priv *il) 1525 { 1526 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1; 1527 if (!il->scan_tx_ant[NL80211_BAND_5GHZ]) 1528 il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx; 1529 if (!il->scan_tx_ant[NL80211_BAND_2GHZ]) 1530 il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx; 1531 } 1532 EXPORT_SYMBOL(il_init_scan_params); 1533 1534 static int 1535 il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif) 1536 { 1537 int ret; 1538 1539 lockdep_assert_held(&il->mutex); 1540 1541 cancel_delayed_work(&il->scan_check); 1542 1543 if (!il_is_ready_rf(il)) { 1544 IL_WARN("Request scan called when driver not ready.\n"); 1545 return -EIO; 1546 } 1547 1548 if (test_bit(S_SCAN_HW, &il->status)) { 1549 D_SCAN("Multiple concurrent scan requests in parallel.\n"); 1550 return -EBUSY; 1551 } 1552 1553 if (test_bit(S_SCAN_ABORTING, &il->status)) { 1554 D_SCAN("Scan request while abort pending.\n"); 1555 return -EBUSY; 1556 } 1557 1558 D_SCAN("Starting scan...\n"); 1559 1560 set_bit(S_SCANNING, &il->status); 1561 il->scan_start = jiffies; 1562 1563 ret = il->ops->request_scan(il, vif); 1564 if (ret) { 1565 clear_bit(S_SCANNING, &il->status); 1566 return ret; 1567 } 1568 1569 queue_delayed_work(il->workqueue, &il->scan_check, 1570 IL_SCAN_CHECK_WATCHDOG); 1571 1572 return 0; 1573 } 1574 1575 int 1576 il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1577 struct ieee80211_scan_request *hw_req) 1578 { 1579 struct cfg80211_scan_request *req = &hw_req->req; 1580 struct il_priv *il = hw->priv; 1581 int ret; 1582 1583 if (req->n_channels == 0) { 1584 IL_ERR("Can not scan on no channels.\n"); 1585 return -EINVAL; 1586 } 1587 1588 mutex_lock(&il->mutex); 1589 D_MAC80211("enter\n"); 1590 1591 if (test_bit(S_SCANNING, &il->status)) { 1592 D_SCAN("Scan already in progress.\n"); 1593 ret = -EAGAIN; 1594 goto out_unlock; 1595 } 1596 1597 /* mac80211 will only ask for one band at a time */ 1598 il->scan_request = req; 1599 il->scan_vif = vif; 1600 il->scan_band = req->channels[0]->band; 1601 1602 ret = il_scan_initiate(il, vif); 1603 1604 out_unlock: 1605 D_MAC80211("leave ret %d\n", ret); 1606 mutex_unlock(&il->mutex); 1607 1608 return ret; 1609 } 1610 EXPORT_SYMBOL(il_mac_hw_scan); 1611 1612 static void 1613 il_bg_scan_check(struct work_struct *data) 1614 { 1615 struct il_priv *il = 1616 container_of(data, struct il_priv, scan_check.work); 1617 1618 D_SCAN("Scan check work\n"); 1619 1620 /* Since we are here firmware does not finish scan and 1621 * most likely is in bad shape, so we don't bother to 1622 * send abort command, just force scan complete to mac80211 */ 1623 mutex_lock(&il->mutex); 1624 il_force_scan_end(il); 1625 mutex_unlock(&il->mutex); 1626 } 1627 1628 /** 1629 * il_fill_probe_req - fill in all required fields and IE for probe request 1630 */ 1631 1632 u16 1633 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame, 1634 const u8 *ta, const u8 *ies, int ie_len, int left) 1635 { 1636 int len = 0; 1637 u8 *pos = NULL; 1638 1639 /* Make sure there is enough space for the probe request, 1640 * two mandatory IEs and the data */ 1641 left -= 24; 1642 if (left < 0) 1643 return 0; 1644 1645 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 1646 eth_broadcast_addr(frame->da); 1647 memcpy(frame->sa, ta, ETH_ALEN); 1648 eth_broadcast_addr(frame->bssid); 1649 frame->seq_ctrl = 0; 1650 1651 len += 24; 1652 1653 /* ...next IE... */ 1654 pos = &frame->u.probe_req.variable[0]; 1655 1656 /* fill in our indirect SSID IE */ 1657 left -= 2; 1658 if (left < 0) 1659 return 0; 1660 *pos++ = WLAN_EID_SSID; 1661 *pos++ = 0; 1662 1663 len += 2; 1664 1665 if (WARN_ON(left < ie_len)) 1666 return len; 1667 1668 if (ies && ie_len) { 1669 memcpy(pos, ies, ie_len); 1670 len += ie_len; 1671 } 1672 1673 return (u16) len; 1674 } 1675 EXPORT_SYMBOL(il_fill_probe_req); 1676 1677 static void 1678 il_bg_abort_scan(struct work_struct *work) 1679 { 1680 struct il_priv *il = container_of(work, struct il_priv, abort_scan); 1681 1682 D_SCAN("Abort scan work\n"); 1683 1684 /* We keep scan_check work queued in case when firmware will not 1685 * report back scan completed notification */ 1686 mutex_lock(&il->mutex); 1687 il_scan_cancel_timeout(il, 200); 1688 mutex_unlock(&il->mutex); 1689 } 1690 1691 static void 1692 il_bg_scan_completed(struct work_struct *work) 1693 { 1694 struct il_priv *il = container_of(work, struct il_priv, scan_completed); 1695 bool aborted; 1696 1697 D_SCAN("Completed scan.\n"); 1698 1699 cancel_delayed_work(&il->scan_check); 1700 1701 mutex_lock(&il->mutex); 1702 1703 aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status); 1704 if (aborted) 1705 D_SCAN("Aborted scan completed.\n"); 1706 1707 if (!test_and_clear_bit(S_SCANNING, &il->status)) { 1708 D_SCAN("Scan already completed.\n"); 1709 goto out_settings; 1710 } 1711 1712 il_complete_scan(il, aborted); 1713 1714 out_settings: 1715 /* Can we still talk to firmware ? */ 1716 if (!il_is_ready_rf(il)) 1717 goto out; 1718 1719 /* 1720 * We do not commit power settings while scan is pending, 1721 * do it now if the settings changed. 1722 */ 1723 il_power_set_mode(il, &il->power_data.sleep_cmd_next, false); 1724 il_set_tx_power(il, il->tx_power_next, false); 1725 1726 il->ops->post_scan(il); 1727 1728 out: 1729 mutex_unlock(&il->mutex); 1730 } 1731 1732 void 1733 il_setup_scan_deferred_work(struct il_priv *il) 1734 { 1735 INIT_WORK(&il->scan_completed, il_bg_scan_completed); 1736 INIT_WORK(&il->abort_scan, il_bg_abort_scan); 1737 INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check); 1738 } 1739 EXPORT_SYMBOL(il_setup_scan_deferred_work); 1740 1741 void 1742 il_cancel_scan_deferred_work(struct il_priv *il) 1743 { 1744 cancel_work_sync(&il->abort_scan); 1745 cancel_work_sync(&il->scan_completed); 1746 1747 if (cancel_delayed_work_sync(&il->scan_check)) { 1748 mutex_lock(&il->mutex); 1749 il_force_scan_end(il); 1750 mutex_unlock(&il->mutex); 1751 } 1752 } 1753 EXPORT_SYMBOL(il_cancel_scan_deferred_work); 1754 1755 /* il->sta_lock must be held */ 1756 static void 1757 il_sta_ucode_activate(struct il_priv *il, u8 sta_id) 1758 { 1759 1760 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) 1761 IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n", 1762 sta_id, il->stations[sta_id].sta.sta.addr); 1763 1764 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) { 1765 D_ASSOC("STA id %u addr %pM already present" 1766 " in uCode (according to driver)\n", sta_id, 1767 il->stations[sta_id].sta.sta.addr); 1768 } else { 1769 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE; 1770 D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id, 1771 il->stations[sta_id].sta.sta.addr); 1772 } 1773 } 1774 1775 static int 1776 il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta, 1777 struct il_rx_pkt *pkt, bool sync) 1778 { 1779 u8 sta_id = addsta->sta.sta_id; 1780 unsigned long flags; 1781 int ret = -EIO; 1782 1783 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { 1784 IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags); 1785 return ret; 1786 } 1787 1788 D_INFO("Processing response for adding station %u\n", sta_id); 1789 1790 spin_lock_irqsave(&il->sta_lock, flags); 1791 1792 switch (pkt->u.add_sta.status) { 1793 case ADD_STA_SUCCESS_MSK: 1794 D_INFO("C_ADD_STA PASSED\n"); 1795 il_sta_ucode_activate(il, sta_id); 1796 ret = 0; 1797 break; 1798 case ADD_STA_NO_ROOM_IN_TBL: 1799 IL_ERR("Adding station %d failed, no room in table.\n", sta_id); 1800 break; 1801 case ADD_STA_NO_BLOCK_ACK_RESOURCE: 1802 IL_ERR("Adding station %d failed, no block ack resource.\n", 1803 sta_id); 1804 break; 1805 case ADD_STA_MODIFY_NON_EXIST_STA: 1806 IL_ERR("Attempting to modify non-existing station %d\n", 1807 sta_id); 1808 break; 1809 default: 1810 D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status); 1811 break; 1812 } 1813 1814 D_INFO("%s station id %u addr %pM\n", 1815 il->stations[sta_id].sta.mode == 1816 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id, 1817 il->stations[sta_id].sta.sta.addr); 1818 1819 /* 1820 * XXX: The MAC address in the command buffer is often changed from 1821 * the original sent to the device. That is, the MAC address 1822 * written to the command buffer often is not the same MAC address 1823 * read from the command buffer when the command returns. This 1824 * issue has not yet been resolved and this debugging is left to 1825 * observe the problem. 1826 */ 1827 D_INFO("%s station according to cmd buffer %pM\n", 1828 il->stations[sta_id].sta.mode == 1829 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr); 1830 spin_unlock_irqrestore(&il->sta_lock, flags); 1831 1832 return ret; 1833 } 1834 1835 static void 1836 il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd, 1837 struct il_rx_pkt *pkt) 1838 { 1839 struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload; 1840 1841 il_process_add_sta_resp(il, addsta, pkt, false); 1842 1843 } 1844 1845 int 1846 il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags) 1847 { 1848 struct il_rx_pkt *pkt = NULL; 1849 int ret = 0; 1850 u8 data[sizeof(*sta)]; 1851 struct il_host_cmd cmd = { 1852 .id = C_ADD_STA, 1853 .flags = flags, 1854 .data = data, 1855 }; 1856 u8 sta_id __maybe_unused = sta->sta.sta_id; 1857 1858 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr, 1859 flags & CMD_ASYNC ? "a" : ""); 1860 1861 if (flags & CMD_ASYNC) 1862 cmd.callback = il_add_sta_callback; 1863 else { 1864 cmd.flags |= CMD_WANT_SKB; 1865 might_sleep(); 1866 } 1867 1868 cmd.len = il->ops->build_addsta_hcmd(sta, data); 1869 ret = il_send_cmd(il, &cmd); 1870 if (ret) 1871 return ret; 1872 if (flags & CMD_ASYNC) 1873 return 0; 1874 1875 pkt = (struct il_rx_pkt *)cmd.reply_page; 1876 ret = il_process_add_sta_resp(il, sta, pkt, true); 1877 1878 il_free_pages(il, cmd.reply_page); 1879 1880 return ret; 1881 } 1882 EXPORT_SYMBOL(il_send_add_sta); 1883 1884 static void 1885 il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta) 1886 { 1887 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; 1888 __le32 sta_flags; 1889 1890 if (!sta || !sta_ht_inf->ht_supported) 1891 goto done; 1892 1893 D_ASSOC("spatial multiplexing power save mode: %s\n", 1894 (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" : 1895 (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" : 1896 "disabled"); 1897 1898 sta_flags = il->stations[idx].sta.station_flags; 1899 1900 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); 1901 1902 switch (sta->smps_mode) { 1903 case IEEE80211_SMPS_STATIC: 1904 sta_flags |= STA_FLG_MIMO_DIS_MSK; 1905 break; 1906 case IEEE80211_SMPS_DYNAMIC: 1907 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; 1908 break; 1909 case IEEE80211_SMPS_OFF: 1910 break; 1911 default: 1912 IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode); 1913 break; 1914 } 1915 1916 sta_flags |= 1917 cpu_to_le32((u32) sta_ht_inf-> 1918 ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); 1919 1920 sta_flags |= 1921 cpu_to_le32((u32) sta_ht_inf-> 1922 ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); 1923 1924 if (il_is_ht40_tx_allowed(il, &sta->ht_cap)) 1925 sta_flags |= STA_FLG_HT40_EN_MSK; 1926 else 1927 sta_flags &= ~STA_FLG_HT40_EN_MSK; 1928 1929 il->stations[idx].sta.station_flags = sta_flags; 1930 done: 1931 return; 1932 } 1933 1934 /** 1935 * il_prep_station - Prepare station information for addition 1936 * 1937 * should be called with sta_lock held 1938 */ 1939 u8 1940 il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap, 1941 struct ieee80211_sta *sta) 1942 { 1943 struct il_station_entry *station; 1944 int i; 1945 u8 sta_id = IL_INVALID_STATION; 1946 u16 rate; 1947 1948 if (is_ap) 1949 sta_id = IL_AP_ID; 1950 else if (is_broadcast_ether_addr(addr)) 1951 sta_id = il->hw_params.bcast_id; 1952 else 1953 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) { 1954 if (ether_addr_equal(il->stations[i].sta.sta.addr, 1955 addr)) { 1956 sta_id = i; 1957 break; 1958 } 1959 1960 if (!il->stations[i].used && 1961 sta_id == IL_INVALID_STATION) 1962 sta_id = i; 1963 } 1964 1965 /* 1966 * These two conditions have the same outcome, but keep them 1967 * separate 1968 */ 1969 if (unlikely(sta_id == IL_INVALID_STATION)) 1970 return sta_id; 1971 1972 /* 1973 * uCode is not able to deal with multiple requests to add a 1974 * station. Keep track if one is in progress so that we do not send 1975 * another. 1976 */ 1977 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { 1978 D_INFO("STA %d already in process of being added.\n", sta_id); 1979 return sta_id; 1980 } 1981 1982 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && 1983 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) && 1984 ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) { 1985 D_ASSOC("STA %d (%pM) already added, not adding again.\n", 1986 sta_id, addr); 1987 return sta_id; 1988 } 1989 1990 station = &il->stations[sta_id]; 1991 station->used = IL_STA_DRIVER_ACTIVE; 1992 D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr); 1993 il->num_stations++; 1994 1995 /* Set up the C_ADD_STA command to send to device */ 1996 memset(&station->sta, 0, sizeof(struct il_addsta_cmd)); 1997 memcpy(station->sta.sta.addr, addr, ETH_ALEN); 1998 station->sta.mode = 0; 1999 station->sta.sta.sta_id = sta_id; 2000 station->sta.station_flags = 0; 2001 2002 /* 2003 * OK to call unconditionally, since local stations (IBSS BSSID 2004 * STA and broadcast STA) pass in a NULL sta, and mac80211 2005 * doesn't allow HT IBSS. 2006 */ 2007 il_set_ht_add_station(il, sta_id, sta); 2008 2009 /* 3945 only */ 2010 rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP; 2011 /* Turn on both antennas for the station... */ 2012 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); 2013 2014 return sta_id; 2015 2016 } 2017 EXPORT_SYMBOL_GPL(il_prep_station); 2018 2019 #define STA_WAIT_TIMEOUT (HZ/2) 2020 2021 /** 2022 * il_add_station_common - 2023 */ 2024 int 2025 il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap, 2026 struct ieee80211_sta *sta, u8 *sta_id_r) 2027 { 2028 unsigned long flags_spin; 2029 int ret = 0; 2030 u8 sta_id; 2031 struct il_addsta_cmd sta_cmd; 2032 2033 *sta_id_r = 0; 2034 spin_lock_irqsave(&il->sta_lock, flags_spin); 2035 sta_id = il_prep_station(il, addr, is_ap, sta); 2036 if (sta_id == IL_INVALID_STATION) { 2037 IL_ERR("Unable to prepare station %pM for addition\n", addr); 2038 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2039 return -EINVAL; 2040 } 2041 2042 /* 2043 * uCode is not able to deal with multiple requests to add a 2044 * station. Keep track if one is in progress so that we do not send 2045 * another. 2046 */ 2047 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { 2048 D_INFO("STA %d already in process of being added.\n", sta_id); 2049 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2050 return -EEXIST; 2051 } 2052 2053 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && 2054 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { 2055 D_ASSOC("STA %d (%pM) already added, not adding again.\n", 2056 sta_id, addr); 2057 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2058 return -EEXIST; 2059 } 2060 2061 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS; 2062 memcpy(&sta_cmd, &il->stations[sta_id].sta, 2063 sizeof(struct il_addsta_cmd)); 2064 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2065 2066 /* Add station to device's station table */ 2067 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); 2068 if (ret) { 2069 spin_lock_irqsave(&il->sta_lock, flags_spin); 2070 IL_ERR("Adding station %pM failed.\n", 2071 il->stations[sta_id].sta.sta.addr); 2072 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; 2073 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; 2074 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2075 } 2076 *sta_id_r = sta_id; 2077 return ret; 2078 } 2079 EXPORT_SYMBOL(il_add_station_common); 2080 2081 /** 2082 * il_sta_ucode_deactivate - deactivate ucode status for a station 2083 * 2084 * il->sta_lock must be held 2085 */ 2086 static void 2087 il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id) 2088 { 2089 /* Ucode must be active and driver must be non active */ 2090 if ((il->stations[sta_id]. 2091 used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) != 2092 IL_STA_UCODE_ACTIVE) 2093 IL_ERR("removed non active STA %u\n", sta_id); 2094 2095 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE; 2096 2097 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry)); 2098 D_ASSOC("Removed STA %u\n", sta_id); 2099 } 2100 2101 static int 2102 il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id, 2103 bool temporary) 2104 { 2105 struct il_rx_pkt *pkt; 2106 int ret; 2107 2108 unsigned long flags_spin; 2109 struct il_rem_sta_cmd rm_sta_cmd; 2110 2111 struct il_host_cmd cmd = { 2112 .id = C_REM_STA, 2113 .len = sizeof(struct il_rem_sta_cmd), 2114 .flags = CMD_SYNC, 2115 .data = &rm_sta_cmd, 2116 }; 2117 2118 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); 2119 rm_sta_cmd.num_sta = 1; 2120 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); 2121 2122 cmd.flags |= CMD_WANT_SKB; 2123 2124 ret = il_send_cmd(il, &cmd); 2125 2126 if (ret) 2127 return ret; 2128 2129 pkt = (struct il_rx_pkt *)cmd.reply_page; 2130 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { 2131 IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags); 2132 ret = -EIO; 2133 } 2134 2135 if (!ret) { 2136 switch (pkt->u.rem_sta.status) { 2137 case REM_STA_SUCCESS_MSK: 2138 if (!temporary) { 2139 spin_lock_irqsave(&il->sta_lock, flags_spin); 2140 il_sta_ucode_deactivate(il, sta_id); 2141 spin_unlock_irqrestore(&il->sta_lock, 2142 flags_spin); 2143 } 2144 D_ASSOC("C_REM_STA PASSED\n"); 2145 break; 2146 default: 2147 ret = -EIO; 2148 IL_ERR("C_REM_STA failed\n"); 2149 break; 2150 } 2151 } 2152 il_free_pages(il, cmd.reply_page); 2153 2154 return ret; 2155 } 2156 2157 /** 2158 * il_remove_station - Remove driver's knowledge of station. 2159 */ 2160 int 2161 il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr) 2162 { 2163 unsigned long flags; 2164 2165 if (!il_is_ready(il)) { 2166 D_INFO("Unable to remove station %pM, device not ready.\n", 2167 addr); 2168 /* 2169 * It is typical for stations to be removed when we are 2170 * going down. Return success since device will be down 2171 * soon anyway 2172 */ 2173 return 0; 2174 } 2175 2176 D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr); 2177 2178 if (WARN_ON(sta_id == IL_INVALID_STATION)) 2179 return -EINVAL; 2180 2181 spin_lock_irqsave(&il->sta_lock, flags); 2182 2183 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) { 2184 D_INFO("Removing %pM but non DRIVER active\n", addr); 2185 goto out_err; 2186 } 2187 2188 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { 2189 D_INFO("Removing %pM but non UCODE active\n", addr); 2190 goto out_err; 2191 } 2192 2193 if (il->stations[sta_id].used & IL_STA_LOCAL) { 2194 kfree(il->stations[sta_id].lq); 2195 il->stations[sta_id].lq = NULL; 2196 } 2197 2198 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; 2199 2200 il->num_stations--; 2201 2202 BUG_ON(il->num_stations < 0); 2203 2204 spin_unlock_irqrestore(&il->sta_lock, flags); 2205 2206 return il_send_remove_station(il, addr, sta_id, false); 2207 out_err: 2208 spin_unlock_irqrestore(&il->sta_lock, flags); 2209 return -EINVAL; 2210 } 2211 EXPORT_SYMBOL_GPL(il_remove_station); 2212 2213 /** 2214 * il_clear_ucode_stations - clear ucode station table bits 2215 * 2216 * This function clears all the bits in the driver indicating 2217 * which stations are active in the ucode. Call when something 2218 * other than explicit station management would cause this in 2219 * the ucode, e.g. unassociated RXON. 2220 */ 2221 void 2222 il_clear_ucode_stations(struct il_priv *il) 2223 { 2224 int i; 2225 unsigned long flags_spin; 2226 bool cleared = false; 2227 2228 D_INFO("Clearing ucode stations in driver\n"); 2229 2230 spin_lock_irqsave(&il->sta_lock, flags_spin); 2231 for (i = 0; i < il->hw_params.max_stations; i++) { 2232 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) { 2233 D_INFO("Clearing ucode active for station %d\n", i); 2234 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; 2235 cleared = true; 2236 } 2237 } 2238 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2239 2240 if (!cleared) 2241 D_INFO("No active stations found to be cleared\n"); 2242 } 2243 EXPORT_SYMBOL(il_clear_ucode_stations); 2244 2245 /** 2246 * il_restore_stations() - Restore driver known stations to device 2247 * 2248 * All stations considered active by driver, but not present in ucode, is 2249 * restored. 2250 * 2251 * Function sleeps. 2252 */ 2253 void 2254 il_restore_stations(struct il_priv *il) 2255 { 2256 struct il_addsta_cmd sta_cmd; 2257 struct il_link_quality_cmd lq; 2258 unsigned long flags_spin; 2259 int i; 2260 bool found = false; 2261 int ret; 2262 bool send_lq; 2263 2264 if (!il_is_ready(il)) { 2265 D_INFO("Not ready yet, not restoring any stations.\n"); 2266 return; 2267 } 2268 2269 D_ASSOC("Restoring all known stations ... start.\n"); 2270 spin_lock_irqsave(&il->sta_lock, flags_spin); 2271 for (i = 0; i < il->hw_params.max_stations; i++) { 2272 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) && 2273 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) { 2274 D_ASSOC("Restoring sta %pM\n", 2275 il->stations[i].sta.sta.addr); 2276 il->stations[i].sta.mode = 0; 2277 il->stations[i].used |= IL_STA_UCODE_INPROGRESS; 2278 found = true; 2279 } 2280 } 2281 2282 for (i = 0; i < il->hw_params.max_stations; i++) { 2283 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) { 2284 memcpy(&sta_cmd, &il->stations[i].sta, 2285 sizeof(struct il_addsta_cmd)); 2286 send_lq = false; 2287 if (il->stations[i].lq) { 2288 memcpy(&lq, il->stations[i].lq, 2289 sizeof(struct il_link_quality_cmd)); 2290 send_lq = true; 2291 } 2292 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2293 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); 2294 if (ret) { 2295 spin_lock_irqsave(&il->sta_lock, flags_spin); 2296 IL_ERR("Adding station %pM failed.\n", 2297 il->stations[i].sta.sta.addr); 2298 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE; 2299 il->stations[i].used &= 2300 ~IL_STA_UCODE_INPROGRESS; 2301 spin_unlock_irqrestore(&il->sta_lock, 2302 flags_spin); 2303 } 2304 /* 2305 * Rate scaling has already been initialized, send 2306 * current LQ command 2307 */ 2308 if (send_lq) 2309 il_send_lq_cmd(il, &lq, CMD_SYNC, true); 2310 spin_lock_irqsave(&il->sta_lock, flags_spin); 2311 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS; 2312 } 2313 } 2314 2315 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2316 if (!found) 2317 D_INFO("Restoring all known stations" 2318 " .... no stations to be restored.\n"); 2319 else 2320 D_INFO("Restoring all known stations" " .... complete.\n"); 2321 } 2322 EXPORT_SYMBOL(il_restore_stations); 2323 2324 int 2325 il_get_free_ucode_key_idx(struct il_priv *il) 2326 { 2327 int i; 2328 2329 for (i = 0; i < il->sta_key_max_num; i++) 2330 if (!test_and_set_bit(i, &il->ucode_key_table)) 2331 return i; 2332 2333 return WEP_INVALID_OFFSET; 2334 } 2335 EXPORT_SYMBOL(il_get_free_ucode_key_idx); 2336 2337 void 2338 il_dealloc_bcast_stations(struct il_priv *il) 2339 { 2340 unsigned long flags; 2341 int i; 2342 2343 spin_lock_irqsave(&il->sta_lock, flags); 2344 for (i = 0; i < il->hw_params.max_stations; i++) { 2345 if (!(il->stations[i].used & IL_STA_BCAST)) 2346 continue; 2347 2348 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; 2349 il->num_stations--; 2350 BUG_ON(il->num_stations < 0); 2351 kfree(il->stations[i].lq); 2352 il->stations[i].lq = NULL; 2353 } 2354 spin_unlock_irqrestore(&il->sta_lock, flags); 2355 } 2356 EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations); 2357 2358 #ifdef CONFIG_IWLEGACY_DEBUG 2359 static void 2360 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) 2361 { 2362 int i; 2363 D_RATE("lq station id 0x%x\n", lq->sta_id); 2364 D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk, 2365 lq->general_params.dual_stream_ant_msk); 2366 2367 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 2368 D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags); 2369 } 2370 #else 2371 static inline void 2372 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) 2373 { 2374 } 2375 #endif 2376 2377 /** 2378 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity 2379 * 2380 * It sometimes happens when a HT rate has been in use and we 2381 * loose connectivity with AP then mac80211 will first tell us that the 2382 * current channel is not HT anymore before removing the station. In such a 2383 * scenario the RXON flags will be updated to indicate we are not 2384 * communicating HT anymore, but the LQ command may still contain HT rates. 2385 * Test for this to prevent driver from sending LQ command between the time 2386 * RXON flags are updated and when LQ command is updated. 2387 */ 2388 static bool 2389 il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq) 2390 { 2391 int i; 2392 2393 if (il->ht.enabled) 2394 return true; 2395 2396 D_INFO("Channel %u is not an HT channel\n", il->active.channel); 2397 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 2398 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) { 2399 D_INFO("idx %d of LQ expects HT channel\n", i); 2400 return false; 2401 } 2402 } 2403 return true; 2404 } 2405 2406 /** 2407 * il_send_lq_cmd() - Send link quality command 2408 * @init: This command is sent as part of station initialization right 2409 * after station has been added. 2410 * 2411 * The link quality command is sent as the last step of station creation. 2412 * This is the special case in which init is set and we call a callback in 2413 * this case to clear the state indicating that station creation is in 2414 * progress. 2415 */ 2416 int 2417 il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq, 2418 u8 flags, bool init) 2419 { 2420 int ret = 0; 2421 unsigned long flags_spin; 2422 2423 struct il_host_cmd cmd = { 2424 .id = C_TX_LINK_QUALITY_CMD, 2425 .len = sizeof(struct il_link_quality_cmd), 2426 .flags = flags, 2427 .data = lq, 2428 }; 2429 2430 if (WARN_ON(lq->sta_id == IL_INVALID_STATION)) 2431 return -EINVAL; 2432 2433 spin_lock_irqsave(&il->sta_lock, flags_spin); 2434 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) { 2435 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2436 return -EINVAL; 2437 } 2438 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2439 2440 il_dump_lq_cmd(il, lq); 2441 BUG_ON(init && (cmd.flags & CMD_ASYNC)); 2442 2443 if (il_is_lq_table_valid(il, lq)) 2444 ret = il_send_cmd(il, &cmd); 2445 else 2446 ret = -EINVAL; 2447 2448 if (cmd.flags & CMD_ASYNC) 2449 return ret; 2450 2451 if (init) { 2452 D_INFO("init LQ command complete," 2453 " clearing sta addition status for sta %d\n", 2454 lq->sta_id); 2455 spin_lock_irqsave(&il->sta_lock, flags_spin); 2456 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS; 2457 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2458 } 2459 return ret; 2460 } 2461 EXPORT_SYMBOL(il_send_lq_cmd); 2462 2463 int 2464 il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2465 struct ieee80211_sta *sta) 2466 { 2467 struct il_priv *il = hw->priv; 2468 struct il_station_priv_common *sta_common = (void *)sta->drv_priv; 2469 int ret; 2470 2471 mutex_lock(&il->mutex); 2472 D_MAC80211("enter station %pM\n", sta->addr); 2473 2474 ret = il_remove_station(il, sta_common->sta_id, sta->addr); 2475 if (ret) 2476 IL_ERR("Error removing station %pM\n", sta->addr); 2477 2478 D_MAC80211("leave ret %d\n", ret); 2479 mutex_unlock(&il->mutex); 2480 2481 return ret; 2482 } 2483 EXPORT_SYMBOL(il_mac_sta_remove); 2484 2485 /************************** RX-FUNCTIONS ****************************/ 2486 /* 2487 * Rx theory of operation 2488 * 2489 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), 2490 * each of which point to Receive Buffers to be filled by the NIC. These get 2491 * used not only for Rx frames, but for any command response or notification 2492 * from the NIC. The driver and NIC manage the Rx buffers by means 2493 * of idxes into the circular buffer. 2494 * 2495 * Rx Queue Indexes 2496 * The host/firmware share two idx registers for managing the Rx buffers. 2497 * 2498 * The READ idx maps to the first position that the firmware may be writing 2499 * to -- the driver can read up to (but not including) this position and get 2500 * good data. 2501 * The READ idx is managed by the firmware once the card is enabled. 2502 * 2503 * The WRITE idx maps to the last position the driver has read from -- the 2504 * position preceding WRITE is the last slot the firmware can place a packet. 2505 * 2506 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 2507 * WRITE = READ. 2508 * 2509 * During initialization, the host sets up the READ queue position to the first 2510 * IDX position, and WRITE to the last (READ - 1 wrapped) 2511 * 2512 * When the firmware places a packet in a buffer, it will advance the READ idx 2513 * and fire the RX interrupt. The driver can then query the READ idx and 2514 * process as many packets as possible, moving the WRITE idx forward as it 2515 * resets the Rx queue buffers with new memory. 2516 * 2517 * The management in the driver is as follows: 2518 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When 2519 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 2520 * to replenish the iwl->rxq->rx_free. 2521 * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the 2522 * iwl->rxq is replenished and the READ IDX is updated (updating the 2523 * 'processed' and 'read' driver idxes as well) 2524 * + A received packet is processed and handed to the kernel network stack, 2525 * detached from the iwl->rxq. The driver 'processed' idx is updated. 2526 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free 2527 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ 2528 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there 2529 * were enough free buffers and RX_STALLED is set it is cleared. 2530 * 2531 * 2532 * Driver sequence: 2533 * 2534 * il_rx_queue_alloc() Allocates rx_free 2535 * il_rx_replenish() Replenishes rx_free list from rx_used, and calls 2536 * il_rx_queue_restock 2537 * il_rx_queue_restock() Moves available buffers from rx_free into Rx 2538 * queue, updates firmware pointers, and updates 2539 * the WRITE idx. If insufficient rx_free buffers 2540 * are available, schedules il_rx_replenish 2541 * 2542 * -- enable interrupts -- 2543 * ISR - il_rx() Detach il_rx_bufs from pool up to the 2544 * READ IDX, detaching the SKB from the pool. 2545 * Moves the packet buffer from queue to rx_used. 2546 * Calls il_rx_queue_restock to refill any empty 2547 * slots. 2548 * ... 2549 * 2550 */ 2551 2552 /** 2553 * il_rx_queue_space - Return number of free slots available in queue. 2554 */ 2555 int 2556 il_rx_queue_space(const struct il_rx_queue *q) 2557 { 2558 int s = q->read - q->write; 2559 if (s <= 0) 2560 s += RX_QUEUE_SIZE; 2561 /* keep some buffer to not confuse full and empty queue */ 2562 s -= 2; 2563 if (s < 0) 2564 s = 0; 2565 return s; 2566 } 2567 EXPORT_SYMBOL(il_rx_queue_space); 2568 2569 /** 2570 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue 2571 */ 2572 void 2573 il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q) 2574 { 2575 unsigned long flags; 2576 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg; 2577 u32 reg; 2578 2579 spin_lock_irqsave(&q->lock, flags); 2580 2581 if (q->need_update == 0) 2582 goto exit_unlock; 2583 2584 /* If power-saving is in use, make sure device is awake */ 2585 if (test_bit(S_POWER_PMI, &il->status)) { 2586 reg = _il_rd(il, CSR_UCODE_DRV_GP1); 2587 2588 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 2589 D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n", 2590 reg); 2591 il_set_bit(il, CSR_GP_CNTRL, 2592 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2593 goto exit_unlock; 2594 } 2595 2596 q->write_actual = (q->write & ~0x7); 2597 il_wr(il, rx_wrt_ptr_reg, q->write_actual); 2598 2599 /* Else device is assumed to be awake */ 2600 } else { 2601 /* Device expects a multiple of 8 */ 2602 q->write_actual = (q->write & ~0x7); 2603 il_wr(il, rx_wrt_ptr_reg, q->write_actual); 2604 } 2605 2606 q->need_update = 0; 2607 2608 exit_unlock: 2609 spin_unlock_irqrestore(&q->lock, flags); 2610 } 2611 EXPORT_SYMBOL(il_rx_queue_update_write_ptr); 2612 2613 int 2614 il_rx_queue_alloc(struct il_priv *il) 2615 { 2616 struct il_rx_queue *rxq = &il->rxq; 2617 struct device *dev = &il->pci_dev->dev; 2618 int i; 2619 2620 spin_lock_init(&rxq->lock); 2621 INIT_LIST_HEAD(&rxq->rx_free); 2622 INIT_LIST_HEAD(&rxq->rx_used); 2623 2624 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ 2625 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, 2626 GFP_KERNEL); 2627 if (!rxq->bd) 2628 goto err_bd; 2629 2630 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status), 2631 &rxq->rb_stts_dma, GFP_KERNEL); 2632 if (!rxq->rb_stts) 2633 goto err_rb; 2634 2635 /* Fill the rx_used queue with _all_ of the Rx buffers */ 2636 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 2637 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 2638 2639 /* Set us so that we have processed and used all buffers, but have 2640 * not restocked the Rx queue with fresh buffers */ 2641 rxq->read = rxq->write = 0; 2642 rxq->write_actual = 0; 2643 rxq->free_count = 0; 2644 rxq->need_update = 0; 2645 return 0; 2646 2647 err_rb: 2648 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, 2649 rxq->bd_dma); 2650 err_bd: 2651 return -ENOMEM; 2652 } 2653 EXPORT_SYMBOL(il_rx_queue_alloc); 2654 2655 void 2656 il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb) 2657 { 2658 struct il_rx_pkt *pkt = rxb_addr(rxb); 2659 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif); 2660 2661 if (!report->state) { 2662 D_11H("Spectrum Measure Notification: Start\n"); 2663 return; 2664 } 2665 2666 memcpy(&il->measure_report, report, sizeof(*report)); 2667 il->measurement_status |= MEASUREMENT_READY; 2668 } 2669 EXPORT_SYMBOL(il_hdl_spectrum_measurement); 2670 2671 /* 2672 * returns non-zero if packet should be dropped 2673 */ 2674 int 2675 il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, 2676 u32 decrypt_res, struct ieee80211_rx_status *stats) 2677 { 2678 u16 fc = le16_to_cpu(hdr->frame_control); 2679 2680 /* 2681 * All contexts have the same setting here due to it being 2682 * a module parameter, so OK to check any context. 2683 */ 2684 if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) 2685 return 0; 2686 2687 if (!(fc & IEEE80211_FCTL_PROTECTED)) 2688 return 0; 2689 2690 D_RX("decrypt_res:0x%x\n", decrypt_res); 2691 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { 2692 case RX_RES_STATUS_SEC_TYPE_TKIP: 2693 /* The uCode has got a bad phase 1 Key, pushes the packet. 2694 * Decryption will be done in SW. */ 2695 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2696 RX_RES_STATUS_BAD_KEY_TTAK) 2697 break; 2698 /* fall through */ 2699 2700 case RX_RES_STATUS_SEC_TYPE_WEP: 2701 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2702 RX_RES_STATUS_BAD_ICV_MIC) { 2703 /* bad ICV, the packet is destroyed since the 2704 * decryption is inplace, drop it */ 2705 D_RX("Packet destroyed\n"); 2706 return -1; 2707 } 2708 /* fall through */ 2709 case RX_RES_STATUS_SEC_TYPE_CCMP: 2710 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2711 RX_RES_STATUS_DECRYPT_OK) { 2712 D_RX("hw decrypt successfully!!!\n"); 2713 stats->flag |= RX_FLAG_DECRYPTED; 2714 } 2715 break; 2716 2717 default: 2718 break; 2719 } 2720 return 0; 2721 } 2722 EXPORT_SYMBOL(il_set_decrypted_flag); 2723 2724 /** 2725 * il_txq_update_write_ptr - Send new write idx to hardware 2726 */ 2727 void 2728 il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) 2729 { 2730 u32 reg = 0; 2731 int txq_id = txq->q.id; 2732 2733 if (txq->need_update == 0) 2734 return; 2735 2736 /* if we're trying to save power */ 2737 if (test_bit(S_POWER_PMI, &il->status)) { 2738 /* wake up nic if it's powered down ... 2739 * uCode will wake up, and interrupt us again, so next 2740 * time we'll skip this part. */ 2741 reg = _il_rd(il, CSR_UCODE_DRV_GP1); 2742 2743 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 2744 D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n", 2745 txq_id, reg); 2746 il_set_bit(il, CSR_GP_CNTRL, 2747 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2748 return; 2749 } 2750 2751 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 2752 2753 /* 2754 * else not in power-save mode, 2755 * uCode will never sleep when we're 2756 * trying to tx (during RFKILL, we're not trying to tx). 2757 */ 2758 } else 2759 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 2760 txq->need_update = 0; 2761 } 2762 EXPORT_SYMBOL(il_txq_update_write_ptr); 2763 2764 /** 2765 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's 2766 */ 2767 void 2768 il_tx_queue_unmap(struct il_priv *il, int txq_id) 2769 { 2770 struct il_tx_queue *txq = &il->txq[txq_id]; 2771 struct il_queue *q = &txq->q; 2772 2773 if (q->n_bd == 0) 2774 return; 2775 2776 while (q->write_ptr != q->read_ptr) { 2777 il->ops->txq_free_tfd(il, txq); 2778 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); 2779 } 2780 } 2781 EXPORT_SYMBOL(il_tx_queue_unmap); 2782 2783 /** 2784 * il_tx_queue_free - Deallocate DMA queue. 2785 * @txq: Transmit queue to deallocate. 2786 * 2787 * Empty queue by removing and destroying all BD's. 2788 * Free all buffers. 2789 * 0-fill, but do not free "txq" descriptor structure. 2790 */ 2791 void 2792 il_tx_queue_free(struct il_priv *il, int txq_id) 2793 { 2794 struct il_tx_queue *txq = &il->txq[txq_id]; 2795 struct device *dev = &il->pci_dev->dev; 2796 int i; 2797 2798 il_tx_queue_unmap(il, txq_id); 2799 2800 /* De-alloc array of command/tx buffers */ 2801 if (txq->cmd) { 2802 for (i = 0; i < TFD_TX_CMD_SLOTS; i++) 2803 kfree(txq->cmd[i]); 2804 } 2805 2806 /* De-alloc circular buffer of TFDs */ 2807 if (txq->q.n_bd) 2808 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, 2809 txq->tfds, txq->q.dma_addr); 2810 2811 /* De-alloc array of per-TFD driver data */ 2812 kfree(txq->skbs); 2813 txq->skbs = NULL; 2814 2815 /* deallocate arrays */ 2816 kfree(txq->cmd); 2817 kfree(txq->meta); 2818 txq->cmd = NULL; 2819 txq->meta = NULL; 2820 2821 /* 0-fill queue descriptor structure */ 2822 memset(txq, 0, sizeof(*txq)); 2823 } 2824 EXPORT_SYMBOL(il_tx_queue_free); 2825 2826 /** 2827 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue 2828 */ 2829 void 2830 il_cmd_queue_unmap(struct il_priv *il) 2831 { 2832 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 2833 struct il_queue *q = &txq->q; 2834 int i; 2835 2836 if (q->n_bd == 0) 2837 return; 2838 2839 while (q->read_ptr != q->write_ptr) { 2840 i = il_get_cmd_idx(q, q->read_ptr, 0); 2841 2842 if (txq->meta[i].flags & CMD_MAPPED) { 2843 pci_unmap_single(il->pci_dev, 2844 dma_unmap_addr(&txq->meta[i], mapping), 2845 dma_unmap_len(&txq->meta[i], len), 2846 PCI_DMA_BIDIRECTIONAL); 2847 txq->meta[i].flags = 0; 2848 } 2849 2850 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); 2851 } 2852 2853 i = q->n_win; 2854 if (txq->meta[i].flags & CMD_MAPPED) { 2855 pci_unmap_single(il->pci_dev, 2856 dma_unmap_addr(&txq->meta[i], mapping), 2857 dma_unmap_len(&txq->meta[i], len), 2858 PCI_DMA_BIDIRECTIONAL); 2859 txq->meta[i].flags = 0; 2860 } 2861 } 2862 EXPORT_SYMBOL(il_cmd_queue_unmap); 2863 2864 /** 2865 * il_cmd_queue_free - Deallocate DMA queue. 2866 * @txq: Transmit queue to deallocate. 2867 * 2868 * Empty queue by removing and destroying all BD's. 2869 * Free all buffers. 2870 * 0-fill, but do not free "txq" descriptor structure. 2871 */ 2872 void 2873 il_cmd_queue_free(struct il_priv *il) 2874 { 2875 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 2876 struct device *dev = &il->pci_dev->dev; 2877 int i; 2878 2879 il_cmd_queue_unmap(il); 2880 2881 /* De-alloc array of command/tx buffers */ 2882 if (txq->cmd) { 2883 for (i = 0; i <= TFD_CMD_SLOTS; i++) 2884 kfree(txq->cmd[i]); 2885 } 2886 2887 /* De-alloc circular buffer of TFDs */ 2888 if (txq->q.n_bd) 2889 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, 2890 txq->tfds, txq->q.dma_addr); 2891 2892 /* deallocate arrays */ 2893 kfree(txq->cmd); 2894 kfree(txq->meta); 2895 txq->cmd = NULL; 2896 txq->meta = NULL; 2897 2898 /* 0-fill queue descriptor structure */ 2899 memset(txq, 0, sizeof(*txq)); 2900 } 2901 EXPORT_SYMBOL(il_cmd_queue_free); 2902 2903 /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 2904 * DMA services 2905 * 2906 * Theory of operation 2907 * 2908 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer 2909 * of buffer descriptors, each of which points to one or more data buffers for 2910 * the device to read from or fill. Driver and device exchange status of each 2911 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty 2912 * entries in each circular buffer, to protect against confusing empty and full 2913 * queue states. 2914 * 2915 * The device reads or writes the data in the queues via the device's several 2916 * DMA/FIFO channels. Each queue is mapped to a single DMA channel. 2917 * 2918 * For Tx queue, there are low mark and high mark limits. If, after queuing 2919 * the packet for Tx, free space become < low mark, Tx queue stopped. When 2920 * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 2921 * Tx queue resumed. 2922 * 2923 * See more detailed info in 4965.h. 2924 ***************************************************/ 2925 2926 int 2927 il_queue_space(const struct il_queue *q) 2928 { 2929 int s = q->read_ptr - q->write_ptr; 2930 2931 if (q->read_ptr > q->write_ptr) 2932 s -= q->n_bd; 2933 2934 if (s <= 0) 2935 s += q->n_win; 2936 /* keep some reserve to not confuse empty and full situations */ 2937 s -= 2; 2938 if (s < 0) 2939 s = 0; 2940 return s; 2941 } 2942 EXPORT_SYMBOL(il_queue_space); 2943 2944 2945 /** 2946 * il_queue_init - Initialize queue's high/low-water and read/write idxes 2947 */ 2948 static int 2949 il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id) 2950 { 2951 /* 2952 * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 2953 * il_queue_inc_wrap and il_queue_dec_wrap are broken. 2954 */ 2955 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 2956 /* FIXME: remove q->n_bd */ 2957 q->n_bd = TFD_QUEUE_SIZE_MAX; 2958 2959 q->n_win = slots; 2960 q->id = id; 2961 2962 /* slots_must be power-of-two size, otherwise 2963 * il_get_cmd_idx is broken. */ 2964 BUG_ON(!is_power_of_2(slots)); 2965 2966 q->low_mark = q->n_win / 4; 2967 if (q->low_mark < 4) 2968 q->low_mark = 4; 2969 2970 q->high_mark = q->n_win / 8; 2971 if (q->high_mark < 2) 2972 q->high_mark = 2; 2973 2974 q->write_ptr = q->read_ptr = 0; 2975 2976 return 0; 2977 } 2978 2979 /** 2980 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue 2981 */ 2982 static int 2983 il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id) 2984 { 2985 struct device *dev = &il->pci_dev->dev; 2986 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; 2987 2988 /* Driver ilate data, only for Tx (not command) queues, 2989 * not shared with device. */ 2990 if (id != il->cmd_queue) { 2991 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, 2992 sizeof(struct sk_buff *), 2993 GFP_KERNEL); 2994 if (!txq->skbs) { 2995 IL_ERR("Fail to alloc skbs\n"); 2996 goto error; 2997 } 2998 } else 2999 txq->skbs = NULL; 3000 3001 /* Circular buffer of transmit frame descriptors (TFDs), 3002 * shared with device */ 3003 txq->tfds = 3004 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); 3005 if (!txq->tfds) 3006 goto error; 3007 3008 txq->q.id = id; 3009 3010 return 0; 3011 3012 error: 3013 kfree(txq->skbs); 3014 txq->skbs = NULL; 3015 3016 return -ENOMEM; 3017 } 3018 3019 /** 3020 * il_tx_queue_init - Allocate and initialize one tx/cmd queue 3021 */ 3022 int 3023 il_tx_queue_init(struct il_priv *il, u32 txq_id) 3024 { 3025 int i, len, ret; 3026 int slots, actual_slots; 3027 struct il_tx_queue *txq = &il->txq[txq_id]; 3028 3029 /* 3030 * Alloc buffer array for commands (Tx or other types of commands). 3031 * For the command queue (#4/#9), allocate command space + one big 3032 * command for scan, since scan command is very huge; the system will 3033 * not have two scans at the same time, so only one is needed. 3034 * For normal Tx queues (all other queues), no super-size command 3035 * space is needed. 3036 */ 3037 if (txq_id == il->cmd_queue) { 3038 slots = TFD_CMD_SLOTS; 3039 actual_slots = slots + 1; 3040 } else { 3041 slots = TFD_TX_CMD_SLOTS; 3042 actual_slots = slots; 3043 } 3044 3045 txq->meta = 3046 kcalloc(actual_slots, sizeof(struct il_cmd_meta), GFP_KERNEL); 3047 txq->cmd = 3048 kcalloc(actual_slots, sizeof(struct il_device_cmd *), GFP_KERNEL); 3049 3050 if (!txq->meta || !txq->cmd) 3051 goto out_free_arrays; 3052 3053 len = sizeof(struct il_device_cmd); 3054 for (i = 0; i < actual_slots; i++) { 3055 /* only happens for cmd queue */ 3056 if (i == slots) 3057 len = IL_MAX_CMD_SIZE; 3058 3059 txq->cmd[i] = kmalloc(len, GFP_KERNEL); 3060 if (!txq->cmd[i]) 3061 goto err; 3062 } 3063 3064 /* Alloc driver data array and TFD circular buffer */ 3065 ret = il_tx_queue_alloc(il, txq, txq_id); 3066 if (ret) 3067 goto err; 3068 3069 txq->need_update = 0; 3070 3071 /* 3072 * For the default queues 0-3, set up the swq_id 3073 * already -- all others need to get one later 3074 * (if they need one at all). 3075 */ 3076 if (txq_id < 4) 3077 il_set_swq_id(txq, txq_id, txq_id); 3078 3079 /* Initialize queue's high/low-water marks, and head/tail idxes */ 3080 il_queue_init(il, &txq->q, slots, txq_id); 3081 3082 /* Tell device where to find queue */ 3083 il->ops->txq_init(il, txq); 3084 3085 return 0; 3086 err: 3087 for (i = 0; i < actual_slots; i++) 3088 kfree(txq->cmd[i]); 3089 out_free_arrays: 3090 kfree(txq->meta); 3091 txq->meta = NULL; 3092 kfree(txq->cmd); 3093 txq->cmd = NULL; 3094 3095 return -ENOMEM; 3096 } 3097 EXPORT_SYMBOL(il_tx_queue_init); 3098 3099 void 3100 il_tx_queue_reset(struct il_priv *il, u32 txq_id) 3101 { 3102 int slots, actual_slots; 3103 struct il_tx_queue *txq = &il->txq[txq_id]; 3104 3105 if (txq_id == il->cmd_queue) { 3106 slots = TFD_CMD_SLOTS; 3107 actual_slots = TFD_CMD_SLOTS + 1; 3108 } else { 3109 slots = TFD_TX_CMD_SLOTS; 3110 actual_slots = TFD_TX_CMD_SLOTS; 3111 } 3112 3113 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots); 3114 txq->need_update = 0; 3115 3116 /* Initialize queue's high/low-water marks, and head/tail idxes */ 3117 il_queue_init(il, &txq->q, slots, txq_id); 3118 3119 /* Tell device where to find queue */ 3120 il->ops->txq_init(il, txq); 3121 } 3122 EXPORT_SYMBOL(il_tx_queue_reset); 3123 3124 /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 3125 3126 /** 3127 * il_enqueue_hcmd - enqueue a uCode command 3128 * @il: device ilate data point 3129 * @cmd: a point to the ucode command structure 3130 * 3131 * The function returns < 0 values to indicate the operation is 3132 * failed. On success, it turns the idx (> 0) of command in the 3133 * command queue. 3134 */ 3135 int 3136 il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) 3137 { 3138 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 3139 struct il_queue *q = &txq->q; 3140 struct il_device_cmd *out_cmd; 3141 struct il_cmd_meta *out_meta; 3142 dma_addr_t phys_addr; 3143 unsigned long flags; 3144 int len; 3145 u32 idx; 3146 u16 fix_size; 3147 3148 cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len); 3149 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr)); 3150 3151 /* If any of the command structures end up being larger than 3152 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then 3153 * we will need to increase the size of the TFD entries 3154 * Also, check to see if command buffer should not exceed the size 3155 * of device_cmd and max_cmd_size. */ 3156 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 3157 !(cmd->flags & CMD_SIZE_HUGE)); 3158 BUG_ON(fix_size > IL_MAX_CMD_SIZE); 3159 3160 if (il_is_rfkill(il) || il_is_ctkill(il)) { 3161 IL_WARN("Not sending command - %s KILL\n", 3162 il_is_rfkill(il) ? "RF" : "CT"); 3163 return -EIO; 3164 } 3165 3166 spin_lock_irqsave(&il->hcmd_lock, flags); 3167 3168 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 3169 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3170 3171 IL_ERR("Restarting adapter due to command queue full\n"); 3172 queue_work(il->workqueue, &il->restart); 3173 return -ENOSPC; 3174 } 3175 3176 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); 3177 out_cmd = txq->cmd[idx]; 3178 out_meta = &txq->meta[idx]; 3179 3180 if (WARN_ON(out_meta->flags & CMD_MAPPED)) { 3181 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3182 return -ENOSPC; 3183 } 3184 3185 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 3186 out_meta->flags = cmd->flags | CMD_MAPPED; 3187 if (cmd->flags & CMD_WANT_SKB) 3188 out_meta->source = cmd; 3189 if (cmd->flags & CMD_ASYNC) 3190 out_meta->callback = cmd->callback; 3191 3192 out_cmd->hdr.cmd = cmd->id; 3193 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); 3194 3195 /* At this point, the out_cmd now has all of the incoming cmd 3196 * information */ 3197 3198 out_cmd->hdr.flags = 0; 3199 out_cmd->hdr.sequence = 3200 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr)); 3201 if (cmd->flags & CMD_SIZE_HUGE) 3202 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; 3203 len = sizeof(struct il_device_cmd); 3204 if (idx == TFD_CMD_SLOTS) 3205 len = IL_MAX_CMD_SIZE; 3206 3207 #ifdef CONFIG_IWLEGACY_DEBUG 3208 switch (out_cmd->hdr.cmd) { 3209 case C_TX_LINK_QUALITY_CMD: 3210 case C_SENSITIVITY: 3211 D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, " 3212 "%d bytes at %d[%d]:%d\n", 3213 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, 3214 le16_to_cpu(out_cmd->hdr.sequence), fix_size, 3215 q->write_ptr, idx, il->cmd_queue); 3216 break; 3217 default: 3218 D_HC("Sending command %s (#%x), seq: 0x%04X, " 3219 "%d bytes at %d[%d]:%d\n", 3220 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, 3221 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr, 3222 idx, il->cmd_queue); 3223 } 3224 #endif 3225 3226 phys_addr = 3227 pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size, 3228 PCI_DMA_BIDIRECTIONAL); 3229 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) { 3230 idx = -ENOMEM; 3231 goto out; 3232 } 3233 dma_unmap_addr_set(out_meta, mapping, phys_addr); 3234 dma_unmap_len_set(out_meta, len, fix_size); 3235 3236 txq->need_update = 1; 3237 3238 if (il->ops->txq_update_byte_cnt_tbl) 3239 /* Set up entry in queue's byte count circular buffer */ 3240 il->ops->txq_update_byte_cnt_tbl(il, txq, 0); 3241 3242 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1, 3243 U32_PAD(cmd->len)); 3244 3245 /* Increment and update queue's write idx */ 3246 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); 3247 il_txq_update_write_ptr(il, txq); 3248 3249 out: 3250 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3251 return idx; 3252 } 3253 3254 /** 3255 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd 3256 * 3257 * When FW advances 'R' idx, all entries between old and new 'R' idx 3258 * need to be reclaimed. As result, some free space forms. If there is 3259 * enough free space (> low mark), wake the stack that feeds us. 3260 */ 3261 static void 3262 il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx) 3263 { 3264 struct il_tx_queue *txq = &il->txq[txq_id]; 3265 struct il_queue *q = &txq->q; 3266 int nfreed = 0; 3267 3268 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { 3269 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, " 3270 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd, 3271 q->write_ptr, q->read_ptr); 3272 return; 3273 } 3274 3275 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; 3276 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { 3277 3278 if (nfreed++ > 0) { 3279 IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx, 3280 q->write_ptr, q->read_ptr); 3281 queue_work(il->workqueue, &il->restart); 3282 } 3283 3284 } 3285 } 3286 3287 /** 3288 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them 3289 * @rxb: Rx buffer to reclaim 3290 * 3291 * If an Rx buffer has an async callback associated with it the callback 3292 * will be executed. The attached skb (if present) will only be freed 3293 * if the callback returns 1 3294 */ 3295 void 3296 il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb) 3297 { 3298 struct il_rx_pkt *pkt = rxb_addr(rxb); 3299 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 3300 int txq_id = SEQ_TO_QUEUE(sequence); 3301 int idx = SEQ_TO_IDX(sequence); 3302 int cmd_idx; 3303 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); 3304 struct il_device_cmd *cmd; 3305 struct il_cmd_meta *meta; 3306 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 3307 unsigned long flags; 3308 3309 /* If a Tx command is being handled and it isn't in the actual 3310 * command queue then there a command routing bug has been introduced 3311 * in the queue management code. */ 3312 if (WARN 3313 (txq_id != il->cmd_queue, 3314 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 3315 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr, 3316 il->txq[il->cmd_queue].q.write_ptr)) { 3317 il_print_hex_error(il, pkt, 32); 3318 return; 3319 } 3320 3321 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge); 3322 cmd = txq->cmd[cmd_idx]; 3323 meta = &txq->meta[cmd_idx]; 3324 3325 txq->time_stamp = jiffies; 3326 3327 pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping), 3328 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL); 3329 3330 /* Input error checking is done when commands are added to queue. */ 3331 if (meta->flags & CMD_WANT_SKB) { 3332 meta->source->reply_page = (unsigned long)rxb_addr(rxb); 3333 rxb->page = NULL; 3334 } else if (meta->callback) 3335 meta->callback(il, cmd, pkt); 3336 3337 spin_lock_irqsave(&il->hcmd_lock, flags); 3338 3339 il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx); 3340 3341 if (!(meta->flags & CMD_ASYNC)) { 3342 clear_bit(S_HCMD_ACTIVE, &il->status); 3343 D_INFO("Clearing HCMD_ACTIVE for command %s\n", 3344 il_get_cmd_string(cmd->hdr.cmd)); 3345 wake_up(&il->wait_command_queue); 3346 } 3347 3348 /* Mark as unmapped */ 3349 meta->flags = 0; 3350 3351 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3352 } 3353 EXPORT_SYMBOL(il_tx_cmd_complete); 3354 3355 MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965"); 3356 MODULE_VERSION(IWLWIFI_VERSION); 3357 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 3358 MODULE_LICENSE("GPL"); 3359 3360 /* 3361 * set bt_coex_active to true, uCode will do kill/defer 3362 * every time the priority line is asserted (BT is sending signals on the 3363 * priority line in the PCIx). 3364 * set bt_coex_active to false, uCode will ignore the BT activity and 3365 * perform the normal operation 3366 * 3367 * User might experience transmit issue on some platform due to WiFi/BT 3368 * co-exist problem. The possible behaviors are: 3369 * Able to scan and finding all the available AP 3370 * Not able to associate with any AP 3371 * On those platforms, WiFi communication can be restored by set 3372 * "bt_coex_active" module parameter to "false" 3373 * 3374 * default: bt_coex_active = true (BT_COEX_ENABLE) 3375 */ 3376 static bool bt_coex_active = true; 3377 module_param(bt_coex_active, bool, 0444); 3378 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); 3379 3380 u32 il_debug_level; 3381 EXPORT_SYMBOL(il_debug_level); 3382 3383 const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3384 EXPORT_SYMBOL(il_bcast_addr); 3385 3386 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 3387 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 3388 static void 3389 il_init_ht_hw_capab(const struct il_priv *il, 3390 struct ieee80211_sta_ht_cap *ht_info, 3391 enum nl80211_band band) 3392 { 3393 u16 max_bit_rate = 0; 3394 u8 rx_chains_num = il->hw_params.rx_chains_num; 3395 u8 tx_chains_num = il->hw_params.tx_chains_num; 3396 3397 ht_info->cap = 0; 3398 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 3399 3400 ht_info->ht_supported = true; 3401 3402 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 3403 max_bit_rate = MAX_BIT_RATE_20_MHZ; 3404 if (il->hw_params.ht40_channel & BIT(band)) { 3405 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 3406 ht_info->cap |= IEEE80211_HT_CAP_SGI_40; 3407 ht_info->mcs.rx_mask[4] = 0x01; 3408 max_bit_rate = MAX_BIT_RATE_40_MHZ; 3409 } 3410 3411 if (il->cfg->mod_params->amsdu_size_8K) 3412 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; 3413 3414 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; 3415 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; 3416 3417 ht_info->mcs.rx_mask[0] = 0xFF; 3418 if (rx_chains_num >= 2) 3419 ht_info->mcs.rx_mask[1] = 0xFF; 3420 if (rx_chains_num >= 3) 3421 ht_info->mcs.rx_mask[2] = 0xFF; 3422 3423 /* Highest supported Rx data rate */ 3424 max_bit_rate *= rx_chains_num; 3425 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); 3426 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); 3427 3428 /* Tx MCS capabilities */ 3429 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 3430 if (tx_chains_num != rx_chains_num) { 3431 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 3432 ht_info->mcs.tx_params |= 3433 ((tx_chains_num - 3434 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); 3435 } 3436 } 3437 3438 /** 3439 * il_init_geos - Initialize mac80211's geo/channel info based from eeprom 3440 */ 3441 int 3442 il_init_geos(struct il_priv *il) 3443 { 3444 struct il_channel_info *ch; 3445 struct ieee80211_supported_band *sband; 3446 struct ieee80211_channel *channels; 3447 struct ieee80211_channel *geo_ch; 3448 struct ieee80211_rate *rates; 3449 int i = 0; 3450 s8 max_tx_power = 0; 3451 3452 if (il->bands[NL80211_BAND_2GHZ].n_bitrates || 3453 il->bands[NL80211_BAND_5GHZ].n_bitrates) { 3454 D_INFO("Geography modes already initialized.\n"); 3455 set_bit(S_GEO_CONFIGURED, &il->status); 3456 return 0; 3457 } 3458 3459 channels = 3460 kcalloc(il->channel_count, sizeof(struct ieee80211_channel), 3461 GFP_KERNEL); 3462 if (!channels) 3463 return -ENOMEM; 3464 3465 rates = 3466 kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY), 3467 GFP_KERNEL); 3468 if (!rates) { 3469 kfree(channels); 3470 return -ENOMEM; 3471 } 3472 3473 /* 5.2GHz channels start after the 2.4GHz channels */ 3474 sband = &il->bands[NL80211_BAND_5GHZ]; 3475 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)]; 3476 /* just OFDM */ 3477 sband->bitrates = &rates[IL_FIRST_OFDM_RATE]; 3478 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE; 3479 3480 if (il->cfg->sku & IL_SKU_N) 3481 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ); 3482 3483 sband = &il->bands[NL80211_BAND_2GHZ]; 3484 sband->channels = channels; 3485 /* OFDM & CCK */ 3486 sband->bitrates = rates; 3487 sband->n_bitrates = RATE_COUNT_LEGACY; 3488 3489 if (il->cfg->sku & IL_SKU_N) 3490 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ); 3491 3492 il->ieee_channels = channels; 3493 il->ieee_rates = rates; 3494 3495 for (i = 0; i < il->channel_count; i++) { 3496 ch = &il->channel_info[i]; 3497 3498 if (!il_is_channel_valid(ch)) 3499 continue; 3500 3501 sband = &il->bands[ch->band]; 3502 3503 geo_ch = &sband->channels[sband->n_channels++]; 3504 3505 geo_ch->center_freq = 3506 ieee80211_channel_to_frequency(ch->channel, ch->band); 3507 geo_ch->max_power = ch->max_power_avg; 3508 geo_ch->max_antenna_gain = 0xff; 3509 geo_ch->hw_value = ch->channel; 3510 3511 if (il_is_channel_valid(ch)) { 3512 if (!(ch->flags & EEPROM_CHANNEL_IBSS)) 3513 geo_ch->flags |= IEEE80211_CHAN_NO_IR; 3514 3515 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) 3516 geo_ch->flags |= IEEE80211_CHAN_NO_IR; 3517 3518 if (ch->flags & EEPROM_CHANNEL_RADAR) 3519 geo_ch->flags |= IEEE80211_CHAN_RADAR; 3520 3521 geo_ch->flags |= ch->ht40_extension_channel; 3522 3523 if (ch->max_power_avg > max_tx_power) 3524 max_tx_power = ch->max_power_avg; 3525 } else { 3526 geo_ch->flags |= IEEE80211_CHAN_DISABLED; 3527 } 3528 3529 D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel, 3530 geo_ch->center_freq, 3531 il_is_channel_a_band(ch) ? "5.2" : "2.4", 3532 geo_ch-> 3533 flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid", 3534 geo_ch->flags); 3535 } 3536 3537 il->tx_power_device_lmt = max_tx_power; 3538 il->tx_power_user_lmt = max_tx_power; 3539 il->tx_power_next = max_tx_power; 3540 3541 if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 && 3542 (il->cfg->sku & IL_SKU_A)) { 3543 IL_INFO("Incorrectly detected BG card as ABG. " 3544 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", 3545 il->pci_dev->device, il->pci_dev->subsystem_device); 3546 il->cfg->sku &= ~IL_SKU_A; 3547 } 3548 3549 IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n", 3550 il->bands[NL80211_BAND_2GHZ].n_channels, 3551 il->bands[NL80211_BAND_5GHZ].n_channels); 3552 3553 set_bit(S_GEO_CONFIGURED, &il->status); 3554 3555 return 0; 3556 } 3557 EXPORT_SYMBOL(il_init_geos); 3558 3559 /* 3560 * il_free_geos - undo allocations in il_init_geos 3561 */ 3562 void 3563 il_free_geos(struct il_priv *il) 3564 { 3565 kfree(il->ieee_channels); 3566 kfree(il->ieee_rates); 3567 clear_bit(S_GEO_CONFIGURED, &il->status); 3568 } 3569 EXPORT_SYMBOL(il_free_geos); 3570 3571 static bool 3572 il_is_channel_extension(struct il_priv *il, enum nl80211_band band, 3573 u16 channel, u8 extension_chan_offset) 3574 { 3575 const struct il_channel_info *ch_info; 3576 3577 ch_info = il_get_channel_info(il, band, channel); 3578 if (!il_is_channel_valid(ch_info)) 3579 return false; 3580 3581 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) 3582 return !(ch_info-> 3583 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS); 3584 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) 3585 return !(ch_info-> 3586 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS); 3587 3588 return false; 3589 } 3590 3591 bool 3592 il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap) 3593 { 3594 if (!il->ht.enabled || !il->ht.is_40mhz) 3595 return false; 3596 3597 /* 3598 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 3599 * the bit will not set if it is pure 40MHz case 3600 */ 3601 if (ht_cap && !ht_cap->ht_supported) 3602 return false; 3603 3604 #ifdef CONFIG_IWLEGACY_DEBUGFS 3605 if (il->disable_ht40) 3606 return false; 3607 #endif 3608 3609 return il_is_channel_extension(il, il->band, 3610 le16_to_cpu(il->staging.channel), 3611 il->ht.extension_chan_offset); 3612 } 3613 EXPORT_SYMBOL(il_is_ht40_tx_allowed); 3614 3615 static u16 noinline 3616 il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) 3617 { 3618 u16 new_val; 3619 u16 beacon_factor; 3620 3621 /* 3622 * If mac80211 hasn't given us a beacon interval, program 3623 * the default into the device. 3624 */ 3625 if (!beacon_val) 3626 return DEFAULT_BEACON_INTERVAL; 3627 3628 /* 3629 * If the beacon interval we obtained from the peer 3630 * is too large, we'll have to wake up more often 3631 * (and in IBSS case, we'll beacon too much) 3632 * 3633 * For example, if max_beacon_val is 4096, and the 3634 * requested beacon interval is 7000, we'll have to 3635 * use 3500 to be able to wake up on the beacons. 3636 * 3637 * This could badly influence beacon detection stats. 3638 */ 3639 3640 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; 3641 new_val = beacon_val / beacon_factor; 3642 3643 if (!new_val) 3644 new_val = max_beacon_val; 3645 3646 return new_val; 3647 } 3648 3649 int 3650 il_send_rxon_timing(struct il_priv *il) 3651 { 3652 u64 tsf; 3653 s32 interval_tm, rem; 3654 struct ieee80211_conf *conf = NULL; 3655 u16 beacon_int; 3656 struct ieee80211_vif *vif = il->vif; 3657 3658 conf = &il->hw->conf; 3659 3660 lockdep_assert_held(&il->mutex); 3661 3662 memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd)); 3663 3664 il->timing.timestamp = cpu_to_le64(il->timestamp); 3665 il->timing.listen_interval = cpu_to_le16(conf->listen_interval); 3666 3667 beacon_int = vif ? vif->bss_conf.beacon_int : 0; 3668 3669 /* 3670 * TODO: For IBSS we need to get atim_win from mac80211, 3671 * for now just always use 0 3672 */ 3673 il->timing.atim_win = 0; 3674 3675 beacon_int = 3676 il_adjust_beacon_interval(beacon_int, 3677 il->hw_params.max_beacon_itrvl * 3678 TIME_UNIT); 3679 il->timing.beacon_interval = cpu_to_le16(beacon_int); 3680 3681 tsf = il->timestamp; /* tsf is modifed by do_div: copy it */ 3682 interval_tm = beacon_int * TIME_UNIT; 3683 rem = do_div(tsf, interval_tm); 3684 il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); 3685 3686 il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1; 3687 3688 D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n", 3689 le16_to_cpu(il->timing.beacon_interval), 3690 le32_to_cpu(il->timing.beacon_init_val), 3691 le16_to_cpu(il->timing.atim_win)); 3692 3693 return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing), 3694 &il->timing); 3695 } 3696 EXPORT_SYMBOL(il_send_rxon_timing); 3697 3698 void 3699 il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt) 3700 { 3701 struct il_rxon_cmd *rxon = &il->staging; 3702 3703 if (hw_decrypt) 3704 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; 3705 else 3706 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; 3707 3708 } 3709 EXPORT_SYMBOL(il_set_rxon_hwcrypto); 3710 3711 /* validate RXON structure is valid */ 3712 int 3713 il_check_rxon_cmd(struct il_priv *il) 3714 { 3715 struct il_rxon_cmd *rxon = &il->staging; 3716 bool error = false; 3717 3718 if (rxon->flags & RXON_FLG_BAND_24G_MSK) { 3719 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { 3720 IL_WARN("check 2.4G: wrong narrow\n"); 3721 error = true; 3722 } 3723 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { 3724 IL_WARN("check 2.4G: wrong radar\n"); 3725 error = true; 3726 } 3727 } else { 3728 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { 3729 IL_WARN("check 5.2G: not short slot!\n"); 3730 error = true; 3731 } 3732 if (rxon->flags & RXON_FLG_CCK_MSK) { 3733 IL_WARN("check 5.2G: CCK!\n"); 3734 error = true; 3735 } 3736 } 3737 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { 3738 IL_WARN("mac/bssid mcast!\n"); 3739 error = true; 3740 } 3741 3742 /* make sure basic rates 6Mbps and 1Mbps are supported */ 3743 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 && 3744 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) { 3745 IL_WARN("neither 1 nor 6 are basic\n"); 3746 error = true; 3747 } 3748 3749 if (le16_to_cpu(rxon->assoc_id) > 2007) { 3750 IL_WARN("aid > 2007\n"); 3751 error = true; 3752 } 3753 3754 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) == 3755 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { 3756 IL_WARN("CCK and short slot\n"); 3757 error = true; 3758 } 3759 3760 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) == 3761 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { 3762 IL_WARN("CCK and auto detect"); 3763 error = true; 3764 } 3765 3766 if ((rxon-> 3767 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) == 3768 RXON_FLG_TGG_PROTECT_MSK) { 3769 IL_WARN("TGg but no auto-detect\n"); 3770 error = true; 3771 } 3772 3773 if (error) 3774 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel)); 3775 3776 if (error) { 3777 IL_ERR("Invalid RXON\n"); 3778 return -EINVAL; 3779 } 3780 return 0; 3781 } 3782 EXPORT_SYMBOL(il_check_rxon_cmd); 3783 3784 /** 3785 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed 3786 * @il: staging_rxon is compared to active_rxon 3787 * 3788 * If the RXON structure is changing enough to require a new tune, 3789 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that 3790 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 3791 */ 3792 int 3793 il_full_rxon_required(struct il_priv *il) 3794 { 3795 const struct il_rxon_cmd *staging = &il->staging; 3796 const struct il_rxon_cmd *active = &il->active; 3797 3798 #define CHK(cond) \ 3799 if ((cond)) { \ 3800 D_INFO("need full RXON - " #cond "\n"); \ 3801 return 1; \ 3802 } 3803 3804 #define CHK_NEQ(c1, c2) \ 3805 if ((c1) != (c2)) { \ 3806 D_INFO("need full RXON - " \ 3807 #c1 " != " #c2 " - %d != %d\n", \ 3808 (c1), (c2)); \ 3809 return 1; \ 3810 } 3811 3812 /* These items are only settable from the full RXON command */ 3813 CHK(!il_is_associated(il)); 3814 CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr)); 3815 CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr)); 3816 CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr, 3817 active->wlap_bssid_addr)); 3818 CHK_NEQ(staging->dev_type, active->dev_type); 3819 CHK_NEQ(staging->channel, active->channel); 3820 CHK_NEQ(staging->air_propagation, active->air_propagation); 3821 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, 3822 active->ofdm_ht_single_stream_basic_rates); 3823 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, 3824 active->ofdm_ht_dual_stream_basic_rates); 3825 CHK_NEQ(staging->assoc_id, active->assoc_id); 3826 3827 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can 3828 * be updated with the RXON_ASSOC command -- however only some 3829 * flag transitions are allowed using RXON_ASSOC */ 3830 3831 /* Check if we are not switching bands */ 3832 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, 3833 active->flags & RXON_FLG_BAND_24G_MSK); 3834 3835 /* Check if we are switching association toggle */ 3836 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, 3837 active->filter_flags & RXON_FILTER_ASSOC_MSK); 3838 3839 #undef CHK 3840 #undef CHK_NEQ 3841 3842 return 0; 3843 } 3844 EXPORT_SYMBOL(il_full_rxon_required); 3845 3846 u8 3847 il_get_lowest_plcp(struct il_priv *il) 3848 { 3849 /* 3850 * Assign the lowest rate -- should really get this from 3851 * the beacon skb from mac80211. 3852 */ 3853 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) 3854 return RATE_1M_PLCP; 3855 else 3856 return RATE_6M_PLCP; 3857 } 3858 EXPORT_SYMBOL(il_get_lowest_plcp); 3859 3860 static void 3861 _il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) 3862 { 3863 struct il_rxon_cmd *rxon = &il->staging; 3864 3865 if (!il->ht.enabled) { 3866 rxon->flags &= 3867 ~(RXON_FLG_CHANNEL_MODE_MSK | 3868 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK 3869 | RXON_FLG_HT_PROT_MSK); 3870 return; 3871 } 3872 3873 rxon->flags |= 3874 cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS); 3875 3876 /* Set up channel bandwidth: 3877 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ 3878 /* clear the HT channel mode before set the mode */ 3879 rxon->flags &= 3880 ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 3881 if (il_is_ht40_tx_allowed(il, NULL)) { 3882 /* pure ht40 */ 3883 if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { 3884 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; 3885 /* Note: control channel is opposite of extension channel */ 3886 switch (il->ht.extension_chan_offset) { 3887 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 3888 rxon->flags &= 3889 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3890 break; 3891 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 3892 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3893 break; 3894 } 3895 } else { 3896 /* Note: control channel is opposite of extension channel */ 3897 switch (il->ht.extension_chan_offset) { 3898 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 3899 rxon->flags &= 3900 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 3901 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 3902 break; 3903 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 3904 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3905 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 3906 break; 3907 case IEEE80211_HT_PARAM_CHA_SEC_NONE: 3908 default: 3909 /* channel location only valid if in Mixed mode */ 3910 IL_ERR("invalid extension channel offset\n"); 3911 break; 3912 } 3913 } 3914 } else { 3915 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; 3916 } 3917 3918 if (il->ops->set_rxon_chain) 3919 il->ops->set_rxon_chain(il); 3920 3921 D_ASSOC("rxon flags 0x%X operation mode :0x%X " 3922 "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags), 3923 il->ht.protection, il->ht.extension_chan_offset); 3924 } 3925 3926 void 3927 il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) 3928 { 3929 _il_set_rxon_ht(il, ht_conf); 3930 } 3931 EXPORT_SYMBOL(il_set_rxon_ht); 3932 3933 /* Return valid, unused, channel for a passive scan to reset the RF */ 3934 u8 3935 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band) 3936 { 3937 const struct il_channel_info *ch_info; 3938 int i; 3939 u8 channel = 0; 3940 u8 min, max; 3941 3942 if (band == NL80211_BAND_5GHZ) { 3943 min = 14; 3944 max = il->channel_count; 3945 } else { 3946 min = 0; 3947 max = 14; 3948 } 3949 3950 for (i = min; i < max; i++) { 3951 channel = il->channel_info[i].channel; 3952 if (channel == le16_to_cpu(il->staging.channel)) 3953 continue; 3954 3955 ch_info = il_get_channel_info(il, band, channel); 3956 if (il_is_channel_valid(ch_info)) 3957 break; 3958 } 3959 3960 return channel; 3961 } 3962 EXPORT_SYMBOL(il_get_single_channel_number); 3963 3964 /** 3965 * il_set_rxon_channel - Set the band and channel values in staging RXON 3966 * @ch: requested channel as a pointer to struct ieee80211_channel 3967 3968 * NOTE: Does not commit to the hardware; it sets appropriate bit fields 3969 * in the staging RXON flag structure based on the ch->band 3970 */ 3971 int 3972 il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch) 3973 { 3974 enum nl80211_band band = ch->band; 3975 u16 channel = ch->hw_value; 3976 3977 if (le16_to_cpu(il->staging.channel) == channel && il->band == band) 3978 return 0; 3979 3980 il->staging.channel = cpu_to_le16(channel); 3981 if (band == NL80211_BAND_5GHZ) 3982 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK; 3983 else 3984 il->staging.flags |= RXON_FLG_BAND_24G_MSK; 3985 3986 il->band = band; 3987 3988 D_INFO("Staging channel set to %d [%d]\n", channel, band); 3989 3990 return 0; 3991 } 3992 EXPORT_SYMBOL(il_set_rxon_channel); 3993 3994 void 3995 il_set_flags_for_band(struct il_priv *il, enum nl80211_band band, 3996 struct ieee80211_vif *vif) 3997 { 3998 if (band == NL80211_BAND_5GHZ) { 3999 il->staging.flags &= 4000 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | 4001 RXON_FLG_CCK_MSK); 4002 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 4003 } else { 4004 /* Copied from il_post_associate() */ 4005 if (vif && vif->bss_conf.use_short_slot) 4006 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 4007 else 4008 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 4009 4010 il->staging.flags |= RXON_FLG_BAND_24G_MSK; 4011 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; 4012 il->staging.flags &= ~RXON_FLG_CCK_MSK; 4013 } 4014 } 4015 EXPORT_SYMBOL(il_set_flags_for_band); 4016 4017 /* 4018 * initialize rxon structure with default values from eeprom 4019 */ 4020 void 4021 il_connection_init_rx_config(struct il_priv *il) 4022 { 4023 const struct il_channel_info *ch_info; 4024 4025 memset(&il->staging, 0, sizeof(il->staging)); 4026 4027 switch (il->iw_mode) { 4028 case NL80211_IFTYPE_UNSPECIFIED: 4029 il->staging.dev_type = RXON_DEV_TYPE_ESS; 4030 break; 4031 case NL80211_IFTYPE_STATION: 4032 il->staging.dev_type = RXON_DEV_TYPE_ESS; 4033 il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; 4034 break; 4035 case NL80211_IFTYPE_ADHOC: 4036 il->staging.dev_type = RXON_DEV_TYPE_IBSS; 4037 il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; 4038 il->staging.filter_flags = 4039 RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 4040 break; 4041 default: 4042 IL_ERR("Unsupported interface type %d\n", il->vif->type); 4043 return; 4044 } 4045 4046 #if 0 4047 /* TODO: Figure out when short_preamble would be set and cache from 4048 * that */ 4049 if (!hw_to_local(il->hw)->short_preamble) 4050 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 4051 else 4052 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 4053 #endif 4054 4055 ch_info = 4056 il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel)); 4057 4058 if (!ch_info) 4059 ch_info = &il->channel_info[0]; 4060 4061 il->staging.channel = cpu_to_le16(ch_info->channel); 4062 il->band = ch_info->band; 4063 4064 il_set_flags_for_band(il, il->band, il->vif); 4065 4066 il->staging.ofdm_basic_rates = 4067 (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; 4068 il->staging.cck_basic_rates = 4069 (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF; 4070 4071 /* clear both MIX and PURE40 mode flag */ 4072 il->staging.flags &= 4073 ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40); 4074 if (il->vif) 4075 memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN); 4076 4077 il->staging.ofdm_ht_single_stream_basic_rates = 0xff; 4078 il->staging.ofdm_ht_dual_stream_basic_rates = 0xff; 4079 } 4080 EXPORT_SYMBOL(il_connection_init_rx_config); 4081 4082 void 4083 il_set_rate(struct il_priv *il) 4084 { 4085 const struct ieee80211_supported_band *hw = NULL; 4086 struct ieee80211_rate *rate; 4087 int i; 4088 4089 hw = il_get_hw_mode(il, il->band); 4090 if (!hw) { 4091 IL_ERR("Failed to set rate: unable to get hw mode\n"); 4092 return; 4093 } 4094 4095 il->active_rate = 0; 4096 4097 for (i = 0; i < hw->n_bitrates; i++) { 4098 rate = &(hw->bitrates[i]); 4099 if (rate->hw_value < RATE_COUNT_LEGACY) 4100 il->active_rate |= (1 << rate->hw_value); 4101 } 4102 4103 D_RATE("Set active_rate = %0x\n", il->active_rate); 4104 4105 il->staging.cck_basic_rates = 4106 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF; 4107 4108 il->staging.ofdm_basic_rates = 4109 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; 4110 } 4111 EXPORT_SYMBOL(il_set_rate); 4112 4113 void 4114 il_chswitch_done(struct il_priv *il, bool is_success) 4115 { 4116 if (test_bit(S_EXIT_PENDING, &il->status)) 4117 return; 4118 4119 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) 4120 ieee80211_chswitch_done(il->vif, is_success); 4121 } 4122 EXPORT_SYMBOL(il_chswitch_done); 4123 4124 void 4125 il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb) 4126 { 4127 struct il_rx_pkt *pkt = rxb_addr(rxb); 4128 struct il_csa_notification *csa = &(pkt->u.csa_notif); 4129 struct il_rxon_cmd *rxon = (void *)&il->active; 4130 4131 if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) 4132 return; 4133 4134 if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) { 4135 rxon->channel = csa->channel; 4136 il->staging.channel = csa->channel; 4137 D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel)); 4138 il_chswitch_done(il, true); 4139 } else { 4140 IL_ERR("CSA notif (fail) : channel %d\n", 4141 le16_to_cpu(csa->channel)); 4142 il_chswitch_done(il, false); 4143 } 4144 } 4145 EXPORT_SYMBOL(il_hdl_csa); 4146 4147 #ifdef CONFIG_IWLEGACY_DEBUG 4148 void 4149 il_print_rx_config_cmd(struct il_priv *il) 4150 { 4151 struct il_rxon_cmd *rxon = &il->staging; 4152 4153 D_RADIO("RX CONFIG:\n"); 4154 il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); 4155 D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); 4156 D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); 4157 D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags)); 4158 D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); 4159 D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates); 4160 D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); 4161 D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr); 4162 D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr); 4163 D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 4164 } 4165 EXPORT_SYMBOL(il_print_rx_config_cmd); 4166 #endif 4167 /** 4168 * il_irq_handle_error - called for HW or SW error interrupt from card 4169 */ 4170 void 4171 il_irq_handle_error(struct il_priv *il) 4172 { 4173 /* Set the FW error flag -- cleared on il_down */ 4174 set_bit(S_FW_ERROR, &il->status); 4175 4176 /* Cancel currently queued command. */ 4177 clear_bit(S_HCMD_ACTIVE, &il->status); 4178 4179 IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version); 4180 4181 il->ops->dump_nic_error_log(il); 4182 if (il->ops->dump_fh) 4183 il->ops->dump_fh(il, NULL, false); 4184 #ifdef CONFIG_IWLEGACY_DEBUG 4185 if (il_get_debug_level(il) & IL_DL_FW_ERRORS) 4186 il_print_rx_config_cmd(il); 4187 #endif 4188 4189 wake_up(&il->wait_command_queue); 4190 4191 /* Keep the restart process from trying to send host 4192 * commands by clearing the INIT status bit */ 4193 clear_bit(S_READY, &il->status); 4194 4195 if (!test_bit(S_EXIT_PENDING, &il->status)) { 4196 IL_DBG(IL_DL_FW_ERRORS, 4197 "Restarting adapter due to uCode error.\n"); 4198 4199 if (il->cfg->mod_params->restart_fw) 4200 queue_work(il->workqueue, &il->restart); 4201 } 4202 } 4203 EXPORT_SYMBOL(il_irq_handle_error); 4204 4205 static int 4206 _il_apm_stop_master(struct il_priv *il) 4207 { 4208 int ret = 0; 4209 4210 /* stop device's busmaster DMA activity */ 4211 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 4212 4213 ret = 4214 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, 4215 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 4216 if (ret < 0) 4217 IL_WARN("Master Disable Timed Out, 100 usec\n"); 4218 4219 D_INFO("stop master\n"); 4220 4221 return ret; 4222 } 4223 4224 void 4225 _il_apm_stop(struct il_priv *il) 4226 { 4227 lockdep_assert_held(&il->reg_lock); 4228 4229 D_INFO("Stop card, put in low power state\n"); 4230 4231 /* Stop device's DMA activity */ 4232 _il_apm_stop_master(il); 4233 4234 /* Reset the entire device */ 4235 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 4236 4237 udelay(10); 4238 4239 /* 4240 * Clear "initialization complete" bit to move adapter from 4241 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 4242 */ 4243 _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 4244 } 4245 EXPORT_SYMBOL(_il_apm_stop); 4246 4247 void 4248 il_apm_stop(struct il_priv *il) 4249 { 4250 unsigned long flags; 4251 4252 spin_lock_irqsave(&il->reg_lock, flags); 4253 _il_apm_stop(il); 4254 spin_unlock_irqrestore(&il->reg_lock, flags); 4255 } 4256 EXPORT_SYMBOL(il_apm_stop); 4257 4258 /* 4259 * Start up NIC's basic functionality after it has been reset 4260 * (e.g. after platform boot, or shutdown via il_apm_stop()) 4261 * NOTE: This does not load uCode nor start the embedded processor 4262 */ 4263 int 4264 il_apm_init(struct il_priv *il) 4265 { 4266 int ret = 0; 4267 u16 lctl; 4268 4269 D_INFO("Init card's basic functions\n"); 4270 4271 /* 4272 * Use "set_bit" below rather than "write", to preserve any hardware 4273 * bits already set by default after reset. 4274 */ 4275 4276 /* Disable L0S exit timer (platform NMI Work/Around) */ 4277 il_set_bit(il, CSR_GIO_CHICKEN_BITS, 4278 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 4279 4280 /* 4281 * Disable L0s without affecting L1; 4282 * don't wait for ICH L0s (ICH bug W/A) 4283 */ 4284 il_set_bit(il, CSR_GIO_CHICKEN_BITS, 4285 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 4286 4287 /* Set FH wait threshold to maximum (HW error during stress W/A) */ 4288 il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); 4289 4290 /* 4291 * Enable HAP INTA (interrupt from management bus) to 4292 * wake device's PCI Express link L1a -> L0s 4293 * NOTE: This is no-op for 3945 (non-existent bit) 4294 */ 4295 il_set_bit(il, CSR_HW_IF_CONFIG_REG, 4296 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 4297 4298 /* 4299 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. 4300 * Check if BIOS (or OS) enabled L1-ASPM on this device. 4301 * If so (likely), disable L0S, so device moves directly L0->L1; 4302 * costs negligible amount of power savings. 4303 * If not (unlikely), enable L0S, so there is at least some 4304 * power savings, even without L1. 4305 */ 4306 if (il->cfg->set_l0s) { 4307 pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); 4308 if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { 4309 /* L1-ASPM enabled; disable(!) L0S */ 4310 il_set_bit(il, CSR_GIO_REG, 4311 CSR_GIO_REG_VAL_L0S_ENABLED); 4312 D_POWER("L1 Enabled; Disabling L0S\n"); 4313 } else { 4314 /* L1-ASPM disabled; enable(!) L0S */ 4315 il_clear_bit(il, CSR_GIO_REG, 4316 CSR_GIO_REG_VAL_L0S_ENABLED); 4317 D_POWER("L1 Disabled; Enabling L0S\n"); 4318 } 4319 } 4320 4321 /* Configure analog phase-lock-loop before activating to D0A */ 4322 if (il->cfg->pll_cfg_val) 4323 il_set_bit(il, CSR_ANA_PLL_CFG, 4324 il->cfg->pll_cfg_val); 4325 4326 /* 4327 * Set "initialization complete" bit to move adapter from 4328 * D0U* --> D0A* (powered-up active) state. 4329 */ 4330 il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 4331 4332 /* 4333 * Wait for clock stabilization; once stabilized, access to 4334 * device-internal resources is supported, e.g. il_wr_prph() 4335 * and accesses to uCode SRAM. 4336 */ 4337 ret = 4338 _il_poll_bit(il, CSR_GP_CNTRL, 4339 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 4340 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 4341 if (ret < 0) { 4342 D_INFO("Failed to init the card\n"); 4343 goto out; 4344 } 4345 4346 /* 4347 * Enable DMA and BSM (if used) clocks, wait for them to stabilize. 4348 * BSM (Boostrap State Machine) is only in 3945 and 4965. 4349 * 4350 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits 4351 * do not disable clocks. This preserves any hardware bits already 4352 * set by default in "CLK_CTRL_REG" after reset. 4353 */ 4354 if (il->cfg->use_bsm) 4355 il_wr_prph(il, APMG_CLK_EN_REG, 4356 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); 4357 else 4358 il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); 4359 udelay(20); 4360 4361 /* Disable L1-Active */ 4362 il_set_bits_prph(il, APMG_PCIDEV_STT_REG, 4363 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 4364 4365 out: 4366 return ret; 4367 } 4368 EXPORT_SYMBOL(il_apm_init); 4369 4370 int 4371 il_set_tx_power(struct il_priv *il, s8 tx_power, bool force) 4372 { 4373 int ret; 4374 s8 prev_tx_power; 4375 bool defer; 4376 4377 lockdep_assert_held(&il->mutex); 4378 4379 if (il->tx_power_user_lmt == tx_power && !force) 4380 return 0; 4381 4382 if (!il->ops->send_tx_power) 4383 return -EOPNOTSUPP; 4384 4385 /* 0 dBm mean 1 milliwatt */ 4386 if (tx_power < 0) { 4387 IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power); 4388 return -EINVAL; 4389 } 4390 4391 if (tx_power > il->tx_power_device_lmt) { 4392 IL_WARN("Requested user TXPOWER %d above upper limit %d.\n", 4393 tx_power, il->tx_power_device_lmt); 4394 return -EINVAL; 4395 } 4396 4397 if (!il_is_ready_rf(il)) 4398 return -EIO; 4399 4400 /* scan complete and commit_rxon use tx_power_next value, 4401 * it always need to be updated for newest request */ 4402 il->tx_power_next = tx_power; 4403 4404 /* do not set tx power when scanning or channel changing */ 4405 defer = test_bit(S_SCANNING, &il->status) || 4406 memcmp(&il->active, &il->staging, sizeof(il->staging)); 4407 if (defer && !force) { 4408 D_INFO("Deferring tx power set\n"); 4409 return 0; 4410 } 4411 4412 prev_tx_power = il->tx_power_user_lmt; 4413 il->tx_power_user_lmt = tx_power; 4414 4415 ret = il->ops->send_tx_power(il); 4416 4417 /* if fail to set tx_power, restore the orig. tx power */ 4418 if (ret) { 4419 il->tx_power_user_lmt = prev_tx_power; 4420 il->tx_power_next = prev_tx_power; 4421 } 4422 return ret; 4423 } 4424 EXPORT_SYMBOL(il_set_tx_power); 4425 4426 void 4427 il_send_bt_config(struct il_priv *il) 4428 { 4429 struct il_bt_cmd bt_cmd = { 4430 .lead_time = BT_LEAD_TIME_DEF, 4431 .max_kill = BT_MAX_KILL_DEF, 4432 .kill_ack_mask = 0, 4433 .kill_cts_mask = 0, 4434 }; 4435 4436 if (!bt_coex_active) 4437 bt_cmd.flags = BT_COEX_DISABLE; 4438 else 4439 bt_cmd.flags = BT_COEX_ENABLE; 4440 4441 D_INFO("BT coex %s\n", 4442 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); 4443 4444 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd)) 4445 IL_ERR("failed to send BT Coex Config\n"); 4446 } 4447 EXPORT_SYMBOL(il_send_bt_config); 4448 4449 int 4450 il_send_stats_request(struct il_priv *il, u8 flags, bool clear) 4451 { 4452 struct il_stats_cmd stats_cmd = { 4453 .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0, 4454 }; 4455 4456 if (flags & CMD_ASYNC) 4457 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd), 4458 &stats_cmd, NULL); 4459 else 4460 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd), 4461 &stats_cmd); 4462 } 4463 EXPORT_SYMBOL(il_send_stats_request); 4464 4465 void 4466 il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb) 4467 { 4468 #ifdef CONFIG_IWLEGACY_DEBUG 4469 struct il_rx_pkt *pkt = rxb_addr(rxb); 4470 struct il_sleep_notification *sleep = &(pkt->u.sleep_notif); 4471 D_RX("sleep mode: %d, src: %d\n", 4472 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 4473 #endif 4474 } 4475 EXPORT_SYMBOL(il_hdl_pm_sleep); 4476 4477 void 4478 il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb) 4479 { 4480 struct il_rx_pkt *pkt = rxb_addr(rxb); 4481 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; 4482 D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len, 4483 il_get_cmd_string(pkt->hdr.cmd)); 4484 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len); 4485 } 4486 EXPORT_SYMBOL(il_hdl_pm_debug_stats); 4487 4488 void 4489 il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb) 4490 { 4491 struct il_rx_pkt *pkt = rxb_addr(rxb); 4492 4493 IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) " 4494 "seq 0x%04X ser 0x%08X\n", 4495 le32_to_cpu(pkt->u.err_resp.error_type), 4496 il_get_cmd_string(pkt->u.err_resp.cmd_id), 4497 pkt->u.err_resp.cmd_id, 4498 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), 4499 le32_to_cpu(pkt->u.err_resp.error_info)); 4500 } 4501 EXPORT_SYMBOL(il_hdl_error); 4502 4503 void 4504 il_clear_isr_stats(struct il_priv *il) 4505 { 4506 memset(&il->isr_stats, 0, sizeof(il->isr_stats)); 4507 } 4508 4509 int 4510 il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, 4511 const struct ieee80211_tx_queue_params *params) 4512 { 4513 struct il_priv *il = hw->priv; 4514 unsigned long flags; 4515 int q; 4516 4517 D_MAC80211("enter\n"); 4518 4519 if (!il_is_ready_rf(il)) { 4520 D_MAC80211("leave - RF not ready\n"); 4521 return -EIO; 4522 } 4523 4524 if (queue >= AC_NUM) { 4525 D_MAC80211("leave - queue >= AC_NUM %d\n", queue); 4526 return 0; 4527 } 4528 4529 q = AC_NUM - 1 - queue; 4530 4531 spin_lock_irqsave(&il->lock, flags); 4532 4533 il->qos_data.def_qos_parm.ac[q].cw_min = 4534 cpu_to_le16(params->cw_min); 4535 il->qos_data.def_qos_parm.ac[q].cw_max = 4536 cpu_to_le16(params->cw_max); 4537 il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; 4538 il->qos_data.def_qos_parm.ac[q].edca_txop = 4539 cpu_to_le16((params->txop * 32)); 4540 4541 il->qos_data.def_qos_parm.ac[q].reserved1 = 0; 4542 4543 spin_unlock_irqrestore(&il->lock, flags); 4544 4545 D_MAC80211("leave\n"); 4546 return 0; 4547 } 4548 EXPORT_SYMBOL(il_mac_conf_tx); 4549 4550 int 4551 il_mac_tx_last_beacon(struct ieee80211_hw *hw) 4552 { 4553 struct il_priv *il = hw->priv; 4554 int ret; 4555 4556 D_MAC80211("enter\n"); 4557 4558 ret = (il->ibss_manager == IL_IBSS_MANAGER); 4559 4560 D_MAC80211("leave ret %d\n", ret); 4561 return ret; 4562 } 4563 EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon); 4564 4565 static int 4566 il_set_mode(struct il_priv *il) 4567 { 4568 il_connection_init_rx_config(il); 4569 4570 if (il->ops->set_rxon_chain) 4571 il->ops->set_rxon_chain(il); 4572 4573 return il_commit_rxon(il); 4574 } 4575 4576 int 4577 il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 4578 { 4579 struct il_priv *il = hw->priv; 4580 int err; 4581 bool reset; 4582 4583 mutex_lock(&il->mutex); 4584 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr); 4585 4586 if (!il_is_ready_rf(il)) { 4587 IL_WARN("Try to add interface when device not ready\n"); 4588 err = -EINVAL; 4589 goto out; 4590 } 4591 4592 /* 4593 * We do not support multiple virtual interfaces, but on hardware reset 4594 * we have to add the same interface again. 4595 */ 4596 reset = (il->vif == vif); 4597 if (il->vif && !reset) { 4598 err = -EOPNOTSUPP; 4599 goto out; 4600 } 4601 4602 il->vif = vif; 4603 il->iw_mode = vif->type; 4604 4605 err = il_set_mode(il); 4606 if (err) { 4607 IL_WARN("Fail to set mode %d\n", vif->type); 4608 if (!reset) { 4609 il->vif = NULL; 4610 il->iw_mode = NL80211_IFTYPE_STATION; 4611 } 4612 } 4613 4614 out: 4615 D_MAC80211("leave err %d\n", err); 4616 mutex_unlock(&il->mutex); 4617 4618 return err; 4619 } 4620 EXPORT_SYMBOL(il_mac_add_interface); 4621 4622 static void 4623 il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif) 4624 { 4625 lockdep_assert_held(&il->mutex); 4626 4627 if (il->scan_vif == vif) { 4628 il_scan_cancel_timeout(il, 200); 4629 il_force_scan_end(il); 4630 } 4631 4632 il_set_mode(il); 4633 } 4634 4635 void 4636 il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 4637 { 4638 struct il_priv *il = hw->priv; 4639 4640 mutex_lock(&il->mutex); 4641 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr); 4642 4643 WARN_ON(il->vif != vif); 4644 il->vif = NULL; 4645 il->iw_mode = NL80211_IFTYPE_UNSPECIFIED; 4646 il_teardown_interface(il, vif); 4647 eth_zero_addr(il->bssid); 4648 4649 D_MAC80211("leave\n"); 4650 mutex_unlock(&il->mutex); 4651 } 4652 EXPORT_SYMBOL(il_mac_remove_interface); 4653 4654 int 4655 il_alloc_txq_mem(struct il_priv *il) 4656 { 4657 if (!il->txq) 4658 il->txq = 4659 kcalloc(il->cfg->num_of_queues, 4660 sizeof(struct il_tx_queue), 4661 GFP_KERNEL); 4662 if (!il->txq) { 4663 IL_ERR("Not enough memory for txq\n"); 4664 return -ENOMEM; 4665 } 4666 return 0; 4667 } 4668 EXPORT_SYMBOL(il_alloc_txq_mem); 4669 4670 void 4671 il_free_txq_mem(struct il_priv *il) 4672 { 4673 kfree(il->txq); 4674 il->txq = NULL; 4675 } 4676 EXPORT_SYMBOL(il_free_txq_mem); 4677 4678 int 4679 il_force_reset(struct il_priv *il, bool external) 4680 { 4681 struct il_force_reset *force_reset; 4682 4683 if (test_bit(S_EXIT_PENDING, &il->status)) 4684 return -EINVAL; 4685 4686 force_reset = &il->force_reset; 4687 force_reset->reset_request_count++; 4688 if (!external) { 4689 if (force_reset->last_force_reset_jiffies && 4690 time_after(force_reset->last_force_reset_jiffies + 4691 force_reset->reset_duration, jiffies)) { 4692 D_INFO("force reset rejected\n"); 4693 force_reset->reset_reject_count++; 4694 return -EAGAIN; 4695 } 4696 } 4697 force_reset->reset_success_count++; 4698 force_reset->last_force_reset_jiffies = jiffies; 4699 4700 /* 4701 * if the request is from external(ex: debugfs), 4702 * then always perform the request in regardless the module 4703 * parameter setting 4704 * if the request is from internal (uCode error or driver 4705 * detect failure), then fw_restart module parameter 4706 * need to be check before performing firmware reload 4707 */ 4708 4709 if (!external && !il->cfg->mod_params->restart_fw) { 4710 D_INFO("Cancel firmware reload based on " 4711 "module parameter setting\n"); 4712 return 0; 4713 } 4714 4715 IL_ERR("On demand firmware reload\n"); 4716 4717 /* Set the FW error flag -- cleared on il_down */ 4718 set_bit(S_FW_ERROR, &il->status); 4719 wake_up(&il->wait_command_queue); 4720 /* 4721 * Keep the restart process from trying to send host 4722 * commands by clearing the INIT status bit 4723 */ 4724 clear_bit(S_READY, &il->status); 4725 queue_work(il->workqueue, &il->restart); 4726 4727 return 0; 4728 } 4729 EXPORT_SYMBOL(il_force_reset); 4730 4731 int 4732 il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 4733 enum nl80211_iftype newtype, bool newp2p) 4734 { 4735 struct il_priv *il = hw->priv; 4736 int err; 4737 4738 mutex_lock(&il->mutex); 4739 D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n", 4740 vif->type, vif->addr, newtype, newp2p); 4741 4742 if (newp2p) { 4743 err = -EOPNOTSUPP; 4744 goto out; 4745 } 4746 4747 if (!il->vif || !il_is_ready_rf(il)) { 4748 /* 4749 * Huh? But wait ... this can maybe happen when 4750 * we're in the middle of a firmware restart! 4751 */ 4752 err = -EBUSY; 4753 goto out; 4754 } 4755 4756 /* success */ 4757 vif->type = newtype; 4758 vif->p2p = false; 4759 il->iw_mode = newtype; 4760 il_teardown_interface(il, vif); 4761 err = 0; 4762 4763 out: 4764 D_MAC80211("leave err %d\n", err); 4765 mutex_unlock(&il->mutex); 4766 4767 return err; 4768 } 4769 EXPORT_SYMBOL(il_mac_change_interface); 4770 4771 void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 4772 u32 queues, bool drop) 4773 { 4774 struct il_priv *il = hw->priv; 4775 unsigned long timeout = jiffies + msecs_to_jiffies(500); 4776 int i; 4777 4778 mutex_lock(&il->mutex); 4779 D_MAC80211("enter\n"); 4780 4781 if (il->txq == NULL) 4782 goto out; 4783 4784 for (i = 0; i < il->hw_params.max_txq_num; i++) { 4785 struct il_queue *q; 4786 4787 if (i == il->cmd_queue) 4788 continue; 4789 4790 q = &il->txq[i].q; 4791 if (q->read_ptr == q->write_ptr) 4792 continue; 4793 4794 if (time_after(jiffies, timeout)) { 4795 IL_ERR("Failed to flush queue %d\n", q->id); 4796 break; 4797 } 4798 4799 msleep(20); 4800 } 4801 out: 4802 D_MAC80211("leave\n"); 4803 mutex_unlock(&il->mutex); 4804 } 4805 EXPORT_SYMBOL(il_mac_flush); 4806 4807 /* 4808 * On every watchdog tick we check (latest) time stamp. If it does not 4809 * change during timeout period and queue is not empty we reset firmware. 4810 */ 4811 static int 4812 il_check_stuck_queue(struct il_priv *il, int cnt) 4813 { 4814 struct il_tx_queue *txq = &il->txq[cnt]; 4815 struct il_queue *q = &txq->q; 4816 unsigned long timeout; 4817 unsigned long now = jiffies; 4818 int ret; 4819 4820 if (q->read_ptr == q->write_ptr) { 4821 txq->time_stamp = now; 4822 return 0; 4823 } 4824 4825 timeout = 4826 txq->time_stamp + 4827 msecs_to_jiffies(il->cfg->wd_timeout); 4828 4829 if (time_after(now, timeout)) { 4830 IL_ERR("Queue %d stuck for %u ms.\n", q->id, 4831 jiffies_to_msecs(now - txq->time_stamp)); 4832 ret = il_force_reset(il, false); 4833 return (ret == -EAGAIN) ? 0 : 1; 4834 } 4835 4836 return 0; 4837 } 4838 4839 /* 4840 * Making watchdog tick be a quarter of timeout assure we will 4841 * discover the queue hung between timeout and 1.25*timeout 4842 */ 4843 #define IL_WD_TICK(timeout) ((timeout) / 4) 4844 4845 /* 4846 * Watchdog timer callback, we check each tx queue for stuck, if if hung 4847 * we reset the firmware. If everything is fine just rearm the timer. 4848 */ 4849 void 4850 il_bg_watchdog(struct timer_list *t) 4851 { 4852 struct il_priv *il = from_timer(il, t, watchdog); 4853 int cnt; 4854 unsigned long timeout; 4855 4856 if (test_bit(S_EXIT_PENDING, &il->status)) 4857 return; 4858 4859 timeout = il->cfg->wd_timeout; 4860 if (timeout == 0) 4861 return; 4862 4863 /* monitor and check for stuck cmd queue */ 4864 if (il_check_stuck_queue(il, il->cmd_queue)) 4865 return; 4866 4867 /* monitor and check for other stuck queues */ 4868 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { 4869 /* skip as we already checked the command queue */ 4870 if (cnt == il->cmd_queue) 4871 continue; 4872 if (il_check_stuck_queue(il, cnt)) 4873 return; 4874 } 4875 4876 mod_timer(&il->watchdog, 4877 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout))); 4878 } 4879 EXPORT_SYMBOL(il_bg_watchdog); 4880 4881 void 4882 il_setup_watchdog(struct il_priv *il) 4883 { 4884 unsigned int timeout = il->cfg->wd_timeout; 4885 4886 if (timeout) 4887 mod_timer(&il->watchdog, 4888 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout))); 4889 else 4890 del_timer(&il->watchdog); 4891 } 4892 EXPORT_SYMBOL(il_setup_watchdog); 4893 4894 /* 4895 * extended beacon time format 4896 * time in usec will be changed into a 32-bit value in extended:internal format 4897 * the extended part is the beacon counts 4898 * the internal part is the time in usec within one beacon interval 4899 */ 4900 u32 4901 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval) 4902 { 4903 u32 quot; 4904 u32 rem; 4905 u32 interval = beacon_interval * TIME_UNIT; 4906 4907 if (!interval || !usec) 4908 return 0; 4909 4910 quot = 4911 (usec / 4912 interval) & (il_beacon_time_mask_high(il, 4913 il->hw_params. 4914 beacon_time_tsf_bits) >> il-> 4915 hw_params.beacon_time_tsf_bits); 4916 rem = 4917 (usec % interval) & il_beacon_time_mask_low(il, 4918 il->hw_params. 4919 beacon_time_tsf_bits); 4920 4921 return (quot << il->hw_params.beacon_time_tsf_bits) + rem; 4922 } 4923 EXPORT_SYMBOL(il_usecs_to_beacons); 4924 4925 /* base is usually what we get from ucode with each received frame, 4926 * the same as HW timer counter counting down 4927 */ 4928 __le32 4929 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, 4930 u32 beacon_interval) 4931 { 4932 u32 base_low = base & il_beacon_time_mask_low(il, 4933 il->hw_params. 4934 beacon_time_tsf_bits); 4935 u32 addon_low = addon & il_beacon_time_mask_low(il, 4936 il->hw_params. 4937 beacon_time_tsf_bits); 4938 u32 interval = beacon_interval * TIME_UNIT; 4939 u32 res = (base & il_beacon_time_mask_high(il, 4940 il->hw_params. 4941 beacon_time_tsf_bits)) + 4942 (addon & il_beacon_time_mask_high(il, 4943 il->hw_params. 4944 beacon_time_tsf_bits)); 4945 4946 if (base_low > addon_low) 4947 res += base_low - addon_low; 4948 else if (base_low < addon_low) { 4949 res += interval + base_low - addon_low; 4950 res += (1 << il->hw_params.beacon_time_tsf_bits); 4951 } else 4952 res += (1 << il->hw_params.beacon_time_tsf_bits); 4953 4954 return cpu_to_le32(res); 4955 } 4956 EXPORT_SYMBOL(il_add_beacon_time); 4957 4958 #ifdef CONFIG_PM_SLEEP 4959 4960 static int 4961 il_pci_suspend(struct device *device) 4962 { 4963 struct pci_dev *pdev = to_pci_dev(device); 4964 struct il_priv *il = pci_get_drvdata(pdev); 4965 4966 /* 4967 * This function is called when system goes into suspend state 4968 * mac80211 will call il_mac_stop() from the mac80211 suspend function 4969 * first but since il_mac_stop() has no knowledge of who the caller is, 4970 * it will not call apm_ops.stop() to stop the DMA operation. 4971 * Calling apm_ops.stop here to make sure we stop the DMA. 4972 */ 4973 il_apm_stop(il); 4974 4975 return 0; 4976 } 4977 4978 static int 4979 il_pci_resume(struct device *device) 4980 { 4981 struct pci_dev *pdev = to_pci_dev(device); 4982 struct il_priv *il = pci_get_drvdata(pdev); 4983 bool hw_rfkill = false; 4984 4985 /* 4986 * We disable the RETRY_TIMEOUT register (0x41) to keep 4987 * PCI Tx retries from interfering with C3 CPU state. 4988 */ 4989 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 4990 4991 il_enable_interrupts(il); 4992 4993 if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) 4994 hw_rfkill = true; 4995 4996 if (hw_rfkill) 4997 set_bit(S_RFKILL, &il->status); 4998 else 4999 clear_bit(S_RFKILL, &il->status); 5000 5001 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill); 5002 5003 return 0; 5004 } 5005 5006 SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume); 5007 EXPORT_SYMBOL(il_pm_ops); 5008 5009 #endif /* CONFIG_PM_SLEEP */ 5010 5011 static void 5012 il_update_qos(struct il_priv *il) 5013 { 5014 if (test_bit(S_EXIT_PENDING, &il->status)) 5015 return; 5016 5017 il->qos_data.def_qos_parm.qos_flags = 0; 5018 5019 if (il->qos_data.qos_active) 5020 il->qos_data.def_qos_parm.qos_flags |= 5021 QOS_PARAM_FLG_UPDATE_EDCA_MSK; 5022 5023 if (il->ht.enabled) 5024 il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; 5025 5026 D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n", 5027 il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags); 5028 5029 il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd), 5030 &il->qos_data.def_qos_parm, NULL); 5031 } 5032 5033 /** 5034 * il_mac_config - mac80211 config callback 5035 */ 5036 int 5037 il_mac_config(struct ieee80211_hw *hw, u32 changed) 5038 { 5039 struct il_priv *il = hw->priv; 5040 const struct il_channel_info *ch_info; 5041 struct ieee80211_conf *conf = &hw->conf; 5042 struct ieee80211_channel *channel = conf->chandef.chan; 5043 struct il_ht_config *ht_conf = &il->current_ht_config; 5044 unsigned long flags = 0; 5045 int ret = 0; 5046 u16 ch; 5047 int scan_active = 0; 5048 bool ht_changed = false; 5049 5050 mutex_lock(&il->mutex); 5051 D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value, 5052 changed); 5053 5054 if (unlikely(test_bit(S_SCANNING, &il->status))) { 5055 scan_active = 1; 5056 D_MAC80211("scan active\n"); 5057 } 5058 5059 if (changed & 5060 (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) { 5061 /* mac80211 uses static for non-HT which is what we want */ 5062 il->current_ht_config.smps = conf->smps_mode; 5063 5064 /* 5065 * Recalculate chain counts. 5066 * 5067 * If monitor mode is enabled then mac80211 will 5068 * set up the SM PS mode to OFF if an HT channel is 5069 * configured. 5070 */ 5071 if (il->ops->set_rxon_chain) 5072 il->ops->set_rxon_chain(il); 5073 } 5074 5075 /* during scanning mac80211 will delay channel setting until 5076 * scan finish with changed = 0 5077 */ 5078 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { 5079 5080 if (scan_active) 5081 goto set_ch_out; 5082 5083 ch = channel->hw_value; 5084 ch_info = il_get_channel_info(il, channel->band, ch); 5085 if (!il_is_channel_valid(ch_info)) { 5086 D_MAC80211("leave - invalid channel\n"); 5087 ret = -EINVAL; 5088 goto set_ch_out; 5089 } 5090 5091 if (il->iw_mode == NL80211_IFTYPE_ADHOC && 5092 !il_is_channel_ibss(ch_info)) { 5093 D_MAC80211("leave - not IBSS channel\n"); 5094 ret = -EINVAL; 5095 goto set_ch_out; 5096 } 5097 5098 spin_lock_irqsave(&il->lock, flags); 5099 5100 /* Configure HT40 channels */ 5101 if (il->ht.enabled != conf_is_ht(conf)) { 5102 il->ht.enabled = conf_is_ht(conf); 5103 ht_changed = true; 5104 } 5105 if (il->ht.enabled) { 5106 if (conf_is_ht40_minus(conf)) { 5107 il->ht.extension_chan_offset = 5108 IEEE80211_HT_PARAM_CHA_SEC_BELOW; 5109 il->ht.is_40mhz = true; 5110 } else if (conf_is_ht40_plus(conf)) { 5111 il->ht.extension_chan_offset = 5112 IEEE80211_HT_PARAM_CHA_SEC_ABOVE; 5113 il->ht.is_40mhz = true; 5114 } else { 5115 il->ht.extension_chan_offset = 5116 IEEE80211_HT_PARAM_CHA_SEC_NONE; 5117 il->ht.is_40mhz = false; 5118 } 5119 } else 5120 il->ht.is_40mhz = false; 5121 5122 /* 5123 * Default to no protection. Protection mode will 5124 * later be set from BSS config in il_ht_conf 5125 */ 5126 il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; 5127 5128 /* if we are switching from ht to 2.4 clear flags 5129 * from any ht related info since 2.4 does not 5130 * support ht */ 5131 if ((le16_to_cpu(il->staging.channel) != ch)) 5132 il->staging.flags = 0; 5133 5134 il_set_rxon_channel(il, channel); 5135 il_set_rxon_ht(il, ht_conf); 5136 5137 il_set_flags_for_band(il, channel->band, il->vif); 5138 5139 spin_unlock_irqrestore(&il->lock, flags); 5140 5141 if (il->ops->update_bcast_stations) 5142 ret = il->ops->update_bcast_stations(il); 5143 5144 set_ch_out: 5145 /* The list of supported rates and rate mask can be different 5146 * for each band; since the band may have changed, reset 5147 * the rate mask to what mac80211 lists */ 5148 il_set_rate(il); 5149 } 5150 5151 if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) { 5152 il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS); 5153 if (!il->power_data.ps_disabled) 5154 IL_WARN_ONCE("Enabling power save might cause firmware crashes\n"); 5155 ret = il_power_update_mode(il, false); 5156 if (ret) 5157 D_MAC80211("Error setting sleep level\n"); 5158 } 5159 5160 if (changed & IEEE80211_CONF_CHANGE_POWER) { 5161 D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt, 5162 conf->power_level); 5163 5164 il_set_tx_power(il, conf->power_level, false); 5165 } 5166 5167 if (!il_is_ready(il)) { 5168 D_MAC80211("leave - not ready\n"); 5169 goto out; 5170 } 5171 5172 if (scan_active) 5173 goto out; 5174 5175 if (memcmp(&il->active, &il->staging, sizeof(il->staging))) 5176 il_commit_rxon(il); 5177 else 5178 D_INFO("Not re-sending same RXON configuration.\n"); 5179 if (ht_changed) 5180 il_update_qos(il); 5181 5182 out: 5183 D_MAC80211("leave ret %d\n", ret); 5184 mutex_unlock(&il->mutex); 5185 5186 return ret; 5187 } 5188 EXPORT_SYMBOL(il_mac_config); 5189 5190 void 5191 il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 5192 { 5193 struct il_priv *il = hw->priv; 5194 unsigned long flags; 5195 5196 mutex_lock(&il->mutex); 5197 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr); 5198 5199 spin_lock_irqsave(&il->lock, flags); 5200 5201 memset(&il->current_ht_config, 0, sizeof(struct il_ht_config)); 5202 5203 /* new association get rid of ibss beacon skb */ 5204 if (il->beacon_skb) 5205 dev_kfree_skb(il->beacon_skb); 5206 il->beacon_skb = NULL; 5207 il->timestamp = 0; 5208 5209 spin_unlock_irqrestore(&il->lock, flags); 5210 5211 il_scan_cancel_timeout(il, 100); 5212 if (!il_is_ready_rf(il)) { 5213 D_MAC80211("leave - not ready\n"); 5214 mutex_unlock(&il->mutex); 5215 return; 5216 } 5217 5218 /* we are restarting association process */ 5219 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 5220 il_commit_rxon(il); 5221 5222 il_set_rate(il); 5223 5224 D_MAC80211("leave\n"); 5225 mutex_unlock(&il->mutex); 5226 } 5227 EXPORT_SYMBOL(il_mac_reset_tsf); 5228 5229 static void 5230 il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif) 5231 { 5232 struct il_ht_config *ht_conf = &il->current_ht_config; 5233 struct ieee80211_sta *sta; 5234 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 5235 5236 D_ASSOC("enter:\n"); 5237 5238 if (!il->ht.enabled) 5239 return; 5240 5241 il->ht.protection = 5242 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; 5243 il->ht.non_gf_sta_present = 5244 !!(bss_conf-> 5245 ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); 5246 5247 ht_conf->single_chain_sufficient = false; 5248 5249 switch (vif->type) { 5250 case NL80211_IFTYPE_STATION: 5251 rcu_read_lock(); 5252 sta = ieee80211_find_sta(vif, bss_conf->bssid); 5253 if (sta) { 5254 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 5255 int maxstreams; 5256 5257 maxstreams = 5258 (ht_cap->mcs. 5259 tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) 5260 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; 5261 maxstreams += 1; 5262 5263 if (ht_cap->mcs.rx_mask[1] == 0 && 5264 ht_cap->mcs.rx_mask[2] == 0) 5265 ht_conf->single_chain_sufficient = true; 5266 if (maxstreams <= 1) 5267 ht_conf->single_chain_sufficient = true; 5268 } else { 5269 /* 5270 * If at all, this can only happen through a race 5271 * when the AP disconnects us while we're still 5272 * setting up the connection, in that case mac80211 5273 * will soon tell us about that. 5274 */ 5275 ht_conf->single_chain_sufficient = true; 5276 } 5277 rcu_read_unlock(); 5278 break; 5279 case NL80211_IFTYPE_ADHOC: 5280 ht_conf->single_chain_sufficient = true; 5281 break; 5282 default: 5283 break; 5284 } 5285 5286 D_ASSOC("leave\n"); 5287 } 5288 5289 static inline void 5290 il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif) 5291 { 5292 /* 5293 * inform the ucode that there is no longer an 5294 * association and that no more packets should be 5295 * sent 5296 */ 5297 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 5298 il->staging.assoc_id = 0; 5299 il_commit_rxon(il); 5300 } 5301 5302 static void 5303 il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 5304 { 5305 struct il_priv *il = hw->priv; 5306 unsigned long flags; 5307 __le64 timestamp; 5308 struct sk_buff *skb = ieee80211_beacon_get(hw, vif); 5309 5310 if (!skb) 5311 return; 5312 5313 D_MAC80211("enter\n"); 5314 5315 lockdep_assert_held(&il->mutex); 5316 5317 if (!il->beacon_enabled) { 5318 IL_ERR("update beacon with no beaconing enabled\n"); 5319 dev_kfree_skb(skb); 5320 return; 5321 } 5322 5323 spin_lock_irqsave(&il->lock, flags); 5324 5325 if (il->beacon_skb) 5326 dev_kfree_skb(il->beacon_skb); 5327 5328 il->beacon_skb = skb; 5329 5330 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 5331 il->timestamp = le64_to_cpu(timestamp); 5332 5333 D_MAC80211("leave\n"); 5334 spin_unlock_irqrestore(&il->lock, flags); 5335 5336 if (!il_is_ready_rf(il)) { 5337 D_MAC80211("leave - RF not ready\n"); 5338 return; 5339 } 5340 5341 il->ops->post_associate(il); 5342 } 5343 5344 void 5345 il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 5346 struct ieee80211_bss_conf *bss_conf, u32 changes) 5347 { 5348 struct il_priv *il = hw->priv; 5349 int ret; 5350 5351 mutex_lock(&il->mutex); 5352 D_MAC80211("enter: changes 0x%x\n", changes); 5353 5354 if (!il_is_alive(il)) { 5355 D_MAC80211("leave - not alive\n"); 5356 mutex_unlock(&il->mutex); 5357 return; 5358 } 5359 5360 if (changes & BSS_CHANGED_QOS) { 5361 unsigned long flags; 5362 5363 spin_lock_irqsave(&il->lock, flags); 5364 il->qos_data.qos_active = bss_conf->qos; 5365 il_update_qos(il); 5366 spin_unlock_irqrestore(&il->lock, flags); 5367 } 5368 5369 if (changes & BSS_CHANGED_BEACON_ENABLED) { 5370 /* FIXME: can we remove beacon_enabled ? */ 5371 if (vif->bss_conf.enable_beacon) 5372 il->beacon_enabled = true; 5373 else 5374 il->beacon_enabled = false; 5375 } 5376 5377 if (changes & BSS_CHANGED_BSSID) { 5378 D_MAC80211("BSSID %pM\n", bss_conf->bssid); 5379 5380 /* 5381 * On passive channel we wait with blocked queues to see if 5382 * there is traffic on that channel. If no frame will be 5383 * received (what is very unlikely since scan detects AP on 5384 * that channel, but theoretically possible), mac80211 associate 5385 * procedure will time out and mac80211 will call us with NULL 5386 * bssid. We have to unblock queues on such condition. 5387 */ 5388 if (is_zero_ether_addr(bss_conf->bssid)) 5389 il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE); 5390 5391 /* 5392 * If there is currently a HW scan going on in the background, 5393 * then we need to cancel it, otherwise sometimes we are not 5394 * able to authenticate (FIXME: why ?) 5395 */ 5396 if (il_scan_cancel_timeout(il, 100)) { 5397 D_MAC80211("leave - scan abort failed\n"); 5398 mutex_unlock(&il->mutex); 5399 return; 5400 } 5401 5402 /* mac80211 only sets assoc when in STATION mode */ 5403 memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); 5404 5405 /* FIXME: currently needed in a few places */ 5406 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); 5407 } 5408 5409 /* 5410 * This needs to be after setting the BSSID in case 5411 * mac80211 decides to do both changes at once because 5412 * it will invoke post_associate. 5413 */ 5414 if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON)) 5415 il_beacon_update(hw, vif); 5416 5417 if (changes & BSS_CHANGED_ERP_PREAMBLE) { 5418 D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble); 5419 if (bss_conf->use_short_preamble) 5420 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 5421 else 5422 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 5423 } 5424 5425 if (changes & BSS_CHANGED_ERP_CTS_PROT) { 5426 D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot); 5427 if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ) 5428 il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; 5429 else 5430 il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; 5431 if (bss_conf->use_cts_prot) 5432 il->staging.flags |= RXON_FLG_SELF_CTS_EN; 5433 else 5434 il->staging.flags &= ~RXON_FLG_SELF_CTS_EN; 5435 } 5436 5437 if (changes & BSS_CHANGED_BASIC_RATES) { 5438 /* XXX use this information 5439 * 5440 * To do that, remove code from il_set_rate() and put something 5441 * like this here: 5442 * 5443 if (A-band) 5444 il->staging.ofdm_basic_rates = 5445 bss_conf->basic_rates; 5446 else 5447 il->staging.ofdm_basic_rates = 5448 bss_conf->basic_rates >> 4; 5449 il->staging.cck_basic_rates = 5450 bss_conf->basic_rates & 0xF; 5451 */ 5452 } 5453 5454 if (changes & BSS_CHANGED_HT) { 5455 il_ht_conf(il, vif); 5456 5457 if (il->ops->set_rxon_chain) 5458 il->ops->set_rxon_chain(il); 5459 } 5460 5461 if (changes & BSS_CHANGED_ASSOC) { 5462 D_MAC80211("ASSOC %d\n", bss_conf->assoc); 5463 if (bss_conf->assoc) { 5464 il->timestamp = bss_conf->sync_tsf; 5465 5466 if (!il_is_rfkill(il)) 5467 il->ops->post_associate(il); 5468 } else 5469 il_set_no_assoc(il, vif); 5470 } 5471 5472 if (changes && il_is_associated(il) && bss_conf->aid) { 5473 D_MAC80211("Changes (%#x) while associated\n", changes); 5474 ret = il_send_rxon_assoc(il); 5475 if (!ret) { 5476 /* Sync active_rxon with latest change. */ 5477 memcpy((void *)&il->active, &il->staging, 5478 sizeof(struct il_rxon_cmd)); 5479 } 5480 } 5481 5482 if (changes & BSS_CHANGED_BEACON_ENABLED) { 5483 if (vif->bss_conf.enable_beacon) { 5484 memcpy(il->staging.bssid_addr, bss_conf->bssid, 5485 ETH_ALEN); 5486 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); 5487 il->ops->config_ap(il); 5488 } else 5489 il_set_no_assoc(il, vif); 5490 } 5491 5492 if (changes & BSS_CHANGED_IBSS) { 5493 ret = il->ops->manage_ibss_station(il, vif, 5494 bss_conf->ibss_joined); 5495 if (ret) 5496 IL_ERR("failed to %s IBSS station %pM\n", 5497 bss_conf->ibss_joined ? "add" : "remove", 5498 bss_conf->bssid); 5499 } 5500 5501 D_MAC80211("leave\n"); 5502 mutex_unlock(&il->mutex); 5503 } 5504 EXPORT_SYMBOL(il_mac_bss_info_changed); 5505 5506 irqreturn_t 5507 il_isr(int irq, void *data) 5508 { 5509 struct il_priv *il = data; 5510 u32 inta, inta_mask; 5511 u32 inta_fh; 5512 unsigned long flags; 5513 if (!il) 5514 return IRQ_NONE; 5515 5516 spin_lock_irqsave(&il->lock, flags); 5517 5518 /* Disable (but don't clear!) interrupts here to avoid 5519 * back-to-back ISRs and sporadic interrupts from our NIC. 5520 * If we have something to service, the tasklet will re-enable ints. 5521 * If we *don't* have something, we'll re-enable before leaving here. */ 5522 inta_mask = _il_rd(il, CSR_INT_MASK); /* just for debug */ 5523 _il_wr(il, CSR_INT_MASK, 0x00000000); 5524 5525 /* Discover which interrupts are active/pending */ 5526 inta = _il_rd(il, CSR_INT); 5527 inta_fh = _il_rd(il, CSR_FH_INT_STATUS); 5528 5529 /* Ignore interrupt if there's nothing in NIC to service. 5530 * This may be due to IRQ shared with another device, 5531 * or due to sporadic interrupts thrown from our NIC. */ 5532 if (!inta && !inta_fh) { 5533 D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n"); 5534 goto none; 5535 } 5536 5537 if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) { 5538 /* Hardware disappeared. It might have already raised 5539 * an interrupt */ 5540 IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta); 5541 goto unplugged; 5542 } 5543 5544 D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask, 5545 inta_fh); 5546 5547 inta &= ~CSR_INT_BIT_SCD; 5548 5549 /* il_irq_tasklet() will service interrupts and re-enable them */ 5550 if (likely(inta || inta_fh)) 5551 tasklet_schedule(&il->irq_tasklet); 5552 5553 unplugged: 5554 spin_unlock_irqrestore(&il->lock, flags); 5555 return IRQ_HANDLED; 5556 5557 none: 5558 /* re-enable interrupts here since we don't have anything to service. */ 5559 /* only Re-enable if disabled by irq */ 5560 if (test_bit(S_INT_ENABLED, &il->status)) 5561 il_enable_interrupts(il); 5562 spin_unlock_irqrestore(&il->lock, flags); 5563 return IRQ_NONE; 5564 } 5565 EXPORT_SYMBOL(il_isr); 5566 5567 /* 5568 * il_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this 5569 * function. 5570 */ 5571 void 5572 il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info, 5573 __le16 fc, __le32 *tx_flags) 5574 { 5575 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { 5576 *tx_flags |= TX_CMD_FLG_RTS_MSK; 5577 *tx_flags &= ~TX_CMD_FLG_CTS_MSK; 5578 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; 5579 5580 if (!ieee80211_is_mgmt(fc)) 5581 return; 5582 5583 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { 5584 case cpu_to_le16(IEEE80211_STYPE_AUTH): 5585 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 5586 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): 5587 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): 5588 *tx_flags &= ~TX_CMD_FLG_RTS_MSK; 5589 *tx_flags |= TX_CMD_FLG_CTS_MSK; 5590 break; 5591 } 5592 } else if (info->control.rates[0]. 5593 flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 5594 *tx_flags &= ~TX_CMD_FLG_RTS_MSK; 5595 *tx_flags |= TX_CMD_FLG_CTS_MSK; 5596 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; 5597 } 5598 } 5599 EXPORT_SYMBOL(il_tx_cmd_protection); 5600