/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"

#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

static inline const struct fw_img *
iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
{
	if (ucode_type >= IWL_UCODE_TYPE_MAX)
		return NULL;

	return &mvm->fw->img[ucode_type];
}

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
			     IWL_RSS_HASH_TYPE_IPV6_TCP |
			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
	};

	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
	memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}

void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
	int i;

	if (!mvm->fw_paging_db[0].fw_paging_block)
		return;

	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
		if (!mvm->fw_paging_db[i].fw_paging_block) {
			IWL_DEBUG_FW(mvm,
				     "Paging: block %d already freed, continue to next page\n",
				     i);

			continue;
		}

		__free_pages(mvm->fw_paging_db[i].fw_paging_block,
			     get_order(mvm->fw_paging_db[i].fw_paging_size));
	}
	kfree(mvm->trans->paging_download_buf);
	mvm->trans->paging_download_buf = NULL;

	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}

static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
	int sec_idx, idx;
	u32 offset = 0;

	/*
	 * Find where the paging image starts:
	 * if a CPU2 image exists and it is in paging format, the image
	 * layout is:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
	 *	CPU2 sections from the CPU2 paging sections
	 * CPU2 paging CSS
	 * CPU2 paging image (including instructions and data)
	 */
	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	if (sec_idx >= IWL_UCODE_SECTION_MAX) {
		IWL_ERR(mvm, "driver didn't find paging image\n");
		iwl_free_fw_paging(mvm);
		return -EINVAL;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

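	/*
	 * fw_paging_db[0] was sized to hold only the 4KB CSS section
	 * (FW_PAGING_SIZE); the paging data itself is copied below into the
	 * 32KB blocks that follow it.
	 */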
	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       mvm->fw_paging_db[0].fw_paging_size);

	IWL_DEBUG_FW(mvm,
		     "Paging: copied %d CSS bytes to first block\n",
		     mvm->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * Copy the paging blocks to the dram.
	 * The loop index starts from 1 since the CSS block has already been
	 * copied to dram and its index is 0.
	 * The loop stops before num_of_paging_blk since the last block may
	 * not be full.
	 */
	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       mvm->fw_paging_db[idx].fw_paging_size);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d paging bytes to block %d\n",
			     mvm->fw_paging_db[idx].fw_paging_size,
			     idx);

		offset += mvm->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (mvm->num_of_pages_in_last_blk > 0) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d pages in the last block %d\n",
			     mvm->num_of_pages_in_last_blk, idx);
	}

	return 0;
}

static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx = 0;
	int order, num_of_pages;
	int dma_enabled;

	if (mvm->fw_paging_db[0].fw_paging_block)
		return 0;

	dma_enabled = is_device_dma_capable(mvm->trans->dev);

	/* ensure that BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	mvm->num_of_paging_blk = ((num_of_pages - 1) /
				    NUM_OF_PAGE_PER_GROUP) + 1;

	mvm->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);

	IWL_DEBUG_FW(mvm,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     mvm->num_of_paging_blk,
		     mvm->num_of_pages_in_last_blk);

	/* allocate block of 4Kbytes for paging CSS */
	order = get_order(FW_PAGING_SIZE);
	block = alloc_pages(GFP_KERNEL, order);
	if (!block) {
		/* free all the previous pages since we failed */
		iwl_free_fw_paging(mvm);
		return -ENOMEM;
	}

	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;

	if (dma_enabled) {
		phys = dma_map_page(mvm->trans->dev, block, 0,
				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(mvm->trans->dev, phys)) {
			/*
			 * free the previous pages and the current one since
			 * we failed to map_page.
			 */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}
		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
	} else {
		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
			blk_idx << BLOCK_2_EXP_SIZE;
	}

	IWL_DEBUG_FW(mvm,
		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
		     order);

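	/*
	 * When the device is not DMA capable, fw_paging_phys is not a bus
	 * address: it encodes the block index, tagged with PAGING_ADDR_SIG,
	 * and the actual pages are reached through mvm->fw_paging_db
	 * (exported to the transport as trans->paging_db in
	 * iwl_trans_get_paging_item()). This scheme is used above for the
	 * CSS block and below for the data blocks.
	 */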
	/*
	 * Allocate blocks in dram.
	 * The CSS is allocated in fw_paging_db[0], so the loop starts from
	 * index 1.
	 */
	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		/* allocate block of PAGING_BLOCK_SIZE (32K) */
		order = get_order(PAGING_BLOCK_SIZE);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}

		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;

		if (dma_enabled) {
			phys = dma_map_page(mvm->trans->dev, block, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(mvm->trans->dev, phys)) {
				/*
				 * free the previous pages and the current one
				 * since we failed to map_page.
				 */
				iwl_free_fw_paging(mvm);
				return -ENOMEM;
			}
			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
		} else {
			mvm->fw_paging_db[blk_idx].fw_paging_phys =
				PAGING_ADDR_SIG |
				blk_idx << BLOCK_2_EXP_SIZE;
		}

		IWL_DEBUG_FW(mvm,
			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
			     order);
	}

	return 0;
}

static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(mvm, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(mvm, fw);
}

/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	int blk_idx;
	__le32 dev_phy_addr;
	struct iwl_fw_paging_cmd fw_paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};

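	/*
	 * The firmware expects each block address in units of firmware pages
	 * (hence the >> PAGE_2_EXP_SIZE below), and .block_size carries the
	 * block size as a power-of-two exponent (BLOCK_2_EXP_SIZE).
	 */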
	/* loop over all paging blocks + the CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dev_phy_addr =
			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
				    PAGE_2_EXP_SIZE);
		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}

/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}
static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));

	return false;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Configure and operate the fw paging mechanism.
	 * The driver configures the paging flow only once; the CPU2 paging
	 * image is included in the IWL_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/*
		 * When dma is not enabled, the driver needs to copy / write
		 * the downloaded / uploaded page to / from the smem.
		 * This gets the location of the place where the pages are
		 * stored.
		 */
		if (!is_device_dma_capable(mvm->trans->dev)) {
			ret = iwl_trans_get_paging_item(mvm);
			if (ret) {
				IWL_ERR(mvm, "failed to get FW paging item\n");
				return ret;
			}
		}

		ret = iwl_save_fw_paging(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to save the FW paging image\n");
			return ret;
		}

		ret = iwl_send_paging_cmd(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to send the paging cmd\n");
			iwl_free_fw_paging(mvm);
			return ret;
		}
	}

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init seq later when RF kill switches off
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

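	/*
	 * From here on the INIT image runs its calibrations; the results
	 * arrive as CALIB_RES_NOTIF_PHY_DB notifications and are stored in
	 * the phy_db by iwl_wait_phy_db_entry(), so they can later be
	 * replayed to the runtime image (see iwl_send_phy_db_data() in
	 * iwl_mvm_up()).
	 */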
	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = SHARED_MEM_CFG,
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_rx_packet *pkt;
	struct iwl_shared_mem_cfg *mem_cfg;
	u32 i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	mem_cfg = (void *)pkt->data;

	mvm->shared_mem_cfg.shared_mem_addr =
		le32_to_cpu(mem_cfg->shared_mem_addr);
	mvm->shared_mem_cfg.shared_mem_size =
		le32_to_cpu(mem_cfg->shared_mem_size);
	mvm->shared_mem_cfg.sample_buff_addr =
		le32_to_cpu(mem_cfg->sample_buff_addr);
	mvm->shared_mem_cfg.sample_buff_size =
		le32_to_cpu(mem_cfg->sample_buff_size);
	mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);
	mvm->shared_mem_cfg.page_buff_addr =
		le32_to_cpu(mem_cfg->page_buff_addr);
	mvm->shared_mem_cfg.page_buff_size =
		le32_to_cpu(mem_cfg->page_buff_size);
	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}

static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}

int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	/*
	 * If we haven't completed the run of the init ucode during
	 * module loading, load init ucode now
	 * (for example, if we were in RFKILL)
	 */
	ret = iwl_run_init_mvm_ucode(mvm, false);
	if (ret && !iwlmvm_mod_params.init_dbg) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		goto error;
	}
	if (!iwlmvm_mod_params.init_dbg) {
		/*
		 * Stop and start the transport without entering low power
		 * mode. This will save the state of other components on the
		 * device that are triggered by the INIT firmware (MFUART).
		 */
		_iwl_trans_stop_device(mvm->trans, false);
		ret = _iwl_trans_start_hw(mvm->trans, false);
		if (ret)
			goto error;
	}

	if (iwlmvm_mod_params.init_dbg)
		return 0;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* Init RSS configuration */
	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send empty temperature reporting
		 * cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */
	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

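	/*
	 * HW/SW report the hardware and software rfkill state as seen by the
	 * firmware; CT reports whether the CT-kill (critical temperature)
	 * threshold has been reached.
	 */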
1073 "Reached" : "Not reached"); 1074 } 1075 1076 void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, 1077 struct iwl_rx_cmd_buffer *rxb) 1078 { 1079 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1080 struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; 1081 1082 IWL_DEBUG_INFO(mvm, 1083 "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", 1084 le32_to_cpu(mfuart_notif->installed_ver), 1085 le32_to_cpu(mfuart_notif->external_ver), 1086 le32_to_cpu(mfuart_notif->status), 1087 le32_to_cpu(mfuart_notif->duration)); 1088 } 1089