1 /****************************************************************************** 2 * 3 * This file is provided under a dual BSD/GPLv2 license. When using or 4 * redistributing this file, you may do so under either license. 5 * 6 * GPL LICENSE SUMMARY 7 * 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of version 2 of the GNU General Public License as 14 * published by the Free Software Foundation. 15 * 16 * This program is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 24 * USA 25 * 26 * The full GNU General Public License is included in this distribution 27 * in the file called COPYING. 28 * 29 * Contact Information: 30 * Intel Linux Wireless <linuxwifi@intel.com> 31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 32 * 33 * BSD LICENSE 34 * 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 38 * All rights reserved. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 44 * * Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * * Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in 48 * the documentation and/or other materials provided with the 49 * distribution. 50 * * Neither the name Intel Corporation nor the names of its 51 * contributors may be used to endorse or promote products derived 52 * from this software without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 *****************************************************************************/ 67 #include <net/mac80211.h> 68 #include <linux/netdevice.h> 69 #include <linux/acpi.h> 70 71 #include "iwl-trans.h" 72 #include "iwl-op-mode.h" 73 #include "fw/img.h" 74 #include "iwl-debug.h" 75 #include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */ 76 #include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */ 77 #include "iwl-prph.h" 78 #include "iwl-eeprom-parse.h" 79 80 #include "mvm.h" 81 #include "fw/dbg.h" 82 #include "iwl-phy-db.h" 83 84 #define MVM_UCODE_ALIVE_TIMEOUT HZ 85 #define MVM_UCODE_CALIB_TIMEOUT (2*HZ) 86 87 #define UCODE_VALID_OK cpu_to_le32(0x1) 88 89 struct iwl_mvm_alive_data { 90 bool valid; 91 u32 scd_base_addr; 92 }; 93 94 static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) 95 { 96 struct iwl_tx_ant_cfg_cmd tx_ant_cmd = { 97 .valid = cpu_to_le32(valid_tx_ant), 98 }; 99 100 IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant); 101 return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0, 102 sizeof(tx_ant_cmd), &tx_ant_cmd); 103 } 104 105 static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) 106 { 107 int i; 108 struct iwl_rss_config_cmd cmd = { 109 .flags = cpu_to_le32(IWL_RSS_ENABLE), 110 .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | 111 IWL_RSS_HASH_TYPE_IPV4_UDP | 112 IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | 113 IWL_RSS_HASH_TYPE_IPV6_TCP | 114 IWL_RSS_HASH_TYPE_IPV6_UDP | 115 IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, 116 }; 117 118 if (mvm->trans->num_rx_queues == 1) 119 return 0; 120 121 /* Do not direct RSS traffic to Q 0 which is our fallback queue */ 122 for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) 123 cmd.indirection_table[i] = 124 1 + (i % (mvm->trans->num_rx_queues - 1)); 125 netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); 126 127 return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); 128 } 129 130 static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) 131 { 132 struct iwl_dqa_enable_cmd dqa_cmd = { 133 .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE), 134 }; 135 u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0); 136 int ret; 137 138 ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd); 139 if (ret) 140 IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret); 141 else 142 IWL_DEBUG_FW(mvm, "Working in DQA mode\n"); 143 144 return ret; 145 } 146 147 void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, 148 struct iwl_rx_cmd_buffer *rxb) 149 { 150 struct iwl_rx_packet *pkt = rxb_addr(rxb); 151 struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; 152 __le32 *dump_data = mfu_dump_notif->data; 153 int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); 154 int i; 155 156 if (mfu_dump_notif->index_num == 0) 157 IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", 158 le32_to_cpu(mfu_dump_notif->assert_id)); 159 160 for (i = 0; i < n_words; i++) 161 IWL_DEBUG_INFO(mvm, 162 "MFUART assert dump, dword %u: 0x%08x\n", 163 le16_to_cpu(mfu_dump_notif->index_num) * 164 n_words + i, 165 le32_to_cpu(dump_data[i])); 166 } 167 168 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, 169 struct iwl_rx_packet *pkt, void *data) 170 { 171 struct iwl_mvm *mvm = 172 container_of(notif_wait, struct iwl_mvm, notif_wait); 173 struct iwl_mvm_alive_data *alive_data = data; 174 struct mvm_alive_resp_v3 *palive3; 175 struct mvm_alive_resp *palive; 176 struct iwl_umac_alive *umac; 177 struct iwl_lmac_alive *lmac1; 178 struct iwl_lmac_alive *lmac2 = NULL; 179 u16 status; 180 181 if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { 182 palive = (void *)pkt->data; 183 umac = &palive->umac_data; 184 lmac1 = &palive->lmac_data[0]; 185 lmac2 = &palive->lmac_data[1]; 186 status = le16_to_cpu(palive->status); 187 } else { 188 palive3 = (void *)pkt->data; 189 umac = &palive3->umac_data; 190 lmac1 = &palive3->lmac_data; 191 status = le16_to_cpu(palive3->status); 192 } 193 194 mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr); 195 if (lmac2) 196 mvm->error_event_table[1] = 197 le32_to_cpu(lmac2->error_event_table_ptr); 198 mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr); 199 mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr); 200 mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size); 201 202 mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr); 203 204 alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr); 205 alive_data->valid = status == IWL_ALIVE_STATUS_OK; 206 if (mvm->umac_error_event_table) 207 mvm->support_umac_log = true; 208 209 IWL_DEBUG_FW(mvm, 210 "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", 211 status, lmac1->ver_type, lmac1->ver_subtype); 212 213 if (lmac2) 214 IWL_DEBUG_FW(mvm, "Alive ucode CDB\n"); 215 216 IWL_DEBUG_FW(mvm, 217 "UMAC version: Major - 0x%x, Minor - 0x%x\n", 218 le32_to_cpu(umac->umac_major), 219 le32_to_cpu(umac->umac_minor)); 220 221 return true; 222 } 223 224 static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait, 225 struct iwl_rx_packet *pkt, void *data) 226 { 227 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); 228 229 return true; 230 } 231 232 static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, 233 struct iwl_rx_packet *pkt, void *data) 234 { 235 struct iwl_phy_db *phy_db = data; 236 237 if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) { 238 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); 239 return true; 240 } 241 242 WARN_ON(iwl_phy_db_set_section(phy_db, pkt)); 243 244 return false; 245 } 246 247 static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, 248 enum iwl_ucode_type ucode_type) 249 { 250 struct iwl_notification_wait alive_wait; 251 struct iwl_mvm_alive_data alive_data; 252 const struct fw_img *fw; 253 int ret, i; 254 enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; 255 static const u16 alive_cmd[] = { MVM_ALIVE }; 256 struct iwl_sf_region st_fwrd_space; 257 258 if (ucode_type == IWL_UCODE_REGULAR && 259 iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && 260 !(fw_has_capa(&mvm->fw->ucode_capa, 261 IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED))) 262 fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER); 263 else 264 fw = iwl_get_ucode_image(mvm->fw, ucode_type); 265 if (WARN_ON(!fw)) 266 return -EINVAL; 267 iwl_fw_set_current_image(&mvm->fwrt, ucode_type); 268 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 269 270 iwl_init_notification_wait(&mvm->notif_wait, &alive_wait, 271 alive_cmd, ARRAY_SIZE(alive_cmd), 272 iwl_alive_fn, &alive_data); 273 274 ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT); 275 if (ret) { 276 iwl_fw_set_current_image(&mvm->fwrt, old_type); 277 iwl_remove_notification(&mvm->notif_wait, &alive_wait); 278 return ret; 279 } 280 281 /* 282 * Some things may run in the background now, but we 283 * just wait for the ALIVE notification here. 284 */ 285 ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, 286 MVM_UCODE_ALIVE_TIMEOUT); 287 if (ret) { 288 struct iwl_trans *trans = mvm->trans; 289 290 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) 291 IWL_ERR(mvm, 292 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", 293 iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS), 294 iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS)); 295 else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) 296 IWL_ERR(mvm, 297 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", 298 iwl_read_prph(trans, SB_CPU_1_STATUS), 299 iwl_read_prph(trans, SB_CPU_2_STATUS)); 300 iwl_fw_set_current_image(&mvm->fwrt, old_type); 301 return ret; 302 } 303 304 if (!alive_data.valid) { 305 IWL_ERR(mvm, "Loaded ucode is not valid!\n"); 306 iwl_fw_set_current_image(&mvm->fwrt, old_type); 307 return -EIO; 308 } 309 310 /* 311 * update the sdio allocation according to the pointer we get in the 312 * alive notification. 313 */ 314 st_fwrd_space.addr = mvm->sf_space.addr; 315 st_fwrd_space.size = mvm->sf_space.size; 316 ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space); 317 if (ret) { 318 IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret); 319 return ret; 320 } 321 322 iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); 323 324 /* 325 * Note: all the queues are enabled as part of the interface 326 * initialization, but in firmware restart scenarios they 327 * could be stopped, so wake them up. In firmware restart, 328 * mac80211 will have the queues stopped as well until the 329 * reconfiguration completes. During normal startup, they 330 * will be empty. 331 */ 332 333 memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); 334 mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; 335 336 for (i = 0; i < IEEE80211_MAX_QUEUES; i++) 337 atomic_set(&mvm->mac80211_queue_stop_count[i], 0); 338 339 set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 340 341 return 0; 342 } 343 344 static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) 345 { 346 struct iwl_notification_wait init_wait; 347 struct iwl_nvm_access_complete_cmd nvm_complete = {}; 348 struct iwl_init_extended_cfg_cmd init_cfg = { 349 .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)), 350 }; 351 static const u16 init_complete[] = { 352 INIT_COMPLETE_NOTIF, 353 }; 354 int ret; 355 356 lockdep_assert_held(&mvm->mutex); 357 358 iwl_init_notification_wait(&mvm->notif_wait, 359 &init_wait, 360 init_complete, 361 ARRAY_SIZE(init_complete), 362 iwl_wait_init_complete, 363 NULL); 364 365 /* Will also start the device */ 366 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); 367 if (ret) { 368 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); 369 goto error; 370 } 371 372 /* Send init config command to mark that we are sending NVM access 373 * commands 374 */ 375 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP, 376 INIT_EXTENDED_CFG_CMD), 0, 377 sizeof(init_cfg), &init_cfg); 378 if (ret) { 379 IWL_ERR(mvm, "Failed to run init config command: %d\n", 380 ret); 381 goto error; 382 } 383 384 /* Load NVM to NIC if needed */ 385 if (mvm->nvm_file_name) { 386 iwl_mvm_read_external_nvm(mvm); 387 iwl_mvm_load_nvm_to_nic(mvm); 388 } 389 390 if (IWL_MVM_PARSE_NVM && read_nvm) { 391 ret = iwl_nvm_init(mvm); 392 if (ret) { 393 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); 394 goto error; 395 } 396 } 397 398 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, 399 NVM_ACCESS_COMPLETE), 0, 400 sizeof(nvm_complete), &nvm_complete); 401 if (ret) { 402 IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", 403 ret); 404 goto error; 405 } 406 407 /* We wait for the INIT complete notification */ 408 ret = iwl_wait_notification(&mvm->notif_wait, &init_wait, 409 MVM_UCODE_ALIVE_TIMEOUT); 410 if (ret) 411 return ret; 412 413 /* Read the NVM only at driver load time, no need to do this twice */ 414 if (!IWL_MVM_PARSE_NVM && read_nvm) { 415 mvm->nvm_data = iwl_fw_get_nvm(&mvm->fwrt); 416 if (IS_ERR(mvm->nvm_data)) { 417 ret = PTR_ERR(mvm->nvm_data); 418 mvm->nvm_data = NULL; 419 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); 420 return ret; 421 } 422 } 423 424 return 0; 425 426 error: 427 iwl_remove_notification(&mvm->notif_wait, &init_wait); 428 return ret; 429 } 430 431 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) 432 { 433 struct iwl_phy_cfg_cmd phy_cfg_cmd; 434 enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; 435 436 /* Set parameters */ 437 phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); 438 phy_cfg_cmd.calib_control.event_trigger = 439 mvm->fw->default_calib[ucode_type].event_trigger; 440 phy_cfg_cmd.calib_control.flow_trigger = 441 mvm->fw->default_calib[ucode_type].flow_trigger; 442 443 IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", 444 phy_cfg_cmd.phy_cfg); 445 446 return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0, 447 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 448 } 449 450 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) 451 { 452 struct iwl_notification_wait calib_wait; 453 static const u16 init_complete[] = { 454 INIT_COMPLETE_NOTIF, 455 CALIB_RES_NOTIF_PHY_DB 456 }; 457 int ret; 458 459 if (iwl_mvm_has_unified_ucode(mvm)) 460 return iwl_run_unified_mvm_ucode(mvm, true); 461 462 lockdep_assert_held(&mvm->mutex); 463 464 if (WARN_ON_ONCE(mvm->calibrating)) 465 return 0; 466 467 iwl_init_notification_wait(&mvm->notif_wait, 468 &calib_wait, 469 init_complete, 470 ARRAY_SIZE(init_complete), 471 iwl_wait_phy_db_entry, 472 mvm->phy_db); 473 474 /* Will also start the device */ 475 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT); 476 if (ret) { 477 IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret); 478 goto remove_notif; 479 } 480 481 if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) { 482 ret = iwl_mvm_send_bt_init_conf(mvm); 483 if (ret) 484 goto remove_notif; 485 } 486 487 /* Read the NVM only at driver load time, no need to do this twice */ 488 if (read_nvm) { 489 ret = iwl_nvm_init(mvm); 490 if (ret) { 491 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); 492 goto remove_notif; 493 } 494 } 495 496 /* In case we read the NVM from external file, load it to the NIC */ 497 if (mvm->nvm_file_name) 498 iwl_mvm_load_nvm_to_nic(mvm); 499 500 WARN_ON(iwl_nvm_check_version(mvm->nvm_data, mvm->trans)); 501 502 /* 503 * abort after reading the nvm in case RF Kill is on, we will complete 504 * the init seq later when RF kill will switch to off 505 */ 506 if (iwl_mvm_is_radio_hw_killed(mvm)) { 507 IWL_DEBUG_RF_KILL(mvm, 508 "jump over all phy activities due to RF kill\n"); 509 goto remove_notif; 510 } 511 512 mvm->calibrating = true; 513 514 /* Send TX valid antennas before triggering calibrations */ 515 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); 516 if (ret) 517 goto remove_notif; 518 519 ret = iwl_send_phy_cfg_cmd(mvm); 520 if (ret) { 521 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", 522 ret); 523 goto remove_notif; 524 } 525 526 /* 527 * Some things may run in the background now, but we 528 * just wait for the calibration complete notification. 529 */ 530 ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, 531 MVM_UCODE_CALIB_TIMEOUT); 532 if (!ret) 533 goto out; 534 535 if (iwl_mvm_is_radio_hw_killed(mvm)) { 536 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); 537 ret = 0; 538 } else { 539 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", 540 ret); 541 } 542 543 goto out; 544 545 remove_notif: 546 iwl_remove_notification(&mvm->notif_wait, &calib_wait); 547 out: 548 mvm->calibrating = false; 549 if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { 550 /* we want to debug INIT and we have no NVM - fake */ 551 mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + 552 sizeof(struct ieee80211_channel) + 553 sizeof(struct ieee80211_rate), 554 GFP_KERNEL); 555 if (!mvm->nvm_data) 556 return -ENOMEM; 557 mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels; 558 mvm->nvm_data->bands[0].n_channels = 1; 559 mvm->nvm_data->bands[0].n_bitrates = 1; 560 mvm->nvm_data->bands[0].bitrates = 561 (void *)mvm->nvm_data->channels + 1; 562 mvm->nvm_data->bands[0].bitrates->hw_value = 10; 563 } 564 565 return ret; 566 } 567 568 static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) 569 { 570 struct iwl_ltr_config_cmd cmd = { 571 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE), 572 }; 573 574 if (!mvm->trans->ltr_enabled) 575 return 0; 576 577 return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, 578 sizeof(cmd), &cmd); 579 } 580 581 #ifdef CONFIG_ACPI 582 #define ACPI_WRDS_METHOD "WRDS" 583 #define ACPI_EWRD_METHOD "EWRD" 584 #define ACPI_WGDS_METHOD "WGDS" 585 #define ACPI_WIFI_DOMAIN (0x07) 586 #define ACPI_WRDS_WIFI_DATA_SIZE (IWL_MVM_SAR_TABLE_SIZE + 2) 587 #define ACPI_EWRD_WIFI_DATA_SIZE ((IWL_MVM_SAR_PROFILE_NUM - 1) * \ 588 IWL_MVM_SAR_TABLE_SIZE + 3) 589 #define ACPI_WGDS_WIFI_DATA_SIZE 18 590 #define ACPI_WGDS_NUM_BANDS 2 591 #define ACPI_WGDS_TABLE_SIZE 3 592 593 static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm, 594 union acpi_object *table, 595 struct iwl_mvm_sar_profile *profile, 596 bool enabled) 597 { 598 int i; 599 600 profile->enabled = enabled; 601 602 for (i = 0; i < IWL_MVM_SAR_TABLE_SIZE; i++) { 603 if ((table[i].type != ACPI_TYPE_INTEGER) || 604 (table[i].integer.value > U8_MAX)) 605 return -EINVAL; 606 607 profile->table[i] = table[i].integer.value; 608 } 609 610 return 0; 611 } 612 613 static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm, 614 union acpi_object *data, 615 int data_size) 616 { 617 union acpi_object *wifi_pkg = NULL; 618 int i; 619 620 /* 621 * We need at least two packages, one for the revision and one 622 * for the data itself. Also check that the revision is valid 623 * (i.e. it is an integer set to 0). 624 */ 625 if (data->type != ACPI_TYPE_PACKAGE || 626 data->package.count < 2 || 627 data->package.elements[0].type != ACPI_TYPE_INTEGER || 628 data->package.elements[0].integer.value != 0) { 629 IWL_DEBUG_RADIO(mvm, "Unsupported packages structure\n"); 630 return ERR_PTR(-EINVAL); 631 } 632 633 /* loop through all the packages to find the one for WiFi */ 634 for (i = 1; i < data->package.count; i++) { 635 union acpi_object *domain; 636 637 wifi_pkg = &data->package.elements[i]; 638 639 /* Skip anything that is not a package with the right 640 * amount of elements (i.e. domain_type, 641 * enabled/disabled plus the actual data size. 642 */ 643 if (wifi_pkg->type != ACPI_TYPE_PACKAGE || 644 wifi_pkg->package.count != data_size) 645 continue; 646 647 domain = &wifi_pkg->package.elements[0]; 648 if (domain->type == ACPI_TYPE_INTEGER && 649 domain->integer.value == ACPI_WIFI_DOMAIN) 650 break; 651 652 wifi_pkg = NULL; 653 } 654 655 if (!wifi_pkg) 656 return ERR_PTR(-ENOENT); 657 658 return wifi_pkg; 659 } 660 661 static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm) 662 { 663 union acpi_object *wifi_pkg, *table; 664 acpi_handle root_handle; 665 acpi_handle handle; 666 struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL}; 667 acpi_status status; 668 bool enabled; 669 int ret; 670 671 root_handle = ACPI_HANDLE(mvm->dev); 672 if (!root_handle) { 673 IWL_DEBUG_RADIO(mvm, 674 "Could not retrieve root port ACPI handle\n"); 675 return -ENOENT; 676 } 677 678 /* Get the method's handle */ 679 status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD, 680 &handle); 681 if (ACPI_FAILURE(status)) { 682 IWL_DEBUG_RADIO(mvm, "WRDS method not found\n"); 683 return -ENOENT; 684 } 685 686 /* Call WRDS with no arguments */ 687 status = acpi_evaluate_object(handle, NULL, NULL, &wrds); 688 if (ACPI_FAILURE(status)) { 689 IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status); 690 return -ENOENT; 691 } 692 693 wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wrds.pointer, 694 ACPI_WRDS_WIFI_DATA_SIZE); 695 if (IS_ERR(wifi_pkg)) { 696 ret = PTR_ERR(wifi_pkg); 697 goto out_free; 698 } 699 700 if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { 701 ret = -EINVAL; 702 goto out_free; 703 } 704 705 enabled = !!(wifi_pkg->package.elements[1].integer.value); 706 707 /* position of the actual table */ 708 table = &wifi_pkg->package.elements[2]; 709 710 /* The profile from WRDS is officially profile 1, but goes 711 * into sar_profiles[0] (because we don't have a profile 0). 712 */ 713 ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0], 714 enabled); 715 716 out_free: 717 kfree(wrds.pointer); 718 return ret; 719 } 720 721 static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) 722 { 723 union acpi_object *wifi_pkg; 724 acpi_handle root_handle; 725 acpi_handle handle; 726 struct acpi_buffer ewrd = {ACPI_ALLOCATE_BUFFER, NULL}; 727 acpi_status status; 728 bool enabled; 729 int i, n_profiles, ret; 730 731 root_handle = ACPI_HANDLE(mvm->dev); 732 if (!root_handle) { 733 IWL_DEBUG_RADIO(mvm, 734 "Could not retrieve root port ACPI handle\n"); 735 return -ENOENT; 736 } 737 738 /* Get the method's handle */ 739 status = acpi_get_handle(root_handle, (acpi_string)ACPI_EWRD_METHOD, 740 &handle); 741 if (ACPI_FAILURE(status)) { 742 IWL_DEBUG_RADIO(mvm, "EWRD method not found\n"); 743 return -ENOENT; 744 } 745 746 /* Call EWRD with no arguments */ 747 status = acpi_evaluate_object(handle, NULL, NULL, &ewrd); 748 if (ACPI_FAILURE(status)) { 749 IWL_DEBUG_RADIO(mvm, "EWRD invocation failed (0x%x)\n", status); 750 return -ENOENT; 751 } 752 753 wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, ewrd.pointer, 754 ACPI_EWRD_WIFI_DATA_SIZE); 755 if (IS_ERR(wifi_pkg)) { 756 ret = PTR_ERR(wifi_pkg); 757 goto out_free; 758 } 759 760 if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) || 761 (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) { 762 ret = -EINVAL; 763 goto out_free; 764 } 765 766 enabled = !!(wifi_pkg->package.elements[1].integer.value); 767 n_profiles = wifi_pkg->package.elements[2].integer.value; 768 769 /* in case of BIOS bug */ 770 if (n_profiles <= 0) { 771 ret = -EINVAL; 772 goto out_free; 773 } 774 775 for (i = 0; i < n_profiles; i++) { 776 /* the tables start at element 3 */ 777 static int pos = 3; 778 779 /* The EWRD profiles officially go from 2 to 4, but we 780 * save them in sar_profiles[1-3] (because we don't 781 * have profile 0). So in the array we start from 1. 782 */ 783 ret = iwl_mvm_sar_set_profile(mvm, 784 &wifi_pkg->package.elements[pos], 785 &mvm->sar_profiles[i + 1], 786 enabled); 787 if (ret < 0) 788 break; 789 790 /* go to the next table */ 791 pos += IWL_MVM_SAR_TABLE_SIZE; 792 } 793 794 out_free: 795 kfree(ewrd.pointer); 796 return ret; 797 } 798 799 static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) 800 { 801 union acpi_object *wifi_pkg; 802 acpi_handle root_handle; 803 acpi_handle handle; 804 struct acpi_buffer wgds = {ACPI_ALLOCATE_BUFFER, NULL}; 805 acpi_status status; 806 int i, j, ret; 807 int idx = 1; 808 809 root_handle = ACPI_HANDLE(mvm->dev); 810 if (!root_handle) { 811 IWL_DEBUG_RADIO(mvm, 812 "Could not retrieve root port ACPI handle\n"); 813 return -ENOENT; 814 } 815 816 /* Get the method's handle */ 817 status = acpi_get_handle(root_handle, (acpi_string)ACPI_WGDS_METHOD, 818 &handle); 819 if (ACPI_FAILURE(status)) { 820 IWL_DEBUG_RADIO(mvm, "WGDS method not found\n"); 821 return -ENOENT; 822 } 823 824 /* Call WGDS with no arguments */ 825 status = acpi_evaluate_object(handle, NULL, NULL, &wgds); 826 if (ACPI_FAILURE(status)) { 827 IWL_DEBUG_RADIO(mvm, "WGDS invocation failed (0x%x)\n", status); 828 return -ENOENT; 829 } 830 831 wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wgds.pointer, 832 ACPI_WGDS_WIFI_DATA_SIZE); 833 if (IS_ERR(wifi_pkg)) { 834 ret = PTR_ERR(wifi_pkg); 835 goto out_free; 836 } 837 838 for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) { 839 for (j = 0; j < IWL_MVM_GEO_TABLE_SIZE; j++) { 840 union acpi_object *entry; 841 842 entry = &wifi_pkg->package.elements[idx++]; 843 if ((entry->type != ACPI_TYPE_INTEGER) || 844 (entry->integer.value > U8_MAX)) { 845 ret = -EINVAL; 846 goto out_free; 847 } 848 849 mvm->geo_profiles[i].values[j] = entry->integer.value; 850 } 851 } 852 ret = 0; 853 out_free: 854 kfree(wgds.pointer); 855 return ret; 856 } 857 858 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) 859 { 860 struct iwl_dev_tx_power_cmd cmd = { 861 .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), 862 }; 863 int i, j, idx; 864 int profs[IWL_NUM_CHAIN_LIMITS] = { prof_a, prof_b }; 865 int len = sizeof(cmd); 866 867 BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS < 2); 868 BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS != 869 IWL_MVM_SAR_TABLE_SIZE); 870 871 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) 872 len = sizeof(cmd.v3); 873 874 for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { 875 struct iwl_mvm_sar_profile *prof; 876 877 /* don't allow SAR to be disabled (profile 0 means disable) */ 878 if (profs[i] == 0) 879 return -EPERM; 880 881 /* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */ 882 if (profs[i] > IWL_MVM_SAR_PROFILE_NUM) 883 return -EINVAL; 884 885 /* profiles go from 1 to 4, so decrement to access the array */ 886 prof = &mvm->sar_profiles[profs[i] - 1]; 887 888 /* if the profile is disabled, do nothing */ 889 if (!prof->enabled) { 890 IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n", 891 profs[i]); 892 /* if one of the profiles is disabled, we fail all */ 893 return -ENOENT; 894 } 895 896 IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i); 897 for (j = 0; j < IWL_NUM_SUB_BANDS; j++) { 898 idx = (i * IWL_NUM_SUB_BANDS) + j; 899 cmd.v3.per_chain_restriction[i][j] = 900 cpu_to_le16(prof->table[idx]); 901 IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n", 902 j, prof->table[idx]); 903 } 904 } 905 906 IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); 907 908 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); 909 } 910 911 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) 912 { 913 struct iwl_geo_tx_power_profiles_resp *resp; 914 int ret; 915 916 struct iwl_geo_tx_power_profiles_cmd geo_cmd = { 917 .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE), 918 }; 919 struct iwl_host_cmd cmd = { 920 .id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT), 921 .len = { sizeof(geo_cmd), }, 922 .flags = CMD_WANT_SKB, 923 .data = { &geo_cmd }, 924 }; 925 926 ret = iwl_mvm_send_cmd(mvm, &cmd); 927 if (ret) { 928 IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret); 929 return ret; 930 } 931 932 resp = (void *)cmd.resp_pkt->data; 933 ret = le32_to_cpu(resp->profile_idx); 934 if (WARN_ON(ret > IWL_NUM_GEO_PROFILES)) { 935 ret = -EIO; 936 IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret); 937 } 938 939 iwl_free_resp(&cmd); 940 return ret; 941 } 942 943 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) 944 { 945 struct iwl_geo_tx_power_profiles_cmd cmd = { 946 .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES), 947 }; 948 int ret, i, j; 949 u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); 950 951 ret = iwl_mvm_sar_get_wgds_table(mvm); 952 if (ret < 0) { 953 IWL_DEBUG_RADIO(mvm, 954 "Geo SAR BIOS table invalid or unavailable. (%d)\n", 955 ret); 956 /* we don't fail if the table is not available */ 957 return 0; 958 } 959 960 IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); 961 962 BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * 963 ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE); 964 965 for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) { 966 struct iwl_per_chain_offset *chain = 967 (struct iwl_per_chain_offset *)&cmd.table[i]; 968 969 for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) { 970 u8 *value; 971 972 value = &mvm->geo_profiles[i].values[j * 973 IWL_GEO_PER_CHAIN_SIZE]; 974 chain[j].max_tx_power = cpu_to_le16(value[0]); 975 chain[j].chain_a = value[1]; 976 chain[j].chain_b = value[2]; 977 IWL_DEBUG_RADIO(mvm, 978 "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n", 979 i, j, value[1], value[2], value[0]); 980 } 981 } 982 return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd); 983 } 984 985 #else /* CONFIG_ACPI */ 986 static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm) 987 { 988 return -ENOENT; 989 } 990 991 static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) 992 { 993 return -ENOENT; 994 } 995 996 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) 997 { 998 return 0; 999 } 1000 1001 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, 1002 int prof_b) 1003 { 1004 return -ENOENT; 1005 } 1006 1007 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) 1008 { 1009 return -ENOENT; 1010 } 1011 #endif /* CONFIG_ACPI */ 1012 1013 static int iwl_mvm_sar_init(struct iwl_mvm *mvm) 1014 { 1015 int ret; 1016 1017 ret = iwl_mvm_sar_get_wrds_table(mvm); 1018 if (ret < 0) { 1019 IWL_DEBUG_RADIO(mvm, 1020 "WRDS SAR BIOS table invalid or unavailable. (%d)\n", 1021 ret); 1022 /* if not available, don't fail and don't bother with EWRD */ 1023 return 0; 1024 } 1025 1026 ret = iwl_mvm_sar_get_ewrd_table(mvm); 1027 /* if EWRD is not available, we can still use WRDS, so don't fail */ 1028 if (ret < 0) 1029 IWL_DEBUG_RADIO(mvm, 1030 "EWRD SAR BIOS table invalid or unavailable. (%d)\n", 1031 ret); 1032 1033 /* choose profile 1 (WRDS) as default for both chains */ 1034 ret = iwl_mvm_sar_select_profile(mvm, 1, 1); 1035 1036 /* if we don't have profile 0 from BIOS, just skip it */ 1037 if (ret == -ENOENT) 1038 return 0; 1039 1040 return ret; 1041 } 1042 1043 static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) 1044 { 1045 int ret; 1046 1047 if (iwl_mvm_has_unified_ucode(mvm)) 1048 return iwl_run_unified_mvm_ucode(mvm, false); 1049 1050 ret = iwl_run_init_mvm_ucode(mvm, false); 1051 1052 if (iwlmvm_mod_params.init_dbg) 1053 return 0; 1054 1055 if (ret) { 1056 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); 1057 return ret; 1058 } 1059 1060 /* 1061 * Stop and start the transport without entering low power 1062 * mode. This will save the state of other components on the 1063 * device that are triggered by the INIT firwmare (MFUART). 1064 */ 1065 _iwl_trans_stop_device(mvm->trans, false); 1066 ret = _iwl_trans_start_hw(mvm->trans, false); 1067 if (ret) 1068 return ret; 1069 1070 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); 1071 if (ret) 1072 return ret; 1073 1074 return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img); 1075 } 1076 1077 int iwl_mvm_up(struct iwl_mvm *mvm) 1078 { 1079 int ret, i; 1080 struct ieee80211_channel *chan; 1081 struct cfg80211_chan_def chandef; 1082 1083 lockdep_assert_held(&mvm->mutex); 1084 1085 ret = iwl_trans_start_hw(mvm->trans); 1086 if (ret) 1087 return ret; 1088 1089 ret = iwl_mvm_load_rt_fw(mvm); 1090 if (ret) { 1091 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); 1092 goto error; 1093 } 1094 1095 iwl_get_shared_mem_conf(&mvm->fwrt); 1096 1097 ret = iwl_mvm_sf_update(mvm, NULL, false); 1098 if (ret) 1099 IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); 1100 1101 mvm->fwrt.dump.conf = FW_DBG_INVALID; 1102 /* if we have a destination, assume EARLY START */ 1103 if (mvm->fw->dbg_dest_tlv) 1104 mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; 1105 iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); 1106 1107 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); 1108 if (ret) 1109 goto error; 1110 1111 if (!iwl_mvm_has_unified_ucode(mvm)) { 1112 /* Send phy db control command and then phy db calibration */ 1113 ret = iwl_send_phy_db_data(mvm->phy_db); 1114 if (ret) 1115 goto error; 1116 1117 ret = iwl_send_phy_cfg_cmd(mvm); 1118 if (ret) 1119 goto error; 1120 } 1121 1122 ret = iwl_mvm_send_bt_init_conf(mvm); 1123 if (ret) 1124 goto error; 1125 1126 /* Init RSS configuration */ 1127 /* TODO - remove a000 disablement when we have RXQ config API */ 1128 if (iwl_mvm_has_new_rx_api(mvm) && 1129 mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) { 1130 ret = iwl_send_rss_cfg_cmd(mvm); 1131 if (ret) { 1132 IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", 1133 ret); 1134 goto error; 1135 } 1136 } 1137 1138 /* init the fw <-> mac80211 STA mapping */ 1139 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) 1140 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); 1141 1142 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; 1143 1144 /* reset quota debouncing buffer - 0xff will yield invalid data */ 1145 memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); 1146 1147 ret = iwl_mvm_send_dqa_cmd(mvm); 1148 if (ret) 1149 goto error; 1150 1151 /* Add auxiliary station for scanning */ 1152 ret = iwl_mvm_add_aux_sta(mvm); 1153 if (ret) 1154 goto error; 1155 1156 /* Add all the PHY contexts */ 1157 chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0]; 1158 cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); 1159 for (i = 0; i < NUM_PHY_CTX; i++) { 1160 /* 1161 * The channel used here isn't relevant as it's 1162 * going to be overwritten in the other flows. 1163 * For now use the first channel we have. 1164 */ 1165 ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i], 1166 &chandef, 1, 1); 1167 if (ret) 1168 goto error; 1169 } 1170 1171 #ifdef CONFIG_THERMAL 1172 if (iwl_mvm_is_tt_in_fw(mvm)) { 1173 /* in order to give the responsibility of ct-kill and 1174 * TX backoff to FW we need to send empty temperature reporting 1175 * cmd during init time 1176 */ 1177 iwl_mvm_send_temp_report_ths_cmd(mvm); 1178 } else { 1179 /* Initialize tx backoffs to the minimal possible */ 1180 iwl_mvm_tt_tx_backoff(mvm, 0); 1181 } 1182 1183 /* TODO: read the budget from BIOS / Platform NVM */ 1184 1185 /* 1186 * In case there is no budget from BIOS / Platform NVM the default 1187 * budget should be 2000mW (cooling state 0). 1188 */ 1189 if (iwl_mvm_is_ctdp_supported(mvm)) { 1190 ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, 1191 mvm->cooling_dev.cur_state); 1192 if (ret) 1193 goto error; 1194 } 1195 #else 1196 /* Initialize tx backoffs to the minimal possible */ 1197 iwl_mvm_tt_tx_backoff(mvm, 0); 1198 #endif 1199 1200 WARN_ON(iwl_mvm_config_ltr(mvm)); 1201 1202 ret = iwl_mvm_power_update_device(mvm); 1203 if (ret) 1204 goto error; 1205 1206 /* 1207 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx 1208 * anyway, so don't init MCC. 1209 */ 1210 if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) { 1211 ret = iwl_mvm_init_mcc(mvm); 1212 if (ret) 1213 goto error; 1214 } 1215 1216 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { 1217 mvm->scan_type = IWL_SCAN_TYPE_NOT_SET; 1218 ret = iwl_mvm_config_scan(mvm); 1219 if (ret) 1220 goto error; 1221 } 1222 1223 /* allow FW/transport low power modes if not during restart */ 1224 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 1225 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); 1226 1227 ret = iwl_mvm_sar_init(mvm); 1228 if (ret) 1229 goto error; 1230 1231 ret = iwl_mvm_sar_geo_init(mvm); 1232 if (ret) 1233 goto error; 1234 1235 iwl_mvm_leds_sync(mvm); 1236 1237 IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); 1238 return 0; 1239 error: 1240 if (!iwlmvm_mod_params.init_dbg) 1241 iwl_mvm_stop_device(mvm); 1242 return ret; 1243 } 1244 1245 int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) 1246 { 1247 int ret, i; 1248 1249 lockdep_assert_held(&mvm->mutex); 1250 1251 ret = iwl_trans_start_hw(mvm->trans); 1252 if (ret) 1253 return ret; 1254 1255 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN); 1256 if (ret) { 1257 IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret); 1258 goto error; 1259 } 1260 1261 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); 1262 if (ret) 1263 goto error; 1264 1265 /* Send phy db control command and then phy db calibration*/ 1266 ret = iwl_send_phy_db_data(mvm->phy_db); 1267 if (ret) 1268 goto error; 1269 1270 ret = iwl_send_phy_cfg_cmd(mvm); 1271 if (ret) 1272 goto error; 1273 1274 /* init the fw <-> mac80211 STA mapping */ 1275 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) 1276 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); 1277 1278 /* Add auxiliary station for scanning */ 1279 ret = iwl_mvm_add_aux_sta(mvm); 1280 if (ret) 1281 goto error; 1282 1283 return 0; 1284 error: 1285 iwl_mvm_stop_device(mvm); 1286 return ret; 1287 } 1288 1289 void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, 1290 struct iwl_rx_cmd_buffer *rxb) 1291 { 1292 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1293 struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; 1294 u32 flags = le32_to_cpu(card_state_notif->flags); 1295 1296 IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n", 1297 (flags & HW_CARD_DISABLED) ? "Kill" : "On", 1298 (flags & SW_CARD_DISABLED) ? "Kill" : "On", 1299 (flags & CT_KILL_CARD_DISABLED) ? 1300 "Reached" : "Not reached"); 1301 } 1302 1303 void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, 1304 struct iwl_rx_cmd_buffer *rxb) 1305 { 1306 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1307 struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; 1308 1309 IWL_DEBUG_INFO(mvm, 1310 "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", 1311 le32_to_cpu(mfuart_notif->installed_ver), 1312 le32_to_cpu(mfuart_notif->external_ver), 1313 le32_to_cpu(mfuart_notif->status), 1314 le32_to_cpu(mfuart_notif->duration)); 1315 1316 if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif)) 1317 IWL_DEBUG_INFO(mvm, 1318 "MFUART: image size: 0x%08x\n", 1319 le32_to_cpu(mfuart_notif->image_size)); 1320 } 1321