/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "fw-dbg.h"
#include "mvm.h"
#include "fw-api-rs.h"

/*
 * Will return 0 even if the cmd failed when RFKILL is asserted unless
 * CMD_WANT_SKB is set in cmd->flags.
 */
int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
	int ret;

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Synchronous commands from this op-mode must hold
	 * the mutex; this ensures we don't try to send two
	 * (or more) synchronous commands at a time.
	 */
	if (!(cmd->flags & CMD_ASYNC))
		lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_send_cmd(mvm->trans, cmd);

	/*
	 * If the caller wants the SKB, then don't hide any problems; the
	 * caller might access the response buffer which will be NULL if
	 * the command failed.
	 */
	if (cmd->flags & CMD_WANT_SKB)
		return ret;

	/* Silently ignore failures if RFKILL is asserted */
	if (!ret || ret == -ERFKILL)
		return 0;
	return ret;
}

int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
			 u32 flags, u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_mvm_send_cmd(mvm, &cmd);
}
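
/*
 * Illustrative usage sketch (not part of the driver): a typical caller
 * builds a host command and sends it through the helpers above.  The
 * command id and payload below are placeholders, not real firmware API
 * definitions.
 *
 *	struct iwl_example_cmd payload = { ... };	// hypothetical payload
 *	int ret;
 *
 *	// Synchronous send: the mvm mutex must be held (no CMD_ASYNC).
 *	ret = iwl_mvm_send_cmd_pdu(mvm, EXAMPLE_CMD_ID, 0,
 *				   sizeof(payload), &payload);
 *
 *	// Unless CMD_WANT_SKB is set, a failure caused by RFKILL is
 *	// reported as success (0), as noted above iwl_mvm_send_cmd().
 */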
/*
 * We assume that the caller set the status to the success value
 */
int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
			    u32 *status)
{
	struct iwl_rx_packet *pkt;
	struct iwl_cmd_response *resp;
	int ret, resp_len;

	lockdep_assert_held(&mvm->mutex);

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Only synchronous commands can wait for status,
	 * we use WANT_SKB so the caller can't.
	 */
	if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
		      "cmd flags %x", cmd->flags))
		return -EINVAL;

	cmd->flags |= CMD_WANT_SKB;

	ret = iwl_trans_send_cmd(mvm->trans, cmd);
	if (ret == -ERFKILL) {
		/*
		 * The command failed because of RFKILL, don't update
		 * the status, leave it as success and return 0.
		 */
		return 0;
	} else if (ret) {
		return ret;
	}

	pkt = cmd->resp_pkt;
	/* Can happen if RFKILL is asserted */
	if (!pkt) {
		ret = 0;
		goto out_free_resp;
	}

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		ret = -EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32_to_cpu(resp->status);
 out_free_resp:
	iwl_free_resp(cmd);
	return ret;
}

/*
 * We assume that the caller set the status to the success value
 */
int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
				const void *data, u32 *status)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
	};

	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
}

#define IWL_DECLARE_RATE_INFO(r) \
	[IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP

/*
 * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
 */
static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1),
	IWL_DECLARE_RATE_INFO(2),
	IWL_DECLARE_RATE_INFO(5),
	IWL_DECLARE_RATE_INFO(11),
	IWL_DECLARE_RATE_INFO(6),
	IWL_DECLARE_RATE_INFO(9),
	IWL_DECLARE_RATE_INFO(12),
	IWL_DECLARE_RATE_INFO(18),
	IWL_DECLARE_RATE_INFO(24),
	IWL_DECLARE_RATE_INFO(36),
	IWL_DECLARE_RATE_INFO(48),
	IWL_DECLARE_RATE_INFO(54),
};

int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
					enum ieee80211_band band)
{
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
	int idx;
	int band_offset = 0;

	/* Legacy rate format, search for match in table */
	if (band == IEEE80211_BAND_5GHZ)
		band_offset = IWL_FIRST_OFDM_RATE;
	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
		if (fw_rate_idx_to_plcp[idx] == rate)
			return idx - band_offset;

	return -1;
}

u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
{
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	return fw_rate_idx_to_plcp[rate_idx];
}

void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_error_resp *err_resp = (void *)pkt->data;

	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
		le16_to_cpu(err_resp->bad_cmd_seq_num),
		le32_to_cpu(err_resp->error_service));
	IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
		le64_to_cpu(err_resp->timestamp));
}

/*
 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
 * The parameter should also be a combination of ANT_[ABC].
 */
u8 first_antenna(u8 mask)
{
	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
		return BIT(0);
	return BIT(ffs(mask) - 1);
}
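
/*
 * Worked example (illustrative only, assuming ANT_B == BIT(1) and
 * ANT_C == BIT(2)): first_antenna(ANT_A | ANT_C) -> ffs(0b101) == 1 ->
 * BIT(0) == ANT_A, while first_antenna(ANT_B | ANT_C) returns ANT_B.
 */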
/*
 * Toggles between TX antennas to send the probe request on.
 * Receives the bitmask of valid TX antennas and the *index* used
 * for the last TX, and returns the next valid *index* to use.
 * In order to set it in the tx_cmd, must do BIT(idx).
 */
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
{
	u8 ind = last_idx;
	int i;

	for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % RATE_MCS_ANT_NUM;
		if (valid & BIT(ind))
			return ind;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
	return last_idx;
}

static const struct {
	const char *name;
	u8 num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};

static const char *desc_lookup(u32 num)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
		if (advanced_lookup[i].num == num)
			return advanced_lookup[i].name;

	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
	return advanced_lookup[i].name;
}
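
/*
 * Illustrative lookups (not part of the driver): desc_lookup(0x38)
 * returns "BAD_COMMAND", while an id with no entry in the table, e.g.
 * 0x99, falls through to the final "ADVANCED_SYSASSERT" entry.
 */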
/*
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_error_event_table_v1 {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 pc;			/* program counter */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 gp3;		/* GP3 timer register */
	u32 ucode_ver;		/* uCode version */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* date and time of the compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;

struct iwl_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 pc;			/* program counter */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 gp3;		/* GP3 timer register */
	u32 major;		/* uCode version major */
	u32 minor;		/* uCode version minor */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* date and time of the compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */;

/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_umac_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 umac_major;
	u32 umac_minor;
	u32 frame_pointer;	/* core register 27 */
	u32 stack_pointer;	/* core register 28 */
	u32 cmd_header;		/* latest host cmd sent to UMAC */
	u32 nic_isr_pref;	/* ISR status register */
} __packed;

#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_umac_error_event_table table;
	u32 base;

	base = mvm->umac_error_event_table;

	if (base < 0x800000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->cur_ucode == IWL_UCODE_INIT)
				? "Init" : "RT");
		return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
	IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
	IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
	IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
	IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}

static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_error_event_table_v1 table;
	u32 base;

	base = mvm->error_event_table;
	if (mvm->cur_ucode == IWL_UCODE_INIT) {
		if (!base)
			base = mvm->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = mvm->fw->inst_errlog_ptr;
	}

	if (base < 0x800000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->cur_ucode == IWL_UCODE_INIT)
				? "Init" : "RT");
		return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	/* Do not change this output - scripts rely on it */

	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.data3,
				      table.blink1, table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.gp3, table.ucode_ver, 0,
				      table.hw_ver, table.brd_ver);
	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
	IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);

	if (mvm->support_umac_log)
		iwl_mvm_dump_umac_error_log(mvm);
}

void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_error_event_table table;
	u32 base;

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
		iwl_mvm_dump_nic_error_log_old(mvm);
		return;
	}

	base = mvm->error_event_table;
	if (mvm->cur_ucode == IWL_UCODE_INIT) {
		if (!base)
			base = mvm->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = mvm->fw->inst_errlog_ptr;
	}

	if (base < 0x800000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->cur_ucode == IWL_UCODE_INIT)
				? "Init" : "RT");
		return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	/* Do not change this output - scripts rely on it */

	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.data3,
				      table.blink1, table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.gp3, table.major,
				      table.minor, table.hw_ver, table.brd_ver);
	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
	IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
	IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);

	if (mvm->support_umac_log)
		iwl_mvm_dump_umac_error_log(mvm);
}

int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
		    !mvm->queue_info[i].setup_reserved)
			return i;

	return -ENOSPC;
}

void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
			unsigned int wdg_timeout)
{
	bool enable_queue = true;

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n",
			cfg->tid);
		return;
	}

	/* Update mappings and refcounts */
	mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
	mvm->queue_info[queue].hw_queue_refcount++;
	if (mvm->queue_info[queue].hw_queue_refcount > 1)
		enable_queue = false;
	mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
			    queue, mvm->queue_info[queue].hw_queue_refcount,
			    mvm->queue_info[queue].hw_queue_to_mac80211);

	spin_unlock_bh(&mvm->queue_info_lock);

	/* Send the enabling command if we need to */
	if (enable_queue) {
		struct iwl_scd_txq_cfg_cmd cmd = {
			.scd_queue = queue,
			.enable = 1,
			.window = cfg->frame_limit,
			.sta_id = cfg->sta_id,
			.ssn = cpu_to_le16(ssn),
			.tx_fifo = cfg->fifo,
			.aggregate = cfg->aggregate,
			.tid = cfg->tid,
		};

		iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
					 wdg_timeout);
		WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
					  &cmd),
		     "Failed to configure queue %d on FIFO %d\n", queue,
		     cfg->fifo);
	}
}

void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
			 u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.enable = 0,
	};
	bool remove_mac_queue = true;
	int ret;

	spin_lock_bh(&mvm->queue_info_lock);

	if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return;
	}

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	/*
	 * If there is another TID with the same AC - don't remove the MAC queue
	 * from the mapping
	 */
	if (tid < IWL_MAX_TID_COUNT) {
		unsigned long tid_bitmap =
			mvm->queue_info[queue].tid_bitmap;
		int ac = tid_to_mac80211_ac[tid];
		int i;

		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
			if (tid_to_mac80211_ac[i] == ac)
				remove_mac_queue = false;
		}
	}

	if (remove_mac_queue)
		mvm->queue_info[queue].hw_queue_to_mac80211 &=
			~BIT(mac80211_queue);
	mvm->queue_info[queue].hw_queue_refcount--;

	cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
			    queue,
			    mvm->queue_info[queue].hw_queue_refcount,
			    mvm->queue_info[queue].hw_queue_to_mac80211);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.enable) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return;
	}

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].hw_queue_refcount ||
	     mvm->queue_info[queue].tid_bitmap ||
	     mvm->queue_info[queue].hw_queue_to_mac80211,
	     "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
	     queue, mvm->queue_info[queue].hw_queue_refcount,
	     mvm->queue_info[queue].hw_queue_to_mac80211,
	     mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].hw_queue_refcount = 0;
	mvm->queue_info[queue].tid_bitmap = 0;
	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;

	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
}

/**
 * iwl_mvm_send_lq_cmd() - Send link quality command
 * @init: This command is sent as part of station initialization right
 *	after station has been added.
 *
 * The link quality command is sent as the last step of station creation.
 * This is the special case in which init is set; a callback is then used
 * to clear the state indicating that station creation is in progress.
 */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
{
	struct iwl_host_cmd cmd = {
		.id = LQ_CMD,
		.len = { sizeof(struct iwl_lq_cmd), },
		.flags = init ? 0 : CMD_ASYNC,
		.data = { lq, },
	};

	if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
		return -EINVAL;

	return iwl_mvm_send_cmd(mvm, &cmd);
}
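
/*
 * Illustrative note (not part of the driver): when init is true the command
 * goes out synchronously (flags == 0), so the caller must hold the mvm
 * mutex; subsequent rate updates pass init == false and are sent with
 * CMD_ASYNC.
 */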
/**
 * iwl_mvm_update_smps - Get a request to change the SMPS mode
 * @req_type: The part of the driver that requested the change.
 * @smps_request: The requested SMPS mode.
 *
 * Get a request to change the SMPS mode and apply it, taking all other
 * requests in the driver into account.
 */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 enum iwl_mvm_smps_type_request req_type,
			 enum ieee80211_smps_mode smps_request)
{
	struct iwl_mvm_vif *mvmvif;
	enum ieee80211_smps_mode smps_mode;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return;

	if (vif->type == NL80211_IFTYPE_AP)
		smps_mode = IEEE80211_SMPS_OFF;
	else
		smps_mode = IEEE80211_SMPS_AUTOMATIC;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);
	mvmvif->smps_requests[req_type] = smps_request;
	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
			smps_mode = IEEE80211_SMPS_STATIC;
			break;
		}
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
			smps_mode = IEEE80211_SMPS_DYNAMIC;
	}

	ieee80211_request_smps(vif, smps_mode);
}
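
/*
 * Worked example (illustrative only): if one requester asks for
 * IEEE80211_SMPS_STATIC and another for IEEE80211_SMPS_DYNAMIC, the loop
 * above settles on STATIC; with only DYNAMIC requests the result is
 * DYNAMIC; with neither, a station vif falls back to AUTOMATIC and an AP
 * vif to OFF.
 */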
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
	struct iwl_statistics_cmd scmd = {
		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
	};
	struct iwl_host_cmd cmd = {
		.id = STATISTICS_CMD,
		.len[0] = sizeof(scmd),
		.data[0] = &scmd,
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
	iwl_free_resp(&cmd);

	if (clear)
		iwl_mvm_accu_radio_stats(mvm);

	return 0;
}

void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
{
	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}

static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	bool *result = _data;
	int i;

	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
			*result = false;
	}
}

bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
{
	bool result = true;

	lockdep_assert_held(&mvm->mutex);

	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return false;

	if (mvm->cfg->rx_with_siso_diversity)
		return false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_diversity_iter, &result);

	return result;
}

int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool value)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int res;

	lockdep_assert_held(&mvm->mutex);

	if (mvmvif->low_latency == value)
		return 0;

	mvmvif->low_latency = value;

	res = iwl_mvm_update_quotas(mvm, false, NULL);
	if (res)
		return res;

	iwl_mvm_bt_coex_vif_change(mvm);

	return iwl_mvm_power_update_mac(mvm);
}

static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	bool *result = _data;

	if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
		*result = true;
}

bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
	bool result = false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &result);

	return result;
}

struct iwl_bss_iter_data {
	struct ieee80211_vif *vif;
	bool error;
};

static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_bss_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return;

	if (data->vif) {
		data->error = true;
		return;
	}

	data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
{
	struct iwl_bss_iter_data bss_iter_data = {};

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bss_iface_iterator, &bss_iter_data);

	if (bss_iter_data.error) {
		IWL_ERR(mvm, "More than one managed interface active!\n");
		return ERR_PTR(-EINVAL);
	}

	return bss_iter_data.vif;
}

unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    bool tdls, bool cmd_q)
{
	struct iwl_fw_dbg_trigger_tlv *trigger;
	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
	unsigned int default_timeout =
		cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS))
		return iwlmvm_mod_params.tfd_q_hang_detect ?
			default_timeout : IWL_WATCHDOG_DISABLED;

	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
	txq_timer = (void *)trigger->data;

	if (tdls)
		return le32_to_cpu(txq_timer->tdls);

	if (cmd_q)
		return le32_to_cpu(txq_timer->command_queue);

	if (WARN_ON(!vif))
		return default_timeout;

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_ADHOC:
		return le32_to_cpu(txq_timer->ibss);
	case NL80211_IFTYPE_STATION:
		return le32_to_cpu(txq_timer->bss);
	case NL80211_IFTYPE_AP:
		return le32_to_cpu(txq_timer->softap);
	case NL80211_IFTYPE_P2P_CLIENT:
		return le32_to_cpu(txq_timer->p2p_client);
	case NL80211_IFTYPE_P2P_GO:
		return le32_to_cpu(txq_timer->p2p_go);
	case NL80211_IFTYPE_P2P_DEVICE:
		return le32_to_cpu(txq_timer->p2p_device);
	default:
		WARN_ON(1);
		return mvm->cfg->base_params->wd_timeout;
	}
}

void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     const char *errmsg)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
		goto out;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
	trig_mlme = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		goto out;

	if (trig_mlme->stop_connection_loss &&
	    --trig_mlme->stop_connection_loss)
		goto out;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg);

out:
	ieee80211_connection_loss(vif);
}