1 /****************************************************************************** 2 * 3 * This file is provided under a dual BSD/GPLv2 license. When using or 4 * redistributing this file, you may do so under either license. 5 * 6 * GPL LICENSE SUMMARY 7 * 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 10 * Copyright (C) 2015 Intel Deutschland GmbH 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of version 2 of the GNU General Public License as 14 * published by the Free Software Foundation. 15 * 16 * This program is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 24 * USA 25 * 26 * The full GNU General Public License is included in this distribution 27 * in the file called COPYING. 28 * 29 * Contact Information: 30 * Intel Linux Wireless <linuxwifi@intel.com> 31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 32 * 33 * BSD LICENSE 34 * 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 37 * All rights reserved. 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 43 * * Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * * Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in 47 * the documentation and/or other materials provided with the 48 * distribution. 49 * * Neither the name Intel Corporation nor the names of its 50 * contributors may be used to endorse or promote products derived 51 * from this software without specific prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 64 * 65 *****************************************************************************/ 66 #include <net/mac80211.h> 67 68 #include "iwl-debug.h" 69 #include "iwl-io.h" 70 #include "iwl-prph.h" 71 #include "fw-dbg.h" 72 #include "mvm.h" 73 #include "fw-api-rs.h" 74 75 /* 76 * Will return 0 even if the cmd failed when RFKILL is asserted unless 77 * CMD_WANT_SKB is set in cmd->flags. 78 */ 79 int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) 80 { 81 int ret; 82 83 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) 84 if (WARN_ON(mvm->d3_test_active)) 85 return -EIO; 86 #endif 87 88 /* 89 * Synchronous commands from this op-mode must hold 90 * the mutex, this ensures we don't try to send two 91 * (or more) synchronous commands at a time. 92 */ 93 if (!(cmd->flags & CMD_ASYNC)) 94 lockdep_assert_held(&mvm->mutex); 95 96 ret = iwl_trans_send_cmd(mvm->trans, cmd); 97 98 /* 99 * If the caller wants the SKB, then don't hide any problems, the 100 * caller might access the response buffer which will be NULL if 101 * the command failed. 102 */ 103 if (cmd->flags & CMD_WANT_SKB) 104 return ret; 105 106 /* Silently ignore failures if RFKILL is asserted */ 107 if (!ret || ret == -ERFKILL) 108 return 0; 109 return ret; 110 } 111 112 int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, 113 u32 flags, u16 len, const void *data) 114 { 115 struct iwl_host_cmd cmd = { 116 .id = id, 117 .len = { len, }, 118 .data = { data, }, 119 .flags = flags, 120 }; 121 122 return iwl_mvm_send_cmd(mvm, &cmd); 123 } 124 125 /* 126 * We assume that the caller set the status to the success value 127 */ 128 int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, 129 u32 *status) 130 { 131 struct iwl_rx_packet *pkt; 132 struct iwl_cmd_response *resp; 133 int ret, resp_len; 134 135 lockdep_assert_held(&mvm->mutex); 136 137 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) 138 if (WARN_ON(mvm->d3_test_active)) 139 return -EIO; 140 #endif 141 142 /* 143 * Only synchronous commands can wait for status, 144 * we use WANT_SKB so the caller can't. 145 */ 146 if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB), 147 "cmd flags %x", cmd->flags)) 148 return -EINVAL; 149 150 cmd->flags |= CMD_WANT_SKB; 151 152 ret = iwl_trans_send_cmd(mvm->trans, cmd); 153 if (ret == -ERFKILL) { 154 /* 155 * The command failed because of RFKILL, don't update 156 * the status, leave it as success and return 0. 157 */ 158 return 0; 159 } else if (ret) { 160 return ret; 161 } 162 163 pkt = cmd->resp_pkt; 164 /* Can happen if RFKILL is asserted */ 165 if (!pkt) { 166 ret = 0; 167 goto out_free_resp; 168 } 169 170 resp_len = iwl_rx_packet_payload_len(pkt); 171 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { 172 ret = -EIO; 173 goto out_free_resp; 174 } 175 176 resp = (void *)pkt->data; 177 *status = le32_to_cpu(resp->status); 178 out_free_resp: 179 iwl_free_resp(cmd); 180 return ret; 181 } 182 183 /* 184 * We assume that the caller set the status to the sucess value 185 */ 186 int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, 187 const void *data, u32 *status) 188 { 189 struct iwl_host_cmd cmd = { 190 .id = id, 191 .len = { len, }, 192 .data = { data, }, 193 }; 194 195 return iwl_mvm_send_cmd_status(mvm, &cmd, status); 196 } 197 198 #define IWL_DECLARE_RATE_INFO(r) \ 199 [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP 200 201 /* 202 * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP 203 */ 204 static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { 205 IWL_DECLARE_RATE_INFO(1), 206 IWL_DECLARE_RATE_INFO(2), 207 IWL_DECLARE_RATE_INFO(5), 208 IWL_DECLARE_RATE_INFO(11), 209 IWL_DECLARE_RATE_INFO(6), 210 IWL_DECLARE_RATE_INFO(9), 211 IWL_DECLARE_RATE_INFO(12), 212 IWL_DECLARE_RATE_INFO(18), 213 IWL_DECLARE_RATE_INFO(24), 214 IWL_DECLARE_RATE_INFO(36), 215 IWL_DECLARE_RATE_INFO(48), 216 IWL_DECLARE_RATE_INFO(54), 217 }; 218 219 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, 220 enum nl80211_band band) 221 { 222 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; 223 int idx; 224 int band_offset = 0; 225 226 /* Legacy rate format, search for match in table */ 227 if (band == NL80211_BAND_5GHZ) 228 band_offset = IWL_FIRST_OFDM_RATE; 229 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) 230 if (fw_rate_idx_to_plcp[idx] == rate) 231 return idx - band_offset; 232 233 return -1; 234 } 235 236 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx) 237 { 238 /* Get PLCP rate for tx_cmd->rate_n_flags */ 239 return fw_rate_idx_to_plcp[rate_idx]; 240 } 241 242 void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 243 { 244 struct iwl_rx_packet *pkt = rxb_addr(rxb); 245 struct iwl_error_resp *err_resp = (void *)pkt->data; 246 247 IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n", 248 le32_to_cpu(err_resp->error_type), err_resp->cmd_id); 249 IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n", 250 le16_to_cpu(err_resp->bad_cmd_seq_num), 251 le32_to_cpu(err_resp->error_service)); 252 IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n", 253 le64_to_cpu(err_resp->timestamp)); 254 } 255 256 /* 257 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h. 258 * The parameter should also be a combination of ANT_[ABC]. 259 */ 260 u8 first_antenna(u8 mask) 261 { 262 BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */ 263 if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */ 264 return BIT(0); 265 return BIT(ffs(mask) - 1); 266 } 267 268 /* 269 * Toggles between TX antennas to send the probe request on. 270 * Receives the bitmask of valid TX antennas and the *index* used 271 * for the last TX, and returns the next valid *index* to use. 272 * In order to set it in the tx_cmd, must do BIT(idx). 273 */ 274 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) 275 { 276 u8 ind = last_idx; 277 int i; 278 279 for (i = 0; i < RATE_MCS_ANT_NUM; i++) { 280 ind = (ind + 1) % RATE_MCS_ANT_NUM; 281 if (valid & BIT(ind)) 282 return ind; 283 } 284 285 WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid); 286 return last_idx; 287 } 288 289 static const struct { 290 const char *name; 291 u8 num; 292 } advanced_lookup[] = { 293 { "NMI_INTERRUPT_WDG", 0x34 }, 294 { "SYSASSERT", 0x35 }, 295 { "UCODE_VERSION_MISMATCH", 0x37 }, 296 { "BAD_COMMAND", 0x38 }, 297 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, 298 { "FATAL_ERROR", 0x3D }, 299 { "NMI_TRM_HW_ERR", 0x46 }, 300 { "NMI_INTERRUPT_TRM", 0x4C }, 301 { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, 302 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, 303 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, 304 { "NMI_INTERRUPT_HOST", 0x66 }, 305 { "NMI_INTERRUPT_ACTION_PT", 0x7C }, 306 { "NMI_INTERRUPT_UNKNOWN", 0x84 }, 307 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, 308 { "ADVANCED_SYSASSERT", 0 }, 309 }; 310 311 static const char *desc_lookup(u32 num) 312 { 313 int i; 314 315 for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) 316 if (advanced_lookup[i].num == num) 317 return advanced_lookup[i].name; 318 319 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ 320 return advanced_lookup[i].name; 321 } 322 323 /* 324 * Note: This structure is read from the device with IO accesses, 325 * and the reading already does the endian conversion. As it is 326 * read with u32-sized accesses, any members with a different size 327 * need to be ordered correctly though! 328 */ 329 struct iwl_error_event_table_v1 { 330 u32 valid; /* (nonzero) valid, (0) log is empty */ 331 u32 error_id; /* type of error */ 332 u32 pc; /* program counter */ 333 u32 blink1; /* branch link */ 334 u32 blink2; /* branch link */ 335 u32 ilink1; /* interrupt link */ 336 u32 ilink2; /* interrupt link */ 337 u32 data1; /* error-specific data */ 338 u32 data2; /* error-specific data */ 339 u32 data3; /* error-specific data */ 340 u32 bcon_time; /* beacon timer */ 341 u32 tsf_low; /* network timestamp function timer */ 342 u32 tsf_hi; /* network timestamp function timer */ 343 u32 gp1; /* GP1 timer register */ 344 u32 gp2; /* GP2 timer register */ 345 u32 gp3; /* GP3 timer register */ 346 u32 ucode_ver; /* uCode version */ 347 u32 hw_ver; /* HW Silicon version */ 348 u32 brd_ver; /* HW board version */ 349 u32 log_pc; /* log program counter */ 350 u32 frame_ptr; /* frame pointer */ 351 u32 stack_ptr; /* stack pointer */ 352 u32 hcmd; /* last host command header */ 353 u32 isr0; /* isr status register LMPM_NIC_ISR0: 354 * rxtx_flag */ 355 u32 isr1; /* isr status register LMPM_NIC_ISR1: 356 * host_flag */ 357 u32 isr2; /* isr status register LMPM_NIC_ISR2: 358 * enc_flag */ 359 u32 isr3; /* isr status register LMPM_NIC_ISR3: 360 * time_flag */ 361 u32 isr4; /* isr status register LMPM_NIC_ISR4: 362 * wico interrupt */ 363 u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ 364 u32 wait_event; /* wait event() caller address */ 365 u32 l2p_control; /* L2pControlField */ 366 u32 l2p_duration; /* L2pDurationField */ 367 u32 l2p_mhvalid; /* L2pMhValidBits */ 368 u32 l2p_addr_match; /* L2pAddrMatchStat */ 369 u32 lmpm_pmg_sel; /* indicate which clocks are turned on 370 * (LMPM_PMG_SEL) */ 371 u32 u_timestamp; /* indicate when the date and time of the 372 * compilation */ 373 u32 flow_handler; /* FH read/write pointers, RX credit */ 374 } __packed /* LOG_ERROR_TABLE_API_S_VER_1 */; 375 376 struct iwl_error_event_table { 377 u32 valid; /* (nonzero) valid, (0) log is empty */ 378 u32 error_id; /* type of error */ 379 u32 trm_hw_status0; /* TRM HW status */ 380 u32 trm_hw_status1; /* TRM HW status */ 381 u32 blink2; /* branch link */ 382 u32 ilink1; /* interrupt link */ 383 u32 ilink2; /* interrupt link */ 384 u32 data1; /* error-specific data */ 385 u32 data2; /* error-specific data */ 386 u32 data3; /* error-specific data */ 387 u32 bcon_time; /* beacon timer */ 388 u32 tsf_low; /* network timestamp function timer */ 389 u32 tsf_hi; /* network timestamp function timer */ 390 u32 gp1; /* GP1 timer register */ 391 u32 gp2; /* GP2 timer register */ 392 u32 fw_rev_type; /* firmware revision type */ 393 u32 major; /* uCode version major */ 394 u32 minor; /* uCode version minor */ 395 u32 hw_ver; /* HW Silicon version */ 396 u32 brd_ver; /* HW board version */ 397 u32 log_pc; /* log program counter */ 398 u32 frame_ptr; /* frame pointer */ 399 u32 stack_ptr; /* stack pointer */ 400 u32 hcmd; /* last host command header */ 401 u32 isr0; /* isr status register LMPM_NIC_ISR0: 402 * rxtx_flag */ 403 u32 isr1; /* isr status register LMPM_NIC_ISR1: 404 * host_flag */ 405 u32 isr2; /* isr status register LMPM_NIC_ISR2: 406 * enc_flag */ 407 u32 isr3; /* isr status register LMPM_NIC_ISR3: 408 * time_flag */ 409 u32 isr4; /* isr status register LMPM_NIC_ISR4: 410 * wico interrupt */ 411 u32 last_cmd_id; /* last HCMD id handled by the firmware */ 412 u32 wait_event; /* wait event() caller address */ 413 u32 l2p_control; /* L2pControlField */ 414 u32 l2p_duration; /* L2pDurationField */ 415 u32 l2p_mhvalid; /* L2pMhValidBits */ 416 u32 l2p_addr_match; /* L2pAddrMatchStat */ 417 u32 lmpm_pmg_sel; /* indicate which clocks are turned on 418 * (LMPM_PMG_SEL) */ 419 u32 u_timestamp; /* indicate when the date and time of the 420 * compilation */ 421 u32 flow_handler; /* FH read/write pointers, RX credit */ 422 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; 423 424 /* 425 * UMAC error struct - relevant starting from family 8000 chip. 426 * Note: This structure is read from the device with IO accesses, 427 * and the reading already does the endian conversion. As it is 428 * read with u32-sized accesses, any members with a different size 429 * need to be ordered correctly though! 430 */ 431 struct iwl_umac_error_event_table { 432 u32 valid; /* (nonzero) valid, (0) log is empty */ 433 u32 error_id; /* type of error */ 434 u32 blink1; /* branch link */ 435 u32 blink2; /* branch link */ 436 u32 ilink1; /* interrupt link */ 437 u32 ilink2; /* interrupt link */ 438 u32 data1; /* error-specific data */ 439 u32 data2; /* error-specific data */ 440 u32 data3; /* error-specific data */ 441 u32 umac_major; 442 u32 umac_minor; 443 u32 frame_pointer; /* core register 27*/ 444 u32 stack_pointer; /* core register 28 */ 445 u32 cmd_header; /* latest host cmd sent to UMAC */ 446 u32 nic_isr_pref; /* ISR status register */ 447 } __packed; 448 449 #define ERROR_START_OFFSET (1 * sizeof(u32)) 450 #define ERROR_ELEM_SIZE (7 * sizeof(u32)) 451 452 static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) 453 { 454 struct iwl_trans *trans = mvm->trans; 455 struct iwl_umac_error_event_table table; 456 u32 base; 457 458 base = mvm->umac_error_event_table; 459 460 if (base < 0x800000) { 461 IWL_ERR(mvm, 462 "Not valid error log pointer 0x%08X for %s uCode\n", 463 base, 464 (mvm->cur_ucode == IWL_UCODE_INIT) 465 ? "Init" : "RT"); 466 return; 467 } 468 469 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); 470 471 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 472 IWL_ERR(trans, "Start IWL Error Log Dump:\n"); 473 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", 474 mvm->status, table.valid); 475 } 476 477 IWL_ERR(mvm, "0x%08X | %s\n", table.error_id, 478 desc_lookup(table.error_id)); 479 IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); 480 IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); 481 IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1); 482 IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2); 483 IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1); 484 IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2); 485 IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3); 486 IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major); 487 IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor); 488 IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer); 489 IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer); 490 IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header); 491 IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); 492 } 493 494 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) 495 { 496 struct iwl_trans *trans = mvm->trans; 497 struct iwl_error_event_table table; 498 u32 base; 499 500 base = mvm->error_event_table; 501 if (mvm->cur_ucode == IWL_UCODE_INIT) { 502 if (!base) 503 base = mvm->fw->init_errlog_ptr; 504 } else { 505 if (!base) 506 base = mvm->fw->inst_errlog_ptr; 507 } 508 509 if (base < 0x800000) { 510 IWL_ERR(mvm, 511 "Not valid error log pointer 0x%08X for %s uCode\n", 512 base, 513 (mvm->cur_ucode == IWL_UCODE_INIT) 514 ? "Init" : "RT"); 515 return; 516 } 517 518 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); 519 520 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 521 IWL_ERR(trans, "Start IWL Error Log Dump:\n"); 522 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", 523 mvm->status, table.valid); 524 } 525 526 /* Do not change this output - scripts rely on it */ 527 528 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); 529 530 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, 531 table.data1, table.data2, table.data3, 532 table.blink2, table.ilink1, 533 table.ilink2, table.bcon_time, table.gp1, 534 table.gp2, table.fw_rev_type, table.major, 535 table.minor, table.hw_ver, table.brd_ver); 536 IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, 537 desc_lookup(table.error_id)); 538 IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); 539 IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1); 540 IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); 541 IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); 542 IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); 543 IWL_ERR(mvm, "0x%08X | data1\n", table.data1); 544 IWL_ERR(mvm, "0x%08X | data2\n", table.data2); 545 IWL_ERR(mvm, "0x%08X | data3\n", table.data3); 546 IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); 547 IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); 548 IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); 549 IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); 550 IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); 551 IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type); 552 IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major); 553 IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor); 554 IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); 555 IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); 556 IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); 557 IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); 558 IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); 559 IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); 560 IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); 561 IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); 562 IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id); 563 IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); 564 IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); 565 IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); 566 IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); 567 IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); 568 IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); 569 IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); 570 IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); 571 572 if (mvm->support_umac_log) 573 iwl_mvm_dump_umac_error_log(mvm); 574 } 575 576 int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq) 577 { 578 int i; 579 580 lockdep_assert_held(&mvm->queue_info_lock); 581 582 for (i = minq; i <= maxq; i++) 583 if (mvm->queue_info[i].hw_queue_refcount == 0 && 584 !mvm->queue_info[i].setup_reserved) 585 return i; 586 587 return -ENOSPC; 588 } 589 590 void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, 591 u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, 592 unsigned int wdg_timeout) 593 { 594 bool enable_queue = true; 595 596 spin_lock_bh(&mvm->queue_info_lock); 597 598 /* Make sure this TID isn't already enabled */ 599 if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) { 600 spin_unlock_bh(&mvm->queue_info_lock); 601 IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n", 602 cfg->tid); 603 return; 604 } 605 606 /* Update mappings and refcounts */ 607 mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue); 608 mvm->queue_info[queue].hw_queue_refcount++; 609 if (mvm->queue_info[queue].hw_queue_refcount > 1) 610 enable_queue = false; 611 else 612 mvm->queue_info[queue].ra_sta_id = cfg->sta_id; 613 mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); 614 615 IWL_DEBUG_TX_QUEUES(mvm, 616 "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", 617 queue, mvm->queue_info[queue].hw_queue_refcount, 618 mvm->queue_info[queue].hw_queue_to_mac80211); 619 620 spin_unlock_bh(&mvm->queue_info_lock); 621 622 /* Send the enabling command if we need to */ 623 if (enable_queue) { 624 struct iwl_scd_txq_cfg_cmd cmd = { 625 .scd_queue = queue, 626 .enable = 1, 627 .window = cfg->frame_limit, 628 .sta_id = cfg->sta_id, 629 .ssn = cpu_to_le16(ssn), 630 .tx_fifo = cfg->fifo, 631 .aggregate = cfg->aggregate, 632 .tid = cfg->tid, 633 }; 634 635 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, 636 wdg_timeout); 637 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), 638 &cmd), 639 "Failed to configure queue %d on FIFO %d\n", queue, 640 cfg->fifo); 641 } 642 } 643 644 void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, 645 u8 tid, u8 flags) 646 { 647 struct iwl_scd_txq_cfg_cmd cmd = { 648 .scd_queue = queue, 649 .enable = 0, 650 }; 651 bool remove_mac_queue = true; 652 int ret; 653 654 spin_lock_bh(&mvm->queue_info_lock); 655 656 if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { 657 spin_unlock_bh(&mvm->queue_info_lock); 658 return; 659 } 660 661 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); 662 663 /* 664 * If there is another TID with the same AC - don't remove the MAC queue 665 * from the mapping 666 */ 667 if (tid < IWL_MAX_TID_COUNT) { 668 unsigned long tid_bitmap = 669 mvm->queue_info[queue].tid_bitmap; 670 int ac = tid_to_mac80211_ac[tid]; 671 int i; 672 673 for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { 674 if (tid_to_mac80211_ac[i] == ac) 675 remove_mac_queue = false; 676 } 677 } 678 679 if (remove_mac_queue) 680 mvm->queue_info[queue].hw_queue_to_mac80211 &= 681 ~BIT(mac80211_queue); 682 mvm->queue_info[queue].hw_queue_refcount--; 683 684 cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0; 685 686 IWL_DEBUG_TX_QUEUES(mvm, 687 "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", 688 queue, 689 mvm->queue_info[queue].hw_queue_refcount, 690 mvm->queue_info[queue].hw_queue_to_mac80211); 691 692 /* If the queue is still enabled - nothing left to do in this func */ 693 if (cmd.enable) { 694 spin_unlock_bh(&mvm->queue_info_lock); 695 return; 696 } 697 698 cmd.sta_id = mvm->queue_info[queue].ra_sta_id; 699 700 /* Make sure queue info is correct even though we overwrite it */ 701 WARN(mvm->queue_info[queue].hw_queue_refcount || 702 mvm->queue_info[queue].tid_bitmap || 703 mvm->queue_info[queue].hw_queue_to_mac80211, 704 "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n", 705 queue, mvm->queue_info[queue].hw_queue_refcount, 706 mvm->queue_info[queue].hw_queue_to_mac80211, 707 mvm->queue_info[queue].tid_bitmap); 708 709 /* If we are here - the queue is freed and we can zero out these vals */ 710 mvm->queue_info[queue].hw_queue_refcount = 0; 711 mvm->queue_info[queue].tid_bitmap = 0; 712 mvm->queue_info[queue].hw_queue_to_mac80211 = 0; 713 714 spin_unlock_bh(&mvm->queue_info_lock); 715 716 iwl_trans_txq_disable(mvm->trans, queue, false); 717 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, 718 sizeof(cmd), &cmd); 719 if (ret) 720 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", 721 queue, ret); 722 } 723 724 /** 725 * iwl_mvm_send_lq_cmd() - Send link quality command 726 * @init: This command is sent as part of station initialization right 727 * after station has been added. 728 * 729 * The link quality command is sent as the last step of station creation. 730 * This is the special case in which init is set and we call a callback in 731 * this case to clear the state indicating that station creation is in 732 * progress. 733 */ 734 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init) 735 { 736 struct iwl_host_cmd cmd = { 737 .id = LQ_CMD, 738 .len = { sizeof(struct iwl_lq_cmd), }, 739 .flags = init ? 0 : CMD_ASYNC, 740 .data = { lq, }, 741 }; 742 743 if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT)) 744 return -EINVAL; 745 746 return iwl_mvm_send_cmd(mvm, &cmd); 747 } 748 749 /** 750 * iwl_mvm_update_smps - Get a request to change the SMPS mode 751 * @req_type: The part of the driver who call for a change. 752 * @smps_requests: The request to change the SMPS mode. 753 * 754 * Get a requst to change the SMPS mode, 755 * and change it according to all other requests in the driver. 756 */ 757 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 758 enum iwl_mvm_smps_type_request req_type, 759 enum ieee80211_smps_mode smps_request) 760 { 761 struct iwl_mvm_vif *mvmvif; 762 enum ieee80211_smps_mode smps_mode; 763 int i; 764 765 lockdep_assert_held(&mvm->mutex); 766 767 /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */ 768 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) 769 return; 770 771 if (vif->type == NL80211_IFTYPE_AP) 772 smps_mode = IEEE80211_SMPS_OFF; 773 else 774 smps_mode = IEEE80211_SMPS_AUTOMATIC; 775 776 mvmvif = iwl_mvm_vif_from_mac80211(vif); 777 mvmvif->smps_requests[req_type] = smps_request; 778 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { 779 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) { 780 smps_mode = IEEE80211_SMPS_STATIC; 781 break; 782 } 783 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) 784 smps_mode = IEEE80211_SMPS_DYNAMIC; 785 } 786 787 ieee80211_request_smps(vif, smps_mode); 788 } 789 790 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) 791 { 792 struct iwl_statistics_cmd scmd = { 793 .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0, 794 }; 795 struct iwl_host_cmd cmd = { 796 .id = STATISTICS_CMD, 797 .len[0] = sizeof(scmd), 798 .data[0] = &scmd, 799 .flags = CMD_WANT_SKB, 800 }; 801 int ret; 802 803 ret = iwl_mvm_send_cmd(mvm, &cmd); 804 if (ret) 805 return ret; 806 807 iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); 808 iwl_free_resp(&cmd); 809 810 if (clear) 811 iwl_mvm_accu_radio_stats(mvm); 812 813 return 0; 814 } 815 816 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm) 817 { 818 mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time; 819 mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time; 820 mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf; 821 mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan; 822 } 823 824 static void iwl_mvm_diversity_iter(void *_data, u8 *mac, 825 struct ieee80211_vif *vif) 826 { 827 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 828 bool *result = _data; 829 int i; 830 831 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { 832 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC || 833 mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) 834 *result = false; 835 } 836 } 837 838 bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) 839 { 840 bool result = true; 841 842 lockdep_assert_held(&mvm->mutex); 843 844 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) 845 return false; 846 847 if (mvm->cfg->rx_with_siso_diversity) 848 return false; 849 850 ieee80211_iterate_active_interfaces_atomic( 851 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 852 iwl_mvm_diversity_iter, &result); 853 854 return result; 855 } 856 857 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 858 bool prev) 859 { 860 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 861 int res; 862 863 lockdep_assert_held(&mvm->mutex); 864 865 if (iwl_mvm_vif_low_latency(mvmvif) == prev) 866 return 0; 867 868 res = iwl_mvm_update_quotas(mvm, false, NULL); 869 if (res) 870 return res; 871 872 iwl_mvm_bt_coex_vif_change(mvm); 873 874 return iwl_mvm_power_update_mac(mvm); 875 } 876 877 static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) 878 { 879 bool *result = _data; 880 881 if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif))) 882 *result = true; 883 } 884 885 bool iwl_mvm_low_latency(struct iwl_mvm *mvm) 886 { 887 bool result = false; 888 889 ieee80211_iterate_active_interfaces_atomic( 890 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 891 iwl_mvm_ll_iter, &result); 892 893 return result; 894 } 895 896 struct iwl_bss_iter_data { 897 struct ieee80211_vif *vif; 898 bool error; 899 }; 900 901 static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac, 902 struct ieee80211_vif *vif) 903 { 904 struct iwl_bss_iter_data *data = _data; 905 906 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 907 return; 908 909 if (data->vif) { 910 data->error = true; 911 return; 912 } 913 914 data->vif = vif; 915 } 916 917 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) 918 { 919 struct iwl_bss_iter_data bss_iter_data = {}; 920 921 ieee80211_iterate_active_interfaces_atomic( 922 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 923 iwl_mvm_bss_iface_iterator, &bss_iter_data); 924 925 if (bss_iter_data.error) { 926 IWL_ERR(mvm, "More than one managed interface active!\n"); 927 return ERR_PTR(-EINVAL); 928 } 929 930 return bss_iter_data.vif; 931 } 932 933 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, 934 struct ieee80211_vif *vif, 935 bool tdls, bool cmd_q) 936 { 937 struct iwl_fw_dbg_trigger_tlv *trigger; 938 struct iwl_fw_dbg_trigger_txq_timer *txq_timer; 939 unsigned int default_timeout = 940 cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; 941 942 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) 943 return iwlmvm_mod_params.tfd_q_hang_detect ? 944 default_timeout : IWL_WATCHDOG_DISABLED; 945 946 trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); 947 txq_timer = (void *)trigger->data; 948 949 if (tdls) 950 return le32_to_cpu(txq_timer->tdls); 951 952 if (cmd_q) 953 return le32_to_cpu(txq_timer->command_queue); 954 955 if (WARN_ON(!vif)) 956 return default_timeout; 957 958 switch (ieee80211_vif_type_p2p(vif)) { 959 case NL80211_IFTYPE_ADHOC: 960 return le32_to_cpu(txq_timer->ibss); 961 case NL80211_IFTYPE_STATION: 962 return le32_to_cpu(txq_timer->bss); 963 case NL80211_IFTYPE_AP: 964 return le32_to_cpu(txq_timer->softap); 965 case NL80211_IFTYPE_P2P_CLIENT: 966 return le32_to_cpu(txq_timer->p2p_client); 967 case NL80211_IFTYPE_P2P_GO: 968 return le32_to_cpu(txq_timer->p2p_go); 969 case NL80211_IFTYPE_P2P_DEVICE: 970 return le32_to_cpu(txq_timer->p2p_device); 971 default: 972 WARN_ON(1); 973 return mvm->cfg->base_params->wd_timeout; 974 } 975 } 976 977 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 978 const char *errmsg) 979 { 980 struct iwl_fw_dbg_trigger_tlv *trig; 981 struct iwl_fw_dbg_trigger_mlme *trig_mlme; 982 983 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) 984 goto out; 985 986 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); 987 trig_mlme = (void *)trig->data; 988 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) 989 goto out; 990 991 if (trig_mlme->stop_connection_loss && 992 --trig_mlme->stop_connection_loss) 993 goto out; 994 995 iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg); 996 997 out: 998 ieee80211_connection_loss(vif); 999 } 1000 1001 int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, 1002 enum iwl_lqm_cmd_operatrions operation, 1003 u32 duration, u32 timeout) 1004 { 1005 struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); 1006 struct iwl_link_qual_msrmnt_cmd cmd = { 1007 .cmd_operation = cpu_to_le32(operation), 1008 .mac_id = cpu_to_le32(mvm_vif->id), 1009 .measurement_time = cpu_to_le32(duration), 1010 .timeout = cpu_to_le32(timeout), 1011 }; 1012 u32 cmdid = 1013 iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0); 1014 int ret; 1015 1016 if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa, 1017 IWL_UCODE_TLV_CAPA_LQM_SUPPORT)) 1018 return -EOPNOTSUPP; 1019 1020 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 1021 return -EINVAL; 1022 1023 switch (operation) { 1024 case LQM_CMD_OPERATION_START_MEASUREMENT: 1025 if (iwl_mvm_lqm_active(mvm_vif->mvm)) 1026 return -EBUSY; 1027 if (!vif->bss_conf.assoc) 1028 return -EINVAL; 1029 mvm_vif->lqm_active = true; 1030 break; 1031 case LQM_CMD_OPERATION_STOP_MEASUREMENT: 1032 if (!iwl_mvm_lqm_active(mvm_vif->mvm)) 1033 return -EINVAL; 1034 break; 1035 default: 1036 return -EINVAL; 1037 } 1038 1039 ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd), 1040 &cmd); 1041 1042 /* command failed - roll back lqm_active state */ 1043 if (ret) { 1044 mvm_vif->lqm_active = 1045 operation == LQM_CMD_OPERATION_STOP_MEASUREMENT; 1046 } 1047 1048 return ret; 1049 } 1050 1051 static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac, 1052 struct ieee80211_vif *vif) 1053 { 1054 struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); 1055 bool *lqm_active = _data; 1056 1057 *lqm_active = *lqm_active || mvm_vif->lqm_active; 1058 } 1059 1060 bool iwl_mvm_lqm_active(struct iwl_mvm *mvm) 1061 { 1062 bool ret = false; 1063 1064 lockdep_assert_held(&mvm->mutex); 1065 ieee80211_iterate_active_interfaces_atomic( 1066 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1067 iwl_mvm_lqm_active_iterator, &ret); 1068 1069 return ret; 1070 } 1071