1 /****************************************************************************** 2 * 3 * This file is provided under a dual BSD/GPLv2 license. When using or 4 * redistributing this file, you may do so under either license. 5 * 6 * GPL LICENSE SUMMARY 7 * 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 10 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of version 2 of the GNU General Public License as 14 * published by the Free Software Foundation. 15 * 16 * This program is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 24 * USA 25 * 26 * The full GNU General Public License is included in this distribution 27 * in the file called COPYING. 28 * 29 * Contact Information: 30 * Intel Linux Wireless <linuxwifi@intel.com> 31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 32 * 33 * BSD LICENSE 34 * 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 37 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH 38 * All rights reserved. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 44 * * Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * * Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in 48 * the documentation and/or other materials provided with the 49 * distribution. 50 * * Neither the name Intel Corporation nor the names of its 51 * contributors may be used to endorse or promote products derived 52 * from this software without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 *****************************************************************************/ 67 #include <net/mac80211.h> 68 69 #include "iwl-debug.h" 70 #include "iwl-io.h" 71 #include "iwl-prph.h" 72 #include "fw-dbg.h" 73 #include "mvm.h" 74 #include "fw-api-rs.h" 75 76 /* 77 * Will return 0 even if the cmd failed when RFKILL is asserted unless 78 * CMD_WANT_SKB is set in cmd->flags. 79 */ 80 int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) 81 { 82 int ret; 83 84 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) 85 if (WARN_ON(mvm->d3_test_active)) 86 return -EIO; 87 #endif 88 89 /* 90 * Synchronous commands from this op-mode must hold 91 * the mutex, this ensures we don't try to send two 92 * (or more) synchronous commands at a time. 93 */ 94 if (!(cmd->flags & CMD_ASYNC)) { 95 lockdep_assert_held(&mvm->mutex); 96 if (!(cmd->flags & CMD_SEND_IN_IDLE)) 97 iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD); 98 } 99 100 ret = iwl_trans_send_cmd(mvm->trans, cmd); 101 102 if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE))) 103 iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD); 104 105 /* 106 * If the caller wants the SKB, then don't hide any problems, the 107 * caller might access the response buffer which will be NULL if 108 * the command failed. 109 */ 110 if (cmd->flags & CMD_WANT_SKB) 111 return ret; 112 113 /* Silently ignore failures if RFKILL is asserted */ 114 if (!ret || ret == -ERFKILL) 115 return 0; 116 return ret; 117 } 118 119 int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, 120 u32 flags, u16 len, const void *data) 121 { 122 struct iwl_host_cmd cmd = { 123 .id = id, 124 .len = { len, }, 125 .data = { data, }, 126 .flags = flags, 127 }; 128 129 return iwl_mvm_send_cmd(mvm, &cmd); 130 } 131 132 /* 133 * We assume that the caller set the status to the success value 134 */ 135 int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, 136 u32 *status) 137 { 138 struct iwl_rx_packet *pkt; 139 struct iwl_cmd_response *resp; 140 int ret, resp_len; 141 142 lockdep_assert_held(&mvm->mutex); 143 144 #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) 145 if (WARN_ON(mvm->d3_test_active)) 146 return -EIO; 147 #endif 148 149 /* 150 * Only synchronous commands can wait for status, 151 * we use WANT_SKB so the caller can't. 152 */ 153 if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB), 154 "cmd flags %x", cmd->flags)) 155 return -EINVAL; 156 157 cmd->flags |= CMD_WANT_SKB; 158 159 ret = iwl_trans_send_cmd(mvm->trans, cmd); 160 if (ret == -ERFKILL) { 161 /* 162 * The command failed because of RFKILL, don't update 163 * the status, leave it as success and return 0. 164 */ 165 return 0; 166 } else if (ret) { 167 return ret; 168 } 169 170 pkt = cmd->resp_pkt; 171 /* Can happen if RFKILL is asserted */ 172 if (!pkt) { 173 ret = 0; 174 goto out_free_resp; 175 } 176 177 resp_len = iwl_rx_packet_payload_len(pkt); 178 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { 179 ret = -EIO; 180 goto out_free_resp; 181 } 182 183 resp = (void *)pkt->data; 184 *status = le32_to_cpu(resp->status); 185 out_free_resp: 186 iwl_free_resp(cmd); 187 return ret; 188 } 189 190 /* 191 * We assume that the caller set the status to the sucess value 192 */ 193 int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, 194 const void *data, u32 *status) 195 { 196 struct iwl_host_cmd cmd = { 197 .id = id, 198 .len = { len, }, 199 .data = { data, }, 200 }; 201 202 return iwl_mvm_send_cmd_status(mvm, &cmd, status); 203 } 204 205 #define IWL_DECLARE_RATE_INFO(r) \ 206 [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP 207 208 /* 209 * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP 210 */ 211 static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { 212 IWL_DECLARE_RATE_INFO(1), 213 IWL_DECLARE_RATE_INFO(2), 214 IWL_DECLARE_RATE_INFO(5), 215 IWL_DECLARE_RATE_INFO(11), 216 IWL_DECLARE_RATE_INFO(6), 217 IWL_DECLARE_RATE_INFO(9), 218 IWL_DECLARE_RATE_INFO(12), 219 IWL_DECLARE_RATE_INFO(18), 220 IWL_DECLARE_RATE_INFO(24), 221 IWL_DECLARE_RATE_INFO(36), 222 IWL_DECLARE_RATE_INFO(48), 223 IWL_DECLARE_RATE_INFO(54), 224 }; 225 226 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, 227 enum nl80211_band band) 228 { 229 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; 230 int idx; 231 int band_offset = 0; 232 233 /* Legacy rate format, search for match in table */ 234 if (band == NL80211_BAND_5GHZ) 235 band_offset = IWL_FIRST_OFDM_RATE; 236 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) 237 if (fw_rate_idx_to_plcp[idx] == rate) 238 return idx - band_offset; 239 240 return -1; 241 } 242 243 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx) 244 { 245 /* Get PLCP rate for tx_cmd->rate_n_flags */ 246 return fw_rate_idx_to_plcp[rate_idx]; 247 } 248 249 void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 250 { 251 struct iwl_rx_packet *pkt = rxb_addr(rxb); 252 struct iwl_error_resp *err_resp = (void *)pkt->data; 253 254 IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n", 255 le32_to_cpu(err_resp->error_type), err_resp->cmd_id); 256 IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n", 257 le16_to_cpu(err_resp->bad_cmd_seq_num), 258 le32_to_cpu(err_resp->error_service)); 259 IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n", 260 le64_to_cpu(err_resp->timestamp)); 261 } 262 263 /* 264 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h. 265 * The parameter should also be a combination of ANT_[ABC]. 266 */ 267 u8 first_antenna(u8 mask) 268 { 269 BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */ 270 if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */ 271 return BIT(0); 272 return BIT(ffs(mask) - 1); 273 } 274 275 /* 276 * Toggles between TX antennas to send the probe request on. 277 * Receives the bitmask of valid TX antennas and the *index* used 278 * for the last TX, and returns the next valid *index* to use. 279 * In order to set it in the tx_cmd, must do BIT(idx). 280 */ 281 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) 282 { 283 u8 ind = last_idx; 284 int i; 285 286 for (i = 0; i < RATE_MCS_ANT_NUM; i++) { 287 ind = (ind + 1) % RATE_MCS_ANT_NUM; 288 if (valid & BIT(ind)) 289 return ind; 290 } 291 292 WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid); 293 return last_idx; 294 } 295 296 static const struct { 297 const char *name; 298 u8 num; 299 } advanced_lookup[] = { 300 { "NMI_INTERRUPT_WDG", 0x34 }, 301 { "SYSASSERT", 0x35 }, 302 { "UCODE_VERSION_MISMATCH", 0x37 }, 303 { "BAD_COMMAND", 0x38 }, 304 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, 305 { "FATAL_ERROR", 0x3D }, 306 { "NMI_TRM_HW_ERR", 0x46 }, 307 { "NMI_INTERRUPT_TRM", 0x4C }, 308 { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, 309 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, 310 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, 311 { "NMI_INTERRUPT_HOST", 0x66 }, 312 { "NMI_INTERRUPT_ACTION_PT", 0x7C }, 313 { "NMI_INTERRUPT_UNKNOWN", 0x84 }, 314 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, 315 { "ADVANCED_SYSASSERT", 0 }, 316 }; 317 318 static const char *desc_lookup(u32 num) 319 { 320 int i; 321 322 for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) 323 if (advanced_lookup[i].num == num) 324 return advanced_lookup[i].name; 325 326 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ 327 return advanced_lookup[i].name; 328 } 329 330 /* 331 * Note: This structure is read from the device with IO accesses, 332 * and the reading already does the endian conversion. As it is 333 * read with u32-sized accesses, any members with a different size 334 * need to be ordered correctly though! 335 */ 336 struct iwl_error_event_table_v1 { 337 u32 valid; /* (nonzero) valid, (0) log is empty */ 338 u32 error_id; /* type of error */ 339 u32 pc; /* program counter */ 340 u32 blink1; /* branch link */ 341 u32 blink2; /* branch link */ 342 u32 ilink1; /* interrupt link */ 343 u32 ilink2; /* interrupt link */ 344 u32 data1; /* error-specific data */ 345 u32 data2; /* error-specific data */ 346 u32 data3; /* error-specific data */ 347 u32 bcon_time; /* beacon timer */ 348 u32 tsf_low; /* network timestamp function timer */ 349 u32 tsf_hi; /* network timestamp function timer */ 350 u32 gp1; /* GP1 timer register */ 351 u32 gp2; /* GP2 timer register */ 352 u32 gp3; /* GP3 timer register */ 353 u32 ucode_ver; /* uCode version */ 354 u32 hw_ver; /* HW Silicon version */ 355 u32 brd_ver; /* HW board version */ 356 u32 log_pc; /* log program counter */ 357 u32 frame_ptr; /* frame pointer */ 358 u32 stack_ptr; /* stack pointer */ 359 u32 hcmd; /* last host command header */ 360 u32 isr0; /* isr status register LMPM_NIC_ISR0: 361 * rxtx_flag */ 362 u32 isr1; /* isr status register LMPM_NIC_ISR1: 363 * host_flag */ 364 u32 isr2; /* isr status register LMPM_NIC_ISR2: 365 * enc_flag */ 366 u32 isr3; /* isr status register LMPM_NIC_ISR3: 367 * time_flag */ 368 u32 isr4; /* isr status register LMPM_NIC_ISR4: 369 * wico interrupt */ 370 u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ 371 u32 wait_event; /* wait event() caller address */ 372 u32 l2p_control; /* L2pControlField */ 373 u32 l2p_duration; /* L2pDurationField */ 374 u32 l2p_mhvalid; /* L2pMhValidBits */ 375 u32 l2p_addr_match; /* L2pAddrMatchStat */ 376 u32 lmpm_pmg_sel; /* indicate which clocks are turned on 377 * (LMPM_PMG_SEL) */ 378 u32 u_timestamp; /* indicate when the date and time of the 379 * compilation */ 380 u32 flow_handler; /* FH read/write pointers, RX credit */ 381 } __packed /* LOG_ERROR_TABLE_API_S_VER_1 */; 382 383 struct iwl_error_event_table { 384 u32 valid; /* (nonzero) valid, (0) log is empty */ 385 u32 error_id; /* type of error */ 386 u32 trm_hw_status0; /* TRM HW status */ 387 u32 trm_hw_status1; /* TRM HW status */ 388 u32 blink2; /* branch link */ 389 u32 ilink1; /* interrupt link */ 390 u32 ilink2; /* interrupt link */ 391 u32 data1; /* error-specific data */ 392 u32 data2; /* error-specific data */ 393 u32 data3; /* error-specific data */ 394 u32 bcon_time; /* beacon timer */ 395 u32 tsf_low; /* network timestamp function timer */ 396 u32 tsf_hi; /* network timestamp function timer */ 397 u32 gp1; /* GP1 timer register */ 398 u32 gp2; /* GP2 timer register */ 399 u32 fw_rev_type; /* firmware revision type */ 400 u32 major; /* uCode version major */ 401 u32 minor; /* uCode version minor */ 402 u32 hw_ver; /* HW Silicon version */ 403 u32 brd_ver; /* HW board version */ 404 u32 log_pc; /* log program counter */ 405 u32 frame_ptr; /* frame pointer */ 406 u32 stack_ptr; /* stack pointer */ 407 u32 hcmd; /* last host command header */ 408 u32 isr0; /* isr status register LMPM_NIC_ISR0: 409 * rxtx_flag */ 410 u32 isr1; /* isr status register LMPM_NIC_ISR1: 411 * host_flag */ 412 u32 isr2; /* isr status register LMPM_NIC_ISR2: 413 * enc_flag */ 414 u32 isr3; /* isr status register LMPM_NIC_ISR3: 415 * time_flag */ 416 u32 isr4; /* isr status register LMPM_NIC_ISR4: 417 * wico interrupt */ 418 u32 last_cmd_id; /* last HCMD id handled by the firmware */ 419 u32 wait_event; /* wait event() caller address */ 420 u32 l2p_control; /* L2pControlField */ 421 u32 l2p_duration; /* L2pDurationField */ 422 u32 l2p_mhvalid; /* L2pMhValidBits */ 423 u32 l2p_addr_match; /* L2pAddrMatchStat */ 424 u32 lmpm_pmg_sel; /* indicate which clocks are turned on 425 * (LMPM_PMG_SEL) */ 426 u32 u_timestamp; /* indicate when the date and time of the 427 * compilation */ 428 u32 flow_handler; /* FH read/write pointers, RX credit */ 429 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; 430 431 /* 432 * UMAC error struct - relevant starting from family 8000 chip. 433 * Note: This structure is read from the device with IO accesses, 434 * and the reading already does the endian conversion. As it is 435 * read with u32-sized accesses, any members with a different size 436 * need to be ordered correctly though! 437 */ 438 struct iwl_umac_error_event_table { 439 u32 valid; /* (nonzero) valid, (0) log is empty */ 440 u32 error_id; /* type of error */ 441 u32 blink1; /* branch link */ 442 u32 blink2; /* branch link */ 443 u32 ilink1; /* interrupt link */ 444 u32 ilink2; /* interrupt link */ 445 u32 data1; /* error-specific data */ 446 u32 data2; /* error-specific data */ 447 u32 data3; /* error-specific data */ 448 u32 umac_major; 449 u32 umac_minor; 450 u32 frame_pointer; /* core register 27*/ 451 u32 stack_pointer; /* core register 28 */ 452 u32 cmd_header; /* latest host cmd sent to UMAC */ 453 u32 nic_isr_pref; /* ISR status register */ 454 } __packed; 455 456 #define ERROR_START_OFFSET (1 * sizeof(u32)) 457 #define ERROR_ELEM_SIZE (7 * sizeof(u32)) 458 459 static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) 460 { 461 struct iwl_trans *trans = mvm->trans; 462 struct iwl_umac_error_event_table table; 463 u32 base; 464 465 base = mvm->umac_error_event_table; 466 467 if (base < 0x800000) { 468 IWL_ERR(mvm, 469 "Not valid error log pointer 0x%08X for %s uCode\n", 470 base, 471 (mvm->cur_ucode == IWL_UCODE_INIT) 472 ? "Init" : "RT"); 473 return; 474 } 475 476 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); 477 478 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 479 IWL_ERR(trans, "Start IWL Error Log Dump:\n"); 480 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", 481 mvm->status, table.valid); 482 } 483 484 IWL_ERR(mvm, "0x%08X | %s\n", table.error_id, 485 desc_lookup(table.error_id)); 486 IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); 487 IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); 488 IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1); 489 IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2); 490 IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1); 491 IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2); 492 IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3); 493 IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major); 494 IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor); 495 IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer); 496 IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer); 497 IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header); 498 IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); 499 } 500 501 static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) 502 { 503 struct iwl_trans *trans = mvm->trans; 504 struct iwl_error_event_table table; 505 506 if (mvm->cur_ucode == IWL_UCODE_INIT) { 507 if (!base) 508 base = mvm->fw->init_errlog_ptr; 509 } else { 510 if (!base) 511 base = mvm->fw->inst_errlog_ptr; 512 } 513 514 if (base < 0x400000) { 515 IWL_ERR(mvm, 516 "Not valid error log pointer 0x%08X for %s uCode\n", 517 base, 518 (mvm->cur_ucode == IWL_UCODE_INIT) 519 ? "Init" : "RT"); 520 return; 521 } 522 523 iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); 524 525 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 526 IWL_ERR(trans, "Start IWL Error Log Dump:\n"); 527 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", 528 mvm->status, table.valid); 529 } 530 531 /* Do not change this output - scripts rely on it */ 532 533 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); 534 535 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, 536 table.data1, table.data2, table.data3, 537 table.blink2, table.ilink1, 538 table.ilink2, table.bcon_time, table.gp1, 539 table.gp2, table.fw_rev_type, table.major, 540 table.minor, table.hw_ver, table.brd_ver); 541 IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, 542 desc_lookup(table.error_id)); 543 IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); 544 IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1); 545 IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); 546 IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); 547 IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); 548 IWL_ERR(mvm, "0x%08X | data1\n", table.data1); 549 IWL_ERR(mvm, "0x%08X | data2\n", table.data2); 550 IWL_ERR(mvm, "0x%08X | data3\n", table.data3); 551 IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time); 552 IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low); 553 IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); 554 IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); 555 IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); 556 IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type); 557 IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major); 558 IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor); 559 IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); 560 IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver); 561 IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd); 562 IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0); 563 IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1); 564 IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); 565 IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); 566 IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); 567 IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id); 568 IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); 569 IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); 570 IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); 571 IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); 572 IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); 573 IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); 574 IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp); 575 IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); 576 } 577 578 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) 579 { 580 iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]); 581 582 if (mvm->error_event_table[1]) 583 iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]); 584 585 if (mvm->support_umac_log) 586 iwl_mvm_dump_umac_error_log(mvm); 587 } 588 589 int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) 590 { 591 int i; 592 593 lockdep_assert_held(&mvm->queue_info_lock); 594 595 /* This should not be hit with new TX path */ 596 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 597 return -ENOSPC; 598 599 /* Start by looking for a free queue */ 600 for (i = minq; i <= maxq; i++) 601 if (mvm->queue_info[i].hw_queue_refcount == 0 && 602 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) 603 return i; 604 605 /* 606 * If no free queue found - settle for an inactive one to reconfigure 607 * Make sure that the inactive queue either already belongs to this STA, 608 * or that if it belongs to another one - it isn't the reserved queue 609 */ 610 for (i = minq; i <= maxq; i++) 611 if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE && 612 (sta_id == mvm->queue_info[i].ra_sta_id || 613 !mvm->queue_info[i].reserved)) 614 return i; 615 616 return -ENOSPC; 617 } 618 619 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, 620 int tid, int frame_limit, u16 ssn) 621 { 622 struct iwl_scd_txq_cfg_cmd cmd = { 623 .scd_queue = queue, 624 .action = SCD_CFG_ENABLE_QUEUE, 625 .window = frame_limit, 626 .sta_id = sta_id, 627 .ssn = cpu_to_le16(ssn), 628 .tx_fifo = fifo, 629 .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || 630 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE), 631 .tid = tid, 632 }; 633 int ret; 634 635 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 636 return -EINVAL; 637 638 spin_lock_bh(&mvm->queue_info_lock); 639 if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0, 640 "Trying to reconfig unallocated queue %d\n", queue)) { 641 spin_unlock_bh(&mvm->queue_info_lock); 642 return -ENXIO; 643 } 644 spin_unlock_bh(&mvm->queue_info_lock); 645 646 IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); 647 648 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); 649 WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n", 650 queue, fifo, ret); 651 652 return ret; 653 } 654 655 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, 656 int mac80211_queue, u8 sta_id, u8 tid) 657 { 658 bool enable_queue = true; 659 660 spin_lock_bh(&mvm->queue_info_lock); 661 662 /* Make sure this TID isn't already enabled */ 663 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { 664 spin_unlock_bh(&mvm->queue_info_lock); 665 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", 666 queue, tid); 667 return false; 668 } 669 670 /* Update mappings and refcounts */ 671 if (mvm->queue_info[queue].hw_queue_refcount > 0) 672 enable_queue = false; 673 674 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); 675 676 mvm->queue_info[queue].hw_queue_refcount++; 677 mvm->queue_info[queue].tid_bitmap |= BIT(tid); 678 mvm->queue_info[queue].ra_sta_id = sta_id; 679 680 if (enable_queue) { 681 if (tid != IWL_MAX_TID_COUNT) 682 mvm->queue_info[queue].mac80211_ac = 683 tid_to_mac80211_ac[tid]; 684 else 685 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; 686 687 mvm->queue_info[queue].txq_tid = tid; 688 } 689 690 IWL_DEBUG_TX_QUEUES(mvm, 691 "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", 692 queue, mvm->queue_info[queue].hw_queue_refcount, 693 mvm->hw_queue_to_mac80211[queue]); 694 695 spin_unlock_bh(&mvm->queue_info_lock); 696 697 return enable_queue; 698 } 699 700 int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, 701 u8 sta_id, u8 tid, unsigned int timeout) 702 { 703 struct iwl_tx_queue_cfg_cmd cmd = { 704 .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), 705 .sta_id = sta_id, 706 .tid = tid, 707 }; 708 int queue; 709 710 if (cmd.tid == IWL_MAX_TID_COUNT) 711 cmd.tid = IWL_MGMT_TID; 712 queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd, 713 SCD_QUEUE_CFG, timeout); 714 715 if (queue < 0) { 716 IWL_DEBUG_TX_QUEUES(mvm, 717 "Failed allocating TXQ for sta %d tid %d, ret: %d\n", 718 sta_id, tid, queue); 719 return queue; 720 } 721 722 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", 723 queue, sta_id, tid); 724 725 mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); 726 IWL_DEBUG_TX_QUEUES(mvm, 727 "Enabling TXQ #%d (mac80211 map:0x%x)\n", 728 queue, mvm->hw_queue_to_mac80211[queue]); 729 730 return queue; 731 } 732 733 void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, 734 u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, 735 unsigned int wdg_timeout) 736 { 737 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 738 return; 739 740 /* Send the enabling command if we need to */ 741 if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, 742 cfg->sta_id, cfg->tid)) { 743 struct iwl_scd_txq_cfg_cmd cmd = { 744 .scd_queue = queue, 745 .action = SCD_CFG_ENABLE_QUEUE, 746 .window = cfg->frame_limit, 747 .sta_id = cfg->sta_id, 748 .ssn = cpu_to_le16(ssn), 749 .tx_fifo = cfg->fifo, 750 .aggregate = cfg->aggregate, 751 .tid = cfg->tid, 752 }; 753 754 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, 755 wdg_timeout); 756 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, 757 sizeof(struct iwl_scd_txq_cfg_cmd), 758 &cmd), 759 "Failed to configure queue %d on FIFO %d\n", queue, 760 cfg->fifo); 761 } 762 } 763 764 int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, 765 u8 tid, u8 flags) 766 { 767 struct iwl_scd_txq_cfg_cmd cmd = { 768 .scd_queue = queue, 769 .action = SCD_CFG_DISABLE_QUEUE, 770 }; 771 bool remove_mac_queue = true; 772 int ret; 773 774 if (iwl_mvm_has_new_tx_api(mvm)) { 775 spin_lock_bh(&mvm->queue_info_lock); 776 mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue); 777 spin_unlock_bh(&mvm->queue_info_lock); 778 779 iwl_trans_txq_free(mvm->trans, queue); 780 781 return 0; 782 } 783 784 spin_lock_bh(&mvm->queue_info_lock); 785 786 if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { 787 spin_unlock_bh(&mvm->queue_info_lock); 788 return 0; 789 } 790 791 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); 792 793 /* 794 * If there is another TID with the same AC - don't remove the MAC queue 795 * from the mapping 796 */ 797 if (tid < IWL_MAX_TID_COUNT) { 798 unsigned long tid_bitmap = 799 mvm->queue_info[queue].tid_bitmap; 800 int ac = tid_to_mac80211_ac[tid]; 801 int i; 802 803 for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { 804 if (tid_to_mac80211_ac[i] == ac) 805 remove_mac_queue = false; 806 } 807 } 808 809 if (remove_mac_queue) 810 mvm->hw_queue_to_mac80211[queue] &= 811 ~BIT(mac80211_queue); 812 mvm->queue_info[queue].hw_queue_refcount--; 813 814 cmd.action = mvm->queue_info[queue].hw_queue_refcount ? 815 SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; 816 if (cmd.action == SCD_CFG_DISABLE_QUEUE) 817 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; 818 819 IWL_DEBUG_TX_QUEUES(mvm, 820 "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", 821 queue, 822 mvm->queue_info[queue].hw_queue_refcount, 823 mvm->hw_queue_to_mac80211[queue]); 824 825 /* If the queue is still enabled - nothing left to do in this func */ 826 if (cmd.action == SCD_CFG_ENABLE_QUEUE) { 827 spin_unlock_bh(&mvm->queue_info_lock); 828 return 0; 829 } 830 831 cmd.sta_id = mvm->queue_info[queue].ra_sta_id; 832 cmd.tid = mvm->queue_info[queue].txq_tid; 833 834 /* Make sure queue info is correct even though we overwrite it */ 835 WARN(mvm->queue_info[queue].hw_queue_refcount || 836 mvm->queue_info[queue].tid_bitmap || 837 mvm->hw_queue_to_mac80211[queue], 838 "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n", 839 queue, mvm->queue_info[queue].hw_queue_refcount, 840 mvm->hw_queue_to_mac80211[queue], 841 mvm->queue_info[queue].tid_bitmap); 842 843 /* If we are here - the queue is freed and we can zero out these vals */ 844 mvm->queue_info[queue].hw_queue_refcount = 0; 845 mvm->queue_info[queue].tid_bitmap = 0; 846 mvm->hw_queue_to_mac80211[queue] = 0; 847 848 /* Regardless if this is a reserved TXQ for a STA - mark it as false */ 849 mvm->queue_info[queue].reserved = false; 850 851 spin_unlock_bh(&mvm->queue_info_lock); 852 853 iwl_trans_txq_disable(mvm->trans, queue, false); 854 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, 855 sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); 856 857 if (ret) 858 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", 859 queue, ret); 860 return ret; 861 } 862 863 /** 864 * iwl_mvm_send_lq_cmd() - Send link quality command 865 * @init: This command is sent as part of station initialization right 866 * after station has been added. 867 * 868 * The link quality command is sent as the last step of station creation. 869 * This is the special case in which init is set and we call a callback in 870 * this case to clear the state indicating that station creation is in 871 * progress. 872 */ 873 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init) 874 { 875 struct iwl_host_cmd cmd = { 876 .id = LQ_CMD, 877 .len = { sizeof(struct iwl_lq_cmd), }, 878 .flags = init ? 0 : CMD_ASYNC, 879 .data = { lq, }, 880 }; 881 882 if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA)) 883 return -EINVAL; 884 885 return iwl_mvm_send_cmd(mvm, &cmd); 886 } 887 888 /** 889 * iwl_mvm_update_smps - Get a request to change the SMPS mode 890 * @req_type: The part of the driver who call for a change. 891 * @smps_requests: The request to change the SMPS mode. 892 * 893 * Get a requst to change the SMPS mode, 894 * and change it according to all other requests in the driver. 895 */ 896 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 897 enum iwl_mvm_smps_type_request req_type, 898 enum ieee80211_smps_mode smps_request) 899 { 900 struct iwl_mvm_vif *mvmvif; 901 enum ieee80211_smps_mode smps_mode; 902 int i; 903 904 lockdep_assert_held(&mvm->mutex); 905 906 /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */ 907 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) 908 return; 909 910 if (vif->type == NL80211_IFTYPE_AP) 911 smps_mode = IEEE80211_SMPS_OFF; 912 else 913 smps_mode = IEEE80211_SMPS_AUTOMATIC; 914 915 mvmvif = iwl_mvm_vif_from_mac80211(vif); 916 mvmvif->smps_requests[req_type] = smps_request; 917 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { 918 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) { 919 smps_mode = IEEE80211_SMPS_STATIC; 920 break; 921 } 922 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) 923 smps_mode = IEEE80211_SMPS_DYNAMIC; 924 } 925 926 ieee80211_request_smps(vif, smps_mode); 927 } 928 929 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) 930 { 931 struct iwl_statistics_cmd scmd = { 932 .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0, 933 }; 934 struct iwl_host_cmd cmd = { 935 .id = STATISTICS_CMD, 936 .len[0] = sizeof(scmd), 937 .data[0] = &scmd, 938 .flags = CMD_WANT_SKB, 939 }; 940 int ret; 941 942 ret = iwl_mvm_send_cmd(mvm, &cmd); 943 if (ret) 944 return ret; 945 946 iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); 947 iwl_free_resp(&cmd); 948 949 if (clear) 950 iwl_mvm_accu_radio_stats(mvm); 951 952 return 0; 953 } 954 955 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm) 956 { 957 mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time; 958 mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time; 959 mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf; 960 mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan; 961 } 962 963 static void iwl_mvm_diversity_iter(void *_data, u8 *mac, 964 struct ieee80211_vif *vif) 965 { 966 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 967 bool *result = _data; 968 int i; 969 970 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { 971 if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC || 972 mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) 973 *result = false; 974 } 975 } 976 977 bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) 978 { 979 bool result = true; 980 981 lockdep_assert_held(&mvm->mutex); 982 983 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) 984 return false; 985 986 if (mvm->cfg->rx_with_siso_diversity) 987 return false; 988 989 ieee80211_iterate_active_interfaces_atomic( 990 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 991 iwl_mvm_diversity_iter, &result); 992 993 return result; 994 } 995 996 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 997 bool prev) 998 { 999 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1000 int res; 1001 1002 lockdep_assert_held(&mvm->mutex); 1003 1004 if (iwl_mvm_vif_low_latency(mvmvif) == prev) 1005 return 0; 1006 1007 res = iwl_mvm_update_quotas(mvm, false, NULL); 1008 if (res) 1009 return res; 1010 1011 iwl_mvm_bt_coex_vif_change(mvm); 1012 1013 return iwl_mvm_power_update_mac(mvm); 1014 } 1015 1016 static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) 1017 { 1018 bool *result = _data; 1019 1020 if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif))) 1021 *result = true; 1022 } 1023 1024 bool iwl_mvm_low_latency(struct iwl_mvm *mvm) 1025 { 1026 bool result = false; 1027 1028 ieee80211_iterate_active_interfaces_atomic( 1029 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1030 iwl_mvm_ll_iter, &result); 1031 1032 return result; 1033 } 1034 1035 struct iwl_bss_iter_data { 1036 struct ieee80211_vif *vif; 1037 bool error; 1038 }; 1039 1040 static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac, 1041 struct ieee80211_vif *vif) 1042 { 1043 struct iwl_bss_iter_data *data = _data; 1044 1045 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 1046 return; 1047 1048 if (data->vif) { 1049 data->error = true; 1050 return; 1051 } 1052 1053 data->vif = vif; 1054 } 1055 1056 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) 1057 { 1058 struct iwl_bss_iter_data bss_iter_data = {}; 1059 1060 ieee80211_iterate_active_interfaces_atomic( 1061 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1062 iwl_mvm_bss_iface_iterator, &bss_iter_data); 1063 1064 if (bss_iter_data.error) { 1065 IWL_ERR(mvm, "More than one managed interface active!\n"); 1066 return ERR_PTR(-EINVAL); 1067 } 1068 1069 return bss_iter_data.vif; 1070 } 1071 1072 struct iwl_sta_iter_data { 1073 bool assoc; 1074 }; 1075 1076 static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac, 1077 struct ieee80211_vif *vif) 1078 { 1079 struct iwl_sta_iter_data *data = _data; 1080 1081 if (vif->type != NL80211_IFTYPE_STATION) 1082 return; 1083 1084 if (vif->bss_conf.assoc) 1085 data->assoc = true; 1086 } 1087 1088 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm) 1089 { 1090 struct iwl_sta_iter_data data = { 1091 .assoc = false, 1092 }; 1093 1094 ieee80211_iterate_active_interfaces_atomic(mvm->hw, 1095 IEEE80211_IFACE_ITER_NORMAL, 1096 iwl_mvm_sta_iface_iterator, 1097 &data); 1098 return data.assoc; 1099 } 1100 1101 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, 1102 struct ieee80211_vif *vif, 1103 bool tdls, bool cmd_q) 1104 { 1105 struct iwl_fw_dbg_trigger_tlv *trigger; 1106 struct iwl_fw_dbg_trigger_txq_timer *txq_timer; 1107 unsigned int default_timeout = 1108 cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; 1109 1110 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) 1111 return iwlmvm_mod_params.tfd_q_hang_detect ? 1112 default_timeout : IWL_WATCHDOG_DISABLED; 1113 1114 trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); 1115 txq_timer = (void *)trigger->data; 1116 1117 if (tdls) 1118 return le32_to_cpu(txq_timer->tdls); 1119 1120 if (cmd_q) 1121 return le32_to_cpu(txq_timer->command_queue); 1122 1123 if (WARN_ON(!vif)) 1124 return default_timeout; 1125 1126 switch (ieee80211_vif_type_p2p(vif)) { 1127 case NL80211_IFTYPE_ADHOC: 1128 return le32_to_cpu(txq_timer->ibss); 1129 case NL80211_IFTYPE_STATION: 1130 return le32_to_cpu(txq_timer->bss); 1131 case NL80211_IFTYPE_AP: 1132 return le32_to_cpu(txq_timer->softap); 1133 case NL80211_IFTYPE_P2P_CLIENT: 1134 return le32_to_cpu(txq_timer->p2p_client); 1135 case NL80211_IFTYPE_P2P_GO: 1136 return le32_to_cpu(txq_timer->p2p_go); 1137 case NL80211_IFTYPE_P2P_DEVICE: 1138 return le32_to_cpu(txq_timer->p2p_device); 1139 default: 1140 WARN_ON(1); 1141 return mvm->cfg->base_params->wd_timeout; 1142 } 1143 } 1144 1145 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1146 const char *errmsg) 1147 { 1148 struct iwl_fw_dbg_trigger_tlv *trig; 1149 struct iwl_fw_dbg_trigger_mlme *trig_mlme; 1150 1151 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) 1152 goto out; 1153 1154 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); 1155 trig_mlme = (void *)trig->data; 1156 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) 1157 goto out; 1158 1159 if (trig_mlme->stop_connection_loss && 1160 --trig_mlme->stop_connection_loss) 1161 goto out; 1162 1163 iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg); 1164 1165 out: 1166 ieee80211_connection_loss(vif); 1167 } 1168 1169 /* 1170 * Remove inactive TIDs of a given queue. 1171 * If all queue TIDs are inactive - mark the queue as inactive 1172 * If only some the queue TIDs are inactive - unmap them from the queue 1173 */ 1174 static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, 1175 struct iwl_mvm_sta *mvmsta, int queue, 1176 unsigned long tid_bitmap) 1177 { 1178 int tid; 1179 1180 lockdep_assert_held(&mvmsta->lock); 1181 lockdep_assert_held(&mvm->queue_info_lock); 1182 1183 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 1184 return; 1185 1186 /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ 1187 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1188 /* If some TFDs are still queued - don't mark TID as inactive */ 1189 if (iwl_mvm_tid_queued(&mvmsta->tid_data[tid])) 1190 tid_bitmap &= ~BIT(tid); 1191 } 1192 1193 /* If all TIDs in the queue are inactive - mark queue as inactive. */ 1194 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { 1195 mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; 1196 1197 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) 1198 mvmsta->tid_data[tid].is_tid_active = false; 1199 1200 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n", 1201 queue); 1202 return; 1203 } 1204 1205 /* 1206 * If we are here, this is a shared queue and not all TIDs timed-out. 1207 * Remove the ones that did. 1208 */ 1209 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1210 int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; 1211 1212 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; 1213 mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); 1214 mvm->queue_info[queue].hw_queue_refcount--; 1215 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); 1216 mvmsta->tid_data[tid].is_tid_active = false; 1217 1218 IWL_DEBUG_TX_QUEUES(mvm, 1219 "Removing inactive TID %d from shared Q:%d\n", 1220 tid, queue); 1221 } 1222 1223 IWL_DEBUG_TX_QUEUES(mvm, 1224 "TXQ #%d left with tid bitmap 0x%x\n", queue, 1225 mvm->queue_info[queue].tid_bitmap); 1226 1227 /* 1228 * There may be different TIDs with the same mac queues, so make 1229 * sure all TIDs have existing corresponding mac queues enabled 1230 */ 1231 tid_bitmap = mvm->queue_info[queue].tid_bitmap; 1232 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1233 mvm->hw_queue_to_mac80211[queue] |= 1234 BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); 1235 } 1236 1237 /* If the queue is marked as shared - "unshare" it */ 1238 if (mvm->queue_info[queue].hw_queue_refcount == 1 && 1239 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { 1240 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING; 1241 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", 1242 queue); 1243 } 1244 } 1245 1246 void iwl_mvm_inactivity_check(struct iwl_mvm *mvm) 1247 { 1248 unsigned long timeout_queues_map = 0; 1249 unsigned long now = jiffies; 1250 int i; 1251 1252 if (iwl_mvm_has_new_tx_api(mvm)) 1253 return; 1254 1255 spin_lock_bh(&mvm->queue_info_lock); 1256 for (i = 0; i < IWL_MAX_HW_QUEUES; i++) 1257 if (mvm->queue_info[i].hw_queue_refcount > 0) 1258 timeout_queues_map |= BIT(i); 1259 spin_unlock_bh(&mvm->queue_info_lock); 1260 1261 rcu_read_lock(); 1262 1263 /* 1264 * If a queue time outs - mark it as INACTIVE (don't remove right away 1265 * if we don't have to.) This is an optimization in case traffic comes 1266 * later, and we don't HAVE to use a currently-inactive queue 1267 */ 1268 for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) { 1269 struct ieee80211_sta *sta; 1270 struct iwl_mvm_sta *mvmsta; 1271 u8 sta_id; 1272 int tid; 1273 unsigned long inactive_tid_bitmap = 0; 1274 unsigned long queue_tid_bitmap; 1275 1276 spin_lock_bh(&mvm->queue_info_lock); 1277 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; 1278 1279 /* If TXQ isn't in active use anyway - nothing to do here... */ 1280 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && 1281 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) { 1282 spin_unlock_bh(&mvm->queue_info_lock); 1283 continue; 1284 } 1285 1286 /* Check to see if there are inactive TIDs on this queue */ 1287 for_each_set_bit(tid, &queue_tid_bitmap, 1288 IWL_MAX_TID_COUNT + 1) { 1289 if (time_after(mvm->queue_info[i].last_frame_time[tid] + 1290 IWL_MVM_DQA_QUEUE_TIMEOUT, now)) 1291 continue; 1292 1293 inactive_tid_bitmap |= BIT(tid); 1294 } 1295 spin_unlock_bh(&mvm->queue_info_lock); 1296 1297 /* If all TIDs are active - finish check on this queue */ 1298 if (!inactive_tid_bitmap) 1299 continue; 1300 1301 /* 1302 * If we are here - the queue hadn't been served recently and is 1303 * in use 1304 */ 1305 1306 sta_id = mvm->queue_info[i].ra_sta_id; 1307 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 1308 1309 /* 1310 * If the STA doesn't exist anymore, it isn't an error. It could 1311 * be that it was removed since getting the queues, and in this 1312 * case it should've inactivated its queues anyway. 1313 */ 1314 if (IS_ERR_OR_NULL(sta)) 1315 continue; 1316 1317 mvmsta = iwl_mvm_sta_from_mac80211(sta); 1318 1319 spin_lock_bh(&mvmsta->lock); 1320 spin_lock(&mvm->queue_info_lock); 1321 iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, 1322 inactive_tid_bitmap); 1323 spin_unlock(&mvm->queue_info_lock); 1324 spin_unlock_bh(&mvmsta->lock); 1325 } 1326 1327 rcu_read_unlock(); 1328 } 1329 1330 void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime) 1331 { 1332 bool ps_disabled; 1333 1334 lockdep_assert_held(&mvm->mutex); 1335 1336 /* Disable power save when reading GP2 */ 1337 ps_disabled = mvm->ps_disabled; 1338 if (!ps_disabled) { 1339 mvm->ps_disabled = true; 1340 iwl_mvm_power_update_device(mvm); 1341 } 1342 1343 *gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); 1344 *boottime = ktime_get_boot_ns(); 1345 1346 if (!ps_disabled) { 1347 mvm->ps_disabled = ps_disabled; 1348 iwl_mvm_power_update_device(mvm); 1349 } 1350 } 1351 1352 int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, 1353 enum iwl_lqm_cmd_operatrions operation, 1354 u32 duration, u32 timeout) 1355 { 1356 struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); 1357 struct iwl_link_qual_msrmnt_cmd cmd = { 1358 .cmd_operation = cpu_to_le32(operation), 1359 .mac_id = cpu_to_le32(mvm_vif->id), 1360 .measurement_time = cpu_to_le32(duration), 1361 .timeout = cpu_to_le32(timeout), 1362 }; 1363 u32 cmdid = 1364 iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0); 1365 int ret; 1366 1367 if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa, 1368 IWL_UCODE_TLV_CAPA_LQM_SUPPORT)) 1369 return -EOPNOTSUPP; 1370 1371 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) 1372 return -EINVAL; 1373 1374 switch (operation) { 1375 case LQM_CMD_OPERATION_START_MEASUREMENT: 1376 if (iwl_mvm_lqm_active(mvm_vif->mvm)) 1377 return -EBUSY; 1378 if (!vif->bss_conf.assoc) 1379 return -EINVAL; 1380 mvm_vif->lqm_active = true; 1381 break; 1382 case LQM_CMD_OPERATION_STOP_MEASUREMENT: 1383 if (!iwl_mvm_lqm_active(mvm_vif->mvm)) 1384 return -EINVAL; 1385 break; 1386 default: 1387 return -EINVAL; 1388 } 1389 1390 ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd), 1391 &cmd); 1392 1393 /* command failed - roll back lqm_active state */ 1394 if (ret) { 1395 mvm_vif->lqm_active = 1396 operation == LQM_CMD_OPERATION_STOP_MEASUREMENT; 1397 } 1398 1399 return ret; 1400 } 1401 1402 static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac, 1403 struct ieee80211_vif *vif) 1404 { 1405 struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); 1406 bool *lqm_active = _data; 1407 1408 *lqm_active = *lqm_active || mvm_vif->lqm_active; 1409 } 1410 1411 bool iwl_mvm_lqm_active(struct iwl_mvm *mvm) 1412 { 1413 bool ret = false; 1414 1415 lockdep_assert_held(&mvm->mutex); 1416 ieee80211_iterate_active_interfaces_atomic( 1417 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1418 iwl_mvm_lqm_active_iterator, &ret); 1419 1420 return ret; 1421 } 1422