/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
#include "iwl-fh.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}

static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys;
	u32 size = 0;
	u8 power;

	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	phys = 0;
	for (power = max_power; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (WARN_ON_ONCE(!page))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}

static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
		 trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - this is needed for 7260 / 3160
		 * only, so we check host_interrupt_operation_mode even though
		 * the workaround itself is not related to that flag.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but makes sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
	return ret;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is possible.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (WARN_ON(ret < 0)) {
		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);

	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ?
		       " not" : "");

	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	msleep(1);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

/*
 * The driver takes ownership of the secure machine before FW load
 * to prevent a race with the BT load.
 * W/A for a ROM bug (should be removed in the next Si step).
 */
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
{
	u32 val, loop = 1000;

	/*
	 * Check the RSA semaphore is accessible.
	 * If the HW isn't locked and the rsa semaphore isn't accessible,
	 * we are in trouble.
	 */
	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
	if (val & (BIT(1) | BIT(17))) {
		IWL_INFO(trans,
			 "can't access the RSA semaphore it is write protected\n");
		return 0;
	}

	/* take ownership on the AUX IF */
	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);

	do {
		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
		if (val == 0x1) {
			iwl_write_prph(trans, RSA_ENABLE, 0);
			return 0;
		}

		udelay(10);
		loop--;
	} while (loop > 0);

	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
	return -EIO;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	if (cpu == 1)
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
	else
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);

	return 0;
}

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		iwl_set_bits_prph(trans,
				  CSR_UCODE_LOAD_STATUS_ADDR,
				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
				   LMPM_CPU_HDRS_LOADING_COMPLETED |
				   LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
	int i;

	if (dest->version)
		IWL_ERR(trans,
			"DBG DEST version is %d - expect issues\n",
			dest->version);

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans_pcie->fw_mon_phys >> dest->base_shift);
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size - 256) >>
						dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >>
						dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	} else if (trans->dbg_dest_tlv) {
		iwl_pcie_apply_destination(trans);
	}

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* TODO: remove in the next Si step */
	ret = iwl_pcie_rsa_race_bug_wa(trans);
	if (ret)
		return ret;

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	mutex_lock(&trans_pcie->mutex);

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	udelay(20);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);
	_iwl_trans_pcie_stop_device(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		_iwl_trans_pcie_stop_device(trans, true);
}

static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
	}

	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this call.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_pcie_set_pwr(trans, false);

	if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}

static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	usleep_range(10, 15);

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	/* ...
	 * rfkill can call stop_device and set it false if needed */
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_apm_stop(trans, true);

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	synchronize_irq(trans_pcie->pci_dev->irq);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);

	trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
	trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;

	trans->command_groups = trans_cfg->command_groups;
	trans->command_groups_size =
		trans_cfg->command_groups_size;

	/* init ref_count to 1 (should be cleared when ucode is loaded) */
	trans_pcie->ref_count = 1;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (!trans_pcie->napi.poll) {
		init_dummy_netdev(&trans_pcie->napi_dev);
		netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
			       iwl_pcie_dummy_napi_poll, 64);
	}
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	if (trans_pcie->napi.poll)
		netif_napi_del(&trans_pcie->napi);

	iwl_pcie_free_fw_monitor(trans);

	for_each_possible_cpu(i) {
		struct iwl_tso_hdr_page *p =
			per_cpu_ptr(trans_pcie->tso_hdr_page, i);

		if (p->page)
			__free_page(p->page);
	}

	free_percpu(trans_pcie->tso_hdr_page);
	iwl_trans_free(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}

static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		WARN_ONCE(1,
			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
			  iwl_read32(trans, CSR_GP_CNTRL));
		spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
		return false;
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking that we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}

static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
					    unsigned long txqs,
					    bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = &trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->q.read_ptr == txq->q.write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}

static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		if (i == trans_pcie->cmd_queue)
			continue;

		spin_lock_bh(&txq->lock);

		if (!block && !(WARN_ON_ONCE(!txq->block))) {
			txq->block--;
			if (!txq->block) {
				iwl_write32(trans, HBUS_TARG_WRPTR,
					    txq->q.write_ptr | (i << 8));
			}
		} else if (block) {
			txq->block++;
		}

		spin_unlock_bh(&txq->lock);
	}
}

#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u8 wr_ptr;

		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		wr_ptr = ACCESS_ONCE(q->write_ptr);

		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
			u8 write_ptr = ACCESS_ONCE(q->write_ptr);

			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
				      wr_ptr, write_ptr))
				return -ETIMEDOUT;
			msleep(1);
		}

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
	}

	if (!ret)
		return 0;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}

void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	trans_pcie->ref_count++;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}

void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
		spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
		return;
	}
	trans_pcie->ref_count--;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { 1922 IWL_ERR(trans, " %25s: 0X%08x\n", 1923 get_csr_string(csr_tbl[i]), 1924 iwl_read32(trans, csr_tbl[i])); 1925 } 1926 } 1927 1928 #ifdef CONFIG_IWLWIFI_DEBUGFS 1929 /* create and remove of files */ 1930 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 1931 if (!debugfs_create_file(#name, mode, parent, trans, \ 1932 &iwl_dbgfs_##name##_ops)) \ 1933 goto err; \ 1934 } while (0) 1935 1936 /* file operation */ 1937 #define DEBUGFS_READ_FILE_OPS(name) \ 1938 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 1939 .read = iwl_dbgfs_##name##_read, \ 1940 .open = simple_open, \ 1941 .llseek = generic_file_llseek, \ 1942 }; 1943 1944 #define DEBUGFS_WRITE_FILE_OPS(name) \ 1945 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 1946 .write = iwl_dbgfs_##name##_write, \ 1947 .open = simple_open, \ 1948 .llseek = generic_file_llseek, \ 1949 }; 1950 1951 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 1952 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 1953 .write = iwl_dbgfs_##name##_write, \ 1954 .read = iwl_dbgfs_##name##_read, \ 1955 .open = simple_open, \ 1956 .llseek = generic_file_llseek, \ 1957 }; 1958 1959 static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, 1960 char __user *user_buf, 1961 size_t count, loff_t *ppos) 1962 { 1963 struct iwl_trans *trans = file->private_data; 1964 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1965 struct iwl_txq *txq; 1966 struct iwl_queue *q; 1967 char *buf; 1968 int pos = 0; 1969 int cnt; 1970 int ret; 1971 size_t bufsz; 1972 1973 bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues; 1974 1975 if (!trans_pcie->txq) 1976 return -EAGAIN; 1977 1978 buf = kzalloc(bufsz, GFP_KERNEL); 1979 if (!buf) 1980 return -ENOMEM; 1981 1982 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { 1983 txq = &trans_pcie->txq[cnt]; 1984 q = &txq->q; 1985 pos += scnprintf(buf + pos, bufsz - pos, 1986 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n", 1987 cnt, q->read_ptr, q->write_ptr, 1988 !!test_bit(cnt, trans_pcie->queue_used), 1989 !!test_bit(cnt, trans_pcie->queue_stopped), 1990 txq->need_update, txq->frozen, 1991 (cnt == trans_pcie->cmd_queue ? 
" HCMD" : "")); 1992 } 1993 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1994 kfree(buf); 1995 return ret; 1996 } 1997 1998 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, 1999 char __user *user_buf, 2000 size_t count, loff_t *ppos) 2001 { 2002 struct iwl_trans *trans = file->private_data; 2003 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2004 struct iwl_rxq *rxq = &trans_pcie->rxq; 2005 char buf[256]; 2006 int pos = 0; 2007 const size_t bufsz = sizeof(buf); 2008 2009 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", 2010 rxq->read); 2011 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", 2012 rxq->write); 2013 pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n", 2014 rxq->write_actual); 2015 pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n", 2016 rxq->need_update); 2017 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", 2018 rxq->free_count); 2019 if (rxq->rb_stts) { 2020 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", 2021 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); 2022 } else { 2023 pos += scnprintf(buf + pos, bufsz - pos, 2024 "closed_rb_num: Not Allocated\n"); 2025 } 2026 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2027 } 2028 2029 static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 2030 char __user *user_buf, 2031 size_t count, loff_t *ppos) 2032 { 2033 struct iwl_trans *trans = file->private_data; 2034 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2035 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2036 2037 int pos = 0; 2038 char *buf; 2039 int bufsz = 24 * 64; /* 24 items * 64 char per item */ 2040 ssize_t ret; 2041 2042 buf = kzalloc(bufsz, GFP_KERNEL); 2043 if (!buf) 2044 return -ENOMEM; 2045 2046 pos += scnprintf(buf + pos, bufsz - pos, 2047 "Interrupt Statistics Report:\n"); 2048 2049 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", 2050 isr_stats->hw); 2051 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", 2052 isr_stats->sw); 2053 if (isr_stats->sw || isr_stats->hw) { 2054 pos += scnprintf(buf + pos, bufsz - pos, 2055 "\tLast Restarting Code: 0x%X\n", 2056 isr_stats->err_code); 2057 } 2058 #ifdef CONFIG_IWLWIFI_DEBUG 2059 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", 2060 isr_stats->sch); 2061 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", 2062 isr_stats->alive); 2063 #endif 2064 pos += scnprintf(buf + pos, bufsz - pos, 2065 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); 2066 2067 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", 2068 isr_stats->ctkill); 2069 2070 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", 2071 isr_stats->wakeup); 2072 2073 pos += scnprintf(buf + pos, bufsz - pos, 2074 "Rx command responses:\t\t %u\n", isr_stats->rx); 2075 2076 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", 2077 isr_stats->tx); 2078 2079 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", 2080 isr_stats->unhandled); 2081 2082 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2083 kfree(buf); 2084 return ret; 2085 } 2086 2087 static ssize_t iwl_dbgfs_interrupt_write(struct file *file, 2088 const char __user *user_buf, 2089 size_t count, loff_t *ppos) 2090 { 2091 struct iwl_trans *trans = file->private_data; 2092 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2093 struct isr_statistics *isr_stats = 
&trans_pcie->isr_stats; 2094 2095 char buf[8]; 2096 int buf_size; 2097 u32 reset_flag; 2098 2099 memset(buf, 0, sizeof(buf)); 2100 buf_size = min(count, sizeof(buf) - 1); 2101 if (copy_from_user(buf, user_buf, buf_size)) 2102 return -EFAULT; 2103 if (sscanf(buf, "%x", &reset_flag) != 1) 2104 return -EFAULT; 2105 if (reset_flag == 0) 2106 memset(isr_stats, 0, sizeof(*isr_stats)); 2107 2108 return count; 2109 } 2110 2111 static ssize_t iwl_dbgfs_csr_write(struct file *file, 2112 const char __user *user_buf, 2113 size_t count, loff_t *ppos) 2114 { 2115 struct iwl_trans *trans = file->private_data; 2116 char buf[8]; 2117 int buf_size; 2118 int csr; 2119 2120 memset(buf, 0, sizeof(buf)); 2121 buf_size = min(count, sizeof(buf) - 1); 2122 if (copy_from_user(buf, user_buf, buf_size)) 2123 return -EFAULT; 2124 if (sscanf(buf, "%d", &csr) != 1) 2125 return -EFAULT; 2126 2127 iwl_pcie_dump_csr(trans); 2128 2129 return count; 2130 } 2131 2132 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 2133 char __user *user_buf, 2134 size_t count, loff_t *ppos) 2135 { 2136 struct iwl_trans *trans = file->private_data; 2137 char *buf = NULL; 2138 ssize_t ret; 2139 2140 ret = iwl_dump_fh(trans, &buf); 2141 if (ret < 0) 2142 return ret; 2143 if (!buf) 2144 return -EINVAL; 2145 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 2146 kfree(buf); 2147 return ret; 2148 } 2149 2150 DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 2151 DEBUGFS_READ_FILE_OPS(fh_reg); 2152 DEBUGFS_READ_FILE_OPS(rx_queue); 2153 DEBUGFS_READ_FILE_OPS(tx_queue); 2154 DEBUGFS_WRITE_FILE_OPS(csr); 2155 2156 /* Create the debugfs files and directories */ 2157 int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) 2158 { 2159 struct dentry *dir = trans->dbgfs_dir; 2160 2161 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); 2162 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); 2163 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); 2164 DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); 2165 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); 2166 return 0; 2167 2168 err: 2169 IWL_ERR(trans, "failed to create the trans debugfs entry\n"); 2170 return -ENOMEM; 2171 } 2172 #endif /*CONFIG_IWLWIFI_DEBUGFS */ 2173 2174 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd) 2175 { 2176 u32 cmdlen = 0; 2177 int i; 2178 2179 for (i = 0; i < IWL_NUM_OF_TBS; i++) 2180 cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i); 2181 2182 return cmdlen; 2183 } 2184 2185 static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, 2186 struct iwl_fw_error_dump_data **data, 2187 int allocated_rb_nums) 2188 { 2189 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2190 int max_len = PAGE_SIZE << trans_pcie->rx_page_order; 2191 struct iwl_rxq *rxq = &trans_pcie->rxq; 2192 u32 i, r, j, rb_len = 0; 2193 2194 spin_lock(&rxq->lock); 2195 2196 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; 2197 2198 for (i = rxq->read, j = 0; 2199 i != r && j < allocated_rb_nums; 2200 i = (i + 1) & RX_QUEUE_MASK, j++) { 2201 struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; 2202 struct iwl_fw_error_dump_rb *rb; 2203 2204 dma_unmap_page(trans->dev, rxb->page_dma, max_len, 2205 DMA_FROM_DEVICE); 2206 2207 rb_len += sizeof(**data) + sizeof(*rb) + max_len; 2208 2209 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); 2210 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); 2211 rb = (void *)(*data)->data; 2212 rb->index = cpu_to_le32(i); 2213 memcpy(rb->data, page_address(rxb->page), max_len); 2214 /* remap the page for the free benefit */ 2215 rxb->page_dma = dma_map_page(trans->dev, 
rxb->page, 0, 2216 max_len, 2217 DMA_FROM_DEVICE); 2218 2219 *data = iwl_fw_error_next_data(*data); 2220 } 2221 2222 spin_unlock(&rxq->lock); 2223 2224 return rb_len; 2225 } 2226 #define IWL_CSR_TO_DUMP (0x250) 2227 2228 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, 2229 struct iwl_fw_error_dump_data **data) 2230 { 2231 u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; 2232 __le32 *val; 2233 int i; 2234 2235 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); 2236 (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); 2237 val = (void *)(*data)->data; 2238 2239 for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) 2240 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 2241 2242 *data = iwl_fw_error_next_data(*data); 2243 2244 return csr_len; 2245 } 2246 2247 static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, 2248 struct iwl_fw_error_dump_data **data) 2249 { 2250 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; 2251 unsigned long flags; 2252 __le32 *val; 2253 int i; 2254 2255 if (!iwl_trans_grab_nic_access(trans, &flags)) 2256 return 0; 2257 2258 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); 2259 (*data)->len = cpu_to_le32(fh_regs_len); 2260 val = (void *)(*data)->data; 2261 2262 for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32)) 2263 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 2264 2265 iwl_trans_release_nic_access(trans, &flags); 2266 2267 *data = iwl_fw_error_next_data(*data); 2268 2269 return sizeof(**data) + fh_regs_len; 2270 } 2271 2272 static u32 2273 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, 2274 struct iwl_fw_error_dump_fw_mon *fw_mon_data, 2275 u32 monitor_len) 2276 { 2277 u32 buf_size_in_dwords = (monitor_len >> 2); 2278 u32 *buffer = (u32 *)fw_mon_data->data; 2279 unsigned long flags; 2280 u32 i; 2281 2282 if (!iwl_trans_grab_nic_access(trans, &flags)) 2283 return 0; 2284 2285 iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); 2286 for (i = 0; i < buf_size_in_dwords; i++) 2287 buffer[i] = iwl_read_prph_no_grab(trans, 2288 MON_DMARB_RD_DATA_ADDR); 2289 iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); 2290 2291 iwl_trans_release_nic_access(trans, &flags); 2292 2293 return monitor_len; 2294 } 2295 2296 static u32 2297 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, 2298 struct iwl_fw_error_dump_data **data, 2299 u32 monitor_len) 2300 { 2301 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2302 u32 len = 0; 2303 2304 if ((trans_pcie->fw_mon_page && 2305 trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || 2306 trans->dbg_dest_tlv) { 2307 struct iwl_fw_error_dump_fw_mon *fw_mon_data; 2308 u32 base, write_ptr, wrap_cnt; 2309 2310 /* If there was a dest TLV - use the values from there */ 2311 if (trans->dbg_dest_tlv) { 2312 write_ptr = 2313 le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); 2314 wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); 2315 base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); 2316 } else { 2317 base = MON_BUFF_BASE_ADDR; 2318 write_ptr = MON_BUFF_WRPTR; 2319 wrap_cnt = MON_BUFF_CYCLE_CNT; 2320 } 2321 2322 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); 2323 fw_mon_data = (void *)(*data)->data; 2324 fw_mon_data->fw_mon_wr_ptr = 2325 cpu_to_le32(iwl_read_prph(trans, write_ptr)); 2326 fw_mon_data->fw_mon_cycle_cnt = 2327 cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); 2328 fw_mon_data->fw_mon_base_ptr = 2329 cpu_to_le32(iwl_read_prph(trans, base)); 2330 2331 len += sizeof(**data) + sizeof(*fw_mon_data); 2332 if 
(trans_pcie->fw_mon_page) { 2333 /* 2334 * The firmware is now asserted, it won't write anything 2335 * to the buffer. CPU can take ownership to fetch the 2336 * data. The buffer will be handed back to the device 2337 * before the firmware will be restarted. 2338 */ 2339 dma_sync_single_for_cpu(trans->dev, 2340 trans_pcie->fw_mon_phys, 2341 trans_pcie->fw_mon_size, 2342 DMA_FROM_DEVICE); 2343 memcpy(fw_mon_data->data, 2344 page_address(trans_pcie->fw_mon_page), 2345 trans_pcie->fw_mon_size); 2346 2347 monitor_len = trans_pcie->fw_mon_size; 2348 } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { 2349 /* 2350 * Update pointers to reflect actual values after 2351 * shifting 2352 */ 2353 base = iwl_read_prph(trans, base) << 2354 trans->dbg_dest_tlv->base_shift; 2355 iwl_trans_read_mem(trans, base, fw_mon_data->data, 2356 monitor_len / sizeof(u32)); 2357 } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) { 2358 monitor_len = 2359 iwl_trans_pci_dump_marbh_monitor(trans, 2360 fw_mon_data, 2361 monitor_len); 2362 } else { 2363 /* Didn't match anything - output no monitor data */ 2364 monitor_len = 0; 2365 } 2366 2367 len += monitor_len; 2368 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); 2369 } 2370 2371 return len; 2372 } 2373 2374 static struct iwl_trans_dump_data 2375 *iwl_trans_pcie_dump_data(struct iwl_trans *trans, 2376 const struct iwl_fw_dbg_trigger_tlv *trigger) 2377 { 2378 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2379 struct iwl_fw_error_dump_data *data; 2380 struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue]; 2381 struct iwl_fw_error_dump_txcmd *txcmd; 2382 struct iwl_trans_dump_data *dump_data; 2383 u32 len, num_rbs; 2384 u32 monitor_len; 2385 int i, ptr; 2386 bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status); 2387 2388 /* transport dump header */ 2389 len = sizeof(*dump_data); 2390 2391 /* host commands */ 2392 len += sizeof(*data) + 2393 cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); 2394 2395 /* FW monitor */ 2396 if (trans_pcie->fw_mon_page) { 2397 len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + 2398 trans_pcie->fw_mon_size; 2399 monitor_len = trans_pcie->fw_mon_size; 2400 } else if (trans->dbg_dest_tlv) { 2401 u32 base, end; 2402 2403 base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); 2404 end = le32_to_cpu(trans->dbg_dest_tlv->end_reg); 2405 2406 base = iwl_read_prph(trans, base) << 2407 trans->dbg_dest_tlv->base_shift; 2408 end = iwl_read_prph(trans, end) << 2409 trans->dbg_dest_tlv->end_shift; 2410 2411 /* Make "end" point to the actual end */ 2412 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 || 2413 trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) 2414 end += (1 << trans->dbg_dest_tlv->end_shift); 2415 monitor_len = end - base; 2416 len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + 2417 monitor_len; 2418 } else { 2419 monitor_len = 0; 2420 } 2421 2422 if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { 2423 dump_data = vzalloc(len); 2424 if (!dump_data) 2425 return NULL; 2426 2427 data = (void *)dump_data->data; 2428 len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 2429 dump_data->len = len; 2430 2431 return dump_data; 2432 } 2433 2434 /* CSR registers */ 2435 len += sizeof(*data) + IWL_CSR_TO_DUMP; 2436 2437 /* FH registers */ 2438 len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); 2439 2440 if (dump_rbs) { 2441 /* RBs */ 2442 num_rbs = le16_to_cpu(ACCESS_ONCE( 2443 
trans_pcie->rxq.rb_stts->closed_rb_num)) 2444 & 0x0FFF; 2445 num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK; 2446 len += num_rbs * (sizeof(*data) + 2447 sizeof(struct iwl_fw_error_dump_rb) + 2448 (PAGE_SIZE << trans_pcie->rx_page_order)); 2449 } 2450 2451 dump_data = vzalloc(len); 2452 if (!dump_data) 2453 return NULL; 2454 2455 len = 0; 2456 data = (void *)dump_data->data; 2457 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); 2458 txcmd = (void *)data->data; 2459 spin_lock_bh(&cmdq->lock); 2460 ptr = cmdq->q.write_ptr; 2461 for (i = 0; i < cmdq->q.n_window; i++) { 2462 u8 idx = get_cmd_index(&cmdq->q, ptr); 2463 u32 caplen, cmdlen; 2464 2465 cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]); 2466 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); 2467 2468 if (cmdlen) { 2469 len += sizeof(*txcmd) + caplen; 2470 txcmd->cmdlen = cpu_to_le32(cmdlen); 2471 txcmd->caplen = cpu_to_le32(caplen); 2472 memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); 2473 txcmd = (void *)((u8 *)txcmd->data + caplen); 2474 } 2475 2476 ptr = iwl_queue_dec_wrap(ptr); 2477 } 2478 spin_unlock_bh(&cmdq->lock); 2479 2480 data->len = cpu_to_le32(len); 2481 len += sizeof(*data); 2482 data = iwl_fw_error_next_data(data); 2483 2484 len += iwl_trans_pcie_dump_csr(trans, &data); 2485 len += iwl_trans_pcie_fh_regs_dump(trans, &data); 2486 if (dump_rbs) 2487 len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); 2488 2489 len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 2490 2491 dump_data->len = len; 2492 2493 return dump_data; 2494 } 2495 2496 static const struct iwl_trans_ops trans_ops_pcie = { 2497 .start_hw = iwl_trans_pcie_start_hw, 2498 .op_mode_leave = iwl_trans_pcie_op_mode_leave, 2499 .fw_alive = iwl_trans_pcie_fw_alive, 2500 .start_fw = iwl_trans_pcie_start_fw, 2501 .stop_device = iwl_trans_pcie_stop_device, 2502 2503 .d3_suspend = iwl_trans_pcie_d3_suspend, 2504 .d3_resume = iwl_trans_pcie_d3_resume, 2505 2506 .send_cmd = iwl_trans_pcie_send_hcmd, 2507 2508 .tx = iwl_trans_pcie_tx, 2509 .reclaim = iwl_trans_pcie_reclaim, 2510 2511 .txq_disable = iwl_trans_pcie_txq_disable, 2512 .txq_enable = iwl_trans_pcie_txq_enable, 2513 2514 .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, 2515 .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, 2516 .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, 2517 2518 .write8 = iwl_trans_pcie_write8, 2519 .write32 = iwl_trans_pcie_write32, 2520 .read32 = iwl_trans_pcie_read32, 2521 .read_prph = iwl_trans_pcie_read_prph, 2522 .write_prph = iwl_trans_pcie_write_prph, 2523 .read_mem = iwl_trans_pcie_read_mem, 2524 .write_mem = iwl_trans_pcie_write_mem, 2525 .configure = iwl_trans_pcie_configure, 2526 .set_pmi = iwl_trans_pcie_set_pmi, 2527 .grab_nic_access = iwl_trans_pcie_grab_nic_access, 2528 .release_nic_access = iwl_trans_pcie_release_nic_access, 2529 .set_bits_mask = iwl_trans_pcie_set_bits_mask, 2530 2531 .ref = iwl_trans_pcie_ref, 2532 .unref = iwl_trans_pcie_unref, 2533 2534 .dump_data = iwl_trans_pcie_dump_data, 2535 }; 2536 2537 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, 2538 const struct pci_device_id *ent, 2539 const struct iwl_cfg *cfg) 2540 { 2541 struct iwl_trans_pcie *trans_pcie; 2542 struct iwl_trans *trans; 2543 u16 pci_cmd; 2544 int ret; 2545 2546 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), 2547 &pdev->dev, cfg, &trans_ops_pcie, 0); 2548 if (!trans) 2549 return ERR_PTR(-ENOMEM); 2550 2551 trans->max_skb_frags = IWL_PCIE_MAX_FRAGS; 2552 2553 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2554 2555 
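	/* Transport-private init: the locks, the ucode write waitqueue and the
	 * per-CPU TSO header pages are set up before the PCI device itself is
	 * enabled and mapped. */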
trans_pcie->trans = trans; 2556 spin_lock_init(&trans_pcie->irq_lock); 2557 spin_lock_init(&trans_pcie->reg_lock); 2558 spin_lock_init(&trans_pcie->ref_lock); 2559 mutex_init(&trans_pcie->mutex); 2560 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 2561 trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); 2562 if (!trans_pcie->tso_hdr_page) { 2563 ret = -ENOMEM; 2564 goto out_no_pci; 2565 } 2566 2567 ret = pci_enable_device(pdev); 2568 if (ret) 2569 goto out_no_pci; 2570 2571 if (!cfg->base_params->pcie_l1_allowed) { 2572 /* 2573 * W/A - seems to solve weird behavior. We need to remove this 2574 * if we don't want to stay in L1 all the time. This wastes a 2575 * lot of power. 2576 */ 2577 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 2578 PCIE_LINK_STATE_L1 | 2579 PCIE_LINK_STATE_CLKPM); 2580 } 2581 2582 pci_set_master(pdev); 2583 2584 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 2585 if (!ret) 2586 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); 2587 if (ret) { 2588 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2589 if (!ret) 2590 ret = pci_set_consistent_dma_mask(pdev, 2591 DMA_BIT_MASK(32)); 2592 /* both attempts failed: */ 2593 if (ret) { 2594 dev_err(&pdev->dev, "No suitable DMA available\n"); 2595 goto out_pci_disable_device; 2596 } 2597 } 2598 2599 ret = pci_request_regions(pdev, DRV_NAME); 2600 if (ret) { 2601 dev_err(&pdev->dev, "pci_request_regions failed\n"); 2602 goto out_pci_disable_device; 2603 } 2604 2605 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); 2606 if (!trans_pcie->hw_base) { 2607 dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); 2608 ret = -ENODEV; 2609 goto out_pci_release_regions; 2610 } 2611 2612 /* We disable the RETRY_TIMEOUT register (0x41) to keep 2613 * PCI Tx retries from interfering with C3 CPU state */ 2614 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 2615 2616 trans->dev = &pdev->dev; 2617 trans_pcie->pci_dev = pdev; 2618 iwl_disable_interrupts(trans); 2619 2620 ret = pci_enable_msi(pdev); 2621 if (ret) { 2622 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); 2623 /* enable rfkill interrupt: hw bug w/a */ 2624 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 2625 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { 2626 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; 2627 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); 2628 } 2629 } 2630 2631 trans->hw_rev = iwl_read32(trans, CSR_HW_REV); 2632 /* 2633 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has 2634 * changed, and now the revision step also includes bit 0-1 (no more 2635 * "dash" value). To keep hw_rev backwards compatible, we'll store it 2636 * in the old format. 2637 */ 2638 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 2639 unsigned long flags; 2640 2641 trans->hw_rev = (trans->hw_rev & 0xfff0) | 2642 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); 2643 2644 ret = iwl_pcie_prepare_card_hw(trans); 2645 if (ret) { 2646 IWL_WARN(trans, "Exit HW not ready\n"); 2647 goto out_pci_disable_msi; 2648 } 2649 2650 /* 2651 * In order to recognize the C step, the driver should read the chip 2652 * version id located at the AUX bus MISC address space.
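	 * The sequence below wakes the NIC (INIT_DONE plus a poll for the MAC
	 * clock), enables WFPM and reads the step field from AUX_MISC_REG
	 * under NIC access; a value of 0x3 means C step, so hw_rev is patched
	 * to report SILICON_C_STEP.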
2653 */ 2654 iwl_set_bit(trans, CSR_GP_CNTRL, 2655 CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 2656 udelay(2); 2657 2658 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 2659 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 2660 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 2661 25000); 2662 if (ret < 0) { 2663 IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n"); 2664 goto out_pci_disable_msi; 2665 } 2666 2667 if (iwl_trans_grab_nic_access(trans, &flags)) { 2668 u32 hw_step; 2669 2670 hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG); 2671 hw_step |= ENABLE_WFPM; 2672 iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step); 2673 hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG); 2674 hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; 2675 if (hw_step == 0x3) 2676 trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) | 2677 (SILICON_C_STEP << 2); 2678 iwl_trans_release_nic_access(trans, &flags); 2679 } 2680 } 2681 2682 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 2683 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 2684 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); 2685 2686 /* Initialize the wait queue for commands */ 2687 init_waitqueue_head(&trans_pcie->wait_command_queue); 2688 2689 ret = iwl_pcie_alloc_ict(trans); 2690 if (ret) 2691 goto out_pci_disable_msi; 2692 2693 ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, 2694 iwl_pcie_irq_handler, 2695 IRQF_SHARED, DRV_NAME, trans); 2696 if (ret) { 2697 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 2698 goto out_free_ict; 2699 } 2700 2701 trans_pcie->inta_mask = CSR_INI_SET_MASK; 2702 2703 return trans; 2704 2705 out_free_ict: 2706 iwl_pcie_free_ict(trans); 2707 out_pci_disable_msi: 2708 pci_disable_msi(pdev); 2709 out_pci_release_regions: 2710 pci_release_regions(pdev); 2711 out_pci_disable_device: 2712 pci_disable_device(pdev); 2713 out_no_pci: 2714 free_percpu(trans_pcie->tso_hdr_page); 2715 iwl_trans_free(trans); 2716 return ERR_PTR(ret); 2717 } 2718