1e705c121SKalle Valo /****************************************************************************** 2e705c121SKalle Valo * 3e705c121SKalle Valo * This file is provided under a dual BSD/GPLv2 license. When using or 4e705c121SKalle Valo * redistributing this file, you may do so under either license. 5e705c121SKalle Valo * 6e705c121SKalle Valo * GPL LICENSE SUMMARY 7e705c121SKalle Valo * 8e705c121SKalle Valo * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. 9e705c121SKalle Valo * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10afb84431SEmmanuel Grumbach * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 11e705c121SKalle Valo * 12e705c121SKalle Valo * This program is free software; you can redistribute it and/or modify 13e705c121SKalle Valo * it under the terms of version 2 of the GNU General Public License as 14e705c121SKalle Valo * published by the Free Software Foundation. 15e705c121SKalle Valo * 16e705c121SKalle Valo * This program is distributed in the hope that it will be useful, but 17e705c121SKalle Valo * WITHOUT ANY WARRANTY; without even the implied warranty of 18e705c121SKalle Valo * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19e705c121SKalle Valo * General Public License for more details. 20e705c121SKalle Valo * 21e705c121SKalle Valo * You should have received a copy of the GNU General Public License 22e705c121SKalle Valo * along with this program; if not, write to the Free Software 23e705c121SKalle Valo * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 24e705c121SKalle Valo * USA 25e705c121SKalle Valo * 26e705c121SKalle Valo * The full GNU General Public License is included in this distribution 27e705c121SKalle Valo * in the file called COPYING. 28e705c121SKalle Valo * 29e705c121SKalle Valo * Contact Information: 30cb2f8277SEmmanuel Grumbach * Intel Linux Wireless <linuxwifi@intel.com> 31e705c121SKalle Valo * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 32e705c121SKalle Valo * 33e705c121SKalle Valo * BSD LICENSE 34e705c121SKalle Valo * 35e705c121SKalle Valo * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. 36e705c121SKalle Valo * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37afb84431SEmmanuel Grumbach * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 38e705c121SKalle Valo * All rights reserved. 39e705c121SKalle Valo * 40e705c121SKalle Valo * Redistribution and use in source and binary forms, with or without 41e705c121SKalle Valo * modification, are permitted provided that the following conditions 42e705c121SKalle Valo * are met: 43e705c121SKalle Valo * 44e705c121SKalle Valo * * Redistributions of source code must retain the above copyright 45e705c121SKalle Valo * notice, this list of conditions and the following disclaimer. 46e705c121SKalle Valo * * Redistributions in binary form must reproduce the above copyright 47e705c121SKalle Valo * notice, this list of conditions and the following disclaimer in 48e705c121SKalle Valo * the documentation and/or other materials provided with the 49e705c121SKalle Valo * distribution. 50e705c121SKalle Valo * * Neither the name Intel Corporation nor the names of its 51e705c121SKalle Valo * contributors may be used to endorse or promote products derived 52e705c121SKalle Valo * from this software without specific prior written permission. 53e705c121SKalle Valo * 54e705c121SKalle Valo * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 55e705c121SKalle Valo * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 56e705c121SKalle Valo * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 57e705c121SKalle Valo * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 58e705c121SKalle Valo * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59e705c121SKalle Valo * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 60e705c121SKalle Valo * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 61e705c121SKalle Valo * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 62e705c121SKalle Valo * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 63e705c121SKalle Valo * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 64e705c121SKalle Valo * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65e705c121SKalle Valo * 66e705c121SKalle Valo *****************************************************************************/ 67e705c121SKalle Valo #include <linux/pci.h> 68e705c121SKalle Valo #include <linux/pci-aspm.h> 69e705c121SKalle Valo #include <linux/interrupt.h> 70e705c121SKalle Valo #include <linux/debugfs.h> 71e705c121SKalle Valo #include <linux/sched.h> 72e705c121SKalle Valo #include <linux/bitops.h> 73e705c121SKalle Valo #include <linux/gfp.h> 74e705c121SKalle Valo #include <linux/vmalloc.h> 75b3ff1270SLuca Coelho #include <linux/pm_runtime.h> 76e705c121SKalle Valo 77e705c121SKalle Valo #include "iwl-drv.h" 78e705c121SKalle Valo #include "iwl-trans.h" 79e705c121SKalle Valo #include "iwl-csr.h" 80e705c121SKalle Valo #include "iwl-prph.h" 81e705c121SKalle Valo #include "iwl-scd.h" 82e705c121SKalle Valo #include "iwl-agn-hw.h" 83d962f9b1SJohannes Berg #include "fw/error-dump.h" 84e705c121SKalle Valo #include "internal.h" 85e705c121SKalle Valo #include "iwl-fh.h" 86e705c121SKalle Valo 87e705c121SKalle Valo /* extended range in FW SRAM */ 88e705c121SKalle Valo #define IWL_FW_MEM_EXTENDED_START 0x40000 89e705c121SKalle Valo #define IWL_FW_MEM_EXTENDED_END 0x57FFF 90e705c121SKalle Valo 91e705c121SKalle Valo static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) 92e705c121SKalle Valo { 93e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 94e705c121SKalle Valo 95e705c121SKalle Valo if (!trans_pcie->fw_mon_page) 96e705c121SKalle Valo return; 97e705c121SKalle Valo 98e705c121SKalle Valo dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys, 99e705c121SKalle Valo trans_pcie->fw_mon_size, DMA_FROM_DEVICE); 100e705c121SKalle Valo __free_pages(trans_pcie->fw_mon_page, 101e705c121SKalle Valo get_order(trans_pcie->fw_mon_size)); 102e705c121SKalle Valo trans_pcie->fw_mon_page = NULL; 103e705c121SKalle Valo trans_pcie->fw_mon_phys = 0; 104e705c121SKalle Valo trans_pcie->fw_mon_size = 0; 105e705c121SKalle Valo } 106e705c121SKalle Valo 107e705c121SKalle Valo static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) 108e705c121SKalle Valo { 109e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 110e705c121SKalle Valo struct page *page = NULL; 111e705c121SKalle Valo dma_addr_t phys; 112e705c121SKalle Valo u32 size = 0; 113e705c121SKalle Valo u8 power; 114e705c121SKalle Valo 115e705c121SKalle Valo if (!max_power) { 116e705c121SKalle Valo /* default max_power is maximum */ 117e705c121SKalle Valo max_power = 26; 118e705c121SKalle Valo } else { 119e705c121SKalle Valo max_power += 11; 120e705c121SKalle Valo } 121e705c121SKalle Valo 122e705c121SKalle Valo if (WARN(max_power > 26, 123e705c121SKalle Valo "External buffer size for monitor is too big %d, check the FW TLV\n", 124e705c121SKalle Valo max_power)) 125e705c121SKalle Valo return; 126e705c121SKalle Valo 127e705c121SKalle Valo if (trans_pcie->fw_mon_page) { 128e705c121SKalle Valo dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys, 129e705c121SKalle Valo trans_pcie->fw_mon_size, 130e705c121SKalle Valo DMA_FROM_DEVICE); 131e705c121SKalle Valo return; 132e705c121SKalle Valo } 133e705c121SKalle Valo 134e705c121SKalle Valo phys = 0; 135e705c121SKalle Valo for (power = max_power; power >= 11; power--) { 136e705c121SKalle Valo int order; 137e705c121SKalle Valo 138e705c121SKalle Valo size = BIT(power); 139e705c121SKalle Valo order = get_order(size); 140e705c121SKalle Valo page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO, 141e705c121SKalle Valo order); 142e705c121SKalle Valo if (!page) 143e705c121SKalle Valo continue; 144e705c121SKalle Valo 145e705c121SKalle Valo phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order, 146e705c121SKalle Valo DMA_FROM_DEVICE); 147e705c121SKalle Valo if (dma_mapping_error(trans->dev, phys)) { 148e705c121SKalle Valo __free_pages(page, order); 149e705c121SKalle Valo page = NULL; 150e705c121SKalle Valo continue; 151e705c121SKalle Valo } 152e705c121SKalle Valo IWL_INFO(trans, 153e705c121SKalle Valo "Allocated 0x%08x bytes (order %d) for firmware monitor.\n", 154e705c121SKalle Valo size, order); 155e705c121SKalle Valo break; 156e705c121SKalle Valo } 157e705c121SKalle Valo 158e705c121SKalle Valo if (WARN_ON_ONCE(!page)) 159e705c121SKalle Valo return; 160e705c121SKalle Valo 161e705c121SKalle Valo if (power != max_power) 162e705c121SKalle Valo IWL_ERR(trans, 163e705c121SKalle Valo "Sorry - debug buffer is only %luK while you requested %luK\n", 164e705c121SKalle Valo (unsigned long)BIT(power - 10), 165e705c121SKalle Valo (unsigned long)BIT(max_power - 10)); 166e705c121SKalle Valo 167e705c121SKalle Valo trans_pcie->fw_mon_page = page; 168e705c121SKalle Valo trans_pcie->fw_mon_phys = phys; 169e705c121SKalle Valo trans_pcie->fw_mon_size = size; 170e705c121SKalle Valo } 171e705c121SKalle Valo 172e705c121SKalle Valo static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) 173e705c121SKalle Valo { 174e705c121SKalle Valo iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, 175e705c121SKalle Valo ((reg & 0x0000ffff) | (2 << 28))); 176e705c121SKalle Valo return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG); 177e705c121SKalle Valo } 178e705c121SKalle Valo 179e705c121SKalle Valo static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) 180e705c121SKalle Valo { 181e705c121SKalle Valo iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val); 182e705c121SKalle Valo iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, 183e705c121SKalle Valo ((reg & 0x0000ffff) | (3 << 28))); 184e705c121SKalle Valo } 185e705c121SKalle Valo 186e705c121SKalle Valo static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) 187e705c121SKalle Valo { 188e705c121SKalle Valo if (trans->cfg->apmg_not_supported) 189e705c121SKalle Valo return; 190e705c121SKalle Valo 191e705c121SKalle Valo if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) 192e705c121SKalle Valo iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, 193e705c121SKalle Valo APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 194e705c121SKalle Valo ~APMG_PS_CTRL_MSK_PWR_SRC); 195e705c121SKalle Valo else 196e705c121SKalle Valo iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, 197e705c121SKalle Valo APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 198e705c121SKalle Valo ~APMG_PS_CTRL_MSK_PWR_SRC); 199e705c121SKalle Valo } 200e705c121SKalle Valo 201e705c121SKalle Valo /* PCI registers */ 202e705c121SKalle Valo #define PCI_CFG_RETRY_TIMEOUT 0x041 203e705c121SKalle Valo 204eda50cdeSSara Sharon void iwl_pcie_apm_config(struct iwl_trans *trans) 205e705c121SKalle Valo { 206e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 207e705c121SKalle Valo u16 lctl; 208e705c121SKalle Valo u16 cap; 209e705c121SKalle Valo 210e705c121SKalle Valo /* 211e705c121SKalle Valo * HW bug W/A for instability in PCIe bus L0S->L1 transition. 212e705c121SKalle Valo * Check if BIOS (or OS) enabled L1-ASPM on this device. 213e705c121SKalle Valo * If so (likely), disable L0S, so device moves directly L0->L1; 214e705c121SKalle Valo * costs negligible amount of power savings. 215e705c121SKalle Valo * If not (unlikely), enable L0S, so there is at least some 216e705c121SKalle Valo * power savings, even without L1. 217e705c121SKalle Valo */ 218e705c121SKalle Valo pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); 219e705c121SKalle Valo if (lctl & PCI_EXP_LNKCTL_ASPM_L1) 220e705c121SKalle Valo iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 221e705c121SKalle Valo else 222e705c121SKalle Valo iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 223e705c121SKalle Valo trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); 224e705c121SKalle Valo 225e705c121SKalle Valo pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); 226e705c121SKalle Valo trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; 227d74a61fcSLuca Coelho IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n", 228e705c121SKalle Valo (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", 229e705c121SKalle Valo trans->ltr_enabled ? "En" : "Dis"); 230e705c121SKalle Valo } 231e705c121SKalle Valo 232e705c121SKalle Valo /* 233e705c121SKalle Valo * Start up NIC's basic functionality after it has been reset 234e705c121SKalle Valo * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) 235e705c121SKalle Valo * NOTE: This does not load uCode nor start the embedded processor 236e705c121SKalle Valo */ 237e705c121SKalle Valo static int iwl_pcie_apm_init(struct iwl_trans *trans) 238e705c121SKalle Valo { 23952b6e168SEmmanuel Grumbach int ret; 24052b6e168SEmmanuel Grumbach 241e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); 242e705c121SKalle Valo 243e705c121SKalle Valo /* 244e705c121SKalle Valo * Use "set_bit" below rather than "write", to preserve any hardware 245e705c121SKalle Valo * bits already set by default after reset. 246e705c121SKalle Valo */ 247e705c121SKalle Valo 248e705c121SKalle Valo /* Disable L0S exit timer (platform NMI Work/Around) */ 2496e584873SSara Sharon if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) 250e705c121SKalle Valo iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, 251e705c121SKalle Valo CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 252e705c121SKalle Valo 253e705c121SKalle Valo /* 254e705c121SKalle Valo * Disable L0s without affecting L1; 255e705c121SKalle Valo * don't wait for ICH L0s (ICH bug W/A) 256e705c121SKalle Valo */ 257e705c121SKalle Valo iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, 258e705c121SKalle Valo CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 259e705c121SKalle Valo 260e705c121SKalle Valo /* Set FH wait threshold to maximum (HW error during stress W/A) */ 261e705c121SKalle Valo iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); 262e705c121SKalle Valo 263e705c121SKalle Valo /* 264e705c121SKalle Valo * Enable HAP INTA (interrupt from management bus) to 265e705c121SKalle Valo * wake device's PCI Express link L1a -> L0s 266e705c121SKalle Valo */ 267e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 268e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 269e705c121SKalle Valo 270e705c121SKalle Valo iwl_pcie_apm_config(trans); 271e705c121SKalle Valo 272e705c121SKalle Valo /* Configure analog phase-lock-loop before activating to D0A */ 27377d76931SJohannes Berg if (trans->cfg->base_params->pll_cfg) 27477d76931SJohannes Berg iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); 275e705c121SKalle Valo 276e705c121SKalle Valo /* 277e705c121SKalle Valo * Set "initialization complete" bit to move adapter from 278e705c121SKalle Valo * D0U* --> D0A* (powered-up active) state. 279e705c121SKalle Valo */ 280e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 281e705c121SKalle Valo 282e705c121SKalle Valo /* 283e705c121SKalle Valo * Wait for clock stabilization; once stabilized, access to 284e705c121SKalle Valo * device-internal resources is supported, e.g. iwl_write_prph() 285e705c121SKalle Valo * and accesses to uCode SRAM. 286e705c121SKalle Valo */ 287e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 288e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 289e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 290e705c121SKalle Valo if (ret < 0) { 29152b6e168SEmmanuel Grumbach IWL_ERR(trans, "Failed to init the card\n"); 29252b6e168SEmmanuel Grumbach return ret; 293e705c121SKalle Valo } 294e705c121SKalle Valo 295e705c121SKalle Valo if (trans->cfg->host_interrupt_operation_mode) { 296e705c121SKalle Valo /* 297e705c121SKalle Valo * This is a bit of an abuse - This is needed for 7260 / 3160 298e705c121SKalle Valo * only check host_interrupt_operation_mode even if this is 299e705c121SKalle Valo * not related to host_interrupt_operation_mode. 300e705c121SKalle Valo * 301e705c121SKalle Valo * Enable the oscillator to count wake up time for L1 exit. This 302e705c121SKalle Valo * consumes slightly more power (100uA) - but allows to be sure 303e705c121SKalle Valo * that we wake up from L1 on time. 304e705c121SKalle Valo * 305e705c121SKalle Valo * This looks weird: read twice the same register, discard the 306e705c121SKalle Valo * value, set a bit, and yet again, read that same register 307e705c121SKalle Valo * just to discard the value. But that's the way the hardware 308e705c121SKalle Valo * seems to like it. 309e705c121SKalle Valo */ 310e705c121SKalle Valo iwl_read_prph(trans, OSC_CLK); 311e705c121SKalle Valo iwl_read_prph(trans, OSC_CLK); 312e705c121SKalle Valo iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL); 313e705c121SKalle Valo iwl_read_prph(trans, OSC_CLK); 314e705c121SKalle Valo iwl_read_prph(trans, OSC_CLK); 315e705c121SKalle Valo } 316e705c121SKalle Valo 317e705c121SKalle Valo /* 318e705c121SKalle Valo * Enable DMA clock and wait for it to stabilize. 319e705c121SKalle Valo * 320e705c121SKalle Valo * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" 321e705c121SKalle Valo * bits do not disable clocks. This preserves any hardware 322e705c121SKalle Valo * bits already set by default in "CLK_CTRL_REG" after reset. 323e705c121SKalle Valo */ 324e705c121SKalle Valo if (!trans->cfg->apmg_not_supported) { 325e705c121SKalle Valo iwl_write_prph(trans, APMG_CLK_EN_REG, 326e705c121SKalle Valo APMG_CLK_VAL_DMA_CLK_RQT); 327e705c121SKalle Valo udelay(20); 328e705c121SKalle Valo 329e705c121SKalle Valo /* Disable L1-Active */ 330e705c121SKalle Valo iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, 331e705c121SKalle Valo APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 332e705c121SKalle Valo 333e705c121SKalle Valo /* Clear the interrupt in APMG if the NIC is in RFKILL */ 334e705c121SKalle Valo iwl_write_prph(trans, APMG_RTC_INT_STT_REG, 335e705c121SKalle Valo APMG_RTC_INT_STT_RFKILL); 336e705c121SKalle Valo } 337e705c121SKalle Valo 338e705c121SKalle Valo set_bit(STATUS_DEVICE_ENABLED, &trans->status); 339e705c121SKalle Valo 34052b6e168SEmmanuel Grumbach return 0; 341e705c121SKalle Valo } 342e705c121SKalle Valo 343e705c121SKalle Valo /* 344e705c121SKalle Valo * Enable LP XTAL to avoid HW bug where device may consume much power if 345e705c121SKalle Valo * FW is not loaded after device reset. LP XTAL is disabled by default 346e705c121SKalle Valo * after device HW reset. Do it only if XTAL is fed by internal source. 347e705c121SKalle Valo * Configure device's "persistence" mode to avoid resetting XTAL again when 348e705c121SKalle Valo * SHRD_HW_RST occurs in S3. 349e705c121SKalle Valo */ 350e705c121SKalle Valo static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) 351e705c121SKalle Valo { 352e705c121SKalle Valo int ret; 353e705c121SKalle Valo u32 apmg_gp1_reg; 354e705c121SKalle Valo u32 apmg_xtal_cfg_reg; 355e705c121SKalle Valo u32 dl_cfg_reg; 356e705c121SKalle Valo 357e705c121SKalle Valo /* Force XTAL ON */ 358e705c121SKalle Valo __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 359e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_XTAL_ON); 360e705c121SKalle Valo 361099a628bSEmmanuel Grumbach iwl_pcie_sw_reset(trans); 362e705c121SKalle Valo 363e705c121SKalle Valo /* 364e705c121SKalle Valo * Set "initialization complete" bit to move adapter from 365e705c121SKalle Valo * D0U* --> D0A* (powered-up active) state. 366e705c121SKalle Valo */ 367e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 368e705c121SKalle Valo 369e705c121SKalle Valo /* 370e705c121SKalle Valo * Wait for clock stabilization; once stabilized, access to 371e705c121SKalle Valo * device-internal resources is possible. 372e705c121SKalle Valo */ 373e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 374e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 375e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 376e705c121SKalle Valo 25000); 377e705c121SKalle Valo if (WARN_ON(ret < 0)) { 378e705c121SKalle Valo IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n"); 379e705c121SKalle Valo /* Release XTAL ON request */ 380e705c121SKalle Valo __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 381e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_XTAL_ON); 382e705c121SKalle Valo return; 383e705c121SKalle Valo } 384e705c121SKalle Valo 385e705c121SKalle Valo /* 386e705c121SKalle Valo * Clear "disable persistence" to avoid LP XTAL resetting when 387e705c121SKalle Valo * SHRD_HW_RST is applied in S3. 388e705c121SKalle Valo */ 389e705c121SKalle Valo iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 390e705c121SKalle Valo APMG_PCIDEV_STT_VAL_PERSIST_DIS); 391e705c121SKalle Valo 392e705c121SKalle Valo /* 393e705c121SKalle Valo * Force APMG XTAL to be active to prevent its disabling by HW 394e705c121SKalle Valo * caused by APMG idle state. 395e705c121SKalle Valo */ 396e705c121SKalle Valo apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans, 397e705c121SKalle Valo SHR_APMG_XTAL_CFG_REG); 398e705c121SKalle Valo iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, 399e705c121SKalle Valo apmg_xtal_cfg_reg | 400e705c121SKalle Valo SHR_APMG_XTAL_CFG_XTAL_ON_REQ); 401e705c121SKalle Valo 402099a628bSEmmanuel Grumbach iwl_pcie_sw_reset(trans); 403e705c121SKalle Valo 404e705c121SKalle Valo /* Enable LP XTAL by indirect access through CSR */ 405e705c121SKalle Valo apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); 406e705c121SKalle Valo iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg | 407e705c121SKalle Valo SHR_APMG_GP1_WF_XTAL_LP_EN | 408e705c121SKalle Valo SHR_APMG_GP1_CHICKEN_BIT_SELECT); 409e705c121SKalle Valo 410e705c121SKalle Valo /* Clear delay line clock power up */ 411e705c121SKalle Valo dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG); 412e705c121SKalle Valo iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg & 413e705c121SKalle Valo ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP); 414e705c121SKalle Valo 415e705c121SKalle Valo /* 416e705c121SKalle Valo * Enable persistence mode to avoid LP XTAL resetting when 417e705c121SKalle Valo * SHRD_HW_RST is applied in S3. 418e705c121SKalle Valo */ 419e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 420e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_PERSIST_MODE); 421e705c121SKalle Valo 422e705c121SKalle Valo /* 423e705c121SKalle Valo * Clear "initialization complete" bit to move adapter from 424e705c121SKalle Valo * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 425e705c121SKalle Valo */ 426e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 427e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 428e705c121SKalle Valo 429e705c121SKalle Valo /* Activates XTAL resources monitor */ 430e705c121SKalle Valo __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, 431e705c121SKalle Valo CSR_MONITOR_XTAL_RESOURCES); 432e705c121SKalle Valo 433e705c121SKalle Valo /* Release XTAL ON request */ 434e705c121SKalle Valo __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 435e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_XTAL_ON); 436e705c121SKalle Valo udelay(10); 437e705c121SKalle Valo 438e705c121SKalle Valo /* Release APMG XTAL */ 439e705c121SKalle Valo iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, 440e705c121SKalle Valo apmg_xtal_cfg_reg & 441e705c121SKalle Valo ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); 442e705c121SKalle Valo } 443e705c121SKalle Valo 444e8c8935eSJohannes Berg void iwl_pcie_apm_stop_master(struct iwl_trans *trans) 445e705c121SKalle Valo { 446e8c8935eSJohannes Berg int ret; 447e705c121SKalle Valo 448e705c121SKalle Valo /* stop device's busmaster DMA activity */ 449e705c121SKalle Valo iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 450e705c121SKalle Valo 451e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_RESET, 452e705c121SKalle Valo CSR_RESET_REG_FLAG_MASTER_DISABLED, 453e705c121SKalle Valo CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 454e705c121SKalle Valo if (ret < 0) 455e705c121SKalle Valo IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); 456e705c121SKalle Valo 457e705c121SKalle Valo IWL_DEBUG_INFO(trans, "stop master\n"); 458e705c121SKalle Valo } 459e705c121SKalle Valo 460e705c121SKalle Valo static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) 461e705c121SKalle Valo { 462e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); 463e705c121SKalle Valo 464e705c121SKalle Valo if (op_mode_leave) { 465e705c121SKalle Valo if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 466e705c121SKalle Valo iwl_pcie_apm_init(trans); 467e705c121SKalle Valo 468e705c121SKalle Valo /* inform ME that we are leaving */ 469e705c121SKalle Valo if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) 470e705c121SKalle Valo iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, 471e705c121SKalle Valo APMG_PCIDEV_STT_VAL_WAKE_ME); 4726e584873SSara Sharon else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) { 473e705c121SKalle Valo iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, 474e705c121SKalle Valo CSR_RESET_LINK_PWR_MGMT_DISABLED); 475e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 476e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_PREPARE | 477e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_ENABLE_PME); 478e705c121SKalle Valo mdelay(1); 479e705c121SKalle Valo iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, 480e705c121SKalle Valo CSR_RESET_LINK_PWR_MGMT_DISABLED); 481e705c121SKalle Valo } 482e705c121SKalle Valo mdelay(5); 483e705c121SKalle Valo } 484e705c121SKalle Valo 485e705c121SKalle Valo clear_bit(STATUS_DEVICE_ENABLED, &trans->status); 486e705c121SKalle Valo 487e705c121SKalle Valo /* Stop device's DMA activity */ 488e705c121SKalle Valo iwl_pcie_apm_stop_master(trans); 489e705c121SKalle Valo 490e705c121SKalle Valo if (trans->cfg->lp_xtal_workaround) { 491e705c121SKalle Valo iwl_pcie_apm_lp_xtal_enable(trans); 492e705c121SKalle Valo return; 493e705c121SKalle Valo } 494e705c121SKalle Valo 495099a628bSEmmanuel Grumbach iwl_pcie_sw_reset(trans); 496e705c121SKalle Valo 497e705c121SKalle Valo /* 498e705c121SKalle Valo * Clear "initialization complete" bit to move adapter from 499e705c121SKalle Valo * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 500e705c121SKalle Valo */ 501e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 502e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 503e705c121SKalle Valo } 504e705c121SKalle Valo 505e705c121SKalle Valo static int iwl_pcie_nic_init(struct iwl_trans *trans) 506e705c121SKalle Valo { 507e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 50852b6e168SEmmanuel Grumbach int ret; 509e705c121SKalle Valo 510e705c121SKalle Valo /* nic_init */ 511e705c121SKalle Valo spin_lock(&trans_pcie->irq_lock); 51252b6e168SEmmanuel Grumbach ret = iwl_pcie_apm_init(trans); 513e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 514e705c121SKalle Valo 51552b6e168SEmmanuel Grumbach if (ret) 51652b6e168SEmmanuel Grumbach return ret; 51752b6e168SEmmanuel Grumbach 518e705c121SKalle Valo iwl_pcie_set_pwr(trans, false); 519e705c121SKalle Valo 520e705c121SKalle Valo iwl_op_mode_nic_config(trans->op_mode); 521e705c121SKalle Valo 522e705c121SKalle Valo /* Allocate the RX queue, or reset if it is already allocated */ 523e705c121SKalle Valo iwl_pcie_rx_init(trans); 524e705c121SKalle Valo 525e705c121SKalle Valo /* Allocate or reset and init all Tx and Command queues */ 526e705c121SKalle Valo if (iwl_pcie_tx_init(trans)) 527e705c121SKalle Valo return -ENOMEM; 528e705c121SKalle Valo 529e705c121SKalle Valo if (trans->cfg->base_params->shadow_reg_enable) { 530e705c121SKalle Valo /* enable shadow regs in HW */ 531e705c121SKalle Valo iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); 532e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); 533e705c121SKalle Valo } 534e705c121SKalle Valo 535e705c121SKalle Valo return 0; 536e705c121SKalle Valo } 537e705c121SKalle Valo 538e705c121SKalle Valo #define HW_READY_TIMEOUT (50) 539e705c121SKalle Valo 540e705c121SKalle Valo /* Note: returns poll_bit return value, which is >= 0 if success */ 541e705c121SKalle Valo static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) 542e705c121SKalle Valo { 543e705c121SKalle Valo int ret; 544e705c121SKalle Valo 545e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 546e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); 547e705c121SKalle Valo 548e705c121SKalle Valo /* See if we got it */ 549e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, 550e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 551e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 552e705c121SKalle Valo HW_READY_TIMEOUT); 553e705c121SKalle Valo 554e705c121SKalle Valo if (ret >= 0) 555e705c121SKalle Valo iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); 556e705c121SKalle Valo 557e705c121SKalle Valo IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); 558e705c121SKalle Valo return ret; 559e705c121SKalle Valo } 560e705c121SKalle Valo 561e705c121SKalle Valo /* Note: returns standard 0/-ERROR code */ 562eda50cdeSSara Sharon int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) 563e705c121SKalle Valo { 564e705c121SKalle Valo int ret; 565e705c121SKalle Valo int t = 0; 566e705c121SKalle Valo int iter; 567e705c121SKalle Valo 568e705c121SKalle Valo IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); 569e705c121SKalle Valo 570e705c121SKalle Valo ret = iwl_pcie_set_hw_ready(trans); 571e705c121SKalle Valo /* If the card is ready, exit 0 */ 572e705c121SKalle Valo if (ret >= 0) 573e705c121SKalle Valo return 0; 574e705c121SKalle Valo 575e705c121SKalle Valo iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, 576e705c121SKalle Valo CSR_RESET_LINK_PWR_MGMT_DISABLED); 577192185d6SJohannes Berg usleep_range(1000, 2000); 578e705c121SKalle Valo 579e705c121SKalle Valo for (iter = 0; iter < 10; iter++) { 580e705c121SKalle Valo /* If HW is not ready, prepare the conditions to check again */ 581e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 582e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_PREPARE); 583e705c121SKalle Valo 584e705c121SKalle Valo do { 585e705c121SKalle Valo ret = iwl_pcie_set_hw_ready(trans); 586e705c121SKalle Valo if (ret >= 0) 587e705c121SKalle Valo return 0; 588e705c121SKalle Valo 589e705c121SKalle Valo usleep_range(200, 1000); 590e705c121SKalle Valo t += 200; 591e705c121SKalle Valo } while (t < 150000); 592e705c121SKalle Valo msleep(25); 593e705c121SKalle Valo } 594e705c121SKalle Valo 595e705c121SKalle Valo IWL_ERR(trans, "Couldn't prepare the card\n"); 596e705c121SKalle Valo 597e705c121SKalle Valo return ret; 598e705c121SKalle Valo } 599e705c121SKalle Valo 600e705c121SKalle Valo /* 601e705c121SKalle Valo * ucode 602e705c121SKalle Valo */ 603564cdce7SSara Sharon static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans, 604564cdce7SSara Sharon u32 dst_addr, dma_addr_t phy_addr, 605564cdce7SSara Sharon u32 byte_cnt) 606e705c121SKalle Valo { 607bac842daSEmmanuel Grumbach iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 608e705c121SKalle Valo FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); 609e705c121SKalle Valo 610bac842daSEmmanuel Grumbach iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), 611e705c121SKalle Valo dst_addr); 612e705c121SKalle Valo 613bac842daSEmmanuel Grumbach iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), 614e705c121SKalle Valo phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 615e705c121SKalle Valo 616bac842daSEmmanuel Grumbach iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), 617e705c121SKalle Valo (iwl_get_dma_hi_addr(phy_addr) 618e705c121SKalle Valo << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); 619e705c121SKalle Valo 620bac842daSEmmanuel Grumbach iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 621bac842daSEmmanuel Grumbach BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) | 622bac842daSEmmanuel Grumbach BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) | 623e705c121SKalle Valo FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); 624e705c121SKalle Valo 625bac842daSEmmanuel Grumbach iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 626e705c121SKalle Valo FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 627e705c121SKalle Valo FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 628e705c121SKalle Valo FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 629564cdce7SSara Sharon } 630e705c121SKalle Valo 631564cdce7SSara Sharon static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, 632564cdce7SSara Sharon u32 dst_addr, dma_addr_t phy_addr, 633564cdce7SSara Sharon u32 byte_cnt) 634564cdce7SSara Sharon { 635564cdce7SSara Sharon struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 636564cdce7SSara Sharon unsigned long flags; 637564cdce7SSara Sharon int ret; 638564cdce7SSara Sharon 639564cdce7SSara Sharon trans_pcie->ucode_write_complete = false; 640564cdce7SSara Sharon 641564cdce7SSara Sharon if (!iwl_trans_grab_nic_access(trans, &flags)) 642564cdce7SSara Sharon return -EIO; 643564cdce7SSara Sharon 644564cdce7SSara Sharon iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, 645564cdce7SSara Sharon byte_cnt); 646bac842daSEmmanuel Grumbach iwl_trans_release_nic_access(trans, &flags); 647bac842daSEmmanuel Grumbach 648e705c121SKalle Valo ret = wait_event_timeout(trans_pcie->ucode_write_waitq, 649e705c121SKalle Valo trans_pcie->ucode_write_complete, 5 * HZ); 650e705c121SKalle Valo if (!ret) { 651e705c121SKalle Valo IWL_ERR(trans, "Failed to load firmware chunk!\n"); 652e705c121SKalle Valo return -ETIMEDOUT; 653e705c121SKalle Valo } 654e705c121SKalle Valo 655e705c121SKalle Valo return 0; 656e705c121SKalle Valo } 657e705c121SKalle Valo 658e705c121SKalle Valo static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, 659e705c121SKalle Valo const struct fw_desc *section) 660e705c121SKalle Valo { 661e705c121SKalle Valo u8 *v_addr; 662e705c121SKalle Valo dma_addr_t p_addr; 663e705c121SKalle Valo u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len); 664e705c121SKalle Valo int ret = 0; 665e705c121SKalle Valo 666e705c121SKalle Valo IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", 667e705c121SKalle Valo section_num); 668e705c121SKalle Valo 669e705c121SKalle Valo v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, 670e705c121SKalle Valo GFP_KERNEL | __GFP_NOWARN); 671e705c121SKalle Valo if (!v_addr) { 672e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); 673e705c121SKalle Valo chunk_sz = PAGE_SIZE; 674e705c121SKalle Valo v_addr = dma_alloc_coherent(trans->dev, chunk_sz, 675e705c121SKalle Valo &p_addr, GFP_KERNEL); 676e705c121SKalle Valo if (!v_addr) 677e705c121SKalle Valo return -ENOMEM; 678e705c121SKalle Valo } 679e705c121SKalle Valo 680e705c121SKalle Valo for (offset = 0; offset < section->len; offset += chunk_sz) { 681e705c121SKalle Valo u32 copy_size, dst_addr; 682e705c121SKalle Valo bool extended_addr = false; 683e705c121SKalle Valo 684e705c121SKalle Valo copy_size = min_t(u32, chunk_sz, section->len - offset); 685e705c121SKalle Valo dst_addr = section->offset + offset; 686e705c121SKalle Valo 687e705c121SKalle Valo if (dst_addr >= IWL_FW_MEM_EXTENDED_START && 688e705c121SKalle Valo dst_addr <= IWL_FW_MEM_EXTENDED_END) 689e705c121SKalle Valo extended_addr = true; 690e705c121SKalle Valo 691e705c121SKalle Valo if (extended_addr) 692e705c121SKalle Valo iwl_set_bits_prph(trans, LMPM_CHICK, 693e705c121SKalle Valo LMPM_CHICK_EXTENDED_ADDR_SPACE); 694e705c121SKalle Valo 695e705c121SKalle Valo memcpy(v_addr, (u8 *)section->data + offset, copy_size); 696e705c121SKalle Valo ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr, 697e705c121SKalle Valo copy_size); 698e705c121SKalle Valo 699e705c121SKalle Valo if (extended_addr) 700e705c121SKalle Valo iwl_clear_bits_prph(trans, LMPM_CHICK, 701e705c121SKalle Valo LMPM_CHICK_EXTENDED_ADDR_SPACE); 702e705c121SKalle Valo 703e705c121SKalle Valo if (ret) { 704e705c121SKalle Valo IWL_ERR(trans, 705e705c121SKalle Valo "Could not load the [%d] uCode section\n", 706e705c121SKalle Valo section_num); 707e705c121SKalle Valo break; 708e705c121SKalle Valo } 709e705c121SKalle Valo } 710e705c121SKalle Valo 711e705c121SKalle Valo dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); 712e705c121SKalle Valo return ret; 713e705c121SKalle Valo } 714e705c121SKalle Valo 715e705c121SKalle Valo static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, 716e705c121SKalle Valo const struct fw_img *image, 717e705c121SKalle Valo int cpu, 718e705c121SKalle Valo int *first_ucode_section) 719e705c121SKalle Valo { 720e705c121SKalle Valo int shift_param; 721e705c121SKalle Valo int i, ret = 0, sec_num = 0x1; 722e705c121SKalle Valo u32 val, last_read_idx = 0; 723e705c121SKalle Valo 724e705c121SKalle Valo if (cpu == 1) { 725e705c121SKalle Valo shift_param = 0; 726e705c121SKalle Valo *first_ucode_section = 0; 727e705c121SKalle Valo } else { 728e705c121SKalle Valo shift_param = 16; 729e705c121SKalle Valo (*first_ucode_section)++; 730e705c121SKalle Valo } 731e705c121SKalle Valo 732eef187a7SSara Sharon for (i = *first_ucode_section; i < image->num_sec; i++) { 733e705c121SKalle Valo last_read_idx = i; 734e705c121SKalle Valo 735e705c121SKalle Valo /* 736e705c121SKalle Valo * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 737e705c121SKalle Valo * CPU1 to CPU2. 738e705c121SKalle Valo * PAGING_SEPARATOR_SECTION delimiter - separate between 739e705c121SKalle Valo * CPU2 non paged to CPU2 paging sec. 740e705c121SKalle Valo */ 741e705c121SKalle Valo if (!image->sec[i].data || 742e705c121SKalle Valo image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || 743e705c121SKalle Valo image->sec[i].offset == PAGING_SEPARATOR_SECTION) { 744e705c121SKalle Valo IWL_DEBUG_FW(trans, 745e705c121SKalle Valo "Break since Data not valid or Empty section, sec = %d\n", 746e705c121SKalle Valo i); 747e705c121SKalle Valo break; 748e705c121SKalle Valo } 749e705c121SKalle Valo 750e705c121SKalle Valo ret = iwl_pcie_load_section(trans, i, &image->sec[i]); 751e705c121SKalle Valo if (ret) 752e705c121SKalle Valo return ret; 753e705c121SKalle Valo 754d6a2c5c7SSara Sharon /* Notify ucode of loaded section number and status */ 755e705c121SKalle Valo val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); 756e705c121SKalle Valo val = val | (sec_num << shift_param); 757e705c121SKalle Valo iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); 758eda50cdeSSara Sharon 759e705c121SKalle Valo sec_num = (sec_num << 1) | 0x1; 760e705c121SKalle Valo } 761e705c121SKalle Valo 762e705c121SKalle Valo *first_ucode_section = last_read_idx; 763e705c121SKalle Valo 7642aabdbdcSEmmanuel Grumbach iwl_enable_interrupts(trans); 7652aabdbdcSEmmanuel Grumbach 766d6a2c5c7SSara Sharon if (trans->cfg->use_tfh) { 767e705c121SKalle Valo if (cpu == 1) 768d6a2c5c7SSara Sharon iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 769d6a2c5c7SSara Sharon 0xFFFF); 770e705c121SKalle Valo else 771d6a2c5c7SSara Sharon iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 772d6a2c5c7SSara Sharon 0xFFFFFFFF); 773d6a2c5c7SSara Sharon } else { 774d6a2c5c7SSara Sharon if (cpu == 1) 775d6a2c5c7SSara Sharon iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 776d6a2c5c7SSara Sharon 0xFFFF); 777d6a2c5c7SSara Sharon else 778d6a2c5c7SSara Sharon iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 779d6a2c5c7SSara Sharon 0xFFFFFFFF); 780d6a2c5c7SSara Sharon } 781e705c121SKalle Valo 782e705c121SKalle Valo return 0; 783e705c121SKalle Valo } 784e705c121SKalle Valo 785e705c121SKalle Valo static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, 786e705c121SKalle Valo const struct fw_img *image, 787e705c121SKalle Valo int cpu, 788e705c121SKalle Valo int *first_ucode_section) 789e705c121SKalle Valo { 790e705c121SKalle Valo int i, ret = 0; 791e705c121SKalle Valo u32 last_read_idx = 0; 792e705c121SKalle Valo 7933ce4a038SKirtika Ruchandani if (cpu == 1) 794e705c121SKalle Valo *first_ucode_section = 0; 7953ce4a038SKirtika Ruchandani else 796e705c121SKalle Valo (*first_ucode_section)++; 797e705c121SKalle Valo 798eef187a7SSara Sharon for (i = *first_ucode_section; i < image->num_sec; i++) { 799e705c121SKalle Valo last_read_idx = i; 800e705c121SKalle Valo 801e705c121SKalle Valo /* 802e705c121SKalle Valo * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 803e705c121SKalle Valo * CPU1 to CPU2. 804e705c121SKalle Valo * PAGING_SEPARATOR_SECTION delimiter - separate between 805e705c121SKalle Valo * CPU2 non paged to CPU2 paging sec. 806e705c121SKalle Valo */ 807e705c121SKalle Valo if (!image->sec[i].data || 808e705c121SKalle Valo image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || 809e705c121SKalle Valo image->sec[i].offset == PAGING_SEPARATOR_SECTION) { 810e705c121SKalle Valo IWL_DEBUG_FW(trans, 811e705c121SKalle Valo "Break since Data not valid or Empty section, sec = %d\n", 812e705c121SKalle Valo i); 813e705c121SKalle Valo break; 814e705c121SKalle Valo } 815e705c121SKalle Valo 816e705c121SKalle Valo ret = iwl_pcie_load_section(trans, i, &image->sec[i]); 817e705c121SKalle Valo if (ret) 818e705c121SKalle Valo return ret; 819e705c121SKalle Valo } 820e705c121SKalle Valo 821e705c121SKalle Valo *first_ucode_section = last_read_idx; 822e705c121SKalle Valo 823e705c121SKalle Valo return 0; 824e705c121SKalle Valo } 825e705c121SKalle Valo 826c9be849dSLiad Kaufman void iwl_pcie_apply_destination(struct iwl_trans *trans) 827e705c121SKalle Valo { 828e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 829e705c121SKalle Valo const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv; 830e705c121SKalle Valo int i; 831e705c121SKalle Valo 832e705c121SKalle Valo if (dest->version) 833e705c121SKalle Valo IWL_ERR(trans, 834e705c121SKalle Valo "DBG DEST version is %d - expect issues\n", 835e705c121SKalle Valo dest->version); 836e705c121SKalle Valo 837e705c121SKalle Valo IWL_INFO(trans, "Applying debug destination %s\n", 838e705c121SKalle Valo get_fw_dbg_mode_string(dest->monitor_mode)); 839e705c121SKalle Valo 840e705c121SKalle Valo if (dest->monitor_mode == EXTERNAL_MODE) 841e705c121SKalle Valo iwl_pcie_alloc_fw_monitor(trans, dest->size_power); 842e705c121SKalle Valo else 843e705c121SKalle Valo IWL_WARN(trans, "PCI should have external buffer debug\n"); 844e705c121SKalle Valo 845e705c121SKalle Valo for (i = 0; i < trans->dbg_dest_reg_num; i++) { 846e705c121SKalle Valo u32 addr = le32_to_cpu(dest->reg_ops[i].addr); 847e705c121SKalle Valo u32 val = le32_to_cpu(dest->reg_ops[i].val); 848e705c121SKalle Valo 849e705c121SKalle Valo switch (dest->reg_ops[i].op) { 850e705c121SKalle Valo case CSR_ASSIGN: 851e705c121SKalle Valo iwl_write32(trans, addr, val); 852e705c121SKalle Valo break; 853e705c121SKalle Valo case CSR_SETBIT: 854e705c121SKalle Valo iwl_set_bit(trans, addr, BIT(val)); 855e705c121SKalle Valo break; 856e705c121SKalle Valo case CSR_CLEARBIT: 857e705c121SKalle Valo iwl_clear_bit(trans, addr, BIT(val)); 858e705c121SKalle Valo break; 859e705c121SKalle Valo case PRPH_ASSIGN: 860e705c121SKalle Valo iwl_write_prph(trans, addr, val); 861e705c121SKalle Valo break; 862e705c121SKalle Valo case PRPH_SETBIT: 863e705c121SKalle Valo iwl_set_bits_prph(trans, addr, BIT(val)); 864e705c121SKalle Valo break; 865e705c121SKalle Valo case PRPH_CLEARBIT: 866e705c121SKalle Valo iwl_clear_bits_prph(trans, addr, BIT(val)); 867e705c121SKalle Valo break; 868e705c121SKalle Valo case PRPH_BLOCKBIT: 869e705c121SKalle Valo if (iwl_read_prph(trans, addr) & BIT(val)) { 870e705c121SKalle Valo IWL_ERR(trans, 871e705c121SKalle Valo "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", 872e705c121SKalle Valo val, addr); 873e705c121SKalle Valo goto monitor; 874e705c121SKalle Valo } 875e705c121SKalle Valo break; 876e705c121SKalle Valo default: 877e705c121SKalle Valo IWL_ERR(trans, "FW debug - unknown OP %d\n", 878e705c121SKalle Valo dest->reg_ops[i].op); 879e705c121SKalle Valo break; 880e705c121SKalle Valo } 881e705c121SKalle Valo } 882e705c121SKalle Valo 883e705c121SKalle Valo monitor: 884e705c121SKalle Valo if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) { 885e705c121SKalle Valo iwl_write_prph(trans, le32_to_cpu(dest->base_reg), 886e705c121SKalle Valo trans_pcie->fw_mon_phys >> dest->base_shift); 8876e584873SSara Sharon if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) 888e705c121SKalle Valo iwl_write_prph(trans, le32_to_cpu(dest->end_reg), 889e705c121SKalle Valo (trans_pcie->fw_mon_phys + 89062d7476dSEmmanuel Grumbach trans_pcie->fw_mon_size - 256) >> 89162d7476dSEmmanuel Grumbach dest->end_shift); 89262d7476dSEmmanuel Grumbach else 89362d7476dSEmmanuel Grumbach iwl_write_prph(trans, le32_to_cpu(dest->end_reg), 89462d7476dSEmmanuel Grumbach (trans_pcie->fw_mon_phys + 89562d7476dSEmmanuel Grumbach trans_pcie->fw_mon_size) >> 89662d7476dSEmmanuel Grumbach dest->end_shift); 897e705c121SKalle Valo } 898e705c121SKalle Valo } 899e705c121SKalle Valo 900e705c121SKalle Valo static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, 901e705c121SKalle Valo const struct fw_img *image) 902e705c121SKalle Valo { 903e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 904e705c121SKalle Valo int ret = 0; 905e705c121SKalle Valo int first_ucode_section; 906e705c121SKalle Valo 907e705c121SKalle Valo IWL_DEBUG_FW(trans, "working with %s CPU\n", 908e705c121SKalle Valo image->is_dual_cpus ? "Dual" : "Single"); 909e705c121SKalle Valo 910e705c121SKalle Valo /* load to FW the binary non secured sections of CPU1 */ 911e705c121SKalle Valo ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section); 912e705c121SKalle Valo if (ret) 913e705c121SKalle Valo return ret; 914e705c121SKalle Valo 915e705c121SKalle Valo if (image->is_dual_cpus) { 916e705c121SKalle Valo /* set CPU2 header address */ 917e705c121SKalle Valo iwl_write_prph(trans, 918e705c121SKalle Valo LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, 919e705c121SKalle Valo LMPM_SECURE_CPU2_HDR_MEM_SPACE); 920e705c121SKalle Valo 921e705c121SKalle Valo /* load to FW the binary sections of CPU2 */ 922e705c121SKalle Valo ret = iwl_pcie_load_cpu_sections(trans, image, 2, 923e705c121SKalle Valo &first_ucode_section); 924e705c121SKalle Valo if (ret) 925e705c121SKalle Valo return ret; 926e705c121SKalle Valo } 927e705c121SKalle Valo 928e705c121SKalle Valo /* supported for 7000 only for the moment */ 929e705c121SKalle Valo if (iwlwifi_mod_params.fw_monitor && 930e705c121SKalle Valo trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 931e705c121SKalle Valo iwl_pcie_alloc_fw_monitor(trans, 0); 932e705c121SKalle Valo 933e705c121SKalle Valo if (trans_pcie->fw_mon_size) { 934e705c121SKalle Valo iwl_write_prph(trans, MON_BUFF_BASE_ADDR, 935e705c121SKalle Valo trans_pcie->fw_mon_phys >> 4); 936e705c121SKalle Valo iwl_write_prph(trans, MON_BUFF_END_ADDR, 937e705c121SKalle Valo (trans_pcie->fw_mon_phys + 938e705c121SKalle Valo trans_pcie->fw_mon_size) >> 4); 939e705c121SKalle Valo } 940e705c121SKalle Valo } else if (trans->dbg_dest_tlv) { 941e705c121SKalle Valo iwl_pcie_apply_destination(trans); 942e705c121SKalle Valo } 943e705c121SKalle Valo 9442aabdbdcSEmmanuel Grumbach iwl_enable_interrupts(trans); 9452aabdbdcSEmmanuel Grumbach 946e705c121SKalle Valo /* release CPU reset */ 947e705c121SKalle Valo iwl_write32(trans, CSR_RESET, 0); 948e705c121SKalle Valo 949e705c121SKalle Valo return 0; 950e705c121SKalle Valo } 951e705c121SKalle Valo 952e705c121SKalle Valo static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, 953e705c121SKalle Valo const struct fw_img *image) 954e705c121SKalle Valo { 955e705c121SKalle Valo int ret = 0; 956e705c121SKalle Valo int first_ucode_section; 957e705c121SKalle Valo 958e705c121SKalle Valo IWL_DEBUG_FW(trans, "working with %s CPU\n", 959e705c121SKalle Valo image->is_dual_cpus ? "Dual" : "Single"); 960e705c121SKalle Valo 961e705c121SKalle Valo if (trans->dbg_dest_tlv) 962e705c121SKalle Valo iwl_pcie_apply_destination(trans); 963e705c121SKalle Valo 96482ea7966SSara Sharon IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n", 96582ea7966SSara Sharon iwl_read_prph(trans, WFPM_GP2)); 96682ea7966SSara Sharon 96782ea7966SSara Sharon /* 96882ea7966SSara Sharon * Set default value. On resume reading the values that were 96982ea7966SSara Sharon * zeored can provide debug data on the resume flow. 97082ea7966SSara Sharon * This is for debugging only and has no functional impact. 97182ea7966SSara Sharon */ 97282ea7966SSara Sharon iwl_write_prph(trans, WFPM_GP2, 0x01010101); 97382ea7966SSara Sharon 974e705c121SKalle Valo /* configure the ucode to be ready to get the secured image */ 975e705c121SKalle Valo /* release CPU reset */ 976e705c121SKalle Valo iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); 977e705c121SKalle Valo 978e705c121SKalle Valo /* load to FW the binary Secured sections of CPU1 */ 979e705c121SKalle Valo ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1, 980e705c121SKalle Valo &first_ucode_section); 981e705c121SKalle Valo if (ret) 982e705c121SKalle Valo return ret; 983e705c121SKalle Valo 984e705c121SKalle Valo /* load to FW the binary sections of CPU2 */ 985e705c121SKalle Valo return iwl_pcie_load_cpu_sections_8000(trans, image, 2, 986e705c121SKalle Valo &first_ucode_section); 987e705c121SKalle Valo } 988e705c121SKalle Valo 9899ad8fd0bSJohannes Berg bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) 990727c02dfSSara Sharon { 991326477e4SJohannes Berg struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 992727c02dfSSara Sharon bool hw_rfkill = iwl_is_rfkill_set(trans); 993326477e4SJohannes Berg bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); 994326477e4SJohannes Berg bool report; 995727c02dfSSara Sharon 996326477e4SJohannes Berg if (hw_rfkill) { 997326477e4SJohannes Berg set_bit(STATUS_RFKILL_HW, &trans->status); 998326477e4SJohannes Berg set_bit(STATUS_RFKILL_OPMODE, &trans->status); 999326477e4SJohannes Berg } else { 1000326477e4SJohannes Berg clear_bit(STATUS_RFKILL_HW, &trans->status); 1001326477e4SJohannes Berg if (trans_pcie->opmode_down) 1002326477e4SJohannes Berg clear_bit(STATUS_RFKILL_OPMODE, &trans->status); 1003326477e4SJohannes Berg } 1004727c02dfSSara Sharon 1005326477e4SJohannes Berg report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); 1006326477e4SJohannes Berg 1007326477e4SJohannes Berg if (prev != report) 1008326477e4SJohannes Berg iwl_trans_pcie_rf_kill(trans, report); 1009727c02dfSSara Sharon 1010727c02dfSSara Sharon return hw_rfkill; 1011727c02dfSSara Sharon } 1012727c02dfSSara Sharon 10137ca00409SHaim Dreyfuss struct iwl_causes_list { 10147ca00409SHaim Dreyfuss u32 cause_num; 10157ca00409SHaim Dreyfuss u32 mask_reg; 10167ca00409SHaim Dreyfuss u8 addr; 10177ca00409SHaim Dreyfuss }; 10187ca00409SHaim Dreyfuss 10197ca00409SHaim Dreyfuss static struct iwl_causes_list causes_list[] = { 10207ca00409SHaim Dreyfuss {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, 10217ca00409SHaim Dreyfuss {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, 10227ca00409SHaim Dreyfuss {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, 10237ca00409SHaim Dreyfuss {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, 10247ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, 10257ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, 10267ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, 10277ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, 10287ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, 10297ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, 10307ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, 10317ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, 10327ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, 10337ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, 10347ca00409SHaim Dreyfuss }; 10357ca00409SHaim Dreyfuss 10367ca00409SHaim Dreyfuss static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) 10377ca00409SHaim Dreyfuss { 10387ca00409SHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 10397ca00409SHaim Dreyfuss int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; 10407ca00409SHaim Dreyfuss int i; 10417ca00409SHaim Dreyfuss 10427ca00409SHaim Dreyfuss /* 10437ca00409SHaim Dreyfuss * Access all non RX causes and map them to the default irq. 10447ca00409SHaim Dreyfuss * In case we are missing at least one interrupt vector, 10457ca00409SHaim Dreyfuss * the first interrupt vector will serve non-RX and FBQ causes. 10467ca00409SHaim Dreyfuss */ 10477ca00409SHaim Dreyfuss for (i = 0; i < ARRAY_SIZE(causes_list); i++) { 10487ca00409SHaim Dreyfuss iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val); 10497ca00409SHaim Dreyfuss iwl_clear_bit(trans, causes_list[i].mask_reg, 10507ca00409SHaim Dreyfuss causes_list[i].cause_num); 10517ca00409SHaim Dreyfuss } 10527ca00409SHaim Dreyfuss } 10537ca00409SHaim Dreyfuss 10547ca00409SHaim Dreyfuss static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) 10557ca00409SHaim Dreyfuss { 10567ca00409SHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 10577ca00409SHaim Dreyfuss u32 offset = 10587ca00409SHaim Dreyfuss trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; 10597ca00409SHaim Dreyfuss u32 val, idx; 10607ca00409SHaim Dreyfuss 10617ca00409SHaim Dreyfuss /* 10627ca00409SHaim Dreyfuss * The first RX queue - fallback queue, which is designated for 10637ca00409SHaim Dreyfuss * management frame, command responses etc, is always mapped to the 10647ca00409SHaim Dreyfuss * first interrupt vector. The other RX queues are mapped to 10657ca00409SHaim Dreyfuss * the other (N - 2) interrupt vectors. 10667ca00409SHaim Dreyfuss */ 10677ca00409SHaim Dreyfuss val = BIT(MSIX_FH_INT_CAUSES_Q(0)); 10687ca00409SHaim Dreyfuss for (idx = 1; idx < trans->num_rx_queues; idx++) { 10697ca00409SHaim Dreyfuss iwl_write8(trans, CSR_MSIX_RX_IVAR(idx), 10707ca00409SHaim Dreyfuss MSIX_FH_INT_CAUSES_Q(idx - offset)); 10717ca00409SHaim Dreyfuss val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); 10727ca00409SHaim Dreyfuss } 10737ca00409SHaim Dreyfuss iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val); 10747ca00409SHaim Dreyfuss 10757ca00409SHaim Dreyfuss val = MSIX_FH_INT_CAUSES_Q(0); 10767ca00409SHaim Dreyfuss if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) 10777ca00409SHaim Dreyfuss val |= MSIX_NON_AUTO_CLEAR_CAUSE; 10787ca00409SHaim Dreyfuss iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val); 10797ca00409SHaim Dreyfuss 10807ca00409SHaim Dreyfuss if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) 10817ca00409SHaim Dreyfuss iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); 10827ca00409SHaim Dreyfuss } 10837ca00409SHaim Dreyfuss 108477c09bc8SSara Sharon void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) 10857ca00409SHaim Dreyfuss { 10867ca00409SHaim Dreyfuss struct iwl_trans *trans = trans_pcie->trans; 10877ca00409SHaim Dreyfuss 10887ca00409SHaim Dreyfuss if (!trans_pcie->msix_enabled) { 1089d7270d61SHaim Dreyfuss if (trans->cfg->mq_rx_supported && 1090d7270d61SHaim Dreyfuss test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 10917ca00409SHaim Dreyfuss iwl_write_prph(trans, UREG_CHICK, 10927ca00409SHaim Dreyfuss UREG_CHICK_MSI_ENABLE); 10937ca00409SHaim Dreyfuss return; 10947ca00409SHaim Dreyfuss } 1095d7270d61SHaim Dreyfuss /* 1096d7270d61SHaim Dreyfuss * The IVAR table needs to be configured again after reset, 1097d7270d61SHaim Dreyfuss * but if the device is disabled, we can't write to 1098d7270d61SHaim Dreyfuss * prph. 1099d7270d61SHaim Dreyfuss */ 1100d7270d61SHaim Dreyfuss if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 11017ca00409SHaim Dreyfuss iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); 11027ca00409SHaim Dreyfuss 11037ca00409SHaim Dreyfuss /* 11047ca00409SHaim Dreyfuss * Each cause from the causes list above and the RX causes is 11057ca00409SHaim Dreyfuss * represented as a byte in the IVAR table. The first nibble 11067ca00409SHaim Dreyfuss * represents the bound interrupt vector of the cause, the second 11077ca00409SHaim Dreyfuss * represents no auto clear for this cause. This will be set if its 11087ca00409SHaim Dreyfuss * interrupt vector is bound to serve other causes. 11097ca00409SHaim Dreyfuss */ 11107ca00409SHaim Dreyfuss iwl_pcie_map_rx_causes(trans); 11117ca00409SHaim Dreyfuss 11127ca00409SHaim Dreyfuss iwl_pcie_map_non_rx_causes(trans); 111383730058SHaim Dreyfuss } 11147ca00409SHaim Dreyfuss 111583730058SHaim Dreyfuss static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) 111683730058SHaim Dreyfuss { 111783730058SHaim Dreyfuss struct iwl_trans *trans = trans_pcie->trans; 111883730058SHaim Dreyfuss 111983730058SHaim Dreyfuss iwl_pcie_conf_msix_hw(trans_pcie); 112083730058SHaim Dreyfuss 112183730058SHaim Dreyfuss if (!trans_pcie->msix_enabled) 112283730058SHaim Dreyfuss return; 112383730058SHaim Dreyfuss 112483730058SHaim Dreyfuss trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); 11257ca00409SHaim Dreyfuss trans_pcie->fh_mask = trans_pcie->fh_init_mask; 112683730058SHaim Dreyfuss trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); 11277ca00409SHaim Dreyfuss trans_pcie->hw_mask = trans_pcie->hw_init_mask; 11287ca00409SHaim Dreyfuss } 11297ca00409SHaim Dreyfuss 1130e705c121SKalle Valo static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1131e705c121SKalle Valo { 1132e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1133e705c121SKalle Valo 1134e705c121SKalle Valo lockdep_assert_held(&trans_pcie->mutex); 1135e705c121SKalle Valo 1136e705c121SKalle Valo if (trans_pcie->is_down) 1137e705c121SKalle Valo return; 1138e705c121SKalle Valo 1139e705c121SKalle Valo trans_pcie->is_down = true; 1140e705c121SKalle Valo 1141e705c121SKalle Valo /* tell the device to stop sending interrupts */ 1142e705c121SKalle Valo iwl_disable_interrupts(trans); 1143e705c121SKalle Valo 1144e705c121SKalle Valo /* device going down, Stop using ICT table */ 1145e705c121SKalle Valo iwl_pcie_disable_ict(trans); 1146e705c121SKalle Valo 1147e705c121SKalle Valo /* 1148e705c121SKalle Valo * If a HW restart happens during firmware loading, 1149e705c121SKalle Valo * then the firmware loading might call this function 1150e705c121SKalle Valo * and later it might be called again due to the 1151e705c121SKalle Valo * restart. So don't process again if the device is 1152e705c121SKalle Valo * already dead. 1153e705c121SKalle Valo */ 1154e705c121SKalle Valo if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { 1155a6bd005fSEmmanuel Grumbach IWL_DEBUG_INFO(trans, 1156a6bd005fSEmmanuel Grumbach "DEVICE_ENABLED bit was set and is now cleared\n"); 1157e705c121SKalle Valo iwl_pcie_tx_stop(trans); 1158e705c121SKalle Valo iwl_pcie_rx_stop(trans); 1159e705c121SKalle Valo 1160e705c121SKalle Valo /* Power-down device's busmaster DMA clocks */ 1161e705c121SKalle Valo if (!trans->cfg->apmg_not_supported) { 1162e705c121SKalle Valo iwl_write_prph(trans, APMG_CLK_DIS_REG, 1163e705c121SKalle Valo APMG_CLK_VAL_DMA_CLK_RQT); 1164e705c121SKalle Valo udelay(5); 1165e705c121SKalle Valo } 1166e705c121SKalle Valo } 1167e705c121SKalle Valo 1168e705c121SKalle Valo /* Make sure (redundant) we've released our request to stay awake */ 1169e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 1170e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1171e705c121SKalle Valo 1172e705c121SKalle Valo /* Stop the device, and put it in low power state */ 1173e705c121SKalle Valo iwl_pcie_apm_stop(trans, false); 1174e705c121SKalle Valo 1175099a628bSEmmanuel Grumbach iwl_pcie_sw_reset(trans); 1176e705c121SKalle Valo 1177e705c121SKalle Valo /* 1178f4a1f04aSGolan Ben Ami * Upon stop, the IVAR table gets erased, so msi-x won't 1179f4a1f04aSGolan Ben Ami * work. This causes a bug in RF-KILL flows, since the interrupt 1180f4a1f04aSGolan Ben Ami * that enables radio won't fire on the correct irq, and the 1181f4a1f04aSGolan Ben Ami * driver won't be able to handle the interrupt. 1182f4a1f04aSGolan Ben Ami * Configure the IVAR table again after reset. 1183f4a1f04aSGolan Ben Ami */ 1184f4a1f04aSGolan Ben Ami iwl_pcie_conf_msix_hw(trans_pcie); 1185f4a1f04aSGolan Ben Ami 1186f4a1f04aSGolan Ben Ami /* 1187e705c121SKalle Valo * Upon stop, the APM issues an interrupt if HW RF kill is set. 1188e705c121SKalle Valo * This is a bug in certain verions of the hardware. 1189e705c121SKalle Valo * Certain devices also keep sending HW RF kill interrupt all 1190e705c121SKalle Valo * the time, unless the interrupt is ACKed even if the interrupt 1191e705c121SKalle Valo * should be masked. Re-ACK all the interrupts here. 1192e705c121SKalle Valo */ 1193e705c121SKalle Valo iwl_disable_interrupts(trans); 1194e705c121SKalle Valo 1195e705c121SKalle Valo /* clear all status bits */ 1196e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1197e705c121SKalle Valo clear_bit(STATUS_INT_ENABLED, &trans->status); 1198e705c121SKalle Valo clear_bit(STATUS_TPOWER_PMI, &trans->status); 1199e705c121SKalle Valo 1200e705c121SKalle Valo /* 1201e705c121SKalle Valo * Even if we stop the HW, we still want the RF kill 1202e705c121SKalle Valo * interrupt 1203e705c121SKalle Valo */ 1204e705c121SKalle Valo iwl_enable_rfkill_int(trans); 1205e705c121SKalle Valo 1206a6bd005fSEmmanuel Grumbach /* re-take ownership to prevent other users from stealing the device */ 1207e705c121SKalle Valo iwl_pcie_prepare_card_hw(trans); 1208e705c121SKalle Valo } 1209e705c121SKalle Valo 1210eda50cdeSSara Sharon void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) 12112e5d4a8fSHaim Dreyfuss { 12122e5d4a8fSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 12132e5d4a8fSHaim Dreyfuss 12142e5d4a8fSHaim Dreyfuss if (trans_pcie->msix_enabled) { 12152e5d4a8fSHaim Dreyfuss int i; 12162e5d4a8fSHaim Dreyfuss 1217496d83caSHaim Dreyfuss for (i = 0; i < trans_pcie->alloc_vecs; i++) 12182e5d4a8fSHaim Dreyfuss synchronize_irq(trans_pcie->msix_entries[i].vector); 12192e5d4a8fSHaim Dreyfuss } else { 12202e5d4a8fSHaim Dreyfuss synchronize_irq(trans_pcie->pci_dev->irq); 12212e5d4a8fSHaim Dreyfuss } 12222e5d4a8fSHaim Dreyfuss } 12232e5d4a8fSHaim Dreyfuss 1224a6bd005fSEmmanuel Grumbach static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, 1225a6bd005fSEmmanuel Grumbach const struct fw_img *fw, bool run_in_rfkill) 1226a6bd005fSEmmanuel Grumbach { 1227a6bd005fSEmmanuel Grumbach struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1228a6bd005fSEmmanuel Grumbach bool hw_rfkill; 1229a6bd005fSEmmanuel Grumbach int ret; 1230a6bd005fSEmmanuel Grumbach 1231a6bd005fSEmmanuel Grumbach /* This may fail if AMT took ownership of the device */ 1232a6bd005fSEmmanuel Grumbach if (iwl_pcie_prepare_card_hw(trans)) { 1233a6bd005fSEmmanuel Grumbach IWL_WARN(trans, "Exit HW not ready\n"); 1234a6bd005fSEmmanuel Grumbach ret = -EIO; 1235a6bd005fSEmmanuel Grumbach goto out; 1236a6bd005fSEmmanuel Grumbach } 1237a6bd005fSEmmanuel Grumbach 1238a6bd005fSEmmanuel Grumbach iwl_enable_rfkill_int(trans); 1239a6bd005fSEmmanuel Grumbach 1240a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_INT, 0xFFFFFFFF); 1241a6bd005fSEmmanuel Grumbach 1242a6bd005fSEmmanuel Grumbach /* 1243a6bd005fSEmmanuel Grumbach * We enabled the RF-Kill interrupt and the handler may very 1244a6bd005fSEmmanuel Grumbach * well be running. Disable the interrupts to make sure no other 1245a6bd005fSEmmanuel Grumbach * interrupt can be fired. 1246a6bd005fSEmmanuel Grumbach */ 1247a6bd005fSEmmanuel Grumbach iwl_disable_interrupts(trans); 1248a6bd005fSEmmanuel Grumbach 1249a6bd005fSEmmanuel Grumbach /* Make sure it finished running */ 12502e5d4a8fSHaim Dreyfuss iwl_pcie_synchronize_irqs(trans); 1251a6bd005fSEmmanuel Grumbach 1252a6bd005fSEmmanuel Grumbach mutex_lock(&trans_pcie->mutex); 1253a6bd005fSEmmanuel Grumbach 1254a6bd005fSEmmanuel Grumbach /* If platform's RF_KILL switch is NOT set to KILL */ 12559ad8fd0bSJohannes Berg hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); 1256a6bd005fSEmmanuel Grumbach if (hw_rfkill && !run_in_rfkill) { 1257a6bd005fSEmmanuel Grumbach ret = -ERFKILL; 1258a6bd005fSEmmanuel Grumbach goto out; 1259a6bd005fSEmmanuel Grumbach } 1260a6bd005fSEmmanuel Grumbach 1261a6bd005fSEmmanuel Grumbach /* Someone called stop_device, don't try to start_fw */ 1262a6bd005fSEmmanuel Grumbach if (trans_pcie->is_down) { 1263a6bd005fSEmmanuel Grumbach IWL_WARN(trans, 1264a6bd005fSEmmanuel Grumbach "Can't start_fw since the HW hasn't been started\n"); 126520aa99bbSAnton Protopopov ret = -EIO; 1266a6bd005fSEmmanuel Grumbach goto out; 1267a6bd005fSEmmanuel Grumbach } 1268a6bd005fSEmmanuel Grumbach 1269a6bd005fSEmmanuel Grumbach /* make sure rfkill handshake bits are cleared */ 1270a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 1271a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, 1272a6bd005fSEmmanuel Grumbach CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 1273a6bd005fSEmmanuel Grumbach 1274a6bd005fSEmmanuel Grumbach /* clear (again), then enable host interrupts */ 1275a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_INT, 0xFFFFFFFF); 1276a6bd005fSEmmanuel Grumbach 1277a6bd005fSEmmanuel Grumbach ret = iwl_pcie_nic_init(trans); 1278a6bd005fSEmmanuel Grumbach if (ret) { 1279a6bd005fSEmmanuel Grumbach IWL_ERR(trans, "Unable to init nic\n"); 1280a6bd005fSEmmanuel Grumbach goto out; 1281a6bd005fSEmmanuel Grumbach } 1282a6bd005fSEmmanuel Grumbach 1283a6bd005fSEmmanuel Grumbach /* 1284a6bd005fSEmmanuel Grumbach * Now, we load the firmware and don't want to be interrupted, even 1285a6bd005fSEmmanuel Grumbach * by the RF-Kill interrupt (hence mask all the interrupt besides the 1286a6bd005fSEmmanuel Grumbach * FH_TX interrupt which is needed to load the firmware). If the 1287a6bd005fSEmmanuel Grumbach * RF-Kill switch is toggled, we will find out after having loaded 1288a6bd005fSEmmanuel Grumbach * the firmware and return the proper value to the caller. 1289a6bd005fSEmmanuel Grumbach */ 1290a6bd005fSEmmanuel Grumbach iwl_enable_fw_load_int(trans); 1291a6bd005fSEmmanuel Grumbach 1292a6bd005fSEmmanuel Grumbach /* really make sure rfkill handshake bits are cleared */ 1293a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 1294a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 1295a6bd005fSEmmanuel Grumbach 1296a6bd005fSEmmanuel Grumbach /* Load the given image to the HW */ 12976e584873SSara Sharon if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) 1298a6bd005fSEmmanuel Grumbach ret = iwl_pcie_load_given_ucode_8000(trans, fw); 1299a6bd005fSEmmanuel Grumbach else 1300a6bd005fSEmmanuel Grumbach ret = iwl_pcie_load_given_ucode(trans, fw); 1301a6bd005fSEmmanuel Grumbach 1302a6bd005fSEmmanuel Grumbach /* re-check RF-Kill state since we may have missed the interrupt */ 13039ad8fd0bSJohannes Berg hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); 1304a6bd005fSEmmanuel Grumbach if (hw_rfkill && !run_in_rfkill) 1305a6bd005fSEmmanuel Grumbach ret = -ERFKILL; 1306a6bd005fSEmmanuel Grumbach 1307a6bd005fSEmmanuel Grumbach out: 1308a6bd005fSEmmanuel Grumbach mutex_unlock(&trans_pcie->mutex); 1309a6bd005fSEmmanuel Grumbach return ret; 1310a6bd005fSEmmanuel Grumbach } 1311a6bd005fSEmmanuel Grumbach 1312a6bd005fSEmmanuel Grumbach static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) 1313a6bd005fSEmmanuel Grumbach { 1314a6bd005fSEmmanuel Grumbach iwl_pcie_reset_ict(trans); 1315a6bd005fSEmmanuel Grumbach iwl_pcie_tx_start(trans, scd_addr); 1316a6bd005fSEmmanuel Grumbach } 1317a6bd005fSEmmanuel Grumbach 1318326477e4SJohannes Berg void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, 1319326477e4SJohannes Berg bool was_in_rfkill) 1320326477e4SJohannes Berg { 1321326477e4SJohannes Berg bool hw_rfkill; 1322326477e4SJohannes Berg 1323326477e4SJohannes Berg /* 1324326477e4SJohannes Berg * Check again since the RF kill state may have changed while 1325326477e4SJohannes Berg * all the interrupts were disabled, in this case we couldn't 1326326477e4SJohannes Berg * receive the RF kill interrupt and update the state in the 1327326477e4SJohannes Berg * op_mode. 1328326477e4SJohannes Berg * Don't call the op_mode if the rkfill state hasn't changed. 1329326477e4SJohannes Berg * This allows the op_mode to call stop_device from the rfkill 1330326477e4SJohannes Berg * notification without endless recursion. Under very rare 1331326477e4SJohannes Berg * circumstances, we might have a small recursion if the rfkill 1332326477e4SJohannes Berg * state changed exactly now while we were called from stop_device. 1333326477e4SJohannes Berg * This is very unlikely but can happen and is supported. 1334326477e4SJohannes Berg */ 1335326477e4SJohannes Berg hw_rfkill = iwl_is_rfkill_set(trans); 1336326477e4SJohannes Berg if (hw_rfkill) { 1337326477e4SJohannes Berg set_bit(STATUS_RFKILL_HW, &trans->status); 1338326477e4SJohannes Berg set_bit(STATUS_RFKILL_OPMODE, &trans->status); 1339326477e4SJohannes Berg } else { 1340326477e4SJohannes Berg clear_bit(STATUS_RFKILL_HW, &trans->status); 1341326477e4SJohannes Berg clear_bit(STATUS_RFKILL_OPMODE, &trans->status); 1342326477e4SJohannes Berg } 1343326477e4SJohannes Berg if (hw_rfkill != was_in_rfkill) 1344326477e4SJohannes Berg iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1345326477e4SJohannes Berg } 1346326477e4SJohannes Berg 1347e705c121SKalle Valo static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1348e705c121SKalle Valo { 1349e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1350326477e4SJohannes Berg bool was_in_rfkill; 1351e705c121SKalle Valo 1352e705c121SKalle Valo mutex_lock(&trans_pcie->mutex); 1353326477e4SJohannes Berg trans_pcie->opmode_down = true; 1354326477e4SJohannes Berg was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); 1355e705c121SKalle Valo _iwl_trans_pcie_stop_device(trans, low_power); 1356326477e4SJohannes Berg iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); 1357e705c121SKalle Valo mutex_unlock(&trans_pcie->mutex); 1358e705c121SKalle Valo } 1359e705c121SKalle Valo 1360e705c121SKalle Valo void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) 1361e705c121SKalle Valo { 1362e705c121SKalle Valo struct iwl_trans_pcie __maybe_unused *trans_pcie = 1363e705c121SKalle Valo IWL_TRANS_GET_PCIE_TRANS(trans); 1364e705c121SKalle Valo 1365e705c121SKalle Valo lockdep_assert_held(&trans_pcie->mutex); 1366e705c121SKalle Valo 1367326477e4SJohannes Berg IWL_WARN(trans, "reporting RF_KILL (radio %s)\n", 1368326477e4SJohannes Berg state ? "disabled" : "enabled"); 136977c09bc8SSara Sharon if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) { 137077c09bc8SSara Sharon if (trans->cfg->gen2) 137177c09bc8SSara Sharon _iwl_trans_pcie_gen2_stop_device(trans, true); 137277c09bc8SSara Sharon else 1373e705c121SKalle Valo _iwl_trans_pcie_stop_device(trans, true); 1374e705c121SKalle Valo } 137577c09bc8SSara Sharon } 1376e705c121SKalle Valo 137723ae6128SMatti Gottlieb static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, 137823ae6128SMatti Gottlieb bool reset) 1379e705c121SKalle Valo { 138023ae6128SMatti Gottlieb if (!reset) { 1381e705c121SKalle Valo /* Enable persistence mode to avoid reset */ 1382e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 1383e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_PERSIST_MODE); 1384e705c121SKalle Valo } 1385e705c121SKalle Valo 1386e705c121SKalle Valo iwl_disable_interrupts(trans); 1387e705c121SKalle Valo 1388e705c121SKalle Valo /* 1389e705c121SKalle Valo * in testing mode, the host stays awake and the 1390e705c121SKalle Valo * hardware won't be reset (not even partially) 1391e705c121SKalle Valo */ 1392e705c121SKalle Valo if (test) 1393e705c121SKalle Valo return; 1394e705c121SKalle Valo 1395e705c121SKalle Valo iwl_pcie_disable_ict(trans); 1396e705c121SKalle Valo 13972e5d4a8fSHaim Dreyfuss iwl_pcie_synchronize_irqs(trans); 1398e705c121SKalle Valo 1399e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 1400e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1401e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 1402e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1403e705c121SKalle Valo 14041316d595SSara Sharon iwl_pcie_enable_rx_wake(trans, false); 14051316d595SSara Sharon 140623ae6128SMatti Gottlieb if (reset) { 1407e705c121SKalle Valo /* 1408e705c121SKalle Valo * reset TX queues -- some of their registers reset during S3 1409e705c121SKalle Valo * so if we don't reset everything here the D3 image would try 1410e705c121SKalle Valo * to execute some invalid memory upon resume 1411e705c121SKalle Valo */ 1412e705c121SKalle Valo iwl_trans_pcie_tx_reset(trans); 1413e705c121SKalle Valo } 1414e705c121SKalle Valo 1415e705c121SKalle Valo iwl_pcie_set_pwr(trans, true); 1416e705c121SKalle Valo } 1417e705c121SKalle Valo 1418e705c121SKalle Valo static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, 1419e705c121SKalle Valo enum iwl_d3_status *status, 142023ae6128SMatti Gottlieb bool test, bool reset) 1421e705c121SKalle Valo { 1422d7270d61SHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1423e705c121SKalle Valo u32 val; 1424e705c121SKalle Valo int ret; 1425e705c121SKalle Valo 1426e705c121SKalle Valo if (test) { 1427e705c121SKalle Valo iwl_enable_interrupts(trans); 1428e705c121SKalle Valo *status = IWL_D3_STATUS_ALIVE; 1429e705c121SKalle Valo return 0; 1430e705c121SKalle Valo } 1431e705c121SKalle Valo 14321316d595SSara Sharon iwl_pcie_enable_rx_wake(trans, true); 14331316d595SSara Sharon 1434e705c121SKalle Valo /* 1435d7270d61SHaim Dreyfuss * Reconfigure IVAR table in case of MSIX or reset ict table in 1436d7270d61SHaim Dreyfuss * MSI mode since HW reset erased it. 1437d7270d61SHaim Dreyfuss * Also enables interrupts - none will happen as 1438d7270d61SHaim Dreyfuss * the device doesn't know we're waking it up, only when 1439d7270d61SHaim Dreyfuss * the opmode actually tells it after this call. 1440e705c121SKalle Valo */ 1441d7270d61SHaim Dreyfuss iwl_pcie_conf_msix_hw(trans_pcie); 1442d7270d61SHaim Dreyfuss if (!trans_pcie->msix_enabled) 1443e705c121SKalle Valo iwl_pcie_reset_ict(trans); 144418dcb9a9SSara Sharon iwl_enable_interrupts(trans); 1445e705c121SKalle Valo 1446e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1447e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1448e705c121SKalle Valo 14496e584873SSara Sharon if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) 1450e705c121SKalle Valo udelay(2); 1451e705c121SKalle Valo 1452e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 1453e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 1454e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 1455e705c121SKalle Valo 25000); 1456e705c121SKalle Valo if (ret < 0) { 1457e705c121SKalle Valo IWL_ERR(trans, "Failed to resume the device (mac ready)\n"); 1458e705c121SKalle Valo return ret; 1459e705c121SKalle Valo } 1460e705c121SKalle Valo 1461e705c121SKalle Valo iwl_pcie_set_pwr(trans, false); 1462e705c121SKalle Valo 146323ae6128SMatti Gottlieb if (!reset) { 1464e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 1465e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1466e705c121SKalle Valo } else { 1467e705c121SKalle Valo iwl_trans_pcie_tx_reset(trans); 1468e705c121SKalle Valo 1469e705c121SKalle Valo ret = iwl_pcie_rx_init(trans); 1470e705c121SKalle Valo if (ret) { 1471e705c121SKalle Valo IWL_ERR(trans, 1472e705c121SKalle Valo "Failed to resume the device (RX reset)\n"); 1473e705c121SKalle Valo return ret; 1474e705c121SKalle Valo } 1475e705c121SKalle Valo } 1476e705c121SKalle Valo 147782ea7966SSara Sharon IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", 147882ea7966SSara Sharon iwl_read_prph(trans, WFPM_GP2)); 147982ea7966SSara Sharon 1480e705c121SKalle Valo val = iwl_read32(trans, CSR_RESET); 1481e705c121SKalle Valo if (val & CSR_RESET_REG_FLAG_NEVO_RESET) 1482e705c121SKalle Valo *status = IWL_D3_STATUS_RESET; 1483e705c121SKalle Valo else 1484e705c121SKalle Valo *status = IWL_D3_STATUS_ALIVE; 1485e705c121SKalle Valo 1486e705c121SKalle Valo return 0; 1487e705c121SKalle Valo } 1488e705c121SKalle Valo 14892e5d4a8fSHaim Dreyfuss static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, 14902e5d4a8fSHaim Dreyfuss struct iwl_trans *trans) 14912e5d4a8fSHaim Dreyfuss { 14922e5d4a8fSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 14939fb064dfSHaim Dreyfuss int max_irqs, num_irqs, i, ret, nr_online_cpus; 14942e5d4a8fSHaim Dreyfuss u16 pci_cmd; 14952e5d4a8fSHaim Dreyfuss 149606f4b081SSara Sharon if (!trans->cfg->mq_rx_supported) 149706f4b081SSara Sharon goto enable_msi; 149806f4b081SSara Sharon 14999fb064dfSHaim Dreyfuss nr_online_cpus = num_online_cpus(); 15009fb064dfSHaim Dreyfuss max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES); 150106f4b081SSara Sharon for (i = 0; i < max_irqs; i++) 15022e5d4a8fSHaim Dreyfuss trans_pcie->msix_entries[i].entry = i; 15032e5d4a8fSHaim Dreyfuss 150406f4b081SSara Sharon num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries, 15052e5d4a8fSHaim Dreyfuss MSIX_MIN_INTERRUPT_VECTORS, 150606f4b081SSara Sharon max_irqs); 150706f4b081SSara Sharon if (num_irqs < 0) { 1508496d83caSHaim Dreyfuss IWL_DEBUG_INFO(trans, 150906f4b081SSara Sharon "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n", 151006f4b081SSara Sharon num_irqs); 151106f4b081SSara Sharon goto enable_msi; 1512496d83caSHaim Dreyfuss } 151306f4b081SSara Sharon trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0; 1514496d83caSHaim Dreyfuss 15152e5d4a8fSHaim Dreyfuss IWL_DEBUG_INFO(trans, 151606f4b081SSara Sharon "MSI-X enabled. %d interrupt vectors were allocated\n", 151706f4b081SSara Sharon num_irqs); 151806f4b081SSara Sharon 1519496d83caSHaim Dreyfuss /* 152006f4b081SSara Sharon * In case the OS provides fewer interrupts than requested, different 152106f4b081SSara Sharon * causes will share the same interrupt vector as follows: 1522496d83caSHaim Dreyfuss * One interrupt less: non rx causes shared with FBQ. 1523496d83caSHaim Dreyfuss * Two interrupts less: non rx causes shared with FBQ and RSS. 1524496d83caSHaim Dreyfuss * More than two interrupts: we will use fewer RSS queues. 1525496d83caSHaim Dreyfuss */ 15269fb064dfSHaim Dreyfuss if (num_irqs <= nr_online_cpus) { 152706f4b081SSara Sharon trans_pcie->trans->num_rx_queues = num_irqs + 1; 1528496d83caSHaim Dreyfuss trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | 1529496d83caSHaim Dreyfuss IWL_SHARED_IRQ_FIRST_RSS; 15309fb064dfSHaim Dreyfuss } else if (num_irqs == nr_online_cpus + 1) { 153106f4b081SSara Sharon trans_pcie->trans->num_rx_queues = num_irqs; 1532496d83caSHaim Dreyfuss trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; 1533496d83caSHaim Dreyfuss } else { 153406f4b081SSara Sharon trans_pcie->trans->num_rx_queues = num_irqs - 1; 1535496d83caSHaim Dreyfuss } 15362e5d4a8fSHaim Dreyfuss 153706f4b081SSara Sharon trans_pcie->alloc_vecs = num_irqs; 1538496d83caSHaim Dreyfuss trans_pcie->msix_enabled = true; 15392e5d4a8fSHaim Dreyfuss return; 15402e5d4a8fSHaim Dreyfuss 154106f4b081SSara Sharon enable_msi: 154206f4b081SSara Sharon ret = pci_enable_msi(pdev); 154306f4b081SSara Sharon if (ret) { 154406f4b081SSara Sharon dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); 15452e5d4a8fSHaim Dreyfuss /* enable rfkill interrupt: hw bug w/a */ 15462e5d4a8fSHaim Dreyfuss pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 15472e5d4a8fSHaim Dreyfuss if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { 15482e5d4a8fSHaim Dreyfuss pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; 15492e5d4a8fSHaim Dreyfuss pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); 15502e5d4a8fSHaim Dreyfuss } 15512e5d4a8fSHaim Dreyfuss } 15522e5d4a8fSHaim Dreyfuss } 15532e5d4a8fSHaim Dreyfuss 15547c8d91ebSHaim Dreyfuss static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) 15557c8d91ebSHaim Dreyfuss { 15567c8d91ebSHaim Dreyfuss int iter_rx_q, i, ret, cpu, offset; 15577c8d91ebSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 15587c8d91ebSHaim Dreyfuss 15597c8d91ebSHaim Dreyfuss i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1; 15607c8d91ebSHaim Dreyfuss iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i; 15617c8d91ebSHaim Dreyfuss offset = 1 + i; 15627c8d91ebSHaim Dreyfuss for (; i < iter_rx_q ; i++) { 15637c8d91ebSHaim Dreyfuss /* 15647c8d91ebSHaim Dreyfuss * Get the cpu prior to the place to search 15657c8d91ebSHaim Dreyfuss * (i.e. return will be > i - 1). 15667c8d91ebSHaim Dreyfuss */ 15677c8d91ebSHaim Dreyfuss cpu = cpumask_next(i - offset, cpu_online_mask); 15687c8d91ebSHaim Dreyfuss cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]); 15697c8d91ebSHaim Dreyfuss ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector, 15707c8d91ebSHaim Dreyfuss &trans_pcie->affinity_mask[i]); 15717c8d91ebSHaim Dreyfuss if (ret) 15727c8d91ebSHaim Dreyfuss IWL_ERR(trans_pcie->trans, 15737c8d91ebSHaim Dreyfuss "Failed to set affinity mask for IRQ %d\n", 15747c8d91ebSHaim Dreyfuss i); 15757c8d91ebSHaim Dreyfuss } 15767c8d91ebSHaim Dreyfuss } 15777c8d91ebSHaim Dreyfuss 157864fa3affSSharon Dvir static const char *queue_name(struct device *dev, 157964fa3affSSharon Dvir struct iwl_trans_pcie *trans_p, int i) 158064fa3affSSharon Dvir { 158164fa3affSSharon Dvir if (trans_p->shared_vec_mask) { 158264fa3affSSharon Dvir int vec = trans_p->shared_vec_mask & 158364fa3affSSharon Dvir IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; 158464fa3affSSharon Dvir 158564fa3affSSharon Dvir if (i == 0) 158664fa3affSSharon Dvir return DRV_NAME ": shared IRQ"; 158764fa3affSSharon Dvir 158864fa3affSSharon Dvir return devm_kasprintf(dev, GFP_KERNEL, 158964fa3affSSharon Dvir DRV_NAME ": queue %d", i + vec); 159064fa3affSSharon Dvir } 159164fa3affSSharon Dvir if (i == 0) 159264fa3affSSharon Dvir return DRV_NAME ": default queue"; 159364fa3affSSharon Dvir 159464fa3affSSharon Dvir if (i == trans_p->alloc_vecs - 1) 159564fa3affSSharon Dvir return DRV_NAME ": exception"; 159664fa3affSSharon Dvir 159764fa3affSSharon Dvir return devm_kasprintf(dev, GFP_KERNEL, 159864fa3affSSharon Dvir DRV_NAME ": queue %d", i); 159964fa3affSSharon Dvir } 160064fa3affSSharon Dvir 16012e5d4a8fSHaim Dreyfuss static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, 16022e5d4a8fSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie) 16032e5d4a8fSHaim Dreyfuss { 1604496d83caSHaim Dreyfuss int i; 16052e5d4a8fSHaim Dreyfuss 1606496d83caSHaim Dreyfuss for (i = 0; i < trans_pcie->alloc_vecs; i++) { 16072e5d4a8fSHaim Dreyfuss int ret; 16085a41a86cSSharon Dvir struct msix_entry *msix_entry; 160964fa3affSSharon Dvir const char *qname = queue_name(&pdev->dev, trans_pcie, i); 161064fa3affSSharon Dvir 161164fa3affSSharon Dvir if (!qname) 161264fa3affSSharon Dvir return -ENOMEM; 16132e5d4a8fSHaim Dreyfuss 16145a41a86cSSharon Dvir msix_entry = &trans_pcie->msix_entries[i]; 16155a41a86cSSharon Dvir ret = devm_request_threaded_irq(&pdev->dev, 16165a41a86cSSharon Dvir msix_entry->vector, 16172e5d4a8fSHaim Dreyfuss iwl_pcie_msix_isr, 1618496d83caSHaim Dreyfuss (i == trans_pcie->def_irq) ? 16192e5d4a8fSHaim Dreyfuss iwl_pcie_irq_msix_handler : 16202e5d4a8fSHaim Dreyfuss iwl_pcie_irq_rx_msix_handler, 16212e5d4a8fSHaim Dreyfuss IRQF_SHARED, 162264fa3affSSharon Dvir qname, 16235a41a86cSSharon Dvir msix_entry); 16242e5d4a8fSHaim Dreyfuss if (ret) { 16252e5d4a8fSHaim Dreyfuss IWL_ERR(trans_pcie->trans, 16262e5d4a8fSHaim Dreyfuss "Error allocating IRQ %d\n", i); 16275a41a86cSSharon Dvir 16282e5d4a8fSHaim Dreyfuss return ret; 16292e5d4a8fSHaim Dreyfuss } 16302e5d4a8fSHaim Dreyfuss } 16317c8d91ebSHaim Dreyfuss iwl_pcie_irq_set_affinity(trans_pcie->trans); 16322e5d4a8fSHaim Dreyfuss 16332e5d4a8fSHaim Dreyfuss return 0; 16342e5d4a8fSHaim Dreyfuss } 16352e5d4a8fSHaim Dreyfuss 1636e705c121SKalle Valo static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) 1637e705c121SKalle Valo { 1638e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1639e705c121SKalle Valo int err; 1640e705c121SKalle Valo 1641e705c121SKalle Valo lockdep_assert_held(&trans_pcie->mutex); 1642e705c121SKalle Valo 1643e705c121SKalle Valo err = iwl_pcie_prepare_card_hw(trans); 1644e705c121SKalle Valo if (err) { 1645e705c121SKalle Valo IWL_ERR(trans, "Error while preparing HW: %d\n", err); 1646e705c121SKalle Valo return err; 1647e705c121SKalle Valo } 1648e705c121SKalle Valo 1649099a628bSEmmanuel Grumbach iwl_pcie_sw_reset(trans); 1650e705c121SKalle Valo 165152b6e168SEmmanuel Grumbach err = iwl_pcie_apm_init(trans); 165252b6e168SEmmanuel Grumbach if (err) 165352b6e168SEmmanuel Grumbach return err; 1654e705c121SKalle Valo 16552e5d4a8fSHaim Dreyfuss iwl_pcie_init_msix(trans_pcie); 165683730058SHaim Dreyfuss 1657e705c121SKalle Valo /* From now on, the op_mode will be kept updated about RF kill state */ 1658e705c121SKalle Valo iwl_enable_rfkill_int(trans); 1659e705c121SKalle Valo 1660326477e4SJohannes Berg trans_pcie->opmode_down = false; 1661326477e4SJohannes Berg 1662e705c121SKalle Valo /* Set is_down to false here so that...*/ 1663e705c121SKalle Valo trans_pcie->is_down = false; 1664e705c121SKalle Valo 1665e705c121SKalle Valo /* ...rfkill can call stop_device and set it false if needed */ 16669ad8fd0bSJohannes Berg iwl_pcie_check_hw_rf_kill(trans); 1667e705c121SKalle Valo 16684cbb8e50SLuciano Coelho /* Make sure we sync here, because we'll need full access later */ 16694cbb8e50SLuciano Coelho if (low_power) 16704cbb8e50SLuciano Coelho pm_runtime_resume(trans->dev); 16714cbb8e50SLuciano Coelho 1672e705c121SKalle Valo return 0; 1673e705c121SKalle Valo } 1674e705c121SKalle Valo 1675e705c121SKalle Valo static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) 1676e705c121SKalle Valo { 1677e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1678e705c121SKalle Valo int ret; 1679e705c121SKalle Valo 1680e705c121SKalle Valo mutex_lock(&trans_pcie->mutex); 1681e705c121SKalle Valo ret = _iwl_trans_pcie_start_hw(trans, low_power); 1682e705c121SKalle Valo mutex_unlock(&trans_pcie->mutex); 1683e705c121SKalle Valo 1684e705c121SKalle Valo return ret; 1685e705c121SKalle Valo } 1686e705c121SKalle Valo 1687e705c121SKalle Valo static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) 1688e705c121SKalle Valo { 1689e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1690e705c121SKalle Valo 1691e705c121SKalle Valo mutex_lock(&trans_pcie->mutex); 1692e705c121SKalle Valo 1693e705c121SKalle Valo /* disable interrupts - don't enable HW RF kill interrupt */ 1694e705c121SKalle Valo iwl_disable_interrupts(trans); 1695e705c121SKalle Valo 1696e705c121SKalle Valo iwl_pcie_apm_stop(trans, true); 1697e705c121SKalle Valo 1698e705c121SKalle Valo iwl_disable_interrupts(trans); 1699e705c121SKalle Valo 1700e705c121SKalle Valo iwl_pcie_disable_ict(trans); 1701e705c121SKalle Valo 1702e705c121SKalle Valo mutex_unlock(&trans_pcie->mutex); 1703e705c121SKalle Valo 17042e5d4a8fSHaim Dreyfuss iwl_pcie_synchronize_irqs(trans); 1705e705c121SKalle Valo } 1706e705c121SKalle Valo 1707e705c121SKalle Valo static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1708e705c121SKalle Valo { 1709e705c121SKalle Valo writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1710e705c121SKalle Valo } 1711e705c121SKalle Valo 1712e705c121SKalle Valo static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) 1713e705c121SKalle Valo { 1714e705c121SKalle Valo writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1715e705c121SKalle Valo } 1716e705c121SKalle Valo 1717e705c121SKalle Valo static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) 1718e705c121SKalle Valo { 1719e705c121SKalle Valo return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1720e705c121SKalle Valo } 1721e705c121SKalle Valo 1722e705c121SKalle Valo static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) 1723e705c121SKalle Valo { 1724e705c121SKalle Valo iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, 1725e705c121SKalle Valo ((reg & 0x000FFFFF) | (3 << 24))); 1726e705c121SKalle Valo return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); 1727e705c121SKalle Valo } 1728e705c121SKalle Valo 1729e705c121SKalle Valo static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, 1730e705c121SKalle Valo u32 val) 1731e705c121SKalle Valo { 1732e705c121SKalle Valo iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, 1733e705c121SKalle Valo ((addr & 0x000FFFFF) | (3 << 24))); 1734e705c121SKalle Valo iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); 1735e705c121SKalle Valo } 1736e705c121SKalle Valo 1737e705c121SKalle Valo static void iwl_trans_pcie_configure(struct iwl_trans *trans, 1738e705c121SKalle Valo const struct iwl_trans_config *trans_cfg) 1739e705c121SKalle Valo { 1740e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1741e705c121SKalle Valo 1742e705c121SKalle Valo trans_pcie->cmd_queue = trans_cfg->cmd_queue; 1743e705c121SKalle Valo trans_pcie->cmd_fifo = trans_cfg->cmd_fifo; 1744e705c121SKalle Valo trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout; 1745e705c121SKalle Valo if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) 1746e705c121SKalle Valo trans_pcie->n_no_reclaim_cmds = 0; 1747e705c121SKalle Valo else 1748e705c121SKalle Valo trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; 1749e705c121SKalle Valo if (trans_pcie->n_no_reclaim_cmds) 1750e705c121SKalle Valo memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, 1751e705c121SKalle Valo trans_pcie->n_no_reclaim_cmds * sizeof(u8)); 1752e705c121SKalle Valo 17536c4fbcbcSEmmanuel Grumbach trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; 17546c4fbcbcSEmmanuel Grumbach trans_pcie->rx_page_order = 17556c4fbcbcSEmmanuel Grumbach iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); 1756e705c121SKalle Valo 1757e705c121SKalle Valo trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; 1758e705c121SKalle Valo trans_pcie->scd_set_active = trans_cfg->scd_set_active; 175941837ca9SEmmanuel Grumbach trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx; 1760e705c121SKalle Valo 176121cb3222SJohannes Berg trans_pcie->page_offs = trans_cfg->cb_data_offs; 176221cb3222SJohannes Berg trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); 176321cb3222SJohannes Berg 176439bdb17eSSharon Dvir trans->command_groups = trans_cfg->command_groups; 176539bdb17eSSharon Dvir trans->command_groups_size = trans_cfg->command_groups_size; 176639bdb17eSSharon Dvir 1767e705c121SKalle Valo /* Initialize NAPI here - it should be before registering to mac80211 1768e705c121SKalle Valo * in the opmode but after the HW struct is allocated. 1769e705c121SKalle Valo * As this function may be called again in some corner cases don't 1770e705c121SKalle Valo * do anything if NAPI was already initialized. 1771e705c121SKalle Valo */ 1772bce97731SSara Sharon if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) 1773e705c121SKalle Valo init_dummy_netdev(&trans_pcie->napi_dev); 1774e705c121SKalle Valo } 1775e705c121SKalle Valo 1776e705c121SKalle Valo void iwl_trans_pcie_free(struct iwl_trans *trans) 1777e705c121SKalle Valo { 1778e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 17796eb5e529SEmmanuel Grumbach int i; 1780e705c121SKalle Valo 17812e5d4a8fSHaim Dreyfuss iwl_pcie_synchronize_irqs(trans); 1782e705c121SKalle Valo 178313a3a390SSara Sharon if (trans->cfg->gen2) 178413a3a390SSara Sharon iwl_pcie_gen2_tx_free(trans); 178513a3a390SSara Sharon else 1786e705c121SKalle Valo iwl_pcie_tx_free(trans); 1787e705c121SKalle Valo iwl_pcie_rx_free(trans); 1788e705c121SKalle Valo 17892e5d4a8fSHaim Dreyfuss if (trans_pcie->msix_enabled) { 17907c8d91ebSHaim Dreyfuss for (i = 0; i < trans_pcie->alloc_vecs; i++) { 17917c8d91ebSHaim Dreyfuss irq_set_affinity_hint( 17927c8d91ebSHaim Dreyfuss trans_pcie->msix_entries[i].vector, 17937c8d91ebSHaim Dreyfuss NULL); 17947c8d91ebSHaim Dreyfuss } 17952e5d4a8fSHaim Dreyfuss 17962e5d4a8fSHaim Dreyfuss trans_pcie->msix_enabled = false; 17972e5d4a8fSHaim Dreyfuss } else { 1798e705c121SKalle Valo iwl_pcie_free_ict(trans); 17992e5d4a8fSHaim Dreyfuss } 1800e705c121SKalle Valo 1801e705c121SKalle Valo iwl_pcie_free_fw_monitor(trans); 1802e705c121SKalle Valo 18036eb5e529SEmmanuel Grumbach for_each_possible_cpu(i) { 18046eb5e529SEmmanuel Grumbach struct iwl_tso_hdr_page *p = 18056eb5e529SEmmanuel Grumbach per_cpu_ptr(trans_pcie->tso_hdr_page, i); 18066eb5e529SEmmanuel Grumbach 18076eb5e529SEmmanuel Grumbach if (p->page) 18086eb5e529SEmmanuel Grumbach __free_page(p->page); 18096eb5e529SEmmanuel Grumbach } 18106eb5e529SEmmanuel Grumbach 18116eb5e529SEmmanuel Grumbach free_percpu(trans_pcie->tso_hdr_page); 1812a2a57a35SEmmanuel Grumbach mutex_destroy(&trans_pcie->mutex); 1813e705c121SKalle Valo iwl_trans_free(trans); 1814e705c121SKalle Valo } 1815e705c121SKalle Valo 1816e705c121SKalle Valo static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) 1817e705c121SKalle Valo { 1818e705c121SKalle Valo if (state) 1819e705c121SKalle Valo set_bit(STATUS_TPOWER_PMI, &trans->status); 1820e705c121SKalle Valo else 1821e705c121SKalle Valo clear_bit(STATUS_TPOWER_PMI, &trans->status); 1822e705c121SKalle Valo } 1823e705c121SKalle Valo 182423ba9340SEmmanuel Grumbach static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, 1825e705c121SKalle Valo unsigned long *flags) 1826e705c121SKalle Valo { 1827e705c121SKalle Valo int ret; 1828e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1829e705c121SKalle Valo 1830e705c121SKalle Valo spin_lock_irqsave(&trans_pcie->reg_lock, *flags); 1831e705c121SKalle Valo 1832e705c121SKalle Valo if (trans_pcie->cmd_hold_nic_awake) 1833e705c121SKalle Valo goto out; 1834e705c121SKalle Valo 1835e705c121SKalle Valo /* this bit wakes up the NIC */ 1836e705c121SKalle Valo __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1837e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 18386e584873SSara Sharon if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) 1839e705c121SKalle Valo udelay(2); 1840e705c121SKalle Valo 1841e705c121SKalle Valo /* 1842e705c121SKalle Valo * These bits say the device is running, and should keep running for 1843e705c121SKalle Valo * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), 1844e705c121SKalle Valo * but they do not indicate that embedded SRAM is restored yet; 1845e705c121SKalle Valo * 3945 and 4965 have volatile SRAM, and must save/restore contents 1846e705c121SKalle Valo * to/from host DRAM when sleeping/waking for power-saving. 1847e705c121SKalle Valo * Each direction takes approximately 1/4 millisecond; with this 1848e705c121SKalle Valo * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a 1849e705c121SKalle Valo * series of register accesses are expected (e.g. reading Event Log), 1850e705c121SKalle Valo * to keep device from sleeping. 1851e705c121SKalle Valo * 1852e705c121SKalle Valo * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that 1853e705c121SKalle Valo * SRAM is okay/restored. We don't check that here because this call 1854e705c121SKalle Valo * is just for hardware register access; but GP1 MAC_SLEEP check is a 1855e705c121SKalle Valo * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). 1856e705c121SKalle Valo * 1857e705c121SKalle Valo * 5000 series and later (including 1000 series) have non-volatile SRAM, 1858e705c121SKalle Valo * and do not save/restore SRAM when power cycling. 1859e705c121SKalle Valo */ 1860e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 1861e705c121SKalle Valo CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 1862e705c121SKalle Valo (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 1863e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); 1864e705c121SKalle Valo if (unlikely(ret < 0)) { 1865e705c121SKalle Valo iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); 1866e705c121SKalle Valo WARN_ONCE(1, 1867e705c121SKalle Valo "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", 186823ba9340SEmmanuel Grumbach iwl_read32(trans, CSR_GP_CNTRL)); 1869e705c121SKalle Valo spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); 1870e705c121SKalle Valo return false; 1871e705c121SKalle Valo } 1872e705c121SKalle Valo 1873e705c121SKalle Valo out: 1874e705c121SKalle Valo /* 1875e705c121SKalle Valo * Fool sparse by faking we release the lock - sparse will 1876e705c121SKalle Valo * track nic_access anyway. 1877e705c121SKalle Valo */ 1878e705c121SKalle Valo __release(&trans_pcie->reg_lock); 1879e705c121SKalle Valo return true; 1880e705c121SKalle Valo } 1881e705c121SKalle Valo 1882e705c121SKalle Valo static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, 1883e705c121SKalle Valo unsigned long *flags) 1884e705c121SKalle Valo { 1885e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1886e705c121SKalle Valo 1887e705c121SKalle Valo lockdep_assert_held(&trans_pcie->reg_lock); 1888e705c121SKalle Valo 1889e705c121SKalle Valo /* 1890e705c121SKalle Valo * Fool sparse by faking we acquiring the lock - sparse will 1891e705c121SKalle Valo * track nic_access anyway. 1892e705c121SKalle Valo */ 1893e705c121SKalle Valo __acquire(&trans_pcie->reg_lock); 1894e705c121SKalle Valo 1895e705c121SKalle Valo if (trans_pcie->cmd_hold_nic_awake) 1896e705c121SKalle Valo goto out; 1897e705c121SKalle Valo 1898e705c121SKalle Valo __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1899e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1900e705c121SKalle Valo /* 1901e705c121SKalle Valo * Above we read the CSR_GP_CNTRL register, which will flush 1902e705c121SKalle Valo * any previous writes, but we need the write that clears the 1903e705c121SKalle Valo * MAC_ACCESS_REQ bit to be performed before any other writes 1904e705c121SKalle Valo * scheduled on different CPUs (after we drop reg_lock). 1905e705c121SKalle Valo */ 1906e705c121SKalle Valo mmiowb(); 1907e705c121SKalle Valo out: 1908e705c121SKalle Valo spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); 1909e705c121SKalle Valo } 1910e705c121SKalle Valo 1911e705c121SKalle Valo static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, 1912e705c121SKalle Valo void *buf, int dwords) 1913e705c121SKalle Valo { 1914e705c121SKalle Valo unsigned long flags; 1915e705c121SKalle Valo int offs, ret = 0; 1916e705c121SKalle Valo u32 *vals = buf; 1917e705c121SKalle Valo 191823ba9340SEmmanuel Grumbach if (iwl_trans_grab_nic_access(trans, &flags)) { 1919e705c121SKalle Valo iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); 1920e705c121SKalle Valo for (offs = 0; offs < dwords; offs++) 1921e705c121SKalle Valo vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); 1922e705c121SKalle Valo iwl_trans_release_nic_access(trans, &flags); 1923e705c121SKalle Valo } else { 1924e705c121SKalle Valo ret = -EBUSY; 1925e705c121SKalle Valo } 1926e705c121SKalle Valo return ret; 1927e705c121SKalle Valo } 1928e705c121SKalle Valo 1929e705c121SKalle Valo static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, 1930e705c121SKalle Valo const void *buf, int dwords) 1931e705c121SKalle Valo { 1932e705c121SKalle Valo unsigned long flags; 1933e705c121SKalle Valo int offs, ret = 0; 1934e705c121SKalle Valo const u32 *vals = buf; 1935e705c121SKalle Valo 193623ba9340SEmmanuel Grumbach if (iwl_trans_grab_nic_access(trans, &flags)) { 1937e705c121SKalle Valo iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); 1938e705c121SKalle Valo for (offs = 0; offs < dwords; offs++) 1939e705c121SKalle Valo iwl_write32(trans, HBUS_TARG_MEM_WDAT, 1940e705c121SKalle Valo vals ? vals[offs] : 0); 1941e705c121SKalle Valo iwl_trans_release_nic_access(trans, &flags); 1942e705c121SKalle Valo } else { 1943e705c121SKalle Valo ret = -EBUSY; 1944e705c121SKalle Valo } 1945e705c121SKalle Valo return ret; 1946e705c121SKalle Valo } 1947e705c121SKalle Valo 1948e705c121SKalle Valo static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, 1949e705c121SKalle Valo unsigned long txqs, 1950e705c121SKalle Valo bool freeze) 1951e705c121SKalle Valo { 1952e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1953e705c121SKalle Valo int queue; 1954e705c121SKalle Valo 1955e705c121SKalle Valo for_each_set_bit(queue, &txqs, BITS_PER_LONG) { 1956b2a3b1c1SSara Sharon struct iwl_txq *txq = trans_pcie->txq[queue]; 1957e705c121SKalle Valo unsigned long now; 1958e705c121SKalle Valo 1959e705c121SKalle Valo spin_lock_bh(&txq->lock); 1960e705c121SKalle Valo 1961e705c121SKalle Valo now = jiffies; 1962e705c121SKalle Valo 1963e705c121SKalle Valo if (txq->frozen == freeze) 1964e705c121SKalle Valo goto next_queue; 1965e705c121SKalle Valo 1966e705c121SKalle Valo IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", 1967e705c121SKalle Valo freeze ? "Freezing" : "Waking", queue); 1968e705c121SKalle Valo 1969e705c121SKalle Valo txq->frozen = freeze; 1970e705c121SKalle Valo 1971bb98ecd4SSara Sharon if (txq->read_ptr == txq->write_ptr) 1972e705c121SKalle Valo goto next_queue; 1973e705c121SKalle Valo 1974e705c121SKalle Valo if (freeze) { 1975e705c121SKalle Valo if (unlikely(time_after(now, 1976e705c121SKalle Valo txq->stuck_timer.expires))) { 1977e705c121SKalle Valo /* 1978e705c121SKalle Valo * The timer should have fired, maybe it is 1979e705c121SKalle Valo * spinning right now on the lock. 1980e705c121SKalle Valo */ 1981e705c121SKalle Valo goto next_queue; 1982e705c121SKalle Valo } 1983e705c121SKalle Valo /* remember how long until the timer fires */ 1984e705c121SKalle Valo txq->frozen_expiry_remainder = 1985e705c121SKalle Valo txq->stuck_timer.expires - now; 1986e705c121SKalle Valo del_timer(&txq->stuck_timer); 1987e705c121SKalle Valo goto next_queue; 1988e705c121SKalle Valo } 1989e705c121SKalle Valo 1990e705c121SKalle Valo /* 1991e705c121SKalle Valo * Wake a non-empty queue -> arm timer with the 1992e705c121SKalle Valo * remainder before it froze 1993e705c121SKalle Valo */ 1994e705c121SKalle Valo mod_timer(&txq->stuck_timer, 1995e705c121SKalle Valo now + txq->frozen_expiry_remainder); 1996e705c121SKalle Valo 1997e705c121SKalle Valo next_queue: 1998e705c121SKalle Valo spin_unlock_bh(&txq->lock); 1999e705c121SKalle Valo } 2000e705c121SKalle Valo } 2001e705c121SKalle Valo 20020cd58eaaSEmmanuel Grumbach static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) 20030cd58eaaSEmmanuel Grumbach { 20040cd58eaaSEmmanuel Grumbach struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 20050cd58eaaSEmmanuel Grumbach int i; 20060cd58eaaSEmmanuel Grumbach 20070cd58eaaSEmmanuel Grumbach for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 2008b2a3b1c1SSara Sharon struct iwl_txq *txq = trans_pcie->txq[i]; 20090cd58eaaSEmmanuel Grumbach 20100cd58eaaSEmmanuel Grumbach if (i == trans_pcie->cmd_queue) 20110cd58eaaSEmmanuel Grumbach continue; 20120cd58eaaSEmmanuel Grumbach 20130cd58eaaSEmmanuel Grumbach spin_lock_bh(&txq->lock); 20140cd58eaaSEmmanuel Grumbach 20150cd58eaaSEmmanuel Grumbach if (!block && !(WARN_ON_ONCE(!txq->block))) { 20160cd58eaaSEmmanuel Grumbach txq->block--; 20170cd58eaaSEmmanuel Grumbach if (!txq->block) { 20180cd58eaaSEmmanuel Grumbach iwl_write32(trans, HBUS_TARG_WRPTR, 2019bb98ecd4SSara Sharon txq->write_ptr | (i << 8)); 20200cd58eaaSEmmanuel Grumbach } 20210cd58eaaSEmmanuel Grumbach } else if (block) { 20220cd58eaaSEmmanuel Grumbach txq->block++; 20230cd58eaaSEmmanuel Grumbach } 20240cd58eaaSEmmanuel Grumbach 20250cd58eaaSEmmanuel Grumbach spin_unlock_bh(&txq->lock); 20260cd58eaaSEmmanuel Grumbach } 20270cd58eaaSEmmanuel Grumbach } 20280cd58eaaSEmmanuel Grumbach 2029e705c121SKalle Valo #define IWL_FLUSH_WAIT_MS 2000 2030e705c121SKalle Valo 203138398efbSSara Sharon void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) 203238398efbSSara Sharon { 2033afb84431SEmmanuel Grumbach u32 txq_id = txq->id; 2034afb84431SEmmanuel Grumbach u32 status; 2035afb84431SEmmanuel Grumbach bool active; 2036afb84431SEmmanuel Grumbach u8 fifo; 203738398efbSSara Sharon 2038afb84431SEmmanuel Grumbach if (trans->cfg->use_tfh) { 2039afb84431SEmmanuel Grumbach IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, 2040bb98ecd4SSara Sharon txq->read_ptr, txq->write_ptr); 2041ae79785fSSara Sharon /* TODO: access new SCD registers and dump them */ 2042ae79785fSSara Sharon return; 2043afb84431SEmmanuel Grumbach } 2044ae79785fSSara Sharon 2045afb84431SEmmanuel Grumbach status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); 2046afb84431SEmmanuel Grumbach fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; 2047afb84431SEmmanuel Grumbach active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); 204838398efbSSara Sharon 204938398efbSSara Sharon IWL_ERR(trans, 2050afb84431SEmmanuel Grumbach "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", 2051afb84431SEmmanuel Grumbach txq_id, active ? "" : "in", fifo, 2052afb84431SEmmanuel Grumbach jiffies_to_msecs(txq->wd_timeout), 2053afb84431SEmmanuel Grumbach txq->read_ptr, txq->write_ptr, 2054afb84431SEmmanuel Grumbach iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & 205538398efbSSara Sharon (TFD_QUEUE_SIZE_MAX - 1), 2056afb84431SEmmanuel Grumbach iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & 2057afb84431SEmmanuel Grumbach (TFD_QUEUE_SIZE_MAX - 1), 2058afb84431SEmmanuel Grumbach iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); 205938398efbSSara Sharon } 206038398efbSSara Sharon 2061d6d517b7SSara Sharon static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) 2062e705c121SKalle Valo { 2063e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2064e705c121SKalle Valo struct iwl_txq *txq; 2065e705c121SKalle Valo unsigned long now = jiffies; 2066e705c121SKalle Valo u8 wr_ptr; 2067e705c121SKalle Valo 2068d6d517b7SSara Sharon if (!test_bit(txq_idx, trans_pcie->queue_used)) 2069d6d517b7SSara Sharon return -EINVAL; 2070e705c121SKalle Valo 2071d6d517b7SSara Sharon IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); 2072d6d517b7SSara Sharon txq = trans_pcie->txq[txq_idx]; 2073bb98ecd4SSara Sharon wr_ptr = ACCESS_ONCE(txq->write_ptr); 2074e705c121SKalle Valo 2075bb98ecd4SSara Sharon while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) && 2076e705c121SKalle Valo !time_after(jiffies, 2077e705c121SKalle Valo now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { 2078bb98ecd4SSara Sharon u8 write_ptr = ACCESS_ONCE(txq->write_ptr); 2079e705c121SKalle Valo 2080e705c121SKalle Valo if (WARN_ONCE(wr_ptr != write_ptr, 2081e705c121SKalle Valo "WR pointer moved while flushing %d -> %d\n", 2082e705c121SKalle Valo wr_ptr, write_ptr)) 2083e705c121SKalle Valo return -ETIMEDOUT; 2084192185d6SJohannes Berg usleep_range(1000, 2000); 2085e705c121SKalle Valo } 2086e705c121SKalle Valo 2087bb98ecd4SSara Sharon if (txq->read_ptr != txq->write_ptr) { 2088e705c121SKalle Valo IWL_ERR(trans, 2089d6d517b7SSara Sharon "fail to flush all tx fifo queues Q %d\n", txq_idx); 2090d6d517b7SSara Sharon iwl_trans_pcie_log_scd_error(trans, txq); 2091d6d517b7SSara Sharon return -ETIMEDOUT; 2092e705c121SKalle Valo } 2093e705c121SKalle Valo 2094d6d517b7SSara Sharon IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx); 2095d6d517b7SSara Sharon 2096d6d517b7SSara Sharon return 0; 2097d6d517b7SSara Sharon } 2098d6d517b7SSara Sharon 2099d6d517b7SSara Sharon static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) 2100d6d517b7SSara Sharon { 2101d6d517b7SSara Sharon struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2102d6d517b7SSara Sharon int cnt; 2103d6d517b7SSara Sharon int ret = 0; 2104d6d517b7SSara Sharon 2105d6d517b7SSara Sharon /* waiting for all the tx frames complete might take a while */ 2106d6d517b7SSara Sharon for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { 2107d6d517b7SSara Sharon 2108d6d517b7SSara Sharon if (cnt == trans_pcie->cmd_queue) 2109d6d517b7SSara Sharon continue; 2110d6d517b7SSara Sharon if (!test_bit(cnt, trans_pcie->queue_used)) 2111d6d517b7SSara Sharon continue; 2112d6d517b7SSara Sharon if (!(BIT(cnt) & txq_bm)) 2113d6d517b7SSara Sharon continue; 2114d6d517b7SSara Sharon 2115d6d517b7SSara Sharon ret = iwl_trans_pcie_wait_txq_empty(trans, cnt); 211638398efbSSara Sharon if (ret) 2117d6d517b7SSara Sharon break; 2118d6d517b7SSara Sharon } 2119e705c121SKalle Valo 2120e705c121SKalle Valo return ret; 2121e705c121SKalle Valo } 2122e705c121SKalle Valo 2123e705c121SKalle Valo static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, 2124e705c121SKalle Valo u32 mask, u32 value) 2125e705c121SKalle Valo { 2126e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2127e705c121SKalle Valo unsigned long flags; 2128e705c121SKalle Valo 2129e705c121SKalle Valo spin_lock_irqsave(&trans_pcie->reg_lock, flags); 2130e705c121SKalle Valo __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); 2131e705c121SKalle Valo spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 2132e705c121SKalle Valo } 2133e705c121SKalle Valo 2134c24c7f58SLuca Coelho static void iwl_trans_pcie_ref(struct iwl_trans *trans) 2135e705c121SKalle Valo { 2136e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2137e705c121SKalle Valo 2138e705c121SKalle Valo if (iwlwifi_mod_params.d0i3_disable) 2139e705c121SKalle Valo return; 2140e705c121SKalle Valo 2141b3ff1270SLuca Coelho pm_runtime_get(&trans_pcie->pci_dev->dev); 21425d93f3a2SLuca Coelho 21435d93f3a2SLuca Coelho #ifdef CONFIG_PM 21445d93f3a2SLuca Coelho IWL_DEBUG_RPM(trans, "runtime usage count: %d\n", 21455d93f3a2SLuca Coelho atomic_read(&trans_pcie->pci_dev->dev.power.usage_count)); 21465d93f3a2SLuca Coelho #endif /* CONFIG_PM */ 2147e705c121SKalle Valo } 2148e705c121SKalle Valo 2149c24c7f58SLuca Coelho static void iwl_trans_pcie_unref(struct iwl_trans *trans) 2150e705c121SKalle Valo { 2151e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2152e705c121SKalle Valo 2153e705c121SKalle Valo if (iwlwifi_mod_params.d0i3_disable) 2154e705c121SKalle Valo return; 2155e705c121SKalle Valo 2156b3ff1270SLuca Coelho pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev); 2157b3ff1270SLuca Coelho pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev); 2158b3ff1270SLuca Coelho 21595d93f3a2SLuca Coelho #ifdef CONFIG_PM 21605d93f3a2SLuca Coelho IWL_DEBUG_RPM(trans, "runtime usage count: %d\n", 21615d93f3a2SLuca Coelho atomic_read(&trans_pcie->pci_dev->dev.power.usage_count)); 21625d93f3a2SLuca Coelho #endif /* CONFIG_PM */ 2163e705c121SKalle Valo } 2164e705c121SKalle Valo 2165e705c121SKalle Valo static const char *get_csr_string(int cmd) 2166e705c121SKalle Valo { 2167e705c121SKalle Valo #define IWL_CMD(x) case x: return #x 2168e705c121SKalle Valo switch (cmd) { 2169e705c121SKalle Valo IWL_CMD(CSR_HW_IF_CONFIG_REG); 2170e705c121SKalle Valo IWL_CMD(CSR_INT_COALESCING); 2171e705c121SKalle Valo IWL_CMD(CSR_INT); 2172e705c121SKalle Valo IWL_CMD(CSR_INT_MASK); 2173e705c121SKalle Valo IWL_CMD(CSR_FH_INT_STATUS); 2174e705c121SKalle Valo IWL_CMD(CSR_GPIO_IN); 2175e705c121SKalle Valo IWL_CMD(CSR_RESET); 2176e705c121SKalle Valo IWL_CMD(CSR_GP_CNTRL); 2177e705c121SKalle Valo IWL_CMD(CSR_HW_REV); 2178e705c121SKalle Valo IWL_CMD(CSR_EEPROM_REG); 2179e705c121SKalle Valo IWL_CMD(CSR_EEPROM_GP); 2180e705c121SKalle Valo IWL_CMD(CSR_OTP_GP_REG); 2181e705c121SKalle Valo IWL_CMD(CSR_GIO_REG); 2182e705c121SKalle Valo IWL_CMD(CSR_GP_UCODE_REG); 2183e705c121SKalle Valo IWL_CMD(CSR_GP_DRIVER_REG); 2184e705c121SKalle Valo IWL_CMD(CSR_UCODE_DRV_GP1); 2185e705c121SKalle Valo IWL_CMD(CSR_UCODE_DRV_GP2); 2186e705c121SKalle Valo IWL_CMD(CSR_LED_REG); 2187e705c121SKalle Valo IWL_CMD(CSR_DRAM_INT_TBL_REG); 2188e705c121SKalle Valo IWL_CMD(CSR_GIO_CHICKEN_BITS); 2189e705c121SKalle Valo IWL_CMD(CSR_ANA_PLL_CFG); 2190e705c121SKalle Valo IWL_CMD(CSR_HW_REV_WA_REG); 2191e705c121SKalle Valo IWL_CMD(CSR_MONITOR_STATUS_REG); 2192e705c121SKalle Valo IWL_CMD(CSR_DBG_HPET_MEM_REG); 2193e705c121SKalle Valo default: 2194e705c121SKalle Valo return "UNKNOWN"; 2195e705c121SKalle Valo } 2196e705c121SKalle Valo #undef IWL_CMD 2197e705c121SKalle Valo } 2198e705c121SKalle Valo 2199e705c121SKalle Valo void iwl_pcie_dump_csr(struct iwl_trans *trans) 2200e705c121SKalle Valo { 2201e705c121SKalle Valo int i; 2202e705c121SKalle Valo static const u32 csr_tbl[] = { 2203e705c121SKalle Valo CSR_HW_IF_CONFIG_REG, 2204e705c121SKalle Valo CSR_INT_COALESCING, 2205e705c121SKalle Valo CSR_INT, 2206e705c121SKalle Valo CSR_INT_MASK, 2207e705c121SKalle Valo CSR_FH_INT_STATUS, 2208e705c121SKalle Valo CSR_GPIO_IN, 2209e705c121SKalle Valo CSR_RESET, 2210e705c121SKalle Valo CSR_GP_CNTRL, 2211e705c121SKalle Valo CSR_HW_REV, 2212e705c121SKalle Valo CSR_EEPROM_REG, 2213e705c121SKalle Valo CSR_EEPROM_GP, 2214e705c121SKalle Valo CSR_OTP_GP_REG, 2215e705c121SKalle Valo CSR_GIO_REG, 2216e705c121SKalle Valo CSR_GP_UCODE_REG, 2217e705c121SKalle Valo CSR_GP_DRIVER_REG, 2218e705c121SKalle Valo CSR_UCODE_DRV_GP1, 2219e705c121SKalle Valo CSR_UCODE_DRV_GP2, 2220e705c121SKalle Valo CSR_LED_REG, 2221e705c121SKalle Valo CSR_DRAM_INT_TBL_REG, 2222e705c121SKalle Valo CSR_GIO_CHICKEN_BITS, 2223e705c121SKalle Valo CSR_ANA_PLL_CFG, 2224e705c121SKalle Valo CSR_MONITOR_STATUS_REG, 2225e705c121SKalle Valo CSR_HW_REV_WA_REG, 2226e705c121SKalle Valo CSR_DBG_HPET_MEM_REG 2227e705c121SKalle Valo }; 2228e705c121SKalle Valo IWL_ERR(trans, "CSR values:\n"); 2229e705c121SKalle Valo IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " 2230e705c121SKalle Valo "CSR_INT_PERIODIC_REG)\n"); 2231e705c121SKalle Valo for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { 2232e705c121SKalle Valo IWL_ERR(trans, " %25s: 0X%08x\n", 2233e705c121SKalle Valo get_csr_string(csr_tbl[i]), 2234e705c121SKalle Valo iwl_read32(trans, csr_tbl[i])); 2235e705c121SKalle Valo } 2236e705c121SKalle Valo } 2237e705c121SKalle Valo 2238e705c121SKalle Valo #ifdef CONFIG_IWLWIFI_DEBUGFS 2239e705c121SKalle Valo /* create and remove of files */ 2240e705c121SKalle Valo #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 2241e705c121SKalle Valo if (!debugfs_create_file(#name, mode, parent, trans, \ 2242e705c121SKalle Valo &iwl_dbgfs_##name##_ops)) \ 2243e705c121SKalle Valo goto err; \ 2244e705c121SKalle Valo } while (0) 2245e705c121SKalle Valo 2246e705c121SKalle Valo /* file operation */ 2247e705c121SKalle Valo #define DEBUGFS_READ_FILE_OPS(name) \ 2248e705c121SKalle Valo static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2249e705c121SKalle Valo .read = iwl_dbgfs_##name##_read, \ 2250e705c121SKalle Valo .open = simple_open, \ 2251e705c121SKalle Valo .llseek = generic_file_llseek, \ 2252e705c121SKalle Valo }; 2253e705c121SKalle Valo 2254e705c121SKalle Valo #define DEBUGFS_WRITE_FILE_OPS(name) \ 2255e705c121SKalle Valo static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2256e705c121SKalle Valo .write = iwl_dbgfs_##name##_write, \ 2257e705c121SKalle Valo .open = simple_open, \ 2258e705c121SKalle Valo .llseek = generic_file_llseek, \ 2259e705c121SKalle Valo }; 2260e705c121SKalle Valo 2261e705c121SKalle Valo #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 2262e705c121SKalle Valo static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2263e705c121SKalle Valo .write = iwl_dbgfs_##name##_write, \ 2264e705c121SKalle Valo .read = iwl_dbgfs_##name##_read, \ 2265e705c121SKalle Valo .open = simple_open, \ 2266e705c121SKalle Valo .llseek = generic_file_llseek, \ 2267e705c121SKalle Valo }; 2268e705c121SKalle Valo 2269e705c121SKalle Valo static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, 2270e705c121SKalle Valo char __user *user_buf, 2271e705c121SKalle Valo size_t count, loff_t *ppos) 2272e705c121SKalle Valo { 2273e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2274e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2275e705c121SKalle Valo struct iwl_txq *txq; 2276e705c121SKalle Valo char *buf; 2277e705c121SKalle Valo int pos = 0; 2278e705c121SKalle Valo int cnt; 2279e705c121SKalle Valo int ret; 2280e705c121SKalle Valo size_t bufsz; 2281e705c121SKalle Valo 2282e705c121SKalle Valo bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues; 2283e705c121SKalle Valo 2284b2a3b1c1SSara Sharon if (!trans_pcie->txq_memory) 2285e705c121SKalle Valo return -EAGAIN; 2286e705c121SKalle Valo 2287e705c121SKalle Valo buf = kzalloc(bufsz, GFP_KERNEL); 2288e705c121SKalle Valo if (!buf) 2289e705c121SKalle Valo return -ENOMEM; 2290e705c121SKalle Valo 2291e705c121SKalle Valo for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { 2292b2a3b1c1SSara Sharon txq = trans_pcie->txq[cnt]; 2293e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 2294e705c121SKalle Valo "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n", 2295bb98ecd4SSara Sharon cnt, txq->read_ptr, txq->write_ptr, 2296e705c121SKalle Valo !!test_bit(cnt, trans_pcie->queue_used), 2297e705c121SKalle Valo !!test_bit(cnt, trans_pcie->queue_stopped), 2298e705c121SKalle Valo txq->need_update, txq->frozen, 2299e705c121SKalle Valo (cnt == trans_pcie->cmd_queue ? " HCMD" : "")); 2300e705c121SKalle Valo } 2301e705c121SKalle Valo ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2302e705c121SKalle Valo kfree(buf); 2303e705c121SKalle Valo return ret; 2304e705c121SKalle Valo } 2305e705c121SKalle Valo 2306e705c121SKalle Valo static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, 2307e705c121SKalle Valo char __user *user_buf, 2308e705c121SKalle Valo size_t count, loff_t *ppos) 2309e705c121SKalle Valo { 2310e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2311e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 231278485054SSara Sharon char *buf; 231378485054SSara Sharon int pos = 0, i, ret; 231478485054SSara Sharon size_t bufsz = sizeof(buf); 2315e705c121SKalle Valo 231678485054SSara Sharon bufsz = sizeof(char) * 121 * trans->num_rx_queues; 231778485054SSara Sharon 231878485054SSara Sharon if (!trans_pcie->rxq) 231978485054SSara Sharon return -EAGAIN; 232078485054SSara Sharon 232178485054SSara Sharon buf = kzalloc(bufsz, GFP_KERNEL); 232278485054SSara Sharon if (!buf) 232378485054SSara Sharon return -ENOMEM; 232478485054SSara Sharon 232578485054SSara Sharon for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { 232678485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 232778485054SSara Sharon 232878485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", 232978485054SSara Sharon i); 233078485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", 2331e705c121SKalle Valo rxq->read); 233278485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", 2333e705c121SKalle Valo rxq->write); 233478485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", 2335e705c121SKalle Valo rxq->write_actual); 233678485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", 2337e705c121SKalle Valo rxq->need_update); 233878485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", 2339e705c121SKalle Valo rxq->free_count); 2340e705c121SKalle Valo if (rxq->rb_stts) { 234178485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, 234278485054SSara Sharon "\tclosed_rb_num: %u\n", 234378485054SSara Sharon le16_to_cpu(rxq->rb_stts->closed_rb_num) & 234478485054SSara Sharon 0x0FFF); 2345e705c121SKalle Valo } else { 2346e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 234778485054SSara Sharon "\tclosed_rb_num: Not Allocated\n"); 2348e705c121SKalle Valo } 234978485054SSara Sharon } 235078485054SSara Sharon ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 235178485054SSara Sharon kfree(buf); 235278485054SSara Sharon 235378485054SSara Sharon return ret; 2354e705c121SKalle Valo } 2355e705c121SKalle Valo 2356e705c121SKalle Valo static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 2357e705c121SKalle Valo char __user *user_buf, 2358e705c121SKalle Valo size_t count, loff_t *ppos) 2359e705c121SKalle Valo { 2360e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2361e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2362e705c121SKalle Valo struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2363e705c121SKalle Valo 2364e705c121SKalle Valo int pos = 0; 2365e705c121SKalle Valo char *buf; 2366e705c121SKalle Valo int bufsz = 24 * 64; /* 24 items * 64 char per item */ 2367e705c121SKalle Valo ssize_t ret; 2368e705c121SKalle Valo 2369e705c121SKalle Valo buf = kzalloc(bufsz, GFP_KERNEL); 2370e705c121SKalle Valo if (!buf) 2371e705c121SKalle Valo return -ENOMEM; 2372e705c121SKalle Valo 2373e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 2374e705c121SKalle Valo "Interrupt Statistics Report:\n"); 2375e705c121SKalle Valo 2376e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", 2377e705c121SKalle Valo isr_stats->hw); 2378e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", 2379e705c121SKalle Valo isr_stats->sw); 2380e705c121SKalle Valo if (isr_stats->sw || isr_stats->hw) { 2381e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 2382e705c121SKalle Valo "\tLast Restarting Code: 0x%X\n", 2383e705c121SKalle Valo isr_stats->err_code); 2384e705c121SKalle Valo } 2385e705c121SKalle Valo #ifdef CONFIG_IWLWIFI_DEBUG 2386e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", 2387e705c121SKalle Valo isr_stats->sch); 2388e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", 2389e705c121SKalle Valo isr_stats->alive); 2390e705c121SKalle Valo #endif 2391e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 2392e705c121SKalle Valo "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); 2393e705c121SKalle Valo 2394e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", 2395e705c121SKalle Valo isr_stats->ctkill); 2396e705c121SKalle Valo 2397e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", 2398e705c121SKalle Valo isr_stats->wakeup); 2399e705c121SKalle Valo 2400e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 2401e705c121SKalle Valo "Rx command responses:\t\t %u\n", isr_stats->rx); 2402e705c121SKalle Valo 2403e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", 2404e705c121SKalle Valo isr_stats->tx); 2405e705c121SKalle Valo 2406e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", 2407e705c121SKalle Valo isr_stats->unhandled); 2408e705c121SKalle Valo 2409e705c121SKalle Valo ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2410e705c121SKalle Valo kfree(buf); 2411e705c121SKalle Valo return ret; 2412e705c121SKalle Valo } 2413e705c121SKalle Valo 2414e705c121SKalle Valo static ssize_t iwl_dbgfs_interrupt_write(struct file *file, 2415e705c121SKalle Valo const char __user *user_buf, 2416e705c121SKalle Valo size_t count, loff_t *ppos) 2417e705c121SKalle Valo { 2418e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2419e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2420e705c121SKalle Valo struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2421e705c121SKalle Valo u32 reset_flag; 2422078f1131SJohannes Berg int ret; 2423e705c121SKalle Valo 2424078f1131SJohannes Berg ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag); 2425078f1131SJohannes Berg if (ret) 2426078f1131SJohannes Berg return ret; 2427e705c121SKalle Valo if (reset_flag == 0) 2428e705c121SKalle Valo memset(isr_stats, 0, sizeof(*isr_stats)); 2429e705c121SKalle Valo 2430e705c121SKalle Valo return count; 2431e705c121SKalle Valo } 2432e705c121SKalle Valo 2433e705c121SKalle Valo static ssize_t iwl_dbgfs_csr_write(struct file *file, 2434e705c121SKalle Valo const char __user *user_buf, 2435e705c121SKalle Valo size_t count, loff_t *ppos) 2436e705c121SKalle Valo { 2437e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2438e705c121SKalle Valo 2439e705c121SKalle Valo iwl_pcie_dump_csr(trans); 2440e705c121SKalle Valo 2441e705c121SKalle Valo return count; 2442e705c121SKalle Valo } 2443e705c121SKalle Valo 2444e705c121SKalle Valo static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 2445e705c121SKalle Valo char __user *user_buf, 2446e705c121SKalle Valo size_t count, loff_t *ppos) 2447e705c121SKalle Valo { 2448e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2449e705c121SKalle Valo char *buf = NULL; 2450e705c121SKalle Valo ssize_t ret; 2451e705c121SKalle Valo 2452e705c121SKalle Valo ret = iwl_dump_fh(trans, &buf); 2453e705c121SKalle Valo if (ret < 0) 2454e705c121SKalle Valo return ret; 2455e705c121SKalle Valo if (!buf) 2456e705c121SKalle Valo return -EINVAL; 2457e705c121SKalle Valo ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 2458e705c121SKalle Valo kfree(buf); 2459e705c121SKalle Valo return ret; 2460e705c121SKalle Valo } 2461e705c121SKalle Valo 2462fa4de7f7SJohannes Berg static ssize_t iwl_dbgfs_rfkill_read(struct file *file, 2463fa4de7f7SJohannes Berg char __user *user_buf, 2464fa4de7f7SJohannes Berg size_t count, loff_t *ppos) 2465fa4de7f7SJohannes Berg { 2466fa4de7f7SJohannes Berg struct iwl_trans *trans = file->private_data; 2467fa4de7f7SJohannes Berg struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2468fa4de7f7SJohannes Berg char buf[100]; 2469fa4de7f7SJohannes Berg int pos; 2470fa4de7f7SJohannes Berg 2471fa4de7f7SJohannes Berg pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n", 2472fa4de7f7SJohannes Berg trans_pcie->debug_rfkill, 2473fa4de7f7SJohannes Berg !(iwl_read32(trans, CSR_GP_CNTRL) & 2474fa4de7f7SJohannes Berg CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)); 2475fa4de7f7SJohannes Berg 2476fa4de7f7SJohannes Berg return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2477fa4de7f7SJohannes Berg } 2478fa4de7f7SJohannes Berg 2479fa4de7f7SJohannes Berg static ssize_t iwl_dbgfs_rfkill_write(struct file *file, 2480fa4de7f7SJohannes Berg const char __user *user_buf, 2481fa4de7f7SJohannes Berg size_t count, loff_t *ppos) 2482fa4de7f7SJohannes Berg { 2483fa4de7f7SJohannes Berg struct iwl_trans *trans = file->private_data; 2484fa4de7f7SJohannes Berg struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2485fa4de7f7SJohannes Berg bool old = trans_pcie->debug_rfkill; 2486fa4de7f7SJohannes Berg int ret; 2487fa4de7f7SJohannes Berg 2488fa4de7f7SJohannes Berg ret = kstrtobool_from_user(user_buf, count, &trans_pcie->debug_rfkill); 2489fa4de7f7SJohannes Berg if (ret) 2490fa4de7f7SJohannes Berg return ret; 2491fa4de7f7SJohannes Berg if (old == trans_pcie->debug_rfkill) 2492fa4de7f7SJohannes Berg return count; 2493fa4de7f7SJohannes Berg IWL_WARN(trans, "changing debug rfkill %d->%d\n", 2494fa4de7f7SJohannes Berg old, trans_pcie->debug_rfkill); 2495fa4de7f7SJohannes Berg iwl_pcie_handle_rfkill_irq(trans); 2496fa4de7f7SJohannes Berg 2497fa4de7f7SJohannes Berg return count; 2498fa4de7f7SJohannes Berg } 2499fa4de7f7SJohannes Berg 2500e705c121SKalle Valo DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 2501e705c121SKalle Valo DEBUGFS_READ_FILE_OPS(fh_reg); 2502e705c121SKalle Valo DEBUGFS_READ_FILE_OPS(rx_queue); 2503e705c121SKalle Valo DEBUGFS_READ_FILE_OPS(tx_queue); 2504e705c121SKalle Valo DEBUGFS_WRITE_FILE_OPS(csr); 2505fa4de7f7SJohannes Berg DEBUGFS_READ_WRITE_FILE_OPS(rfkill); 2506e705c121SKalle Valo 2507f8a1edb7SJohannes Berg /* Create the debugfs files and directories */ 2508f8a1edb7SJohannes Berg int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) 2509e705c121SKalle Valo { 2510f8a1edb7SJohannes Berg struct dentry *dir = trans->dbgfs_dir; 2511f8a1edb7SJohannes Berg 2512e705c121SKalle Valo DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); 2513e705c121SKalle Valo DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); 2514e705c121SKalle Valo DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); 2515e705c121SKalle Valo DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); 2516e705c121SKalle Valo DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); 2517fa4de7f7SJohannes Berg DEBUGFS_ADD_FILE(rfkill, dir, S_IWUSR | S_IRUSR); 2518e705c121SKalle Valo return 0; 2519e705c121SKalle Valo 2520e705c121SKalle Valo err: 2521e705c121SKalle Valo IWL_ERR(trans, "failed to create the trans debugfs entry\n"); 2522e705c121SKalle Valo return -ENOMEM; 2523e705c121SKalle Valo } 2524e705c121SKalle Valo #endif /*CONFIG_IWLWIFI_DEBUGFS */ 2525e705c121SKalle Valo 25266983ba69SSara Sharon static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) 2527e705c121SKalle Valo { 25283cd1980bSSara Sharon struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2529e705c121SKalle Valo u32 cmdlen = 0; 2530e705c121SKalle Valo int i; 2531e705c121SKalle Valo 25323cd1980bSSara Sharon for (i = 0; i < trans_pcie->max_tbs; i++) 25336983ba69SSara Sharon cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i); 2534e705c121SKalle Valo 2535e705c121SKalle Valo return cmdlen; 2536e705c121SKalle Valo } 2537e705c121SKalle Valo 2538e705c121SKalle Valo static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, 2539e705c121SKalle Valo struct iwl_fw_error_dump_data **data, 2540e705c121SKalle Valo int allocated_rb_nums) 2541e705c121SKalle Valo { 2542e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2543e705c121SKalle Valo int max_len = PAGE_SIZE << trans_pcie->rx_page_order; 254478485054SSara Sharon /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 254578485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 2546e705c121SKalle Valo u32 i, r, j, rb_len = 0; 2547e705c121SKalle Valo 2548e705c121SKalle Valo spin_lock(&rxq->lock); 2549e705c121SKalle Valo 2550e705c121SKalle Valo r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; 2551e705c121SKalle Valo 2552e705c121SKalle Valo for (i = rxq->read, j = 0; 2553e705c121SKalle Valo i != r && j < allocated_rb_nums; 2554e705c121SKalle Valo i = (i + 1) & RX_QUEUE_MASK, j++) { 2555e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; 2556e705c121SKalle Valo struct iwl_fw_error_dump_rb *rb; 2557e705c121SKalle Valo 2558e705c121SKalle Valo dma_unmap_page(trans->dev, rxb->page_dma, max_len, 2559e705c121SKalle Valo DMA_FROM_DEVICE); 2560e705c121SKalle Valo 2561e705c121SKalle Valo rb_len += sizeof(**data) + sizeof(*rb) + max_len; 2562e705c121SKalle Valo 2563e705c121SKalle Valo (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); 2564e705c121SKalle Valo (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); 2565e705c121SKalle Valo rb = (void *)(*data)->data; 2566e705c121SKalle Valo rb->index = cpu_to_le32(i); 2567e705c121SKalle Valo memcpy(rb->data, page_address(rxb->page), max_len); 2568e705c121SKalle Valo /* remap the page for the free benefit */ 2569e705c121SKalle Valo rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0, 2570e705c121SKalle Valo max_len, 2571e705c121SKalle Valo DMA_FROM_DEVICE); 2572e705c121SKalle Valo 2573e705c121SKalle Valo *data = iwl_fw_error_next_data(*data); 2574e705c121SKalle Valo } 2575e705c121SKalle Valo 2576e705c121SKalle Valo spin_unlock(&rxq->lock); 2577e705c121SKalle Valo 2578e705c121SKalle Valo return rb_len; 2579e705c121SKalle Valo } 2580e705c121SKalle Valo #define IWL_CSR_TO_DUMP (0x250) 2581e705c121SKalle Valo 2582e705c121SKalle Valo static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, 2583e705c121SKalle Valo struct iwl_fw_error_dump_data **data) 2584e705c121SKalle Valo { 2585e705c121SKalle Valo u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; 2586e705c121SKalle Valo __le32 *val; 2587e705c121SKalle Valo int i; 2588e705c121SKalle Valo 2589e705c121SKalle Valo (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); 2590e705c121SKalle Valo (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); 2591e705c121SKalle Valo val = (void *)(*data)->data; 2592e705c121SKalle Valo 2593e705c121SKalle Valo for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) 2594e705c121SKalle Valo *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 2595e705c121SKalle Valo 2596e705c121SKalle Valo *data = iwl_fw_error_next_data(*data); 2597e705c121SKalle Valo 2598e705c121SKalle Valo return csr_len; 2599e705c121SKalle Valo } 2600e705c121SKalle Valo 2601e705c121SKalle Valo static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, 2602e705c121SKalle Valo struct iwl_fw_error_dump_data **data) 2603e705c121SKalle Valo { 2604e705c121SKalle Valo u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; 2605e705c121SKalle Valo unsigned long flags; 2606e705c121SKalle Valo __le32 *val; 2607e705c121SKalle Valo int i; 2608e705c121SKalle Valo 260923ba9340SEmmanuel Grumbach if (!iwl_trans_grab_nic_access(trans, &flags)) 2610e705c121SKalle Valo return 0; 2611e705c121SKalle Valo 2612e705c121SKalle Valo (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); 2613e705c121SKalle Valo (*data)->len = cpu_to_le32(fh_regs_len); 2614e705c121SKalle Valo val = (void *)(*data)->data; 2615e705c121SKalle Valo 2616723b45e2SLiad Kaufman if (!trans->cfg->gen2) 2617723b45e2SLiad Kaufman for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; 2618723b45e2SLiad Kaufman i += sizeof(u32)) 2619e705c121SKalle Valo *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 2620723b45e2SLiad Kaufman else 2621723b45e2SLiad Kaufman for (i = FH_MEM_LOWER_BOUND_GEN2; i < FH_MEM_UPPER_BOUND_GEN2; 2622723b45e2SLiad Kaufman i += sizeof(u32)) 2623723b45e2SLiad Kaufman *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, 2624723b45e2SLiad Kaufman i)); 2625e705c121SKalle Valo 2626e705c121SKalle Valo iwl_trans_release_nic_access(trans, &flags); 2627e705c121SKalle Valo 2628e705c121SKalle Valo *data = iwl_fw_error_next_data(*data); 2629e705c121SKalle Valo 2630e705c121SKalle Valo return sizeof(**data) + fh_regs_len; 2631e705c121SKalle Valo } 2632e705c121SKalle Valo 2633e705c121SKalle Valo static u32 2634e705c121SKalle Valo iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, 2635e705c121SKalle Valo struct iwl_fw_error_dump_fw_mon *fw_mon_data, 2636e705c121SKalle Valo u32 monitor_len) 2637e705c121SKalle Valo { 2638e705c121SKalle Valo u32 buf_size_in_dwords = (monitor_len >> 2); 2639e705c121SKalle Valo u32 *buffer = (u32 *)fw_mon_data->data; 2640e705c121SKalle Valo unsigned long flags; 2641e705c121SKalle Valo u32 i; 2642e705c121SKalle Valo 264323ba9340SEmmanuel Grumbach if (!iwl_trans_grab_nic_access(trans, &flags)) 2644e705c121SKalle Valo return 0; 2645e705c121SKalle Valo 264614ef1b43SGolan Ben-Ami iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); 2647e705c121SKalle Valo for (i = 0; i < buf_size_in_dwords; i++) 264814ef1b43SGolan Ben-Ami buffer[i] = iwl_read_prph_no_grab(trans, 264914ef1b43SGolan Ben-Ami MON_DMARB_RD_DATA_ADDR); 265014ef1b43SGolan Ben-Ami iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); 2651e705c121SKalle Valo 2652e705c121SKalle Valo iwl_trans_release_nic_access(trans, &flags); 2653e705c121SKalle Valo 2654e705c121SKalle Valo return monitor_len; 2655e705c121SKalle Valo } 2656e705c121SKalle Valo 2657e705c121SKalle Valo static u32 2658e705c121SKalle Valo iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, 2659e705c121SKalle Valo struct iwl_fw_error_dump_data **data, 2660e705c121SKalle Valo u32 monitor_len) 2661e705c121SKalle Valo { 2662e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2663e705c121SKalle Valo u32 len = 0; 2664e705c121SKalle Valo 2665e705c121SKalle Valo if ((trans_pcie->fw_mon_page && 2666e705c121SKalle Valo trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || 2667e705c121SKalle Valo trans->dbg_dest_tlv) { 2668e705c121SKalle Valo struct iwl_fw_error_dump_fw_mon *fw_mon_data; 2669e705c121SKalle Valo u32 base, write_ptr, wrap_cnt; 2670e705c121SKalle Valo 2671e705c121SKalle Valo /* If there was a dest TLV - use the values from there */ 2672e705c121SKalle Valo if (trans->dbg_dest_tlv) { 2673e705c121SKalle Valo write_ptr = 2674e705c121SKalle Valo le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); 2675e705c121SKalle Valo wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); 2676e705c121SKalle Valo base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); 2677e705c121SKalle Valo } else { 2678e705c121SKalle Valo base = MON_BUFF_BASE_ADDR; 2679e705c121SKalle Valo write_ptr = MON_BUFF_WRPTR; 2680e705c121SKalle Valo wrap_cnt = MON_BUFF_CYCLE_CNT; 2681e705c121SKalle Valo } 2682e705c121SKalle Valo 2683e705c121SKalle Valo (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); 2684e705c121SKalle Valo fw_mon_data = (void *)(*data)->data; 2685e705c121SKalle Valo fw_mon_data->fw_mon_wr_ptr = 2686e705c121SKalle Valo cpu_to_le32(iwl_read_prph(trans, write_ptr)); 2687e705c121SKalle Valo fw_mon_data->fw_mon_cycle_cnt = 2688e705c121SKalle Valo cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); 2689e705c121SKalle Valo fw_mon_data->fw_mon_base_ptr = 2690e705c121SKalle Valo cpu_to_le32(iwl_read_prph(trans, base)); 2691e705c121SKalle Valo 2692e705c121SKalle Valo len += sizeof(**data) + sizeof(*fw_mon_data); 2693e705c121SKalle Valo if (trans_pcie->fw_mon_page) { 2694e705c121SKalle Valo /* 2695e705c121SKalle Valo * The firmware is now asserted, it won't write anything 2696e705c121SKalle Valo * to the buffer. CPU can take ownership to fetch the 2697e705c121SKalle Valo * data. The buffer will be handed back to the device 2698e705c121SKalle Valo * before the firmware will be restarted. 2699e705c121SKalle Valo */ 2700e705c121SKalle Valo dma_sync_single_for_cpu(trans->dev, 2701e705c121SKalle Valo trans_pcie->fw_mon_phys, 2702e705c121SKalle Valo trans_pcie->fw_mon_size, 2703e705c121SKalle Valo DMA_FROM_DEVICE); 2704e705c121SKalle Valo memcpy(fw_mon_data->data, 2705e705c121SKalle Valo page_address(trans_pcie->fw_mon_page), 2706e705c121SKalle Valo trans_pcie->fw_mon_size); 2707e705c121SKalle Valo 2708e705c121SKalle Valo monitor_len = trans_pcie->fw_mon_size; 2709e705c121SKalle Valo } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { 2710e705c121SKalle Valo /* 2711e705c121SKalle Valo * Update pointers to reflect actual values after 2712e705c121SKalle Valo * shifting 2713e705c121SKalle Valo */ 2714e705c121SKalle Valo base = iwl_read_prph(trans, base) << 2715e705c121SKalle Valo trans->dbg_dest_tlv->base_shift; 2716e705c121SKalle Valo iwl_trans_read_mem(trans, base, fw_mon_data->data, 2717e705c121SKalle Valo monitor_len / sizeof(u32)); 2718e705c121SKalle Valo } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) { 2719e705c121SKalle Valo monitor_len = 2720e705c121SKalle Valo iwl_trans_pci_dump_marbh_monitor(trans, 2721e705c121SKalle Valo fw_mon_data, 2722e705c121SKalle Valo monitor_len); 2723e705c121SKalle Valo } else { 2724e705c121SKalle Valo /* Didn't match anything - output no monitor data */ 2725e705c121SKalle Valo monitor_len = 0; 2726e705c121SKalle Valo } 2727e705c121SKalle Valo 2728e705c121SKalle Valo len += monitor_len; 2729e705c121SKalle Valo (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); 2730e705c121SKalle Valo } 2731e705c121SKalle Valo 2732e705c121SKalle Valo return len; 2733e705c121SKalle Valo } 2734e705c121SKalle Valo 2735e705c121SKalle Valo static struct iwl_trans_dump_data 2736e705c121SKalle Valo *iwl_trans_pcie_dump_data(struct iwl_trans *trans, 2737a80c7a69SEmmanuel Grumbach const struct iwl_fw_dbg_trigger_tlv *trigger) 2738e705c121SKalle Valo { 2739e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2740e705c121SKalle Valo struct iwl_fw_error_dump_data *data; 2741b2a3b1c1SSara Sharon struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; 2742e705c121SKalle Valo struct iwl_fw_error_dump_txcmd *txcmd; 2743e705c121SKalle Valo struct iwl_trans_dump_data *dump_data; 2744e705c121SKalle Valo u32 len, num_rbs; 2745e705c121SKalle Valo u32 monitor_len; 2746e705c121SKalle Valo int i, ptr; 274796a6497bSSara Sharon bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && 274896a6497bSSara Sharon !trans->cfg->mq_rx_supported; 2749e705c121SKalle Valo 2750e705c121SKalle Valo /* transport dump header */ 2751e705c121SKalle Valo len = sizeof(*dump_data); 2752e705c121SKalle Valo 2753e705c121SKalle Valo /* host commands */ 2754e705c121SKalle Valo len += sizeof(*data) + 2755bb98ecd4SSara Sharon cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); 2756e705c121SKalle Valo 2757e705c121SKalle Valo /* FW monitor */ 2758e705c121SKalle Valo if (trans_pcie->fw_mon_page) { 2759e705c121SKalle Valo len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + 2760e705c121SKalle Valo trans_pcie->fw_mon_size; 2761e705c121SKalle Valo monitor_len = trans_pcie->fw_mon_size; 2762e705c121SKalle Valo } else if (trans->dbg_dest_tlv) { 2763e705c121SKalle Valo u32 base, end; 2764e705c121SKalle Valo 2765e705c121SKalle Valo base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); 2766e705c121SKalle Valo end = le32_to_cpu(trans->dbg_dest_tlv->end_reg); 2767e705c121SKalle Valo 2768e705c121SKalle Valo base = iwl_read_prph(trans, base) << 2769e705c121SKalle Valo trans->dbg_dest_tlv->base_shift; 2770e705c121SKalle Valo end = iwl_read_prph(trans, end) << 2771e705c121SKalle Valo trans->dbg_dest_tlv->end_shift; 2772e705c121SKalle Valo 2773e705c121SKalle Valo /* Make "end" point to the actual end */ 27746e584873SSara Sharon if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000 || 2775e705c121SKalle Valo trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) 2776e705c121SKalle Valo end += (1 << trans->dbg_dest_tlv->end_shift); 2777e705c121SKalle Valo monitor_len = end - base; 2778e705c121SKalle Valo len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + 2779e705c121SKalle Valo monitor_len; 2780e705c121SKalle Valo } else { 2781e705c121SKalle Valo monitor_len = 0; 2782e705c121SKalle Valo } 2783e705c121SKalle Valo 2784e705c121SKalle Valo if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { 2785e705c121SKalle Valo dump_data = vzalloc(len); 2786e705c121SKalle Valo if (!dump_data) 2787e705c121SKalle Valo return NULL; 2788e705c121SKalle Valo 2789e705c121SKalle Valo data = (void *)dump_data->data; 2790e705c121SKalle Valo len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 2791e705c121SKalle Valo dump_data->len = len; 2792e705c121SKalle Valo 2793e705c121SKalle Valo return dump_data; 2794e705c121SKalle Valo } 2795e705c121SKalle Valo 2796e705c121SKalle Valo /* CSR registers */ 2797e705c121SKalle Valo len += sizeof(*data) + IWL_CSR_TO_DUMP; 2798e705c121SKalle Valo 2799e705c121SKalle Valo /* FH registers */ 2800723b45e2SLiad Kaufman if (trans->cfg->gen2) 2801723b45e2SLiad Kaufman len += sizeof(*data) + 2802723b45e2SLiad Kaufman (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2); 2803723b45e2SLiad Kaufman else 2804723b45e2SLiad Kaufman len += sizeof(*data) + 2805723b45e2SLiad Kaufman (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); 2806e705c121SKalle Valo 2807e705c121SKalle Valo if (dump_rbs) { 280878485054SSara Sharon /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 280978485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 2810e705c121SKalle Valo /* RBs */ 281178485054SSara Sharon num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) 2812e705c121SKalle Valo & 0x0FFF; 281378485054SSara Sharon num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; 2814e705c121SKalle Valo len += num_rbs * (sizeof(*data) + 2815e705c121SKalle Valo sizeof(struct iwl_fw_error_dump_rb) + 2816e705c121SKalle Valo (PAGE_SIZE << trans_pcie->rx_page_order)); 2817e705c121SKalle Valo } 2818e705c121SKalle Valo 28195538409bSLiad Kaufman /* Paged memory for gen2 HW */ 28205538409bSLiad Kaufman if (trans->cfg->gen2) 28215538409bSLiad Kaufman for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) 28225538409bSLiad Kaufman len += sizeof(*data) + 28235538409bSLiad Kaufman sizeof(struct iwl_fw_error_dump_paging) + 28245538409bSLiad Kaufman trans_pcie->init_dram.paging[i].size; 28255538409bSLiad Kaufman 2826e705c121SKalle Valo dump_data = vzalloc(len); 2827e705c121SKalle Valo if (!dump_data) 2828e705c121SKalle Valo return NULL; 2829e705c121SKalle Valo 2830e705c121SKalle Valo len = 0; 2831e705c121SKalle Valo data = (void *)dump_data->data; 2832e705c121SKalle Valo data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); 2833e705c121SKalle Valo txcmd = (void *)data->data; 2834e705c121SKalle Valo spin_lock_bh(&cmdq->lock); 2835bb98ecd4SSara Sharon ptr = cmdq->write_ptr; 2836bb98ecd4SSara Sharon for (i = 0; i < cmdq->n_window; i++) { 2837bb98ecd4SSara Sharon u8 idx = get_cmd_index(cmdq, ptr); 2838e705c121SKalle Valo u32 caplen, cmdlen; 2839e705c121SKalle Valo 28406983ba69SSara Sharon cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds + 28416983ba69SSara Sharon trans_pcie->tfd_size * ptr); 2842e705c121SKalle Valo caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); 2843e705c121SKalle Valo 2844e705c121SKalle Valo if (cmdlen) { 2845e705c121SKalle Valo len += sizeof(*txcmd) + caplen; 2846e705c121SKalle Valo txcmd->cmdlen = cpu_to_le32(cmdlen); 2847e705c121SKalle Valo txcmd->caplen = cpu_to_le32(caplen); 2848e705c121SKalle Valo memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); 2849e705c121SKalle Valo txcmd = (void *)((u8 *)txcmd->data + caplen); 2850e705c121SKalle Valo } 2851e705c121SKalle Valo 2852e705c121SKalle Valo ptr = iwl_queue_dec_wrap(ptr); 2853e705c121SKalle Valo } 2854e705c121SKalle Valo spin_unlock_bh(&cmdq->lock); 2855e705c121SKalle Valo 2856e705c121SKalle Valo data->len = cpu_to_le32(len); 2857e705c121SKalle Valo len += sizeof(*data); 2858e705c121SKalle Valo data = iwl_fw_error_next_data(data); 2859e705c121SKalle Valo 2860e705c121SKalle Valo len += iwl_trans_pcie_dump_csr(trans, &data); 2861e705c121SKalle Valo len += iwl_trans_pcie_fh_regs_dump(trans, &data); 2862e705c121SKalle Valo if (dump_rbs) 2863e705c121SKalle Valo len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); 2864e705c121SKalle Valo 28655538409bSLiad Kaufman /* Paged memory for gen2 HW */ 28665538409bSLiad Kaufman if (trans->cfg->gen2) { 28675538409bSLiad Kaufman for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) { 28685538409bSLiad Kaufman struct iwl_fw_error_dump_paging *paging; 28695538409bSLiad Kaufman dma_addr_t addr = 28705538409bSLiad Kaufman trans_pcie->init_dram.paging[i].physical; 28715538409bSLiad Kaufman u32 page_len = trans_pcie->init_dram.paging[i].size; 28725538409bSLiad Kaufman 28735538409bSLiad Kaufman data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); 28745538409bSLiad Kaufman data->len = cpu_to_le32(sizeof(*paging) + page_len); 28755538409bSLiad Kaufman paging = (void *)data->data; 28765538409bSLiad Kaufman paging->index = cpu_to_le32(i); 28775538409bSLiad Kaufman dma_sync_single_for_cpu(trans->dev, addr, page_len, 28785538409bSLiad Kaufman DMA_BIDIRECTIONAL); 28795538409bSLiad Kaufman memcpy(paging->data, 28805538409bSLiad Kaufman trans_pcie->init_dram.paging[i].block, page_len); 28815538409bSLiad Kaufman data = iwl_fw_error_next_data(data); 28825538409bSLiad Kaufman 28835538409bSLiad Kaufman len += sizeof(*data) + sizeof(*paging) + page_len; 28845538409bSLiad Kaufman } 28855538409bSLiad Kaufman } 28865538409bSLiad Kaufman 2887e705c121SKalle Valo len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 2888e705c121SKalle Valo 2889e705c121SKalle Valo dump_data->len = len; 2890e705c121SKalle Valo 2891e705c121SKalle Valo return dump_data; 2892e705c121SKalle Valo } 2893e705c121SKalle Valo 28944cbb8e50SLuciano Coelho #ifdef CONFIG_PM_SLEEP 28954cbb8e50SLuciano Coelho static int iwl_trans_pcie_suspend(struct iwl_trans *trans) 28964cbb8e50SLuciano Coelho { 2897e4c49c49SLuca Coelho if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && 2898e4c49c49SLuca Coelho (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) 28994cbb8e50SLuciano Coelho return iwl_pci_fw_enter_d0i3(trans); 29004cbb8e50SLuciano Coelho 29014cbb8e50SLuciano Coelho return 0; 29024cbb8e50SLuciano Coelho } 29034cbb8e50SLuciano Coelho 29044cbb8e50SLuciano Coelho static void iwl_trans_pcie_resume(struct iwl_trans *trans) 29054cbb8e50SLuciano Coelho { 2906e4c49c49SLuca Coelho if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && 2907e4c49c49SLuca Coelho (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) 29084cbb8e50SLuciano Coelho iwl_pci_fw_exit_d0i3(trans); 29094cbb8e50SLuciano Coelho } 29104cbb8e50SLuciano Coelho #endif /* CONFIG_PM_SLEEP */ 29114cbb8e50SLuciano Coelho 2912623e7766SSara Sharon #define IWL_TRANS_COMMON_OPS \ 2913623e7766SSara Sharon .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ 2914623e7766SSara Sharon .write8 = iwl_trans_pcie_write8, \ 2915623e7766SSara Sharon .write32 = iwl_trans_pcie_write32, \ 2916623e7766SSara Sharon .read32 = iwl_trans_pcie_read32, \ 2917623e7766SSara Sharon .read_prph = iwl_trans_pcie_read_prph, \ 2918623e7766SSara Sharon .write_prph = iwl_trans_pcie_write_prph, \ 2919623e7766SSara Sharon .read_mem = iwl_trans_pcie_read_mem, \ 2920623e7766SSara Sharon .write_mem = iwl_trans_pcie_write_mem, \ 2921623e7766SSara Sharon .configure = iwl_trans_pcie_configure, \ 2922623e7766SSara Sharon .set_pmi = iwl_trans_pcie_set_pmi, \ 2923623e7766SSara Sharon .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ 2924623e7766SSara Sharon .release_nic_access = iwl_trans_pcie_release_nic_access, \ 2925623e7766SSara Sharon .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ 2926623e7766SSara Sharon .ref = iwl_trans_pcie_ref, \ 2927623e7766SSara Sharon .unref = iwl_trans_pcie_unref, \ 2928623e7766SSara Sharon .dump_data = iwl_trans_pcie_dump_data, \ 2929623e7766SSara Sharon .d3_suspend = iwl_trans_pcie_d3_suspend, \ 2930623e7766SSara Sharon .d3_resume = iwl_trans_pcie_d3_resume 2931623e7766SSara Sharon 2932623e7766SSara Sharon #ifdef CONFIG_PM_SLEEP 2933623e7766SSara Sharon #define IWL_TRANS_PM_OPS \ 2934623e7766SSara Sharon .suspend = iwl_trans_pcie_suspend, \ 2935623e7766SSara Sharon .resume = iwl_trans_pcie_resume, 2936623e7766SSara Sharon #else 2937623e7766SSara Sharon #define IWL_TRANS_PM_OPS 2938623e7766SSara Sharon #endif /* CONFIG_PM_SLEEP */ 2939623e7766SSara Sharon 2940e705c121SKalle Valo static const struct iwl_trans_ops trans_ops_pcie = { 2941623e7766SSara Sharon IWL_TRANS_COMMON_OPS, 2942623e7766SSara Sharon IWL_TRANS_PM_OPS 2943e705c121SKalle Valo .start_hw = iwl_trans_pcie_start_hw, 2944e705c121SKalle Valo .fw_alive = iwl_trans_pcie_fw_alive, 2945e705c121SKalle Valo .start_fw = iwl_trans_pcie_start_fw, 2946e705c121SKalle Valo .stop_device = iwl_trans_pcie_stop_device, 2947e705c121SKalle Valo 2948e705c121SKalle Valo .send_cmd = iwl_trans_pcie_send_hcmd, 2949e705c121SKalle Valo 2950e705c121SKalle Valo .tx = iwl_trans_pcie_tx, 2951e705c121SKalle Valo .reclaim = iwl_trans_pcie_reclaim, 2952e705c121SKalle Valo 2953e705c121SKalle Valo .txq_disable = iwl_trans_pcie_txq_disable, 2954e705c121SKalle Valo .txq_enable = iwl_trans_pcie_txq_enable, 2955e705c121SKalle Valo 295642db09c1SLiad Kaufman .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, 295742db09c1SLiad Kaufman 2958d6d517b7SSara Sharon .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, 2959d6d517b7SSara Sharon 2960e705c121SKalle Valo .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, 29610cd58eaaSEmmanuel Grumbach .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, 2962623e7766SSara Sharon }; 2963e705c121SKalle Valo 2964623e7766SSara Sharon static const struct iwl_trans_ops trans_ops_pcie_gen2 = { 2965623e7766SSara Sharon IWL_TRANS_COMMON_OPS, 2966623e7766SSara Sharon IWL_TRANS_PM_OPS 2967623e7766SSara Sharon .start_hw = iwl_trans_pcie_start_hw, 2968eda50cdeSSara Sharon .fw_alive = iwl_trans_pcie_gen2_fw_alive, 2969eda50cdeSSara Sharon .start_fw = iwl_trans_pcie_gen2_start_fw, 297077c09bc8SSara Sharon .stop_device = iwl_trans_pcie_gen2_stop_device, 2971e705c121SKalle Valo 2972ca60da2eSSara Sharon .send_cmd = iwl_trans_pcie_gen2_send_hcmd, 2973e705c121SKalle Valo 2974ab6c6445SSara Sharon .tx = iwl_trans_pcie_gen2_tx, 2975623e7766SSara Sharon .reclaim = iwl_trans_pcie_reclaim, 2976623e7766SSara Sharon 29776b35ff91SSara Sharon .txq_alloc = iwl_trans_pcie_dyn_txq_alloc, 29786b35ff91SSara Sharon .txq_free = iwl_trans_pcie_dyn_txq_free, 2979d6d517b7SSara Sharon .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, 2980e705c121SKalle Valo }; 2981e705c121SKalle Valo 2982e705c121SKalle Valo struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, 2983e705c121SKalle Valo const struct pci_device_id *ent, 2984e705c121SKalle Valo const struct iwl_cfg *cfg) 2985e705c121SKalle Valo { 2986e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie; 2987e705c121SKalle Valo struct iwl_trans *trans; 298896a6497bSSara Sharon int ret, addr_size; 2989e705c121SKalle Valo 29905a41a86cSSharon Dvir ret = pcim_enable_device(pdev); 29915a41a86cSSharon Dvir if (ret) 29925a41a86cSSharon Dvir return ERR_PTR(ret); 29935a41a86cSSharon Dvir 2994623e7766SSara Sharon if (cfg->gen2) 2995623e7766SSara Sharon trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), 2996623e7766SSara Sharon &pdev->dev, cfg, &trans_ops_pcie_gen2); 2997623e7766SSara Sharon else 2998e705c121SKalle Valo trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), 29991ea423b0SLuca Coelho &pdev->dev, cfg, &trans_ops_pcie); 3000e705c121SKalle Valo if (!trans) 3001e705c121SKalle Valo return ERR_PTR(-ENOMEM); 3002e705c121SKalle Valo 3003e705c121SKalle Valo trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3004e705c121SKalle Valo 3005e705c121SKalle Valo trans_pcie->trans = trans; 3006326477e4SJohannes Berg trans_pcie->opmode_down = true; 3007e705c121SKalle Valo spin_lock_init(&trans_pcie->irq_lock); 3008e705c121SKalle Valo spin_lock_init(&trans_pcie->reg_lock); 3009e705c121SKalle Valo mutex_init(&trans_pcie->mutex); 3010e705c121SKalle Valo init_waitqueue_head(&trans_pcie->ucode_write_waitq); 30116eb5e529SEmmanuel Grumbach trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); 30126eb5e529SEmmanuel Grumbach if (!trans_pcie->tso_hdr_page) { 30136eb5e529SEmmanuel Grumbach ret = -ENOMEM; 30146eb5e529SEmmanuel Grumbach goto out_no_pci; 30156eb5e529SEmmanuel Grumbach } 3016e705c121SKalle Valo 3017e705c121SKalle Valo 3018e705c121SKalle Valo if (!cfg->base_params->pcie_l1_allowed) { 3019e705c121SKalle Valo /* 3020e705c121SKalle Valo * W/A - seems to solve weird behavior. We need to remove this 3021e705c121SKalle Valo * if we don't want to stay in L1 all the time. This wastes a 3022e705c121SKalle Valo * lot of power. 3023e705c121SKalle Valo */ 3024e705c121SKalle Valo pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 3025e705c121SKalle Valo PCIE_LINK_STATE_L1 | 3026e705c121SKalle Valo PCIE_LINK_STATE_CLKPM); 3027e705c121SKalle Valo } 3028e705c121SKalle Valo 30296983ba69SSara Sharon if (cfg->use_tfh) { 30302c6262b7SSara Sharon addr_size = 64; 30313cd1980bSSara Sharon trans_pcie->max_tbs = IWL_TFH_NUM_TBS; 30328352e62aSSara Sharon trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd); 30336983ba69SSara Sharon } else { 30342c6262b7SSara Sharon addr_size = 36; 30353cd1980bSSara Sharon trans_pcie->max_tbs = IWL_NUM_OF_TBS; 30366983ba69SSara Sharon trans_pcie->tfd_size = sizeof(struct iwl_tfd); 30376983ba69SSara Sharon } 30383cd1980bSSara Sharon trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie); 30393cd1980bSSara Sharon 3040e705c121SKalle Valo pci_set_master(pdev); 3041e705c121SKalle Valo 304296a6497bSSara Sharon ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); 3043e705c121SKalle Valo if (!ret) 304496a6497bSSara Sharon ret = pci_set_consistent_dma_mask(pdev, 304596a6497bSSara Sharon DMA_BIT_MASK(addr_size)); 3046e705c121SKalle Valo if (ret) { 3047e705c121SKalle Valo ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3048e705c121SKalle Valo if (!ret) 3049e705c121SKalle Valo ret = pci_set_consistent_dma_mask(pdev, 3050e705c121SKalle Valo DMA_BIT_MASK(32)); 3051e705c121SKalle Valo /* both attempts failed: */ 3052e705c121SKalle Valo if (ret) { 3053e705c121SKalle Valo dev_err(&pdev->dev, "No suitable DMA available\n"); 30545a41a86cSSharon Dvir goto out_no_pci; 3055e705c121SKalle Valo } 3056e705c121SKalle Valo } 3057e705c121SKalle Valo 30585a41a86cSSharon Dvir ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); 3059e705c121SKalle Valo if (ret) { 30605a41a86cSSharon Dvir dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); 30615a41a86cSSharon Dvir goto out_no_pci; 3062e705c121SKalle Valo } 3063e705c121SKalle Valo 30645a41a86cSSharon Dvir trans_pcie->hw_base = pcim_iomap_table(pdev)[0]; 3065e705c121SKalle Valo if (!trans_pcie->hw_base) { 30665a41a86cSSharon Dvir dev_err(&pdev->dev, "pcim_iomap_table failed\n"); 3067e705c121SKalle Valo ret = -ENODEV; 30685a41a86cSSharon Dvir goto out_no_pci; 3069e705c121SKalle Valo } 3070e705c121SKalle Valo 3071e705c121SKalle Valo /* We disable the RETRY_TIMEOUT register (0x41) to keep 3072e705c121SKalle Valo * PCI Tx retries from interfering with C3 CPU state */ 3073e705c121SKalle Valo pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 3074e705c121SKalle Valo 3075e705c121SKalle Valo trans_pcie->pci_dev = pdev; 3076e705c121SKalle Valo iwl_disable_interrupts(trans); 3077e705c121SKalle Valo 3078e705c121SKalle Valo trans->hw_rev = iwl_read32(trans, CSR_HW_REV); 3079e705c121SKalle Valo /* 3080e705c121SKalle Valo * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have 3081e705c121SKalle Valo * changed, and now the revision step also includes bit 0-1 (no more 3082e705c121SKalle Valo * "dash" value). To keep hw_rev backwards compatible - we'll store it 3083e705c121SKalle Valo * in the old format. 3084e705c121SKalle Valo */ 30856e584873SSara Sharon if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) { 3086e705c121SKalle Valo unsigned long flags; 3087e705c121SKalle Valo 3088e705c121SKalle Valo trans->hw_rev = (trans->hw_rev & 0xfff0) | 3089e705c121SKalle Valo (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); 3090e705c121SKalle Valo 3091e705c121SKalle Valo ret = iwl_pcie_prepare_card_hw(trans); 3092e705c121SKalle Valo if (ret) { 3093e705c121SKalle Valo IWL_WARN(trans, "Exit HW not ready\n"); 30945a41a86cSSharon Dvir goto out_no_pci; 3095e705c121SKalle Valo } 3096e705c121SKalle Valo 3097e705c121SKalle Valo /* 3098e705c121SKalle Valo * in-order to recognize C step driver should read chip version 3099e705c121SKalle Valo * id located at the AUX bus MISC address space. 3100e705c121SKalle Valo */ 3101e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, 3102e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 3103e705c121SKalle Valo udelay(2); 3104e705c121SKalle Valo 3105e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 3106e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 3107e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 3108e705c121SKalle Valo 25000); 3109e705c121SKalle Valo if (ret < 0) { 3110e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n"); 31115a41a86cSSharon Dvir goto out_no_pci; 3112e705c121SKalle Valo } 3113e705c121SKalle Valo 311423ba9340SEmmanuel Grumbach if (iwl_trans_grab_nic_access(trans, &flags)) { 3115e705c121SKalle Valo u32 hw_step; 3116e705c121SKalle Valo 311714ef1b43SGolan Ben-Ami hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG); 3118e705c121SKalle Valo hw_step |= ENABLE_WFPM; 311914ef1b43SGolan Ben-Ami iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step); 312014ef1b43SGolan Ben-Ami hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG); 3121e705c121SKalle Valo hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; 3122e705c121SKalle Valo if (hw_step == 0x3) 3123e705c121SKalle Valo trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) | 3124e705c121SKalle Valo (SILICON_C_STEP << 2); 3125e705c121SKalle Valo iwl_trans_release_nic_access(trans, &flags); 3126e705c121SKalle Valo } 3127e705c121SKalle Valo } 3128e705c121SKalle Valo 3129c00ee467SJohannes Berg /* 3130c00ee467SJohannes Berg * 9000-series integrated A-step has a problem with suspend/resume 3131c00ee467SJohannes Berg * and sometimes even causes the whole platform to get stuck. This 3132c00ee467SJohannes Berg * workaround makes the hardware not go into the problematic state. 3133c00ee467SJohannes Berg */ 3134c00ee467SJohannes Berg if (trans->cfg->integrated && 3135c00ee467SJohannes Berg trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 && 3136c00ee467SJohannes Berg CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP) 3137c00ee467SJohannes Berg iwl_set_bit(trans, CSR_HOST_CHICKEN, 3138c00ee467SJohannes Berg CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME); 3139c00ee467SJohannes Berg 31401afb0ae4SHaim Dreyfuss trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); 31411afb0ae4SHaim Dreyfuss 31422e5d4a8fSHaim Dreyfuss iwl_pcie_set_interrupt_capa(pdev, trans); 3143e705c121SKalle Valo trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 3144e705c121SKalle Valo snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 3145e705c121SKalle Valo "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); 3146e705c121SKalle Valo 3147e705c121SKalle Valo /* Initialize the wait queue for commands */ 3148e705c121SKalle Valo init_waitqueue_head(&trans_pcie->wait_command_queue); 3149e705c121SKalle Valo 31504cbb8e50SLuciano Coelho init_waitqueue_head(&trans_pcie->d0i3_waitq); 31514cbb8e50SLuciano Coelho 31522e5d4a8fSHaim Dreyfuss if (trans_pcie->msix_enabled) { 31532e5d4a8fSHaim Dreyfuss if (iwl_pcie_init_msix_handler(pdev, trans_pcie)) 31545a41a86cSSharon Dvir goto out_no_pci; 31552e5d4a8fSHaim Dreyfuss } else { 3156e705c121SKalle Valo ret = iwl_pcie_alloc_ict(trans); 3157e705c121SKalle Valo if (ret) 31585a41a86cSSharon Dvir goto out_no_pci; 3159e705c121SKalle Valo 31605a41a86cSSharon Dvir ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, 31615a41a86cSSharon Dvir iwl_pcie_isr, 3162e705c121SKalle Valo iwl_pcie_irq_handler, 3163e705c121SKalle Valo IRQF_SHARED, DRV_NAME, trans); 3164e705c121SKalle Valo if (ret) { 3165e705c121SKalle Valo IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 3166e705c121SKalle Valo goto out_free_ict; 3167e705c121SKalle Valo } 3168e705c121SKalle Valo trans_pcie->inta_mask = CSR_INI_SET_MASK; 31692e5d4a8fSHaim Dreyfuss } 3170e705c121SKalle Valo 3171b3ff1270SLuca Coelho #ifdef CONFIG_IWLWIFI_PCIE_RTPM 3172b3ff1270SLuca Coelho trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; 3173b3ff1270SLuca Coelho #else 3174b3ff1270SLuca Coelho trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 3175b3ff1270SLuca Coelho #endif /* CONFIG_IWLWIFI_PCIE_RTPM */ 3176b3ff1270SLuca Coelho 3177e705c121SKalle Valo return trans; 3178e705c121SKalle Valo 3179e705c121SKalle Valo out_free_ict: 3180e705c121SKalle Valo iwl_pcie_free_ict(trans); 3181e705c121SKalle Valo out_no_pci: 31826eb5e529SEmmanuel Grumbach free_percpu(trans_pcie->tso_hdr_page); 3183e705c121SKalle Valo iwl_trans_free(trans); 3184e705c121SKalle Valo return ERR_PTR(ret); 3185e705c121SKalle Valo } 3186