1e705c121SKalle Valo /****************************************************************************** 2e705c121SKalle Valo * 3e705c121SKalle Valo * This file is provided under a dual BSD/GPLv2 license. When using or 4e705c121SKalle Valo * redistributing this file, you may do so under either license. 5e705c121SKalle Valo * 6e705c121SKalle Valo * GPL LICENSE SUMMARY 7e705c121SKalle Valo * 8e705c121SKalle Valo * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. 9e705c121SKalle Valo * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10afb84431SEmmanuel Grumbach * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 11e705c121SKalle Valo * 12e705c121SKalle Valo * This program is free software; you can redistribute it and/or modify 13e705c121SKalle Valo * it under the terms of version 2 of the GNU General Public License as 14e705c121SKalle Valo * published by the Free Software Foundation. 15e705c121SKalle Valo * 16e705c121SKalle Valo * This program is distributed in the hope that it will be useful, but 17e705c121SKalle Valo * WITHOUT ANY WARRANTY; without even the implied warranty of 18e705c121SKalle Valo * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19e705c121SKalle Valo * General Public License for more details. 20e705c121SKalle Valo * 21e705c121SKalle Valo * You should have received a copy of the GNU General Public License 22e705c121SKalle Valo * along with this program; if not, write to the Free Software 23e705c121SKalle Valo * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 24e705c121SKalle Valo * USA 25e705c121SKalle Valo * 26e705c121SKalle Valo * The full GNU General Public License is included in this distribution 27e705c121SKalle Valo * in the file called COPYING. 28e705c121SKalle Valo * 29e705c121SKalle Valo * Contact Information: 30cb2f8277SEmmanuel Grumbach * Intel Linux Wireless <linuxwifi@intel.com> 31e705c121SKalle Valo * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 32e705c121SKalle Valo * 33e705c121SKalle Valo * BSD LICENSE 34e705c121SKalle Valo * 35e705c121SKalle Valo * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. 36e705c121SKalle Valo * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37afb84431SEmmanuel Grumbach * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 38e705c121SKalle Valo * All rights reserved. 39e705c121SKalle Valo * 40e705c121SKalle Valo * Redistribution and use in source and binary forms, with or without 41e705c121SKalle Valo * modification, are permitted provided that the following conditions 42e705c121SKalle Valo * are met: 43e705c121SKalle Valo * 44e705c121SKalle Valo * * Redistributions of source code must retain the above copyright 45e705c121SKalle Valo * notice, this list of conditions and the following disclaimer. 46e705c121SKalle Valo * * Redistributions in binary form must reproduce the above copyright 47e705c121SKalle Valo * notice, this list of conditions and the following disclaimer in 48e705c121SKalle Valo * the documentation and/or other materials provided with the 49e705c121SKalle Valo * distribution. 50e705c121SKalle Valo * * Neither the name Intel Corporation nor the names of its 51e705c121SKalle Valo * contributors may be used to endorse or promote products derived 52e705c121SKalle Valo * from this software without specific prior written permission. 53e705c121SKalle Valo * 54e705c121SKalle Valo * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 55e705c121SKalle Valo * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 56e705c121SKalle Valo * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 57e705c121SKalle Valo * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 58e705c121SKalle Valo * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59e705c121SKalle Valo * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 60e705c121SKalle Valo * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 61e705c121SKalle Valo * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 62e705c121SKalle Valo * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 63e705c121SKalle Valo * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 64e705c121SKalle Valo * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65e705c121SKalle Valo * 66e705c121SKalle Valo *****************************************************************************/ 67e705c121SKalle Valo #include <linux/pci.h> 68e705c121SKalle Valo #include <linux/pci-aspm.h> 69e705c121SKalle Valo #include <linux/interrupt.h> 70e705c121SKalle Valo #include <linux/debugfs.h> 71e705c121SKalle Valo #include <linux/sched.h> 72e705c121SKalle Valo #include <linux/bitops.h> 73e705c121SKalle Valo #include <linux/gfp.h> 74e705c121SKalle Valo #include <linux/vmalloc.h> 75b3ff1270SLuca Coelho #include <linux/pm_runtime.h> 76e705c121SKalle Valo 77e705c121SKalle Valo #include "iwl-drv.h" 78e705c121SKalle Valo #include "iwl-trans.h" 79e705c121SKalle Valo #include "iwl-csr.h" 80e705c121SKalle Valo #include "iwl-prph.h" 81e705c121SKalle Valo #include "iwl-scd.h" 82e705c121SKalle Valo #include "iwl-agn-hw.h" 83e705c121SKalle Valo #include "iwl-fw-error-dump.h" 84e705c121SKalle Valo #include "internal.h" 85e705c121SKalle Valo #include "iwl-fh.h" 86e705c121SKalle Valo 87e705c121SKalle Valo /* extended range in FW SRAM */ 88e705c121SKalle Valo #define IWL_FW_MEM_EXTENDED_START 0x40000 89e705c121SKalle Valo #define IWL_FW_MEM_EXTENDED_END 0x57FFF 90e705c121SKalle Valo 91e705c121SKalle Valo static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) 92e705c121SKalle Valo { 93e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 94e705c121SKalle Valo 95e705c121SKalle Valo if (!trans_pcie->fw_mon_page) 96e705c121SKalle Valo return; 97e705c121SKalle Valo 98e705c121SKalle Valo dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys, 99e705c121SKalle Valo trans_pcie->fw_mon_size, DMA_FROM_DEVICE); 100e705c121SKalle Valo __free_pages(trans_pcie->fw_mon_page, 101e705c121SKalle Valo get_order(trans_pcie->fw_mon_size)); 102e705c121SKalle Valo trans_pcie->fw_mon_page = NULL; 103e705c121SKalle Valo trans_pcie->fw_mon_phys = 0; 104e705c121SKalle Valo trans_pcie->fw_mon_size = 0; 105e705c121SKalle Valo } 106e705c121SKalle Valo 107e705c121SKalle Valo static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) 108e705c121SKalle Valo { 109e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 110e705c121SKalle Valo struct page *page = NULL; 111e705c121SKalle Valo dma_addr_t phys; 112e705c121SKalle Valo u32 size = 0; 113e705c121SKalle Valo u8 power; 114e705c121SKalle Valo 115e705c121SKalle Valo if (!max_power) { 116e705c121SKalle Valo /* default max_power is maximum */ 117e705c121SKalle Valo max_power = 26; 118e705c121SKalle Valo } else { 119e705c121SKalle Valo max_power += 11; 120e705c121SKalle Valo } 121e705c121SKalle Valo 122e705c121SKalle Valo if (WARN(max_power > 26, 123e705c121SKalle Valo "External buffer size for monitor is too big %d, check the FW TLV\n", 124e705c121SKalle Valo max_power)) 125e705c121SKalle Valo return; 126e705c121SKalle Valo 127e705c121SKalle Valo if (trans_pcie->fw_mon_page) { 128e705c121SKalle Valo dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys, 129e705c121SKalle Valo trans_pcie->fw_mon_size, 130e705c121SKalle Valo DMA_FROM_DEVICE); 131e705c121SKalle Valo return; 132e705c121SKalle Valo } 133e705c121SKalle Valo 134e705c121SKalle Valo phys = 0; 135e705c121SKalle Valo for (power = max_power; power >= 11; power--) { 136e705c121SKalle Valo int order; 137e705c121SKalle Valo 138e705c121SKalle Valo size = BIT(power); 139e705c121SKalle Valo order = get_order(size); 140e705c121SKalle Valo page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO, 141e705c121SKalle Valo order); 142e705c121SKalle Valo if (!page) 143e705c121SKalle Valo continue; 144e705c121SKalle Valo 145e705c121SKalle Valo phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order, 146e705c121SKalle Valo DMA_FROM_DEVICE); 147e705c121SKalle Valo if (dma_mapping_error(trans->dev, phys)) { 148e705c121SKalle Valo __free_pages(page, order); 149e705c121SKalle Valo page = NULL; 150e705c121SKalle Valo continue; 151e705c121SKalle Valo } 152e705c121SKalle Valo IWL_INFO(trans, 153e705c121SKalle Valo "Allocated 0x%08x bytes (order %d) for firmware monitor.\n", 154e705c121SKalle Valo size, order); 155e705c121SKalle Valo break; 156e705c121SKalle Valo } 157e705c121SKalle Valo 158e705c121SKalle Valo if (WARN_ON_ONCE(!page)) 159e705c121SKalle Valo return; 160e705c121SKalle Valo 161e705c121SKalle Valo if (power != max_power) 162e705c121SKalle Valo IWL_ERR(trans, 163e705c121SKalle Valo "Sorry - debug buffer is only %luK while you requested %luK\n", 164e705c121SKalle Valo (unsigned long)BIT(power - 10), 165e705c121SKalle Valo (unsigned long)BIT(max_power - 10)); 166e705c121SKalle Valo 167e705c121SKalle Valo trans_pcie->fw_mon_page = page; 168e705c121SKalle Valo trans_pcie->fw_mon_phys = phys; 169e705c121SKalle Valo trans_pcie->fw_mon_size = size; 170e705c121SKalle Valo } 171e705c121SKalle Valo 172e705c121SKalle Valo static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) 173e705c121SKalle Valo { 174e705c121SKalle Valo iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, 175e705c121SKalle Valo ((reg & 0x0000ffff) | (2 << 28))); 176e705c121SKalle Valo return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG); 177e705c121SKalle Valo } 178e705c121SKalle Valo 179e705c121SKalle Valo static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) 180e705c121SKalle Valo { 181e705c121SKalle Valo iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val); 182e705c121SKalle Valo iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, 183e705c121SKalle Valo ((reg & 0x0000ffff) | (3 << 28))); 184e705c121SKalle Valo } 185e705c121SKalle Valo 186e705c121SKalle Valo static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) 187e705c121SKalle Valo { 188e705c121SKalle Valo if (trans->cfg->apmg_not_supported) 189e705c121SKalle Valo return; 190e705c121SKalle Valo 191e705c121SKalle Valo if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) 192e705c121SKalle Valo iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, 193e705c121SKalle Valo APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 194e705c121SKalle Valo ~APMG_PS_CTRL_MSK_PWR_SRC); 195e705c121SKalle Valo else 196e705c121SKalle Valo iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, 197e705c121SKalle Valo APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 198e705c121SKalle Valo ~APMG_PS_CTRL_MSK_PWR_SRC); 199e705c121SKalle Valo } 200e705c121SKalle Valo 201e705c121SKalle Valo /* PCI registers */ 202e705c121SKalle Valo #define PCI_CFG_RETRY_TIMEOUT 0x041 203e705c121SKalle Valo 204e705c121SKalle Valo static void iwl_pcie_apm_config(struct iwl_trans *trans) 205e705c121SKalle Valo { 206e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 207e705c121SKalle Valo u16 lctl; 208e705c121SKalle Valo u16 cap; 209e705c121SKalle Valo 210e705c121SKalle Valo /* 211e705c121SKalle Valo * HW bug W/A for instability in PCIe bus L0S->L1 transition. 212e705c121SKalle Valo * Check if BIOS (or OS) enabled L1-ASPM on this device. 213e705c121SKalle Valo * If so (likely), disable L0S, so device moves directly L0->L1; 214e705c121SKalle Valo * costs negligible amount of power savings. 215e705c121SKalle Valo * If not (unlikely), enable L0S, so there is at least some 216e705c121SKalle Valo * power savings, even without L1. 217e705c121SKalle Valo */ 218e705c121SKalle Valo pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); 219e705c121SKalle Valo if (lctl & PCI_EXP_LNKCTL_ASPM_L1) 220e705c121SKalle Valo iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 221e705c121SKalle Valo else 222e705c121SKalle Valo iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 223e705c121SKalle Valo trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); 224e705c121SKalle Valo 225e705c121SKalle Valo pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); 226e705c121SKalle Valo trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; 227e705c121SKalle Valo dev_info(trans->dev, "L1 %sabled - LTR %sabled\n", 228e705c121SKalle Valo (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", 229e705c121SKalle Valo trans->ltr_enabled ? "En" : "Dis"); 230e705c121SKalle Valo } 231e705c121SKalle Valo 232e705c121SKalle Valo /* 233e705c121SKalle Valo * Start up NIC's basic functionality after it has been reset 234e705c121SKalle Valo * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) 235e705c121SKalle Valo * NOTE: This does not load uCode nor start the embedded processor 236e705c121SKalle Valo */ 237e705c121SKalle Valo static int iwl_pcie_apm_init(struct iwl_trans *trans) 238e705c121SKalle Valo { 239e705c121SKalle Valo int ret = 0; 240e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); 241e705c121SKalle Valo 242e705c121SKalle Valo /* 243e705c121SKalle Valo * Use "set_bit" below rather than "write", to preserve any hardware 244e705c121SKalle Valo * bits already set by default after reset. 245e705c121SKalle Valo */ 246e705c121SKalle Valo 247e705c121SKalle Valo /* Disable L0S exit timer (platform NMI Work/Around) */ 248e705c121SKalle Valo if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) 249e705c121SKalle Valo iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, 250e705c121SKalle Valo CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 251e705c121SKalle Valo 252e705c121SKalle Valo /* 253e705c121SKalle Valo * Disable L0s without affecting L1; 254e705c121SKalle Valo * don't wait for ICH L0s (ICH bug W/A) 255e705c121SKalle Valo */ 256e705c121SKalle Valo iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, 257e705c121SKalle Valo CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 258e705c121SKalle Valo 259e705c121SKalle Valo /* Set FH wait threshold to maximum (HW error during stress W/A) */ 260e705c121SKalle Valo iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); 261e705c121SKalle Valo 262e705c121SKalle Valo /* 263e705c121SKalle Valo * Enable HAP INTA (interrupt from management bus) to 264e705c121SKalle Valo * wake device's PCI Express link L1a -> L0s 265e705c121SKalle Valo */ 266e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 267e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 268e705c121SKalle Valo 269e705c121SKalle Valo iwl_pcie_apm_config(trans); 270e705c121SKalle Valo 271e705c121SKalle Valo /* Configure analog phase-lock-loop before activating to D0A */ 27277d76931SJohannes Berg if (trans->cfg->base_params->pll_cfg) 27377d76931SJohannes Berg iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); 274e705c121SKalle Valo 275e705c121SKalle Valo /* 276e705c121SKalle Valo * Set "initialization complete" bit to move adapter from 277e705c121SKalle Valo * D0U* --> D0A* (powered-up active) state. 278e705c121SKalle Valo */ 279e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 280e705c121SKalle Valo 281e705c121SKalle Valo /* 282e705c121SKalle Valo * Wait for clock stabilization; once stabilized, access to 283e705c121SKalle Valo * device-internal resources is supported, e.g. iwl_write_prph() 284e705c121SKalle Valo * and accesses to uCode SRAM. 285e705c121SKalle Valo */ 286e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 287e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 288e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 289e705c121SKalle Valo if (ret < 0) { 290e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Failed to init the card\n"); 291e705c121SKalle Valo goto out; 292e705c121SKalle Valo } 293e705c121SKalle Valo 294e705c121SKalle Valo if (trans->cfg->host_interrupt_operation_mode) { 295e705c121SKalle Valo /* 296e705c121SKalle Valo * This is a bit of an abuse - This is needed for 7260 / 3160 297e705c121SKalle Valo * only check host_interrupt_operation_mode even if this is 298e705c121SKalle Valo * not related to host_interrupt_operation_mode. 299e705c121SKalle Valo * 300e705c121SKalle Valo * Enable the oscillator to count wake up time for L1 exit. This 301e705c121SKalle Valo * consumes slightly more power (100uA) - but allows to be sure 302e705c121SKalle Valo * that we wake up from L1 on time. 303e705c121SKalle Valo * 304e705c121SKalle Valo * This looks weird: read twice the same register, discard the 305e705c121SKalle Valo * value, set a bit, and yet again, read that same register 306e705c121SKalle Valo * just to discard the value. But that's the way the hardware 307e705c121SKalle Valo * seems to like it. 308e705c121SKalle Valo */ 309e705c121SKalle Valo iwl_read_prph(trans, OSC_CLK); 310e705c121SKalle Valo iwl_read_prph(trans, OSC_CLK); 311e705c121SKalle Valo iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL); 312e705c121SKalle Valo iwl_read_prph(trans, OSC_CLK); 313e705c121SKalle Valo iwl_read_prph(trans, OSC_CLK); 314e705c121SKalle Valo } 315e705c121SKalle Valo 316e705c121SKalle Valo /* 317e705c121SKalle Valo * Enable DMA clock and wait for it to stabilize. 318e705c121SKalle Valo * 319e705c121SKalle Valo * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" 320e705c121SKalle Valo * bits do not disable clocks. This preserves any hardware 321e705c121SKalle Valo * bits already set by default in "CLK_CTRL_REG" after reset. 322e705c121SKalle Valo */ 323e705c121SKalle Valo if (!trans->cfg->apmg_not_supported) { 324e705c121SKalle Valo iwl_write_prph(trans, APMG_CLK_EN_REG, 325e705c121SKalle Valo APMG_CLK_VAL_DMA_CLK_RQT); 326e705c121SKalle Valo udelay(20); 327e705c121SKalle Valo 328e705c121SKalle Valo /* Disable L1-Active */ 329e705c121SKalle Valo iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, 330e705c121SKalle Valo APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 331e705c121SKalle Valo 332e705c121SKalle Valo /* Clear the interrupt in APMG if the NIC is in RFKILL */ 333e705c121SKalle Valo iwl_write_prph(trans, APMG_RTC_INT_STT_REG, 334e705c121SKalle Valo APMG_RTC_INT_STT_RFKILL); 335e705c121SKalle Valo } 336e705c121SKalle Valo 337e705c121SKalle Valo set_bit(STATUS_DEVICE_ENABLED, &trans->status); 338e705c121SKalle Valo 339e705c121SKalle Valo out: 340e705c121SKalle Valo return ret; 341e705c121SKalle Valo } 342e705c121SKalle Valo 343e705c121SKalle Valo /* 344e705c121SKalle Valo * Enable LP XTAL to avoid HW bug where device may consume much power if 345e705c121SKalle Valo * FW is not loaded after device reset. LP XTAL is disabled by default 346e705c121SKalle Valo * after device HW reset. Do it only if XTAL is fed by internal source. 347e705c121SKalle Valo * Configure device's "persistence" mode to avoid resetting XTAL again when 348e705c121SKalle Valo * SHRD_HW_RST occurs in S3. 349e705c121SKalle Valo */ 350e705c121SKalle Valo static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) 351e705c121SKalle Valo { 352e705c121SKalle Valo int ret; 353e705c121SKalle Valo u32 apmg_gp1_reg; 354e705c121SKalle Valo u32 apmg_xtal_cfg_reg; 355e705c121SKalle Valo u32 dl_cfg_reg; 356e705c121SKalle Valo 357e705c121SKalle Valo /* Force XTAL ON */ 358e705c121SKalle Valo __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 359e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_XTAL_ON); 360e705c121SKalle Valo 361e705c121SKalle Valo /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ 362e705c121SKalle Valo iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 363b7a08b28SJohannes Berg usleep_range(1000, 2000); 364e705c121SKalle Valo 365e705c121SKalle Valo /* 366e705c121SKalle Valo * Set "initialization complete" bit to move adapter from 367e705c121SKalle Valo * D0U* --> D0A* (powered-up active) state. 368e705c121SKalle Valo */ 369e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 370e705c121SKalle Valo 371e705c121SKalle Valo /* 372e705c121SKalle Valo * Wait for clock stabilization; once stabilized, access to 373e705c121SKalle Valo * device-internal resources is possible. 374e705c121SKalle Valo */ 375e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 376e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 377e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 378e705c121SKalle Valo 25000); 379e705c121SKalle Valo if (WARN_ON(ret < 0)) { 380e705c121SKalle Valo IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n"); 381e705c121SKalle Valo /* Release XTAL ON request */ 382e705c121SKalle Valo __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 383e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_XTAL_ON); 384e705c121SKalle Valo return; 385e705c121SKalle Valo } 386e705c121SKalle Valo 387e705c121SKalle Valo /* 388e705c121SKalle Valo * Clear "disable persistence" to avoid LP XTAL resetting when 389e705c121SKalle Valo * SHRD_HW_RST is applied in S3. 390e705c121SKalle Valo */ 391e705c121SKalle Valo iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 392e705c121SKalle Valo APMG_PCIDEV_STT_VAL_PERSIST_DIS); 393e705c121SKalle Valo 394e705c121SKalle Valo /* 395e705c121SKalle Valo * Force APMG XTAL to be active to prevent its disabling by HW 396e705c121SKalle Valo * caused by APMG idle state. 397e705c121SKalle Valo */ 398e705c121SKalle Valo apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans, 399e705c121SKalle Valo SHR_APMG_XTAL_CFG_REG); 400e705c121SKalle Valo iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, 401e705c121SKalle Valo apmg_xtal_cfg_reg | 402e705c121SKalle Valo SHR_APMG_XTAL_CFG_XTAL_ON_REQ); 403e705c121SKalle Valo 404e705c121SKalle Valo /* 405e705c121SKalle Valo * Reset entire device again - do controller reset (results in 406e705c121SKalle Valo * SHRD_HW_RST). Turn MAC off before proceeding. 407e705c121SKalle Valo */ 408e705c121SKalle Valo iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 409b7a08b28SJohannes Berg usleep_range(1000, 2000); 410e705c121SKalle Valo 411e705c121SKalle Valo /* Enable LP XTAL by indirect access through CSR */ 412e705c121SKalle Valo apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); 413e705c121SKalle Valo iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg | 414e705c121SKalle Valo SHR_APMG_GP1_WF_XTAL_LP_EN | 415e705c121SKalle Valo SHR_APMG_GP1_CHICKEN_BIT_SELECT); 416e705c121SKalle Valo 417e705c121SKalle Valo /* Clear delay line clock power up */ 418e705c121SKalle Valo dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG); 419e705c121SKalle Valo iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg & 420e705c121SKalle Valo ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP); 421e705c121SKalle Valo 422e705c121SKalle Valo /* 423e705c121SKalle Valo * Enable persistence mode to avoid LP XTAL resetting when 424e705c121SKalle Valo * SHRD_HW_RST is applied in S3. 425e705c121SKalle Valo */ 426e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 427e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_PERSIST_MODE); 428e705c121SKalle Valo 429e705c121SKalle Valo /* 430e705c121SKalle Valo * Clear "initialization complete" bit to move adapter from 431e705c121SKalle Valo * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 432e705c121SKalle Valo */ 433e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 434e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 435e705c121SKalle Valo 436e705c121SKalle Valo /* Activates XTAL resources monitor */ 437e705c121SKalle Valo __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, 438e705c121SKalle Valo CSR_MONITOR_XTAL_RESOURCES); 439e705c121SKalle Valo 440e705c121SKalle Valo /* Release XTAL ON request */ 441e705c121SKalle Valo __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 442e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_XTAL_ON); 443e705c121SKalle Valo udelay(10); 444e705c121SKalle Valo 445e705c121SKalle Valo /* Release APMG XTAL */ 446e705c121SKalle Valo iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, 447e705c121SKalle Valo apmg_xtal_cfg_reg & 448e705c121SKalle Valo ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); 449e705c121SKalle Valo } 450e705c121SKalle Valo 451e705c121SKalle Valo static int iwl_pcie_apm_stop_master(struct iwl_trans *trans) 452e705c121SKalle Valo { 453e705c121SKalle Valo int ret = 0; 454e705c121SKalle Valo 455e705c121SKalle Valo /* stop device's busmaster DMA activity */ 456e705c121SKalle Valo iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 457e705c121SKalle Valo 458e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_RESET, 459e705c121SKalle Valo CSR_RESET_REG_FLAG_MASTER_DISABLED, 460e705c121SKalle Valo CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 461e705c121SKalle Valo if (ret < 0) 462e705c121SKalle Valo IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); 463e705c121SKalle Valo 464e705c121SKalle Valo IWL_DEBUG_INFO(trans, "stop master\n"); 465e705c121SKalle Valo 466e705c121SKalle Valo return ret; 467e705c121SKalle Valo } 468e705c121SKalle Valo 469e705c121SKalle Valo static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) 470e705c121SKalle Valo { 471e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); 472e705c121SKalle Valo 473e705c121SKalle Valo if (op_mode_leave) { 474e705c121SKalle Valo if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 475e705c121SKalle Valo iwl_pcie_apm_init(trans); 476e705c121SKalle Valo 477e705c121SKalle Valo /* inform ME that we are leaving */ 478e705c121SKalle Valo if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) 479e705c121SKalle Valo iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, 480e705c121SKalle Valo APMG_PCIDEV_STT_VAL_WAKE_ME); 481e705c121SKalle Valo else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 482e705c121SKalle Valo iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, 483e705c121SKalle Valo CSR_RESET_LINK_PWR_MGMT_DISABLED); 484e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 485e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_PREPARE | 486e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_ENABLE_PME); 487e705c121SKalle Valo mdelay(1); 488e705c121SKalle Valo iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, 489e705c121SKalle Valo CSR_RESET_LINK_PWR_MGMT_DISABLED); 490e705c121SKalle Valo } 491e705c121SKalle Valo mdelay(5); 492e705c121SKalle Valo } 493e705c121SKalle Valo 494e705c121SKalle Valo clear_bit(STATUS_DEVICE_ENABLED, &trans->status); 495e705c121SKalle Valo 496e705c121SKalle Valo /* Stop device's DMA activity */ 497e705c121SKalle Valo iwl_pcie_apm_stop_master(trans); 498e705c121SKalle Valo 499e705c121SKalle Valo if (trans->cfg->lp_xtal_workaround) { 500e705c121SKalle Valo iwl_pcie_apm_lp_xtal_enable(trans); 501e705c121SKalle Valo return; 502e705c121SKalle Valo } 503e705c121SKalle Valo 504e705c121SKalle Valo /* Reset the entire device */ 505e705c121SKalle Valo iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 506b7a08b28SJohannes Berg usleep_range(1000, 2000); 507e705c121SKalle Valo 508e705c121SKalle Valo /* 509e705c121SKalle Valo * Clear "initialization complete" bit to move adapter from 510e705c121SKalle Valo * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 511e705c121SKalle Valo */ 512e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 513e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 514e705c121SKalle Valo } 515e705c121SKalle Valo 516e705c121SKalle Valo static int iwl_pcie_nic_init(struct iwl_trans *trans) 517e705c121SKalle Valo { 518e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 519e705c121SKalle Valo 520e705c121SKalle Valo /* nic_init */ 521e705c121SKalle Valo spin_lock(&trans_pcie->irq_lock); 522e705c121SKalle Valo iwl_pcie_apm_init(trans); 523e705c121SKalle Valo 524e705c121SKalle Valo spin_unlock(&trans_pcie->irq_lock); 525e705c121SKalle Valo 526e705c121SKalle Valo iwl_pcie_set_pwr(trans, false); 527e705c121SKalle Valo 528e705c121SKalle Valo iwl_op_mode_nic_config(trans->op_mode); 529e705c121SKalle Valo 530e705c121SKalle Valo /* Allocate the RX queue, or reset if it is already allocated */ 531e705c121SKalle Valo iwl_pcie_rx_init(trans); 532e705c121SKalle Valo 533e705c121SKalle Valo /* Allocate or reset and init all Tx and Command queues */ 534e705c121SKalle Valo if (iwl_pcie_tx_init(trans)) 535e705c121SKalle Valo return -ENOMEM; 536e705c121SKalle Valo 537e705c121SKalle Valo if (trans->cfg->base_params->shadow_reg_enable) { 538e705c121SKalle Valo /* enable shadow regs in HW */ 539e705c121SKalle Valo iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); 540e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); 541e705c121SKalle Valo } 542e705c121SKalle Valo 543e705c121SKalle Valo return 0; 544e705c121SKalle Valo } 545e705c121SKalle Valo 546e705c121SKalle Valo #define HW_READY_TIMEOUT (50) 547e705c121SKalle Valo 548e705c121SKalle Valo /* Note: returns poll_bit return value, which is >= 0 if success */ 549e705c121SKalle Valo static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) 550e705c121SKalle Valo { 551e705c121SKalle Valo int ret; 552e705c121SKalle Valo 553e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 554e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); 555e705c121SKalle Valo 556e705c121SKalle Valo /* See if we got it */ 557e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, 558e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 559e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 560e705c121SKalle Valo HW_READY_TIMEOUT); 561e705c121SKalle Valo 562e705c121SKalle Valo if (ret >= 0) 563e705c121SKalle Valo iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); 564e705c121SKalle Valo 565e705c121SKalle Valo IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); 566e705c121SKalle Valo return ret; 567e705c121SKalle Valo } 568e705c121SKalle Valo 569e705c121SKalle Valo /* Note: returns standard 0/-ERROR code */ 570e705c121SKalle Valo static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) 571e705c121SKalle Valo { 572e705c121SKalle Valo int ret; 573e705c121SKalle Valo int t = 0; 574e705c121SKalle Valo int iter; 575e705c121SKalle Valo 576e705c121SKalle Valo IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); 577e705c121SKalle Valo 578e705c121SKalle Valo ret = iwl_pcie_set_hw_ready(trans); 579e705c121SKalle Valo /* If the card is ready, exit 0 */ 580e705c121SKalle Valo if (ret >= 0) 581e705c121SKalle Valo return 0; 582e705c121SKalle Valo 583e705c121SKalle Valo iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, 584e705c121SKalle Valo CSR_RESET_LINK_PWR_MGMT_DISABLED); 585192185d6SJohannes Berg usleep_range(1000, 2000); 586e705c121SKalle Valo 587e705c121SKalle Valo for (iter = 0; iter < 10; iter++) { 588e705c121SKalle Valo /* If HW is not ready, prepare the conditions to check again */ 589e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 590e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_PREPARE); 591e705c121SKalle Valo 592e705c121SKalle Valo do { 593e705c121SKalle Valo ret = iwl_pcie_set_hw_ready(trans); 594e705c121SKalle Valo if (ret >= 0) 595e705c121SKalle Valo return 0; 596e705c121SKalle Valo 597e705c121SKalle Valo usleep_range(200, 1000); 598e705c121SKalle Valo t += 200; 599e705c121SKalle Valo } while (t < 150000); 600e705c121SKalle Valo msleep(25); 601e705c121SKalle Valo } 602e705c121SKalle Valo 603e705c121SKalle Valo IWL_ERR(trans, "Couldn't prepare the card\n"); 604e705c121SKalle Valo 605e705c121SKalle Valo return ret; 606e705c121SKalle Valo } 607e705c121SKalle Valo 608e705c121SKalle Valo /* 609e705c121SKalle Valo * ucode 610e705c121SKalle Valo */ 611564cdce7SSara Sharon static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans, 612564cdce7SSara Sharon u32 dst_addr, dma_addr_t phy_addr, 613564cdce7SSara Sharon u32 byte_cnt) 614e705c121SKalle Valo { 615bac842daSEmmanuel Grumbach iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 616e705c121SKalle Valo FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); 617e705c121SKalle Valo 618bac842daSEmmanuel Grumbach iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), 619e705c121SKalle Valo dst_addr); 620e705c121SKalle Valo 621bac842daSEmmanuel Grumbach iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), 622e705c121SKalle Valo phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 623e705c121SKalle Valo 624bac842daSEmmanuel Grumbach iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), 625e705c121SKalle Valo (iwl_get_dma_hi_addr(phy_addr) 626e705c121SKalle Valo << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); 627e705c121SKalle Valo 628bac842daSEmmanuel Grumbach iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 629bac842daSEmmanuel Grumbach BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) | 630bac842daSEmmanuel Grumbach BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) | 631e705c121SKalle Valo FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); 632e705c121SKalle Valo 633bac842daSEmmanuel Grumbach iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 634e705c121SKalle Valo FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 635e705c121SKalle Valo FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 636e705c121SKalle Valo FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 637564cdce7SSara Sharon } 638e705c121SKalle Valo 639564cdce7SSara Sharon static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans, 640564cdce7SSara Sharon u32 dst_addr, dma_addr_t phy_addr, 641564cdce7SSara Sharon u32 byte_cnt) 642564cdce7SSara Sharon { 643564cdce7SSara Sharon /* Stop DMA channel */ 644564cdce7SSara Sharon iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0); 645564cdce7SSara Sharon 646564cdce7SSara Sharon /* Configure SRAM address */ 647564cdce7SSara Sharon iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR, 648564cdce7SSara Sharon dst_addr); 649564cdce7SSara Sharon 650564cdce7SSara Sharon /* Configure DRAM address - 64 bit */ 651564cdce7SSara Sharon iwl_write64(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr); 652564cdce7SSara Sharon 653564cdce7SSara Sharon /* Configure byte count to transfer */ 654564cdce7SSara Sharon iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt); 655564cdce7SSara Sharon 656564cdce7SSara Sharon /* Enable the DRAM2SRAM to start */ 657564cdce7SSara Sharon iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP | 658564cdce7SSara Sharon TFH_SRV_DMA_TO_DRIVER | 659564cdce7SSara Sharon TFH_SRV_DMA_START); 660564cdce7SSara Sharon } 661564cdce7SSara Sharon 662564cdce7SSara Sharon static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, 663564cdce7SSara Sharon u32 dst_addr, dma_addr_t phy_addr, 664564cdce7SSara Sharon u32 byte_cnt) 665564cdce7SSara Sharon { 666564cdce7SSara Sharon struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 667564cdce7SSara Sharon unsigned long flags; 668564cdce7SSara Sharon int ret; 669564cdce7SSara Sharon 670564cdce7SSara Sharon trans_pcie->ucode_write_complete = false; 671564cdce7SSara Sharon 672564cdce7SSara Sharon if (!iwl_trans_grab_nic_access(trans, &flags)) 673564cdce7SSara Sharon return -EIO; 674564cdce7SSara Sharon 675564cdce7SSara Sharon if (trans->cfg->use_tfh) 676564cdce7SSara Sharon iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr, 677564cdce7SSara Sharon byte_cnt); 678564cdce7SSara Sharon else 679564cdce7SSara Sharon iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, 680564cdce7SSara Sharon byte_cnt); 681bac842daSEmmanuel Grumbach iwl_trans_release_nic_access(trans, &flags); 682bac842daSEmmanuel Grumbach 683e705c121SKalle Valo ret = wait_event_timeout(trans_pcie->ucode_write_waitq, 684e705c121SKalle Valo trans_pcie->ucode_write_complete, 5 * HZ); 685e705c121SKalle Valo if (!ret) { 686e705c121SKalle Valo IWL_ERR(trans, "Failed to load firmware chunk!\n"); 687e705c121SKalle Valo return -ETIMEDOUT; 688e705c121SKalle Valo } 689e705c121SKalle Valo 690e705c121SKalle Valo return 0; 691e705c121SKalle Valo } 692e705c121SKalle Valo 693e705c121SKalle Valo static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, 694e705c121SKalle Valo const struct fw_desc *section) 695e705c121SKalle Valo { 696e705c121SKalle Valo u8 *v_addr; 697e705c121SKalle Valo dma_addr_t p_addr; 698e705c121SKalle Valo u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len); 699e705c121SKalle Valo int ret = 0; 700e705c121SKalle Valo 701e705c121SKalle Valo IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", 702e705c121SKalle Valo section_num); 703e705c121SKalle Valo 704e705c121SKalle Valo v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, 705e705c121SKalle Valo GFP_KERNEL | __GFP_NOWARN); 706e705c121SKalle Valo if (!v_addr) { 707e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); 708e705c121SKalle Valo chunk_sz = PAGE_SIZE; 709e705c121SKalle Valo v_addr = dma_alloc_coherent(trans->dev, chunk_sz, 710e705c121SKalle Valo &p_addr, GFP_KERNEL); 711e705c121SKalle Valo if (!v_addr) 712e705c121SKalle Valo return -ENOMEM; 713e705c121SKalle Valo } 714e705c121SKalle Valo 715e705c121SKalle Valo for (offset = 0; offset < section->len; offset += chunk_sz) { 716e705c121SKalle Valo u32 copy_size, dst_addr; 717e705c121SKalle Valo bool extended_addr = false; 718e705c121SKalle Valo 719e705c121SKalle Valo copy_size = min_t(u32, chunk_sz, section->len - offset); 720e705c121SKalle Valo dst_addr = section->offset + offset; 721e705c121SKalle Valo 722e705c121SKalle Valo if (dst_addr >= IWL_FW_MEM_EXTENDED_START && 723e705c121SKalle Valo dst_addr <= IWL_FW_MEM_EXTENDED_END) 724e705c121SKalle Valo extended_addr = true; 725e705c121SKalle Valo 726e705c121SKalle Valo if (extended_addr) 727e705c121SKalle Valo iwl_set_bits_prph(trans, LMPM_CHICK, 728e705c121SKalle Valo LMPM_CHICK_EXTENDED_ADDR_SPACE); 729e705c121SKalle Valo 730e705c121SKalle Valo memcpy(v_addr, (u8 *)section->data + offset, copy_size); 731e705c121SKalle Valo ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr, 732e705c121SKalle Valo copy_size); 733e705c121SKalle Valo 734e705c121SKalle Valo if (extended_addr) 735e705c121SKalle Valo iwl_clear_bits_prph(trans, LMPM_CHICK, 736e705c121SKalle Valo LMPM_CHICK_EXTENDED_ADDR_SPACE); 737e705c121SKalle Valo 738e705c121SKalle Valo if (ret) { 739e705c121SKalle Valo IWL_ERR(trans, 740e705c121SKalle Valo "Could not load the [%d] uCode section\n", 741e705c121SKalle Valo section_num); 742e705c121SKalle Valo break; 743e705c121SKalle Valo } 744e705c121SKalle Valo } 745e705c121SKalle Valo 746e705c121SKalle Valo dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); 747e705c121SKalle Valo return ret; 748e705c121SKalle Valo } 749e705c121SKalle Valo 750e705c121SKalle Valo /* 751e705c121SKalle Valo * Driver Takes the ownership on secure machine before FW load 752e705c121SKalle Valo * and prevent race with the BT load. 753e705c121SKalle Valo * W/A for ROM bug. (should be remove in the next Si step) 754e705c121SKalle Valo */ 755e705c121SKalle Valo static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans) 756e705c121SKalle Valo { 757e705c121SKalle Valo u32 val, loop = 1000; 758e705c121SKalle Valo 759e705c121SKalle Valo /* 760e705c121SKalle Valo * Check the RSA semaphore is accessible. 761e705c121SKalle Valo * If the HW isn't locked and the rsa semaphore isn't accessible, 762e705c121SKalle Valo * we are in trouble. 763e705c121SKalle Valo */ 764e705c121SKalle Valo val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0); 765e705c121SKalle Valo if (val & (BIT(1) | BIT(17))) { 7669fc515bcSEmmanuel Grumbach IWL_DEBUG_INFO(trans, 767e705c121SKalle Valo "can't access the RSA semaphore it is write protected\n"); 768e705c121SKalle Valo return 0; 769e705c121SKalle Valo } 770e705c121SKalle Valo 771e705c121SKalle Valo /* take ownership on the AUX IF */ 772e705c121SKalle Valo iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK); 773e705c121SKalle Valo iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK); 774e705c121SKalle Valo 775e705c121SKalle Valo do { 776e705c121SKalle Valo iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1); 777e705c121SKalle Valo val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS); 778e705c121SKalle Valo if (val == 0x1) { 779e705c121SKalle Valo iwl_write_prph(trans, RSA_ENABLE, 0); 780e705c121SKalle Valo return 0; 781e705c121SKalle Valo } 782e705c121SKalle Valo 783e705c121SKalle Valo udelay(10); 784e705c121SKalle Valo loop--; 785e705c121SKalle Valo } while (loop > 0); 786e705c121SKalle Valo 787e705c121SKalle Valo IWL_ERR(trans, "Failed to take ownership on secure machine\n"); 788e705c121SKalle Valo return -EIO; 789e705c121SKalle Valo } 790e705c121SKalle Valo 791e705c121SKalle Valo static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, 792e705c121SKalle Valo const struct fw_img *image, 793e705c121SKalle Valo int cpu, 794e705c121SKalle Valo int *first_ucode_section) 795e705c121SKalle Valo { 796e705c121SKalle Valo int shift_param; 797e705c121SKalle Valo int i, ret = 0, sec_num = 0x1; 798e705c121SKalle Valo u32 val, last_read_idx = 0; 799e705c121SKalle Valo 800e705c121SKalle Valo if (cpu == 1) { 801e705c121SKalle Valo shift_param = 0; 802e705c121SKalle Valo *first_ucode_section = 0; 803e705c121SKalle Valo } else { 804e705c121SKalle Valo shift_param = 16; 805e705c121SKalle Valo (*first_ucode_section)++; 806e705c121SKalle Valo } 807e705c121SKalle Valo 808eef187a7SSara Sharon for (i = *first_ucode_section; i < image->num_sec; i++) { 809e705c121SKalle Valo last_read_idx = i; 810e705c121SKalle Valo 811e705c121SKalle Valo /* 812e705c121SKalle Valo * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 813e705c121SKalle Valo * CPU1 to CPU2. 814e705c121SKalle Valo * PAGING_SEPARATOR_SECTION delimiter - separate between 815e705c121SKalle Valo * CPU2 non paged to CPU2 paging sec. 816e705c121SKalle Valo */ 817e705c121SKalle Valo if (!image->sec[i].data || 818e705c121SKalle Valo image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || 819e705c121SKalle Valo image->sec[i].offset == PAGING_SEPARATOR_SECTION) { 820e705c121SKalle Valo IWL_DEBUG_FW(trans, 821e705c121SKalle Valo "Break since Data not valid or Empty section, sec = %d\n", 822e705c121SKalle Valo i); 823e705c121SKalle Valo break; 824e705c121SKalle Valo } 825e705c121SKalle Valo 826e705c121SKalle Valo ret = iwl_pcie_load_section(trans, i, &image->sec[i]); 827e705c121SKalle Valo if (ret) 828e705c121SKalle Valo return ret; 829e705c121SKalle Valo 830d6a2c5c7SSara Sharon /* Notify ucode of loaded section number and status */ 831d6a2c5c7SSara Sharon if (trans->cfg->use_tfh) { 832d6a2c5c7SSara Sharon val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS); 833d6a2c5c7SSara Sharon val = val | (sec_num << shift_param); 834d6a2c5c7SSara Sharon iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val); 835d6a2c5c7SSara Sharon } else { 836e705c121SKalle Valo val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); 837e705c121SKalle Valo val = val | (sec_num << shift_param); 838e705c121SKalle Valo iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); 839d6a2c5c7SSara Sharon } 840e705c121SKalle Valo sec_num = (sec_num << 1) | 0x1; 841e705c121SKalle Valo } 842e705c121SKalle Valo 843e705c121SKalle Valo *first_ucode_section = last_read_idx; 844e705c121SKalle Valo 8452aabdbdcSEmmanuel Grumbach iwl_enable_interrupts(trans); 8462aabdbdcSEmmanuel Grumbach 847d6a2c5c7SSara Sharon if (trans->cfg->use_tfh) { 848e705c121SKalle Valo if (cpu == 1) 849d6a2c5c7SSara Sharon iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 850d6a2c5c7SSara Sharon 0xFFFF); 851e705c121SKalle Valo else 852d6a2c5c7SSara Sharon iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 853d6a2c5c7SSara Sharon 0xFFFFFFFF); 854d6a2c5c7SSara Sharon } else { 855d6a2c5c7SSara Sharon if (cpu == 1) 856d6a2c5c7SSara Sharon iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 857d6a2c5c7SSara Sharon 0xFFFF); 858d6a2c5c7SSara Sharon else 859d6a2c5c7SSara Sharon iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 860d6a2c5c7SSara Sharon 0xFFFFFFFF); 861d6a2c5c7SSara Sharon } 862e705c121SKalle Valo 863e705c121SKalle Valo return 0; 864e705c121SKalle Valo } 865e705c121SKalle Valo 866e705c121SKalle Valo static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, 867e705c121SKalle Valo const struct fw_img *image, 868e705c121SKalle Valo int cpu, 869e705c121SKalle Valo int *first_ucode_section) 870e705c121SKalle Valo { 871e705c121SKalle Valo int i, ret = 0; 872e705c121SKalle Valo u32 last_read_idx = 0; 873e705c121SKalle Valo 8743ce4a038SKirtika Ruchandani if (cpu == 1) 875e705c121SKalle Valo *first_ucode_section = 0; 8763ce4a038SKirtika Ruchandani else 877e705c121SKalle Valo (*first_ucode_section)++; 878e705c121SKalle Valo 879eef187a7SSara Sharon for (i = *first_ucode_section; i < image->num_sec; i++) { 880e705c121SKalle Valo last_read_idx = i; 881e705c121SKalle Valo 882e705c121SKalle Valo /* 883e705c121SKalle Valo * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 884e705c121SKalle Valo * CPU1 to CPU2. 885e705c121SKalle Valo * PAGING_SEPARATOR_SECTION delimiter - separate between 886e705c121SKalle Valo * CPU2 non paged to CPU2 paging sec. 887e705c121SKalle Valo */ 888e705c121SKalle Valo if (!image->sec[i].data || 889e705c121SKalle Valo image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || 890e705c121SKalle Valo image->sec[i].offset == PAGING_SEPARATOR_SECTION) { 891e705c121SKalle Valo IWL_DEBUG_FW(trans, 892e705c121SKalle Valo "Break since Data not valid or Empty section, sec = %d\n", 893e705c121SKalle Valo i); 894e705c121SKalle Valo break; 895e705c121SKalle Valo } 896e705c121SKalle Valo 897e705c121SKalle Valo ret = iwl_pcie_load_section(trans, i, &image->sec[i]); 898e705c121SKalle Valo if (ret) 899e705c121SKalle Valo return ret; 900e705c121SKalle Valo } 901e705c121SKalle Valo 902e705c121SKalle Valo *first_ucode_section = last_read_idx; 903e705c121SKalle Valo 904e705c121SKalle Valo return 0; 905e705c121SKalle Valo } 906e705c121SKalle Valo 907e705c121SKalle Valo static void iwl_pcie_apply_destination(struct iwl_trans *trans) 908e705c121SKalle Valo { 909e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 910e705c121SKalle Valo const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv; 911e705c121SKalle Valo int i; 912e705c121SKalle Valo 913e705c121SKalle Valo if (dest->version) 914e705c121SKalle Valo IWL_ERR(trans, 915e705c121SKalle Valo "DBG DEST version is %d - expect issues\n", 916e705c121SKalle Valo dest->version); 917e705c121SKalle Valo 918e705c121SKalle Valo IWL_INFO(trans, "Applying debug destination %s\n", 919e705c121SKalle Valo get_fw_dbg_mode_string(dest->monitor_mode)); 920e705c121SKalle Valo 921e705c121SKalle Valo if (dest->monitor_mode == EXTERNAL_MODE) 922e705c121SKalle Valo iwl_pcie_alloc_fw_monitor(trans, dest->size_power); 923e705c121SKalle Valo else 924e705c121SKalle Valo IWL_WARN(trans, "PCI should have external buffer debug\n"); 925e705c121SKalle Valo 926e705c121SKalle Valo for (i = 0; i < trans->dbg_dest_reg_num; i++) { 927e705c121SKalle Valo u32 addr = le32_to_cpu(dest->reg_ops[i].addr); 928e705c121SKalle Valo u32 val = le32_to_cpu(dest->reg_ops[i].val); 929e705c121SKalle Valo 930e705c121SKalle Valo switch (dest->reg_ops[i].op) { 931e705c121SKalle Valo case CSR_ASSIGN: 932e705c121SKalle Valo iwl_write32(trans, addr, val); 933e705c121SKalle Valo break; 934e705c121SKalle Valo case CSR_SETBIT: 935e705c121SKalle Valo iwl_set_bit(trans, addr, BIT(val)); 936e705c121SKalle Valo break; 937e705c121SKalle Valo case CSR_CLEARBIT: 938e705c121SKalle Valo iwl_clear_bit(trans, addr, BIT(val)); 939e705c121SKalle Valo break; 940e705c121SKalle Valo case PRPH_ASSIGN: 941e705c121SKalle Valo iwl_write_prph(trans, addr, val); 942e705c121SKalle Valo break; 943e705c121SKalle Valo case PRPH_SETBIT: 944e705c121SKalle Valo iwl_set_bits_prph(trans, addr, BIT(val)); 945e705c121SKalle Valo break; 946e705c121SKalle Valo case PRPH_CLEARBIT: 947e705c121SKalle Valo iwl_clear_bits_prph(trans, addr, BIT(val)); 948e705c121SKalle Valo break; 949e705c121SKalle Valo case PRPH_BLOCKBIT: 950e705c121SKalle Valo if (iwl_read_prph(trans, addr) & BIT(val)) { 951e705c121SKalle Valo IWL_ERR(trans, 952e705c121SKalle Valo "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", 953e705c121SKalle Valo val, addr); 954e705c121SKalle Valo goto monitor; 955e705c121SKalle Valo } 956e705c121SKalle Valo break; 957e705c121SKalle Valo default: 958e705c121SKalle Valo IWL_ERR(trans, "FW debug - unknown OP %d\n", 959e705c121SKalle Valo dest->reg_ops[i].op); 960e705c121SKalle Valo break; 961e705c121SKalle Valo } 962e705c121SKalle Valo } 963e705c121SKalle Valo 964e705c121SKalle Valo monitor: 965e705c121SKalle Valo if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) { 966e705c121SKalle Valo iwl_write_prph(trans, le32_to_cpu(dest->base_reg), 967e705c121SKalle Valo trans_pcie->fw_mon_phys >> dest->base_shift); 96862d7476dSEmmanuel Grumbach if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 969e705c121SKalle Valo iwl_write_prph(trans, le32_to_cpu(dest->end_reg), 970e705c121SKalle Valo (trans_pcie->fw_mon_phys + 97162d7476dSEmmanuel Grumbach trans_pcie->fw_mon_size - 256) >> 97262d7476dSEmmanuel Grumbach dest->end_shift); 97362d7476dSEmmanuel Grumbach else 97462d7476dSEmmanuel Grumbach iwl_write_prph(trans, le32_to_cpu(dest->end_reg), 97562d7476dSEmmanuel Grumbach (trans_pcie->fw_mon_phys + 97662d7476dSEmmanuel Grumbach trans_pcie->fw_mon_size) >> 97762d7476dSEmmanuel Grumbach dest->end_shift); 978e705c121SKalle Valo } 979e705c121SKalle Valo } 980e705c121SKalle Valo 981e705c121SKalle Valo static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, 982e705c121SKalle Valo const struct fw_img *image) 983e705c121SKalle Valo { 984e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 985e705c121SKalle Valo int ret = 0; 986e705c121SKalle Valo int first_ucode_section; 987e705c121SKalle Valo 988e705c121SKalle Valo IWL_DEBUG_FW(trans, "working with %s CPU\n", 989e705c121SKalle Valo image->is_dual_cpus ? "Dual" : "Single"); 990e705c121SKalle Valo 991e705c121SKalle Valo /* load to FW the binary non secured sections of CPU1 */ 992e705c121SKalle Valo ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section); 993e705c121SKalle Valo if (ret) 994e705c121SKalle Valo return ret; 995e705c121SKalle Valo 996e705c121SKalle Valo if (image->is_dual_cpus) { 997e705c121SKalle Valo /* set CPU2 header address */ 998e705c121SKalle Valo iwl_write_prph(trans, 999e705c121SKalle Valo LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, 1000e705c121SKalle Valo LMPM_SECURE_CPU2_HDR_MEM_SPACE); 1001e705c121SKalle Valo 1002e705c121SKalle Valo /* load to FW the binary sections of CPU2 */ 1003e705c121SKalle Valo ret = iwl_pcie_load_cpu_sections(trans, image, 2, 1004e705c121SKalle Valo &first_ucode_section); 1005e705c121SKalle Valo if (ret) 1006e705c121SKalle Valo return ret; 1007e705c121SKalle Valo } 1008e705c121SKalle Valo 1009e705c121SKalle Valo /* supported for 7000 only for the moment */ 1010e705c121SKalle Valo if (iwlwifi_mod_params.fw_monitor && 1011e705c121SKalle Valo trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 1012e705c121SKalle Valo iwl_pcie_alloc_fw_monitor(trans, 0); 1013e705c121SKalle Valo 1014e705c121SKalle Valo if (trans_pcie->fw_mon_size) { 1015e705c121SKalle Valo iwl_write_prph(trans, MON_BUFF_BASE_ADDR, 1016e705c121SKalle Valo trans_pcie->fw_mon_phys >> 4); 1017e705c121SKalle Valo iwl_write_prph(trans, MON_BUFF_END_ADDR, 1018e705c121SKalle Valo (trans_pcie->fw_mon_phys + 1019e705c121SKalle Valo trans_pcie->fw_mon_size) >> 4); 1020e705c121SKalle Valo } 1021e705c121SKalle Valo } else if (trans->dbg_dest_tlv) { 1022e705c121SKalle Valo iwl_pcie_apply_destination(trans); 1023e705c121SKalle Valo } 1024e705c121SKalle Valo 10252aabdbdcSEmmanuel Grumbach iwl_enable_interrupts(trans); 10262aabdbdcSEmmanuel Grumbach 1027e705c121SKalle Valo /* release CPU reset */ 1028e705c121SKalle Valo iwl_write32(trans, CSR_RESET, 0); 1029e705c121SKalle Valo 1030e705c121SKalle Valo return 0; 1031e705c121SKalle Valo } 1032e705c121SKalle Valo 1033e705c121SKalle Valo static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, 1034e705c121SKalle Valo const struct fw_img *image) 1035e705c121SKalle Valo { 1036e705c121SKalle Valo int ret = 0; 1037e705c121SKalle Valo int first_ucode_section; 1038e705c121SKalle Valo 1039e705c121SKalle Valo IWL_DEBUG_FW(trans, "working with %s CPU\n", 1040e705c121SKalle Valo image->is_dual_cpus ? "Dual" : "Single"); 1041e705c121SKalle Valo 1042e705c121SKalle Valo if (trans->dbg_dest_tlv) 1043e705c121SKalle Valo iwl_pcie_apply_destination(trans); 1044e705c121SKalle Valo 1045e705c121SKalle Valo /* TODO: remove in the next Si step */ 1046e705c121SKalle Valo ret = iwl_pcie_rsa_race_bug_wa(trans); 1047e705c121SKalle Valo if (ret) 1048e705c121SKalle Valo return ret; 1049e705c121SKalle Valo 105082ea7966SSara Sharon IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n", 105182ea7966SSara Sharon iwl_read_prph(trans, WFPM_GP2)); 105282ea7966SSara Sharon 105382ea7966SSara Sharon /* 105482ea7966SSara Sharon * Set default value. On resume reading the values that were 105582ea7966SSara Sharon * zeored can provide debug data on the resume flow. 105682ea7966SSara Sharon * This is for debugging only and has no functional impact. 105782ea7966SSara Sharon */ 105882ea7966SSara Sharon iwl_write_prph(trans, WFPM_GP2, 0x01010101); 105982ea7966SSara Sharon 1060e705c121SKalle Valo /* configure the ucode to be ready to get the secured image */ 1061e705c121SKalle Valo /* release CPU reset */ 1062e705c121SKalle Valo iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); 1063e705c121SKalle Valo 1064e705c121SKalle Valo /* load to FW the binary Secured sections of CPU1 */ 1065e705c121SKalle Valo ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1, 1066e705c121SKalle Valo &first_ucode_section); 1067e705c121SKalle Valo if (ret) 1068e705c121SKalle Valo return ret; 1069e705c121SKalle Valo 1070e705c121SKalle Valo /* load to FW the binary sections of CPU2 */ 1071e705c121SKalle Valo return iwl_pcie_load_cpu_sections_8000(trans, image, 2, 1072e705c121SKalle Valo &first_ucode_section); 1073e705c121SKalle Valo } 1074e705c121SKalle Valo 1075727c02dfSSara Sharon static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans) 1076727c02dfSSara Sharon { 1077727c02dfSSara Sharon bool hw_rfkill = iwl_is_rfkill_set(trans); 1078727c02dfSSara Sharon 1079727c02dfSSara Sharon if (hw_rfkill) 1080727c02dfSSara Sharon set_bit(STATUS_RFKILL, &trans->status); 1081727c02dfSSara Sharon else 1082727c02dfSSara Sharon clear_bit(STATUS_RFKILL, &trans->status); 1083727c02dfSSara Sharon 1084727c02dfSSara Sharon iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1085727c02dfSSara Sharon 1086727c02dfSSara Sharon return hw_rfkill; 1087727c02dfSSara Sharon } 1088727c02dfSSara Sharon 10897ca00409SHaim Dreyfuss struct iwl_causes_list { 10907ca00409SHaim Dreyfuss u32 cause_num; 10917ca00409SHaim Dreyfuss u32 mask_reg; 10927ca00409SHaim Dreyfuss u8 addr; 10937ca00409SHaim Dreyfuss }; 10947ca00409SHaim Dreyfuss 10957ca00409SHaim Dreyfuss static struct iwl_causes_list causes_list[] = { 10967ca00409SHaim Dreyfuss {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, 10977ca00409SHaim Dreyfuss {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, 10987ca00409SHaim Dreyfuss {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, 10997ca00409SHaim Dreyfuss {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, 11007ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, 11017ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, 11027ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, 11037ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, 11047ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, 11057ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, 11067ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, 11077ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, 11087ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, 11097ca00409SHaim Dreyfuss {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, 11107ca00409SHaim Dreyfuss }; 11117ca00409SHaim Dreyfuss 11127ca00409SHaim Dreyfuss static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) 11137ca00409SHaim Dreyfuss { 11147ca00409SHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 11157ca00409SHaim Dreyfuss int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; 11167ca00409SHaim Dreyfuss int i; 11177ca00409SHaim Dreyfuss 11187ca00409SHaim Dreyfuss /* 11197ca00409SHaim Dreyfuss * Access all non RX causes and map them to the default irq. 11207ca00409SHaim Dreyfuss * In case we are missing at least one interrupt vector, 11217ca00409SHaim Dreyfuss * the first interrupt vector will serve non-RX and FBQ causes. 11227ca00409SHaim Dreyfuss */ 11237ca00409SHaim Dreyfuss for (i = 0; i < ARRAY_SIZE(causes_list); i++) { 11247ca00409SHaim Dreyfuss iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val); 11257ca00409SHaim Dreyfuss iwl_clear_bit(trans, causes_list[i].mask_reg, 11267ca00409SHaim Dreyfuss causes_list[i].cause_num); 11277ca00409SHaim Dreyfuss } 11287ca00409SHaim Dreyfuss } 11297ca00409SHaim Dreyfuss 11307ca00409SHaim Dreyfuss static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) 11317ca00409SHaim Dreyfuss { 11327ca00409SHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 11337ca00409SHaim Dreyfuss u32 offset = 11347ca00409SHaim Dreyfuss trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; 11357ca00409SHaim Dreyfuss u32 val, idx; 11367ca00409SHaim Dreyfuss 11377ca00409SHaim Dreyfuss /* 11387ca00409SHaim Dreyfuss * The first RX queue - fallback queue, which is designated for 11397ca00409SHaim Dreyfuss * management frame, command responses etc, is always mapped to the 11407ca00409SHaim Dreyfuss * first interrupt vector. The other RX queues are mapped to 11417ca00409SHaim Dreyfuss * the other (N - 2) interrupt vectors. 11427ca00409SHaim Dreyfuss */ 11437ca00409SHaim Dreyfuss val = BIT(MSIX_FH_INT_CAUSES_Q(0)); 11447ca00409SHaim Dreyfuss for (idx = 1; idx < trans->num_rx_queues; idx++) { 11457ca00409SHaim Dreyfuss iwl_write8(trans, CSR_MSIX_RX_IVAR(idx), 11467ca00409SHaim Dreyfuss MSIX_FH_INT_CAUSES_Q(idx - offset)); 11477ca00409SHaim Dreyfuss val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); 11487ca00409SHaim Dreyfuss } 11497ca00409SHaim Dreyfuss iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val); 11507ca00409SHaim Dreyfuss 11517ca00409SHaim Dreyfuss val = MSIX_FH_INT_CAUSES_Q(0); 11527ca00409SHaim Dreyfuss if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) 11537ca00409SHaim Dreyfuss val |= MSIX_NON_AUTO_CLEAR_CAUSE; 11547ca00409SHaim Dreyfuss iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val); 11557ca00409SHaim Dreyfuss 11567ca00409SHaim Dreyfuss if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) 11577ca00409SHaim Dreyfuss iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); 11587ca00409SHaim Dreyfuss } 11597ca00409SHaim Dreyfuss 116083730058SHaim Dreyfuss static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) 11617ca00409SHaim Dreyfuss { 11627ca00409SHaim Dreyfuss struct iwl_trans *trans = trans_pcie->trans; 11637ca00409SHaim Dreyfuss 11647ca00409SHaim Dreyfuss if (!trans_pcie->msix_enabled) { 1165d7270d61SHaim Dreyfuss if (trans->cfg->mq_rx_supported && 1166d7270d61SHaim Dreyfuss test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 11677ca00409SHaim Dreyfuss iwl_write_prph(trans, UREG_CHICK, 11687ca00409SHaim Dreyfuss UREG_CHICK_MSI_ENABLE); 11697ca00409SHaim Dreyfuss return; 11707ca00409SHaim Dreyfuss } 1171d7270d61SHaim Dreyfuss /* 1172d7270d61SHaim Dreyfuss * The IVAR table needs to be configured again after reset, 1173d7270d61SHaim Dreyfuss * but if the device is disabled, we can't write to 1174d7270d61SHaim Dreyfuss * prph. 1175d7270d61SHaim Dreyfuss */ 1176d7270d61SHaim Dreyfuss if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 11777ca00409SHaim Dreyfuss iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); 11787ca00409SHaim Dreyfuss 11797ca00409SHaim Dreyfuss /* 11807ca00409SHaim Dreyfuss * Each cause from the causes list above and the RX causes is 11817ca00409SHaim Dreyfuss * represented as a byte in the IVAR table. The first nibble 11827ca00409SHaim Dreyfuss * represents the bound interrupt vector of the cause, the second 11837ca00409SHaim Dreyfuss * represents no auto clear for this cause. This will be set if its 11847ca00409SHaim Dreyfuss * interrupt vector is bound to serve other causes. 11857ca00409SHaim Dreyfuss */ 11867ca00409SHaim Dreyfuss iwl_pcie_map_rx_causes(trans); 11877ca00409SHaim Dreyfuss 11887ca00409SHaim Dreyfuss iwl_pcie_map_non_rx_causes(trans); 118983730058SHaim Dreyfuss } 11907ca00409SHaim Dreyfuss 119183730058SHaim Dreyfuss static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) 119283730058SHaim Dreyfuss { 119383730058SHaim Dreyfuss struct iwl_trans *trans = trans_pcie->trans; 119483730058SHaim Dreyfuss 119583730058SHaim Dreyfuss iwl_pcie_conf_msix_hw(trans_pcie); 119683730058SHaim Dreyfuss 119783730058SHaim Dreyfuss if (!trans_pcie->msix_enabled) 119883730058SHaim Dreyfuss return; 119983730058SHaim Dreyfuss 120083730058SHaim Dreyfuss trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); 12017ca00409SHaim Dreyfuss trans_pcie->fh_mask = trans_pcie->fh_init_mask; 120283730058SHaim Dreyfuss trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); 12037ca00409SHaim Dreyfuss trans_pcie->hw_mask = trans_pcie->hw_init_mask; 12047ca00409SHaim Dreyfuss } 12057ca00409SHaim Dreyfuss 1206e705c121SKalle Valo static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1207e705c121SKalle Valo { 1208e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1209e705c121SKalle Valo bool hw_rfkill, was_hw_rfkill; 1210e705c121SKalle Valo 1211e705c121SKalle Valo lockdep_assert_held(&trans_pcie->mutex); 1212e705c121SKalle Valo 1213e705c121SKalle Valo if (trans_pcie->is_down) 1214e705c121SKalle Valo return; 1215e705c121SKalle Valo 1216e705c121SKalle Valo trans_pcie->is_down = true; 1217e705c121SKalle Valo 1218e705c121SKalle Valo was_hw_rfkill = iwl_is_rfkill_set(trans); 1219e705c121SKalle Valo 1220e705c121SKalle Valo /* tell the device to stop sending interrupts */ 1221e705c121SKalle Valo iwl_disable_interrupts(trans); 1222e705c121SKalle Valo 1223e705c121SKalle Valo /* device going down, Stop using ICT table */ 1224e705c121SKalle Valo iwl_pcie_disable_ict(trans); 1225e705c121SKalle Valo 1226e705c121SKalle Valo /* 1227e705c121SKalle Valo * If a HW restart happens during firmware loading, 1228e705c121SKalle Valo * then the firmware loading might call this function 1229e705c121SKalle Valo * and later it might be called again due to the 1230e705c121SKalle Valo * restart. So don't process again if the device is 1231e705c121SKalle Valo * already dead. 1232e705c121SKalle Valo */ 1233e705c121SKalle Valo if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { 1234a6bd005fSEmmanuel Grumbach IWL_DEBUG_INFO(trans, 1235a6bd005fSEmmanuel Grumbach "DEVICE_ENABLED bit was set and is now cleared\n"); 1236e705c121SKalle Valo iwl_pcie_tx_stop(trans); 1237e705c121SKalle Valo iwl_pcie_rx_stop(trans); 1238e705c121SKalle Valo 1239e705c121SKalle Valo /* Power-down device's busmaster DMA clocks */ 1240e705c121SKalle Valo if (!trans->cfg->apmg_not_supported) { 1241e705c121SKalle Valo iwl_write_prph(trans, APMG_CLK_DIS_REG, 1242e705c121SKalle Valo APMG_CLK_VAL_DMA_CLK_RQT); 1243e705c121SKalle Valo udelay(5); 1244e705c121SKalle Valo } 1245e705c121SKalle Valo } 1246e705c121SKalle Valo 1247e705c121SKalle Valo /* Make sure (redundant) we've released our request to stay awake */ 1248e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 1249e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1250e705c121SKalle Valo 1251e705c121SKalle Valo /* Stop the device, and put it in low power state */ 1252e705c121SKalle Valo iwl_pcie_apm_stop(trans, false); 1253e705c121SKalle Valo 1254e705c121SKalle Valo /* stop and reset the on-board processor */ 1255e705c121SKalle Valo iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 1256b7a08b28SJohannes Berg usleep_range(1000, 2000); 1257e705c121SKalle Valo 1258e705c121SKalle Valo /* 1259f4a1f04aSGolan Ben Ami * Upon stop, the IVAR table gets erased, so msi-x won't 1260f4a1f04aSGolan Ben Ami * work. This causes a bug in RF-KILL flows, since the interrupt 1261f4a1f04aSGolan Ben Ami * that enables radio won't fire on the correct irq, and the 1262f4a1f04aSGolan Ben Ami * driver won't be able to handle the interrupt. 1263f4a1f04aSGolan Ben Ami * Configure the IVAR table again after reset. 1264f4a1f04aSGolan Ben Ami */ 1265f4a1f04aSGolan Ben Ami iwl_pcie_conf_msix_hw(trans_pcie); 1266f4a1f04aSGolan Ben Ami 1267f4a1f04aSGolan Ben Ami /* 1268e705c121SKalle Valo * Upon stop, the APM issues an interrupt if HW RF kill is set. 1269e705c121SKalle Valo * This is a bug in certain verions of the hardware. 1270e705c121SKalle Valo * Certain devices also keep sending HW RF kill interrupt all 1271e705c121SKalle Valo * the time, unless the interrupt is ACKed even if the interrupt 1272e705c121SKalle Valo * should be masked. Re-ACK all the interrupts here. 1273e705c121SKalle Valo */ 1274e705c121SKalle Valo iwl_disable_interrupts(trans); 1275e705c121SKalle Valo 1276e705c121SKalle Valo /* clear all status bits */ 1277e705c121SKalle Valo clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1278e705c121SKalle Valo clear_bit(STATUS_INT_ENABLED, &trans->status); 1279e705c121SKalle Valo clear_bit(STATUS_TPOWER_PMI, &trans->status); 1280e705c121SKalle Valo clear_bit(STATUS_RFKILL, &trans->status); 1281e705c121SKalle Valo 1282e705c121SKalle Valo /* 1283e705c121SKalle Valo * Even if we stop the HW, we still want the RF kill 1284e705c121SKalle Valo * interrupt 1285e705c121SKalle Valo */ 1286e705c121SKalle Valo iwl_enable_rfkill_int(trans); 1287e705c121SKalle Valo 1288e705c121SKalle Valo /* 1289e705c121SKalle Valo * Check again since the RF kill state may have changed while 1290e705c121SKalle Valo * all the interrupts were disabled, in this case we couldn't 1291e705c121SKalle Valo * receive the RF kill interrupt and update the state in the 1292e705c121SKalle Valo * op_mode. 1293e705c121SKalle Valo * Don't call the op_mode if the rkfill state hasn't changed. 1294e705c121SKalle Valo * This allows the op_mode to call stop_device from the rfkill 1295e705c121SKalle Valo * notification without endless recursion. Under very rare 1296e705c121SKalle Valo * circumstances, we might have a small recursion if the rfkill 1297e705c121SKalle Valo * state changed exactly now while we were called from stop_device. 1298e705c121SKalle Valo * This is very unlikely but can happen and is supported. 1299e705c121SKalle Valo */ 1300e705c121SKalle Valo hw_rfkill = iwl_is_rfkill_set(trans); 1301e705c121SKalle Valo if (hw_rfkill) 1302e705c121SKalle Valo set_bit(STATUS_RFKILL, &trans->status); 1303e705c121SKalle Valo else 1304e705c121SKalle Valo clear_bit(STATUS_RFKILL, &trans->status); 1305e705c121SKalle Valo if (hw_rfkill != was_hw_rfkill) 1306e705c121SKalle Valo iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1307e705c121SKalle Valo 1308a6bd005fSEmmanuel Grumbach /* re-take ownership to prevent other users from stealing the device */ 1309e705c121SKalle Valo iwl_pcie_prepare_card_hw(trans); 1310e705c121SKalle Valo } 1311e705c121SKalle Valo 13122e5d4a8fSHaim Dreyfuss static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) 13132e5d4a8fSHaim Dreyfuss { 13142e5d4a8fSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 13152e5d4a8fSHaim Dreyfuss 13162e5d4a8fSHaim Dreyfuss if (trans_pcie->msix_enabled) { 13172e5d4a8fSHaim Dreyfuss int i; 13182e5d4a8fSHaim Dreyfuss 1319496d83caSHaim Dreyfuss for (i = 0; i < trans_pcie->alloc_vecs; i++) 13202e5d4a8fSHaim Dreyfuss synchronize_irq(trans_pcie->msix_entries[i].vector); 13212e5d4a8fSHaim Dreyfuss } else { 13222e5d4a8fSHaim Dreyfuss synchronize_irq(trans_pcie->pci_dev->irq); 13232e5d4a8fSHaim Dreyfuss } 13242e5d4a8fSHaim Dreyfuss } 13252e5d4a8fSHaim Dreyfuss 1326a6bd005fSEmmanuel Grumbach static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, 1327a6bd005fSEmmanuel Grumbach const struct fw_img *fw, bool run_in_rfkill) 1328a6bd005fSEmmanuel Grumbach { 1329a6bd005fSEmmanuel Grumbach struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1330a6bd005fSEmmanuel Grumbach bool hw_rfkill; 1331a6bd005fSEmmanuel Grumbach int ret; 1332a6bd005fSEmmanuel Grumbach 1333a6bd005fSEmmanuel Grumbach /* This may fail if AMT took ownership of the device */ 1334a6bd005fSEmmanuel Grumbach if (iwl_pcie_prepare_card_hw(trans)) { 1335a6bd005fSEmmanuel Grumbach IWL_WARN(trans, "Exit HW not ready\n"); 1336a6bd005fSEmmanuel Grumbach ret = -EIO; 1337a6bd005fSEmmanuel Grumbach goto out; 1338a6bd005fSEmmanuel Grumbach } 1339a6bd005fSEmmanuel Grumbach 1340a6bd005fSEmmanuel Grumbach iwl_enable_rfkill_int(trans); 1341a6bd005fSEmmanuel Grumbach 1342a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_INT, 0xFFFFFFFF); 1343a6bd005fSEmmanuel Grumbach 1344a6bd005fSEmmanuel Grumbach /* 1345a6bd005fSEmmanuel Grumbach * We enabled the RF-Kill interrupt and the handler may very 1346a6bd005fSEmmanuel Grumbach * well be running. Disable the interrupts to make sure no other 1347a6bd005fSEmmanuel Grumbach * interrupt can be fired. 1348a6bd005fSEmmanuel Grumbach */ 1349a6bd005fSEmmanuel Grumbach iwl_disable_interrupts(trans); 1350a6bd005fSEmmanuel Grumbach 1351a6bd005fSEmmanuel Grumbach /* Make sure it finished running */ 13522e5d4a8fSHaim Dreyfuss iwl_pcie_synchronize_irqs(trans); 1353a6bd005fSEmmanuel Grumbach 1354a6bd005fSEmmanuel Grumbach mutex_lock(&trans_pcie->mutex); 1355a6bd005fSEmmanuel Grumbach 1356a6bd005fSEmmanuel Grumbach /* If platform's RF_KILL switch is NOT set to KILL */ 1357727c02dfSSara Sharon hw_rfkill = iwl_trans_check_hw_rf_kill(trans); 1358a6bd005fSEmmanuel Grumbach if (hw_rfkill && !run_in_rfkill) { 1359a6bd005fSEmmanuel Grumbach ret = -ERFKILL; 1360a6bd005fSEmmanuel Grumbach goto out; 1361a6bd005fSEmmanuel Grumbach } 1362a6bd005fSEmmanuel Grumbach 1363a6bd005fSEmmanuel Grumbach /* Someone called stop_device, don't try to start_fw */ 1364a6bd005fSEmmanuel Grumbach if (trans_pcie->is_down) { 1365a6bd005fSEmmanuel Grumbach IWL_WARN(trans, 1366a6bd005fSEmmanuel Grumbach "Can't start_fw since the HW hasn't been started\n"); 136720aa99bbSAnton Protopopov ret = -EIO; 1368a6bd005fSEmmanuel Grumbach goto out; 1369a6bd005fSEmmanuel Grumbach } 1370a6bd005fSEmmanuel Grumbach 1371a6bd005fSEmmanuel Grumbach /* make sure rfkill handshake bits are cleared */ 1372a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 1373a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, 1374a6bd005fSEmmanuel Grumbach CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 1375a6bd005fSEmmanuel Grumbach 1376a6bd005fSEmmanuel Grumbach /* clear (again), then enable host interrupts */ 1377a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_INT, 0xFFFFFFFF); 1378a6bd005fSEmmanuel Grumbach 1379a6bd005fSEmmanuel Grumbach ret = iwl_pcie_nic_init(trans); 1380a6bd005fSEmmanuel Grumbach if (ret) { 1381a6bd005fSEmmanuel Grumbach IWL_ERR(trans, "Unable to init nic\n"); 1382a6bd005fSEmmanuel Grumbach goto out; 1383a6bd005fSEmmanuel Grumbach } 1384a6bd005fSEmmanuel Grumbach 1385a6bd005fSEmmanuel Grumbach /* 1386a6bd005fSEmmanuel Grumbach * Now, we load the firmware and don't want to be interrupted, even 1387a6bd005fSEmmanuel Grumbach * by the RF-Kill interrupt (hence mask all the interrupt besides the 1388a6bd005fSEmmanuel Grumbach * FH_TX interrupt which is needed to load the firmware). If the 1389a6bd005fSEmmanuel Grumbach * RF-Kill switch is toggled, we will find out after having loaded 1390a6bd005fSEmmanuel Grumbach * the firmware and return the proper value to the caller. 1391a6bd005fSEmmanuel Grumbach */ 1392a6bd005fSEmmanuel Grumbach iwl_enable_fw_load_int(trans); 1393a6bd005fSEmmanuel Grumbach 1394a6bd005fSEmmanuel Grumbach /* really make sure rfkill handshake bits are cleared */ 1395a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 1396a6bd005fSEmmanuel Grumbach iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 1397a6bd005fSEmmanuel Grumbach 1398a6bd005fSEmmanuel Grumbach /* Load the given image to the HW */ 1399a6bd005fSEmmanuel Grumbach if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 1400a6bd005fSEmmanuel Grumbach ret = iwl_pcie_load_given_ucode_8000(trans, fw); 1401a6bd005fSEmmanuel Grumbach else 1402a6bd005fSEmmanuel Grumbach ret = iwl_pcie_load_given_ucode(trans, fw); 1403a6bd005fSEmmanuel Grumbach 1404a6bd005fSEmmanuel Grumbach /* re-check RF-Kill state since we may have missed the interrupt */ 1405727c02dfSSara Sharon hw_rfkill = iwl_trans_check_hw_rf_kill(trans); 1406a6bd005fSEmmanuel Grumbach if (hw_rfkill && !run_in_rfkill) 1407a6bd005fSEmmanuel Grumbach ret = -ERFKILL; 1408a6bd005fSEmmanuel Grumbach 1409a6bd005fSEmmanuel Grumbach out: 1410a6bd005fSEmmanuel Grumbach mutex_unlock(&trans_pcie->mutex); 1411a6bd005fSEmmanuel Grumbach return ret; 1412a6bd005fSEmmanuel Grumbach } 1413a6bd005fSEmmanuel Grumbach 1414a6bd005fSEmmanuel Grumbach static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) 1415a6bd005fSEmmanuel Grumbach { 1416a6bd005fSEmmanuel Grumbach iwl_pcie_reset_ict(trans); 1417a6bd005fSEmmanuel Grumbach iwl_pcie_tx_start(trans, scd_addr); 1418a6bd005fSEmmanuel Grumbach } 1419a6bd005fSEmmanuel Grumbach 1420e705c121SKalle Valo static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1421e705c121SKalle Valo { 1422e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1423e705c121SKalle Valo 1424e705c121SKalle Valo mutex_lock(&trans_pcie->mutex); 1425e705c121SKalle Valo _iwl_trans_pcie_stop_device(trans, low_power); 1426e705c121SKalle Valo mutex_unlock(&trans_pcie->mutex); 1427e705c121SKalle Valo } 1428e705c121SKalle Valo 1429e705c121SKalle Valo void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) 1430e705c121SKalle Valo { 1431e705c121SKalle Valo struct iwl_trans_pcie __maybe_unused *trans_pcie = 1432e705c121SKalle Valo IWL_TRANS_GET_PCIE_TRANS(trans); 1433e705c121SKalle Valo 1434e705c121SKalle Valo lockdep_assert_held(&trans_pcie->mutex); 1435e705c121SKalle Valo 1436e705c121SKalle Valo if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) 1437e705c121SKalle Valo _iwl_trans_pcie_stop_device(trans, true); 1438e705c121SKalle Valo } 1439e705c121SKalle Valo 144023ae6128SMatti Gottlieb static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, 144123ae6128SMatti Gottlieb bool reset) 1442e705c121SKalle Valo { 144323ae6128SMatti Gottlieb if (!reset) { 1444e705c121SKalle Valo /* Enable persistence mode to avoid reset */ 1445e705c121SKalle Valo iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 1446e705c121SKalle Valo CSR_HW_IF_CONFIG_REG_PERSIST_MODE); 1447e705c121SKalle Valo } 1448e705c121SKalle Valo 1449e705c121SKalle Valo iwl_disable_interrupts(trans); 1450e705c121SKalle Valo 1451e705c121SKalle Valo /* 1452e705c121SKalle Valo * in testing mode, the host stays awake and the 1453e705c121SKalle Valo * hardware won't be reset (not even partially) 1454e705c121SKalle Valo */ 1455e705c121SKalle Valo if (test) 1456e705c121SKalle Valo return; 1457e705c121SKalle Valo 1458e705c121SKalle Valo iwl_pcie_disable_ict(trans); 1459e705c121SKalle Valo 14602e5d4a8fSHaim Dreyfuss iwl_pcie_synchronize_irqs(trans); 1461e705c121SKalle Valo 1462e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 1463e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1464e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 1465e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1466e705c121SKalle Valo 14671316d595SSara Sharon iwl_pcie_enable_rx_wake(trans, false); 14681316d595SSara Sharon 146923ae6128SMatti Gottlieb if (reset) { 1470e705c121SKalle Valo /* 1471e705c121SKalle Valo * reset TX queues -- some of their registers reset during S3 1472e705c121SKalle Valo * so if we don't reset everything here the D3 image would try 1473e705c121SKalle Valo * to execute some invalid memory upon resume 1474e705c121SKalle Valo */ 1475e705c121SKalle Valo iwl_trans_pcie_tx_reset(trans); 1476e705c121SKalle Valo } 1477e705c121SKalle Valo 1478e705c121SKalle Valo iwl_pcie_set_pwr(trans, true); 1479e705c121SKalle Valo } 1480e705c121SKalle Valo 1481e705c121SKalle Valo static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, 1482e705c121SKalle Valo enum iwl_d3_status *status, 148323ae6128SMatti Gottlieb bool test, bool reset) 1484e705c121SKalle Valo { 1485d7270d61SHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1486e705c121SKalle Valo u32 val; 1487e705c121SKalle Valo int ret; 1488e705c121SKalle Valo 1489e705c121SKalle Valo if (test) { 1490e705c121SKalle Valo iwl_enable_interrupts(trans); 1491e705c121SKalle Valo *status = IWL_D3_STATUS_ALIVE; 1492e705c121SKalle Valo return 0; 1493e705c121SKalle Valo } 1494e705c121SKalle Valo 14951316d595SSara Sharon iwl_pcie_enable_rx_wake(trans, true); 14961316d595SSara Sharon 1497e705c121SKalle Valo /* 1498d7270d61SHaim Dreyfuss * Reconfigure IVAR table in case of MSIX or reset ict table in 1499d7270d61SHaim Dreyfuss * MSI mode since HW reset erased it. 1500d7270d61SHaim Dreyfuss * Also enables interrupts - none will happen as 1501d7270d61SHaim Dreyfuss * the device doesn't know we're waking it up, only when 1502d7270d61SHaim Dreyfuss * the opmode actually tells it after this call. 1503e705c121SKalle Valo */ 1504d7270d61SHaim Dreyfuss iwl_pcie_conf_msix_hw(trans_pcie); 1505d7270d61SHaim Dreyfuss if (!trans_pcie->msix_enabled) 1506e705c121SKalle Valo iwl_pcie_reset_ict(trans); 150718dcb9a9SSara Sharon iwl_enable_interrupts(trans); 1508e705c121SKalle Valo 1509e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1510e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1511e705c121SKalle Valo 1512e705c121SKalle Valo if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 1513e705c121SKalle Valo udelay(2); 1514e705c121SKalle Valo 1515e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 1516e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 1517e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 1518e705c121SKalle Valo 25000); 1519e705c121SKalle Valo if (ret < 0) { 1520e705c121SKalle Valo IWL_ERR(trans, "Failed to resume the device (mac ready)\n"); 1521e705c121SKalle Valo return ret; 1522e705c121SKalle Valo } 1523e705c121SKalle Valo 1524e705c121SKalle Valo iwl_pcie_set_pwr(trans, false); 1525e705c121SKalle Valo 152623ae6128SMatti Gottlieb if (!reset) { 1527e705c121SKalle Valo iwl_clear_bit(trans, CSR_GP_CNTRL, 1528e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1529e705c121SKalle Valo } else { 1530e705c121SKalle Valo iwl_trans_pcie_tx_reset(trans); 1531e705c121SKalle Valo 1532e705c121SKalle Valo ret = iwl_pcie_rx_init(trans); 1533e705c121SKalle Valo if (ret) { 1534e705c121SKalle Valo IWL_ERR(trans, 1535e705c121SKalle Valo "Failed to resume the device (RX reset)\n"); 1536e705c121SKalle Valo return ret; 1537e705c121SKalle Valo } 1538e705c121SKalle Valo } 1539e705c121SKalle Valo 154082ea7966SSara Sharon IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", 154182ea7966SSara Sharon iwl_read_prph(trans, WFPM_GP2)); 154282ea7966SSara Sharon 1543e705c121SKalle Valo val = iwl_read32(trans, CSR_RESET); 1544e705c121SKalle Valo if (val & CSR_RESET_REG_FLAG_NEVO_RESET) 1545e705c121SKalle Valo *status = IWL_D3_STATUS_RESET; 1546e705c121SKalle Valo else 1547e705c121SKalle Valo *status = IWL_D3_STATUS_ALIVE; 1548e705c121SKalle Valo 1549e705c121SKalle Valo return 0; 1550e705c121SKalle Valo } 1551e705c121SKalle Valo 15522e5d4a8fSHaim Dreyfuss static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, 15532e5d4a8fSHaim Dreyfuss struct iwl_trans *trans) 15542e5d4a8fSHaim Dreyfuss { 15552e5d4a8fSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 15569fb064dfSHaim Dreyfuss int max_irqs, num_irqs, i, ret, nr_online_cpus; 15572e5d4a8fSHaim Dreyfuss u16 pci_cmd; 15582e5d4a8fSHaim Dreyfuss 155906f4b081SSara Sharon if (!trans->cfg->mq_rx_supported) 156006f4b081SSara Sharon goto enable_msi; 156106f4b081SSara Sharon 15629fb064dfSHaim Dreyfuss nr_online_cpus = num_online_cpus(); 15639fb064dfSHaim Dreyfuss max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES); 156406f4b081SSara Sharon for (i = 0; i < max_irqs; i++) 15652e5d4a8fSHaim Dreyfuss trans_pcie->msix_entries[i].entry = i; 15662e5d4a8fSHaim Dreyfuss 156706f4b081SSara Sharon num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries, 15682e5d4a8fSHaim Dreyfuss MSIX_MIN_INTERRUPT_VECTORS, 156906f4b081SSara Sharon max_irqs); 157006f4b081SSara Sharon if (num_irqs < 0) { 1571496d83caSHaim Dreyfuss IWL_DEBUG_INFO(trans, 157206f4b081SSara Sharon "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n", 157306f4b081SSara Sharon num_irqs); 157406f4b081SSara Sharon goto enable_msi; 1575496d83caSHaim Dreyfuss } 157606f4b081SSara Sharon trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0; 1577496d83caSHaim Dreyfuss 15782e5d4a8fSHaim Dreyfuss IWL_DEBUG_INFO(trans, 157906f4b081SSara Sharon "MSI-X enabled. %d interrupt vectors were allocated\n", 158006f4b081SSara Sharon num_irqs); 158106f4b081SSara Sharon 1582496d83caSHaim Dreyfuss /* 158306f4b081SSara Sharon * In case the OS provides fewer interrupts than requested, different 158406f4b081SSara Sharon * causes will share the same interrupt vector as follows: 1585496d83caSHaim Dreyfuss * One interrupt less: non rx causes shared with FBQ. 1586496d83caSHaim Dreyfuss * Two interrupts less: non rx causes shared with FBQ and RSS. 1587496d83caSHaim Dreyfuss * More than two interrupts: we will use fewer RSS queues. 1588496d83caSHaim Dreyfuss */ 15899fb064dfSHaim Dreyfuss if (num_irqs <= nr_online_cpus) { 159006f4b081SSara Sharon trans_pcie->trans->num_rx_queues = num_irqs + 1; 1591496d83caSHaim Dreyfuss trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | 1592496d83caSHaim Dreyfuss IWL_SHARED_IRQ_FIRST_RSS; 15939fb064dfSHaim Dreyfuss } else if (num_irqs == nr_online_cpus + 1) { 159406f4b081SSara Sharon trans_pcie->trans->num_rx_queues = num_irqs; 1595496d83caSHaim Dreyfuss trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; 1596496d83caSHaim Dreyfuss } else { 159706f4b081SSara Sharon trans_pcie->trans->num_rx_queues = num_irqs - 1; 1598496d83caSHaim Dreyfuss } 15992e5d4a8fSHaim Dreyfuss 160006f4b081SSara Sharon trans_pcie->alloc_vecs = num_irqs; 1601496d83caSHaim Dreyfuss trans_pcie->msix_enabled = true; 16022e5d4a8fSHaim Dreyfuss return; 16032e5d4a8fSHaim Dreyfuss 160406f4b081SSara Sharon enable_msi: 160506f4b081SSara Sharon ret = pci_enable_msi(pdev); 160606f4b081SSara Sharon if (ret) { 160706f4b081SSara Sharon dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); 16082e5d4a8fSHaim Dreyfuss /* enable rfkill interrupt: hw bug w/a */ 16092e5d4a8fSHaim Dreyfuss pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 16102e5d4a8fSHaim Dreyfuss if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { 16112e5d4a8fSHaim Dreyfuss pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; 16122e5d4a8fSHaim Dreyfuss pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); 16132e5d4a8fSHaim Dreyfuss } 16142e5d4a8fSHaim Dreyfuss } 16152e5d4a8fSHaim Dreyfuss } 16162e5d4a8fSHaim Dreyfuss 16177c8d91ebSHaim Dreyfuss static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) 16187c8d91ebSHaim Dreyfuss { 16197c8d91ebSHaim Dreyfuss int iter_rx_q, i, ret, cpu, offset; 16207c8d91ebSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 16217c8d91ebSHaim Dreyfuss 16227c8d91ebSHaim Dreyfuss i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1; 16237c8d91ebSHaim Dreyfuss iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i; 16247c8d91ebSHaim Dreyfuss offset = 1 + i; 16257c8d91ebSHaim Dreyfuss for (; i < iter_rx_q ; i++) { 16267c8d91ebSHaim Dreyfuss /* 16277c8d91ebSHaim Dreyfuss * Get the cpu prior to the place to search 16287c8d91ebSHaim Dreyfuss * (i.e. return will be > i - 1). 16297c8d91ebSHaim Dreyfuss */ 16307c8d91ebSHaim Dreyfuss cpu = cpumask_next(i - offset, cpu_online_mask); 16317c8d91ebSHaim Dreyfuss cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]); 16327c8d91ebSHaim Dreyfuss ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector, 16337c8d91ebSHaim Dreyfuss &trans_pcie->affinity_mask[i]); 16347c8d91ebSHaim Dreyfuss if (ret) 16357c8d91ebSHaim Dreyfuss IWL_ERR(trans_pcie->trans, 16367c8d91ebSHaim Dreyfuss "Failed to set affinity mask for IRQ %d\n", 16377c8d91ebSHaim Dreyfuss i); 16387c8d91ebSHaim Dreyfuss } 16397c8d91ebSHaim Dreyfuss } 16407c8d91ebSHaim Dreyfuss 164164fa3affSSharon Dvir static const char *queue_name(struct device *dev, 164264fa3affSSharon Dvir struct iwl_trans_pcie *trans_p, int i) 164364fa3affSSharon Dvir { 164464fa3affSSharon Dvir if (trans_p->shared_vec_mask) { 164564fa3affSSharon Dvir int vec = trans_p->shared_vec_mask & 164664fa3affSSharon Dvir IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; 164764fa3affSSharon Dvir 164864fa3affSSharon Dvir if (i == 0) 164964fa3affSSharon Dvir return DRV_NAME ": shared IRQ"; 165064fa3affSSharon Dvir 165164fa3affSSharon Dvir return devm_kasprintf(dev, GFP_KERNEL, 165264fa3affSSharon Dvir DRV_NAME ": queue %d", i + vec); 165364fa3affSSharon Dvir } 165464fa3affSSharon Dvir if (i == 0) 165564fa3affSSharon Dvir return DRV_NAME ": default queue"; 165664fa3affSSharon Dvir 165764fa3affSSharon Dvir if (i == trans_p->alloc_vecs - 1) 165864fa3affSSharon Dvir return DRV_NAME ": exception"; 165964fa3affSSharon Dvir 166064fa3affSSharon Dvir return devm_kasprintf(dev, GFP_KERNEL, 166164fa3affSSharon Dvir DRV_NAME ": queue %d", i); 166264fa3affSSharon Dvir } 166364fa3affSSharon Dvir 16642e5d4a8fSHaim Dreyfuss static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, 16652e5d4a8fSHaim Dreyfuss struct iwl_trans_pcie *trans_pcie) 16662e5d4a8fSHaim Dreyfuss { 1667496d83caSHaim Dreyfuss int i; 16682e5d4a8fSHaim Dreyfuss 1669496d83caSHaim Dreyfuss for (i = 0; i < trans_pcie->alloc_vecs; i++) { 16702e5d4a8fSHaim Dreyfuss int ret; 16715a41a86cSSharon Dvir struct msix_entry *msix_entry; 167264fa3affSSharon Dvir const char *qname = queue_name(&pdev->dev, trans_pcie, i); 167364fa3affSSharon Dvir 167464fa3affSSharon Dvir if (!qname) 167564fa3affSSharon Dvir return -ENOMEM; 16762e5d4a8fSHaim Dreyfuss 16775a41a86cSSharon Dvir msix_entry = &trans_pcie->msix_entries[i]; 16785a41a86cSSharon Dvir ret = devm_request_threaded_irq(&pdev->dev, 16795a41a86cSSharon Dvir msix_entry->vector, 16802e5d4a8fSHaim Dreyfuss iwl_pcie_msix_isr, 1681496d83caSHaim Dreyfuss (i == trans_pcie->def_irq) ? 16822e5d4a8fSHaim Dreyfuss iwl_pcie_irq_msix_handler : 16832e5d4a8fSHaim Dreyfuss iwl_pcie_irq_rx_msix_handler, 16842e5d4a8fSHaim Dreyfuss IRQF_SHARED, 168564fa3affSSharon Dvir qname, 16865a41a86cSSharon Dvir msix_entry); 16872e5d4a8fSHaim Dreyfuss if (ret) { 16882e5d4a8fSHaim Dreyfuss IWL_ERR(trans_pcie->trans, 16892e5d4a8fSHaim Dreyfuss "Error allocating IRQ %d\n", i); 16905a41a86cSSharon Dvir 16912e5d4a8fSHaim Dreyfuss return ret; 16922e5d4a8fSHaim Dreyfuss } 16932e5d4a8fSHaim Dreyfuss } 16947c8d91ebSHaim Dreyfuss iwl_pcie_irq_set_affinity(trans_pcie->trans); 16952e5d4a8fSHaim Dreyfuss 16962e5d4a8fSHaim Dreyfuss return 0; 16972e5d4a8fSHaim Dreyfuss } 16982e5d4a8fSHaim Dreyfuss 1699e705c121SKalle Valo static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) 1700e705c121SKalle Valo { 1701e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1702e705c121SKalle Valo int err; 1703e705c121SKalle Valo 1704e705c121SKalle Valo lockdep_assert_held(&trans_pcie->mutex); 1705e705c121SKalle Valo 1706e705c121SKalle Valo err = iwl_pcie_prepare_card_hw(trans); 1707e705c121SKalle Valo if (err) { 1708e705c121SKalle Valo IWL_ERR(trans, "Error while preparing HW: %d\n", err); 1709e705c121SKalle Valo return err; 1710e705c121SKalle Valo } 1711e705c121SKalle Valo 1712e705c121SKalle Valo /* Reset the entire device */ 1713e705c121SKalle Valo iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 1714b7a08b28SJohannes Berg usleep_range(1000, 2000); 1715e705c121SKalle Valo 1716e705c121SKalle Valo iwl_pcie_apm_init(trans); 1717e705c121SKalle Valo 17182e5d4a8fSHaim Dreyfuss iwl_pcie_init_msix(trans_pcie); 171983730058SHaim Dreyfuss 1720e705c121SKalle Valo /* From now on, the op_mode will be kept updated about RF kill state */ 1721e705c121SKalle Valo iwl_enable_rfkill_int(trans); 1722e705c121SKalle Valo 1723e705c121SKalle Valo /* Set is_down to false here so that...*/ 1724e705c121SKalle Valo trans_pcie->is_down = false; 1725e705c121SKalle Valo 1726e705c121SKalle Valo /* ...rfkill can call stop_device and set it false if needed */ 1727727c02dfSSara Sharon iwl_trans_check_hw_rf_kill(trans); 1728e705c121SKalle Valo 17294cbb8e50SLuciano Coelho /* Make sure we sync here, because we'll need full access later */ 17304cbb8e50SLuciano Coelho if (low_power) 17314cbb8e50SLuciano Coelho pm_runtime_resume(trans->dev); 17324cbb8e50SLuciano Coelho 1733e705c121SKalle Valo return 0; 1734e705c121SKalle Valo } 1735e705c121SKalle Valo 1736e705c121SKalle Valo static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) 1737e705c121SKalle Valo { 1738e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1739e705c121SKalle Valo int ret; 1740e705c121SKalle Valo 1741e705c121SKalle Valo mutex_lock(&trans_pcie->mutex); 1742e705c121SKalle Valo ret = _iwl_trans_pcie_start_hw(trans, low_power); 1743e705c121SKalle Valo mutex_unlock(&trans_pcie->mutex); 1744e705c121SKalle Valo 1745e705c121SKalle Valo return ret; 1746e705c121SKalle Valo } 1747e705c121SKalle Valo 1748e705c121SKalle Valo static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) 1749e705c121SKalle Valo { 1750e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1751e705c121SKalle Valo 1752e705c121SKalle Valo mutex_lock(&trans_pcie->mutex); 1753e705c121SKalle Valo 1754e705c121SKalle Valo /* disable interrupts - don't enable HW RF kill interrupt */ 1755e705c121SKalle Valo iwl_disable_interrupts(trans); 1756e705c121SKalle Valo 1757e705c121SKalle Valo iwl_pcie_apm_stop(trans, true); 1758e705c121SKalle Valo 1759e705c121SKalle Valo iwl_disable_interrupts(trans); 1760e705c121SKalle Valo 1761e705c121SKalle Valo iwl_pcie_disable_ict(trans); 1762e705c121SKalle Valo 1763e705c121SKalle Valo mutex_unlock(&trans_pcie->mutex); 1764e705c121SKalle Valo 17652e5d4a8fSHaim Dreyfuss iwl_pcie_synchronize_irqs(trans); 1766e705c121SKalle Valo } 1767e705c121SKalle Valo 1768e705c121SKalle Valo static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1769e705c121SKalle Valo { 1770e705c121SKalle Valo writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1771e705c121SKalle Valo } 1772e705c121SKalle Valo 1773e705c121SKalle Valo static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) 1774e705c121SKalle Valo { 1775e705c121SKalle Valo writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1776e705c121SKalle Valo } 1777e705c121SKalle Valo 1778e705c121SKalle Valo static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) 1779e705c121SKalle Valo { 1780e705c121SKalle Valo return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1781e705c121SKalle Valo } 1782e705c121SKalle Valo 1783e705c121SKalle Valo static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) 1784e705c121SKalle Valo { 1785e705c121SKalle Valo iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, 1786e705c121SKalle Valo ((reg & 0x000FFFFF) | (3 << 24))); 1787e705c121SKalle Valo return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); 1788e705c121SKalle Valo } 1789e705c121SKalle Valo 1790e705c121SKalle Valo static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, 1791e705c121SKalle Valo u32 val) 1792e705c121SKalle Valo { 1793e705c121SKalle Valo iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, 1794e705c121SKalle Valo ((addr & 0x000FFFFF) | (3 << 24))); 1795e705c121SKalle Valo iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); 1796e705c121SKalle Valo } 1797e705c121SKalle Valo 1798e705c121SKalle Valo static void iwl_trans_pcie_configure(struct iwl_trans *trans, 1799e705c121SKalle Valo const struct iwl_trans_config *trans_cfg) 1800e705c121SKalle Valo { 1801e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1802e705c121SKalle Valo 1803e705c121SKalle Valo trans_pcie->cmd_queue = trans_cfg->cmd_queue; 1804e705c121SKalle Valo trans_pcie->cmd_fifo = trans_cfg->cmd_fifo; 1805e705c121SKalle Valo trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout; 1806e705c121SKalle Valo if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) 1807e705c121SKalle Valo trans_pcie->n_no_reclaim_cmds = 0; 1808e705c121SKalle Valo else 1809e705c121SKalle Valo trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; 1810e705c121SKalle Valo if (trans_pcie->n_no_reclaim_cmds) 1811e705c121SKalle Valo memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, 1812e705c121SKalle Valo trans_pcie->n_no_reclaim_cmds * sizeof(u8)); 1813e705c121SKalle Valo 18146c4fbcbcSEmmanuel Grumbach trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; 18156c4fbcbcSEmmanuel Grumbach trans_pcie->rx_page_order = 18166c4fbcbcSEmmanuel Grumbach iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); 1817e705c121SKalle Valo 1818e705c121SKalle Valo trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; 1819e705c121SKalle Valo trans_pcie->scd_set_active = trans_cfg->scd_set_active; 182041837ca9SEmmanuel Grumbach trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx; 1821e705c121SKalle Valo 182221cb3222SJohannes Berg trans_pcie->page_offs = trans_cfg->cb_data_offs; 182321cb3222SJohannes Berg trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); 182421cb3222SJohannes Berg 182539bdb17eSSharon Dvir trans->command_groups = trans_cfg->command_groups; 182639bdb17eSSharon Dvir trans->command_groups_size = trans_cfg->command_groups_size; 182739bdb17eSSharon Dvir 1828e705c121SKalle Valo /* Initialize NAPI here - it should be before registering to mac80211 1829e705c121SKalle Valo * in the opmode but after the HW struct is allocated. 1830e705c121SKalle Valo * As this function may be called again in some corner cases don't 1831e705c121SKalle Valo * do anything if NAPI was already initialized. 1832e705c121SKalle Valo */ 1833bce97731SSara Sharon if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) 1834e705c121SKalle Valo init_dummy_netdev(&trans_pcie->napi_dev); 1835e705c121SKalle Valo } 1836e705c121SKalle Valo 1837e705c121SKalle Valo void iwl_trans_pcie_free(struct iwl_trans *trans) 1838e705c121SKalle Valo { 1839e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 18406eb5e529SEmmanuel Grumbach int i; 1841e705c121SKalle Valo 18422e5d4a8fSHaim Dreyfuss iwl_pcie_synchronize_irqs(trans); 1843e705c121SKalle Valo 1844e705c121SKalle Valo iwl_pcie_tx_free(trans); 1845e705c121SKalle Valo iwl_pcie_rx_free(trans); 1846e705c121SKalle Valo 18472e5d4a8fSHaim Dreyfuss if (trans_pcie->msix_enabled) { 18487c8d91ebSHaim Dreyfuss for (i = 0; i < trans_pcie->alloc_vecs; i++) { 18497c8d91ebSHaim Dreyfuss irq_set_affinity_hint( 18507c8d91ebSHaim Dreyfuss trans_pcie->msix_entries[i].vector, 18517c8d91ebSHaim Dreyfuss NULL); 18527c8d91ebSHaim Dreyfuss } 18532e5d4a8fSHaim Dreyfuss 18542e5d4a8fSHaim Dreyfuss trans_pcie->msix_enabled = false; 18552e5d4a8fSHaim Dreyfuss } else { 1856e705c121SKalle Valo iwl_pcie_free_ict(trans); 18572e5d4a8fSHaim Dreyfuss } 1858e705c121SKalle Valo 1859e705c121SKalle Valo iwl_pcie_free_fw_monitor(trans); 1860e705c121SKalle Valo 18616eb5e529SEmmanuel Grumbach for_each_possible_cpu(i) { 18626eb5e529SEmmanuel Grumbach struct iwl_tso_hdr_page *p = 18636eb5e529SEmmanuel Grumbach per_cpu_ptr(trans_pcie->tso_hdr_page, i); 18646eb5e529SEmmanuel Grumbach 18656eb5e529SEmmanuel Grumbach if (p->page) 18666eb5e529SEmmanuel Grumbach __free_page(p->page); 18676eb5e529SEmmanuel Grumbach } 18686eb5e529SEmmanuel Grumbach 18696eb5e529SEmmanuel Grumbach free_percpu(trans_pcie->tso_hdr_page); 1870a2a57a35SEmmanuel Grumbach mutex_destroy(&trans_pcie->mutex); 1871e705c121SKalle Valo iwl_trans_free(trans); 1872e705c121SKalle Valo } 1873e705c121SKalle Valo 1874e705c121SKalle Valo static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) 1875e705c121SKalle Valo { 1876e705c121SKalle Valo if (state) 1877e705c121SKalle Valo set_bit(STATUS_TPOWER_PMI, &trans->status); 1878e705c121SKalle Valo else 1879e705c121SKalle Valo clear_bit(STATUS_TPOWER_PMI, &trans->status); 1880e705c121SKalle Valo } 1881e705c121SKalle Valo 188223ba9340SEmmanuel Grumbach static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, 1883e705c121SKalle Valo unsigned long *flags) 1884e705c121SKalle Valo { 1885e705c121SKalle Valo int ret; 1886e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1887e705c121SKalle Valo 1888e705c121SKalle Valo spin_lock_irqsave(&trans_pcie->reg_lock, *flags); 1889e705c121SKalle Valo 1890e705c121SKalle Valo if (trans_pcie->cmd_hold_nic_awake) 1891e705c121SKalle Valo goto out; 1892e705c121SKalle Valo 1893e705c121SKalle Valo /* this bit wakes up the NIC */ 1894e705c121SKalle Valo __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1895e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1896e705c121SKalle Valo if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 1897e705c121SKalle Valo udelay(2); 1898e705c121SKalle Valo 1899e705c121SKalle Valo /* 1900e705c121SKalle Valo * These bits say the device is running, and should keep running for 1901e705c121SKalle Valo * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), 1902e705c121SKalle Valo * but they do not indicate that embedded SRAM is restored yet; 1903e705c121SKalle Valo * 3945 and 4965 have volatile SRAM, and must save/restore contents 1904e705c121SKalle Valo * to/from host DRAM when sleeping/waking for power-saving. 1905e705c121SKalle Valo * Each direction takes approximately 1/4 millisecond; with this 1906e705c121SKalle Valo * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a 1907e705c121SKalle Valo * series of register accesses are expected (e.g. reading Event Log), 1908e705c121SKalle Valo * to keep device from sleeping. 1909e705c121SKalle Valo * 1910e705c121SKalle Valo * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that 1911e705c121SKalle Valo * SRAM is okay/restored. We don't check that here because this call 1912e705c121SKalle Valo * is just for hardware register access; but GP1 MAC_SLEEP check is a 1913e705c121SKalle Valo * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). 1914e705c121SKalle Valo * 1915e705c121SKalle Valo * 5000 series and later (including 1000 series) have non-volatile SRAM, 1916e705c121SKalle Valo * and do not save/restore SRAM when power cycling. 1917e705c121SKalle Valo */ 1918e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 1919e705c121SKalle Valo CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 1920e705c121SKalle Valo (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 1921e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); 1922e705c121SKalle Valo if (unlikely(ret < 0)) { 1923e705c121SKalle Valo iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); 1924e705c121SKalle Valo WARN_ONCE(1, 1925e705c121SKalle Valo "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", 192623ba9340SEmmanuel Grumbach iwl_read32(trans, CSR_GP_CNTRL)); 1927e705c121SKalle Valo spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); 1928e705c121SKalle Valo return false; 1929e705c121SKalle Valo } 1930e705c121SKalle Valo 1931e705c121SKalle Valo out: 1932e705c121SKalle Valo /* 1933e705c121SKalle Valo * Fool sparse by faking we release the lock - sparse will 1934e705c121SKalle Valo * track nic_access anyway. 1935e705c121SKalle Valo */ 1936e705c121SKalle Valo __release(&trans_pcie->reg_lock); 1937e705c121SKalle Valo return true; 1938e705c121SKalle Valo } 1939e705c121SKalle Valo 1940e705c121SKalle Valo static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, 1941e705c121SKalle Valo unsigned long *flags) 1942e705c121SKalle Valo { 1943e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1944e705c121SKalle Valo 1945e705c121SKalle Valo lockdep_assert_held(&trans_pcie->reg_lock); 1946e705c121SKalle Valo 1947e705c121SKalle Valo /* 1948e705c121SKalle Valo * Fool sparse by faking we acquiring the lock - sparse will 1949e705c121SKalle Valo * track nic_access anyway. 1950e705c121SKalle Valo */ 1951e705c121SKalle Valo __acquire(&trans_pcie->reg_lock); 1952e705c121SKalle Valo 1953e705c121SKalle Valo if (trans_pcie->cmd_hold_nic_awake) 1954e705c121SKalle Valo goto out; 1955e705c121SKalle Valo 1956e705c121SKalle Valo __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 1957e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1958e705c121SKalle Valo /* 1959e705c121SKalle Valo * Above we read the CSR_GP_CNTRL register, which will flush 1960e705c121SKalle Valo * any previous writes, but we need the write that clears the 1961e705c121SKalle Valo * MAC_ACCESS_REQ bit to be performed before any other writes 1962e705c121SKalle Valo * scheduled on different CPUs (after we drop reg_lock). 1963e705c121SKalle Valo */ 1964e705c121SKalle Valo mmiowb(); 1965e705c121SKalle Valo out: 1966e705c121SKalle Valo spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); 1967e705c121SKalle Valo } 1968e705c121SKalle Valo 1969e705c121SKalle Valo static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, 1970e705c121SKalle Valo void *buf, int dwords) 1971e705c121SKalle Valo { 1972e705c121SKalle Valo unsigned long flags; 1973e705c121SKalle Valo int offs, ret = 0; 1974e705c121SKalle Valo u32 *vals = buf; 1975e705c121SKalle Valo 197623ba9340SEmmanuel Grumbach if (iwl_trans_grab_nic_access(trans, &flags)) { 1977e705c121SKalle Valo iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); 1978e705c121SKalle Valo for (offs = 0; offs < dwords; offs++) 1979e705c121SKalle Valo vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); 1980e705c121SKalle Valo iwl_trans_release_nic_access(trans, &flags); 1981e705c121SKalle Valo } else { 1982e705c121SKalle Valo ret = -EBUSY; 1983e705c121SKalle Valo } 1984e705c121SKalle Valo return ret; 1985e705c121SKalle Valo } 1986e705c121SKalle Valo 1987e705c121SKalle Valo static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, 1988e705c121SKalle Valo const void *buf, int dwords) 1989e705c121SKalle Valo { 1990e705c121SKalle Valo unsigned long flags; 1991e705c121SKalle Valo int offs, ret = 0; 1992e705c121SKalle Valo const u32 *vals = buf; 1993e705c121SKalle Valo 199423ba9340SEmmanuel Grumbach if (iwl_trans_grab_nic_access(trans, &flags)) { 1995e705c121SKalle Valo iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); 1996e705c121SKalle Valo for (offs = 0; offs < dwords; offs++) 1997e705c121SKalle Valo iwl_write32(trans, HBUS_TARG_MEM_WDAT, 1998e705c121SKalle Valo vals ? vals[offs] : 0); 1999e705c121SKalle Valo iwl_trans_release_nic_access(trans, &flags); 2000e705c121SKalle Valo } else { 2001e705c121SKalle Valo ret = -EBUSY; 2002e705c121SKalle Valo } 2003e705c121SKalle Valo return ret; 2004e705c121SKalle Valo } 2005e705c121SKalle Valo 2006e705c121SKalle Valo static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, 2007e705c121SKalle Valo unsigned long txqs, 2008e705c121SKalle Valo bool freeze) 2009e705c121SKalle Valo { 2010e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2011e705c121SKalle Valo int queue; 2012e705c121SKalle Valo 2013e705c121SKalle Valo for_each_set_bit(queue, &txqs, BITS_PER_LONG) { 2014e705c121SKalle Valo struct iwl_txq *txq = &trans_pcie->txq[queue]; 2015e705c121SKalle Valo unsigned long now; 2016e705c121SKalle Valo 2017e705c121SKalle Valo spin_lock_bh(&txq->lock); 2018e705c121SKalle Valo 2019e705c121SKalle Valo now = jiffies; 2020e705c121SKalle Valo 2021e705c121SKalle Valo if (txq->frozen == freeze) 2022e705c121SKalle Valo goto next_queue; 2023e705c121SKalle Valo 2024e705c121SKalle Valo IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", 2025e705c121SKalle Valo freeze ? "Freezing" : "Waking", queue); 2026e705c121SKalle Valo 2027e705c121SKalle Valo txq->frozen = freeze; 2028e705c121SKalle Valo 2029bb98ecd4SSara Sharon if (txq->read_ptr == txq->write_ptr) 2030e705c121SKalle Valo goto next_queue; 2031e705c121SKalle Valo 2032e705c121SKalle Valo if (freeze) { 2033e705c121SKalle Valo if (unlikely(time_after(now, 2034e705c121SKalle Valo txq->stuck_timer.expires))) { 2035e705c121SKalle Valo /* 2036e705c121SKalle Valo * The timer should have fired, maybe it is 2037e705c121SKalle Valo * spinning right now on the lock. 2038e705c121SKalle Valo */ 2039e705c121SKalle Valo goto next_queue; 2040e705c121SKalle Valo } 2041e705c121SKalle Valo /* remember how long until the timer fires */ 2042e705c121SKalle Valo txq->frozen_expiry_remainder = 2043e705c121SKalle Valo txq->stuck_timer.expires - now; 2044e705c121SKalle Valo del_timer(&txq->stuck_timer); 2045e705c121SKalle Valo goto next_queue; 2046e705c121SKalle Valo } 2047e705c121SKalle Valo 2048e705c121SKalle Valo /* 2049e705c121SKalle Valo * Wake a non-empty queue -> arm timer with the 2050e705c121SKalle Valo * remainder before it froze 2051e705c121SKalle Valo */ 2052e705c121SKalle Valo mod_timer(&txq->stuck_timer, 2053e705c121SKalle Valo now + txq->frozen_expiry_remainder); 2054e705c121SKalle Valo 2055e705c121SKalle Valo next_queue: 2056e705c121SKalle Valo spin_unlock_bh(&txq->lock); 2057e705c121SKalle Valo } 2058e705c121SKalle Valo } 2059e705c121SKalle Valo 20600cd58eaaSEmmanuel Grumbach static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) 20610cd58eaaSEmmanuel Grumbach { 20620cd58eaaSEmmanuel Grumbach struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 20630cd58eaaSEmmanuel Grumbach int i; 20640cd58eaaSEmmanuel Grumbach 20650cd58eaaSEmmanuel Grumbach for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { 20660cd58eaaSEmmanuel Grumbach struct iwl_txq *txq = &trans_pcie->txq[i]; 20670cd58eaaSEmmanuel Grumbach 20680cd58eaaSEmmanuel Grumbach if (i == trans_pcie->cmd_queue) 20690cd58eaaSEmmanuel Grumbach continue; 20700cd58eaaSEmmanuel Grumbach 20710cd58eaaSEmmanuel Grumbach spin_lock_bh(&txq->lock); 20720cd58eaaSEmmanuel Grumbach 20730cd58eaaSEmmanuel Grumbach if (!block && !(WARN_ON_ONCE(!txq->block))) { 20740cd58eaaSEmmanuel Grumbach txq->block--; 20750cd58eaaSEmmanuel Grumbach if (!txq->block) { 20760cd58eaaSEmmanuel Grumbach iwl_write32(trans, HBUS_TARG_WRPTR, 2077bb98ecd4SSara Sharon txq->write_ptr | (i << 8)); 20780cd58eaaSEmmanuel Grumbach } 20790cd58eaaSEmmanuel Grumbach } else if (block) { 20800cd58eaaSEmmanuel Grumbach txq->block++; 20810cd58eaaSEmmanuel Grumbach } 20820cd58eaaSEmmanuel Grumbach 20830cd58eaaSEmmanuel Grumbach spin_unlock_bh(&txq->lock); 20840cd58eaaSEmmanuel Grumbach } 20850cd58eaaSEmmanuel Grumbach } 20860cd58eaaSEmmanuel Grumbach 2087e705c121SKalle Valo #define IWL_FLUSH_WAIT_MS 2000 2088e705c121SKalle Valo 208938398efbSSara Sharon void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) 209038398efbSSara Sharon { 2091afb84431SEmmanuel Grumbach u32 txq_id = txq->id; 2092afb84431SEmmanuel Grumbach u32 status; 2093afb84431SEmmanuel Grumbach bool active; 2094afb84431SEmmanuel Grumbach u8 fifo; 209538398efbSSara Sharon 2096afb84431SEmmanuel Grumbach if (trans->cfg->use_tfh) { 2097afb84431SEmmanuel Grumbach IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, 2098bb98ecd4SSara Sharon txq->read_ptr, txq->write_ptr); 2099ae79785fSSara Sharon /* TODO: access new SCD registers and dump them */ 2100ae79785fSSara Sharon return; 2101afb84431SEmmanuel Grumbach } 2102ae79785fSSara Sharon 2103afb84431SEmmanuel Grumbach status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); 2104afb84431SEmmanuel Grumbach fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; 2105afb84431SEmmanuel Grumbach active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); 210638398efbSSara Sharon 210738398efbSSara Sharon IWL_ERR(trans, 2108afb84431SEmmanuel Grumbach "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", 2109afb84431SEmmanuel Grumbach txq_id, active ? "" : "in", fifo, 2110afb84431SEmmanuel Grumbach jiffies_to_msecs(txq->wd_timeout), 2111afb84431SEmmanuel Grumbach txq->read_ptr, txq->write_ptr, 2112afb84431SEmmanuel Grumbach iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & 211338398efbSSara Sharon (TFD_QUEUE_SIZE_MAX - 1), 2114afb84431SEmmanuel Grumbach iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & 2115afb84431SEmmanuel Grumbach (TFD_QUEUE_SIZE_MAX - 1), 2116afb84431SEmmanuel Grumbach iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); 211738398efbSSara Sharon } 211838398efbSSara Sharon 2119e705c121SKalle Valo static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) 2120e705c121SKalle Valo { 2121e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2122e705c121SKalle Valo struct iwl_txq *txq; 2123e705c121SKalle Valo int cnt; 2124e705c121SKalle Valo unsigned long now = jiffies; 2125e705c121SKalle Valo int ret = 0; 2126e705c121SKalle Valo 2127e705c121SKalle Valo /* waiting for all the tx frames complete might take a while */ 2128e705c121SKalle Valo for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { 2129e705c121SKalle Valo u8 wr_ptr; 2130e705c121SKalle Valo 2131e705c121SKalle Valo if (cnt == trans_pcie->cmd_queue) 2132e705c121SKalle Valo continue; 2133e705c121SKalle Valo if (!test_bit(cnt, trans_pcie->queue_used)) 2134e705c121SKalle Valo continue; 2135e705c121SKalle Valo if (!(BIT(cnt) & txq_bm)) 2136e705c121SKalle Valo continue; 2137e705c121SKalle Valo 2138e705c121SKalle Valo IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt); 2139e705c121SKalle Valo txq = &trans_pcie->txq[cnt]; 2140bb98ecd4SSara Sharon wr_ptr = ACCESS_ONCE(txq->write_ptr); 2141e705c121SKalle Valo 2142bb98ecd4SSara Sharon while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) && 2143e705c121SKalle Valo !time_after(jiffies, 2144e705c121SKalle Valo now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { 2145bb98ecd4SSara Sharon u8 write_ptr = ACCESS_ONCE(txq->write_ptr); 2146e705c121SKalle Valo 2147e705c121SKalle Valo if (WARN_ONCE(wr_ptr != write_ptr, 2148e705c121SKalle Valo "WR pointer moved while flushing %d -> %d\n", 2149e705c121SKalle Valo wr_ptr, write_ptr)) 2150e705c121SKalle Valo return -ETIMEDOUT; 2151192185d6SJohannes Berg usleep_range(1000, 2000); 2152e705c121SKalle Valo } 2153e705c121SKalle Valo 2154bb98ecd4SSara Sharon if (txq->read_ptr != txq->write_ptr) { 2155e705c121SKalle Valo IWL_ERR(trans, 2156e705c121SKalle Valo "fail to flush all tx fifo queues Q %d\n", cnt); 2157e705c121SKalle Valo ret = -ETIMEDOUT; 2158e705c121SKalle Valo break; 2159e705c121SKalle Valo } 2160e705c121SKalle Valo IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt); 2161e705c121SKalle Valo } 2162e705c121SKalle Valo 216338398efbSSara Sharon if (ret) 216438398efbSSara Sharon iwl_trans_pcie_log_scd_error(trans, txq); 2165e705c121SKalle Valo 2166e705c121SKalle Valo return ret; 2167e705c121SKalle Valo } 2168e705c121SKalle Valo 2169e705c121SKalle Valo static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, 2170e705c121SKalle Valo u32 mask, u32 value) 2171e705c121SKalle Valo { 2172e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2173e705c121SKalle Valo unsigned long flags; 2174e705c121SKalle Valo 2175e705c121SKalle Valo spin_lock_irqsave(&trans_pcie->reg_lock, flags); 2176e705c121SKalle Valo __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); 2177e705c121SKalle Valo spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 2178e705c121SKalle Valo } 2179e705c121SKalle Valo 2180c24c7f58SLuca Coelho static void iwl_trans_pcie_ref(struct iwl_trans *trans) 2181e705c121SKalle Valo { 2182e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2183e705c121SKalle Valo 2184e705c121SKalle Valo if (iwlwifi_mod_params.d0i3_disable) 2185e705c121SKalle Valo return; 2186e705c121SKalle Valo 2187b3ff1270SLuca Coelho pm_runtime_get(&trans_pcie->pci_dev->dev); 21885d93f3a2SLuca Coelho 21895d93f3a2SLuca Coelho #ifdef CONFIG_PM 21905d93f3a2SLuca Coelho IWL_DEBUG_RPM(trans, "runtime usage count: %d\n", 21915d93f3a2SLuca Coelho atomic_read(&trans_pcie->pci_dev->dev.power.usage_count)); 21925d93f3a2SLuca Coelho #endif /* CONFIG_PM */ 2193e705c121SKalle Valo } 2194e705c121SKalle Valo 2195c24c7f58SLuca Coelho static void iwl_trans_pcie_unref(struct iwl_trans *trans) 2196e705c121SKalle Valo { 2197e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2198e705c121SKalle Valo 2199e705c121SKalle Valo if (iwlwifi_mod_params.d0i3_disable) 2200e705c121SKalle Valo return; 2201e705c121SKalle Valo 2202b3ff1270SLuca Coelho pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev); 2203b3ff1270SLuca Coelho pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev); 2204b3ff1270SLuca Coelho 22055d93f3a2SLuca Coelho #ifdef CONFIG_PM 22065d93f3a2SLuca Coelho IWL_DEBUG_RPM(trans, "runtime usage count: %d\n", 22075d93f3a2SLuca Coelho atomic_read(&trans_pcie->pci_dev->dev.power.usage_count)); 22085d93f3a2SLuca Coelho #endif /* CONFIG_PM */ 2209e705c121SKalle Valo } 2210e705c121SKalle Valo 2211e705c121SKalle Valo static const char *get_csr_string(int cmd) 2212e705c121SKalle Valo { 2213e705c121SKalle Valo #define IWL_CMD(x) case x: return #x 2214e705c121SKalle Valo switch (cmd) { 2215e705c121SKalle Valo IWL_CMD(CSR_HW_IF_CONFIG_REG); 2216e705c121SKalle Valo IWL_CMD(CSR_INT_COALESCING); 2217e705c121SKalle Valo IWL_CMD(CSR_INT); 2218e705c121SKalle Valo IWL_CMD(CSR_INT_MASK); 2219e705c121SKalle Valo IWL_CMD(CSR_FH_INT_STATUS); 2220e705c121SKalle Valo IWL_CMD(CSR_GPIO_IN); 2221e705c121SKalle Valo IWL_CMD(CSR_RESET); 2222e705c121SKalle Valo IWL_CMD(CSR_GP_CNTRL); 2223e705c121SKalle Valo IWL_CMD(CSR_HW_REV); 2224e705c121SKalle Valo IWL_CMD(CSR_EEPROM_REG); 2225e705c121SKalle Valo IWL_CMD(CSR_EEPROM_GP); 2226e705c121SKalle Valo IWL_CMD(CSR_OTP_GP_REG); 2227e705c121SKalle Valo IWL_CMD(CSR_GIO_REG); 2228e705c121SKalle Valo IWL_CMD(CSR_GP_UCODE_REG); 2229e705c121SKalle Valo IWL_CMD(CSR_GP_DRIVER_REG); 2230e705c121SKalle Valo IWL_CMD(CSR_UCODE_DRV_GP1); 2231e705c121SKalle Valo IWL_CMD(CSR_UCODE_DRV_GP2); 2232e705c121SKalle Valo IWL_CMD(CSR_LED_REG); 2233e705c121SKalle Valo IWL_CMD(CSR_DRAM_INT_TBL_REG); 2234e705c121SKalle Valo IWL_CMD(CSR_GIO_CHICKEN_BITS); 2235e705c121SKalle Valo IWL_CMD(CSR_ANA_PLL_CFG); 2236e705c121SKalle Valo IWL_CMD(CSR_HW_REV_WA_REG); 2237e705c121SKalle Valo IWL_CMD(CSR_MONITOR_STATUS_REG); 2238e705c121SKalle Valo IWL_CMD(CSR_DBG_HPET_MEM_REG); 2239e705c121SKalle Valo default: 2240e705c121SKalle Valo return "UNKNOWN"; 2241e705c121SKalle Valo } 2242e705c121SKalle Valo #undef IWL_CMD 2243e705c121SKalle Valo } 2244e705c121SKalle Valo 2245e705c121SKalle Valo void iwl_pcie_dump_csr(struct iwl_trans *trans) 2246e705c121SKalle Valo { 2247e705c121SKalle Valo int i; 2248e705c121SKalle Valo static const u32 csr_tbl[] = { 2249e705c121SKalle Valo CSR_HW_IF_CONFIG_REG, 2250e705c121SKalle Valo CSR_INT_COALESCING, 2251e705c121SKalle Valo CSR_INT, 2252e705c121SKalle Valo CSR_INT_MASK, 2253e705c121SKalle Valo CSR_FH_INT_STATUS, 2254e705c121SKalle Valo CSR_GPIO_IN, 2255e705c121SKalle Valo CSR_RESET, 2256e705c121SKalle Valo CSR_GP_CNTRL, 2257e705c121SKalle Valo CSR_HW_REV, 2258e705c121SKalle Valo CSR_EEPROM_REG, 2259e705c121SKalle Valo CSR_EEPROM_GP, 2260e705c121SKalle Valo CSR_OTP_GP_REG, 2261e705c121SKalle Valo CSR_GIO_REG, 2262e705c121SKalle Valo CSR_GP_UCODE_REG, 2263e705c121SKalle Valo CSR_GP_DRIVER_REG, 2264e705c121SKalle Valo CSR_UCODE_DRV_GP1, 2265e705c121SKalle Valo CSR_UCODE_DRV_GP2, 2266e705c121SKalle Valo CSR_LED_REG, 2267e705c121SKalle Valo CSR_DRAM_INT_TBL_REG, 2268e705c121SKalle Valo CSR_GIO_CHICKEN_BITS, 2269e705c121SKalle Valo CSR_ANA_PLL_CFG, 2270e705c121SKalle Valo CSR_MONITOR_STATUS_REG, 2271e705c121SKalle Valo CSR_HW_REV_WA_REG, 2272e705c121SKalle Valo CSR_DBG_HPET_MEM_REG 2273e705c121SKalle Valo }; 2274e705c121SKalle Valo IWL_ERR(trans, "CSR values:\n"); 2275e705c121SKalle Valo IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " 2276e705c121SKalle Valo "CSR_INT_PERIODIC_REG)\n"); 2277e705c121SKalle Valo for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { 2278e705c121SKalle Valo IWL_ERR(trans, " %25s: 0X%08x\n", 2279e705c121SKalle Valo get_csr_string(csr_tbl[i]), 2280e705c121SKalle Valo iwl_read32(trans, csr_tbl[i])); 2281e705c121SKalle Valo } 2282e705c121SKalle Valo } 2283e705c121SKalle Valo 2284e705c121SKalle Valo #ifdef CONFIG_IWLWIFI_DEBUGFS 2285e705c121SKalle Valo /* create and remove of files */ 2286e705c121SKalle Valo #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 2287e705c121SKalle Valo if (!debugfs_create_file(#name, mode, parent, trans, \ 2288e705c121SKalle Valo &iwl_dbgfs_##name##_ops)) \ 2289e705c121SKalle Valo goto err; \ 2290e705c121SKalle Valo } while (0) 2291e705c121SKalle Valo 2292e705c121SKalle Valo /* file operation */ 2293e705c121SKalle Valo #define DEBUGFS_READ_FILE_OPS(name) \ 2294e705c121SKalle Valo static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2295e705c121SKalle Valo .read = iwl_dbgfs_##name##_read, \ 2296e705c121SKalle Valo .open = simple_open, \ 2297e705c121SKalle Valo .llseek = generic_file_llseek, \ 2298e705c121SKalle Valo }; 2299e705c121SKalle Valo 2300e705c121SKalle Valo #define DEBUGFS_WRITE_FILE_OPS(name) \ 2301e705c121SKalle Valo static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2302e705c121SKalle Valo .write = iwl_dbgfs_##name##_write, \ 2303e705c121SKalle Valo .open = simple_open, \ 2304e705c121SKalle Valo .llseek = generic_file_llseek, \ 2305e705c121SKalle Valo }; 2306e705c121SKalle Valo 2307e705c121SKalle Valo #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 2308e705c121SKalle Valo static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2309e705c121SKalle Valo .write = iwl_dbgfs_##name##_write, \ 2310e705c121SKalle Valo .read = iwl_dbgfs_##name##_read, \ 2311e705c121SKalle Valo .open = simple_open, \ 2312e705c121SKalle Valo .llseek = generic_file_llseek, \ 2313e705c121SKalle Valo }; 2314e705c121SKalle Valo 2315e705c121SKalle Valo static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, 2316e705c121SKalle Valo char __user *user_buf, 2317e705c121SKalle Valo size_t count, loff_t *ppos) 2318e705c121SKalle Valo { 2319e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2320e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2321e705c121SKalle Valo struct iwl_txq *txq; 2322e705c121SKalle Valo char *buf; 2323e705c121SKalle Valo int pos = 0; 2324e705c121SKalle Valo int cnt; 2325e705c121SKalle Valo int ret; 2326e705c121SKalle Valo size_t bufsz; 2327e705c121SKalle Valo 2328e705c121SKalle Valo bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues; 2329e705c121SKalle Valo 2330e705c121SKalle Valo if (!trans_pcie->txq) 2331e705c121SKalle Valo return -EAGAIN; 2332e705c121SKalle Valo 2333e705c121SKalle Valo buf = kzalloc(bufsz, GFP_KERNEL); 2334e705c121SKalle Valo if (!buf) 2335e705c121SKalle Valo return -ENOMEM; 2336e705c121SKalle Valo 2337e705c121SKalle Valo for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { 2338e705c121SKalle Valo txq = &trans_pcie->txq[cnt]; 2339e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 2340e705c121SKalle Valo "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n", 2341bb98ecd4SSara Sharon cnt, txq->read_ptr, txq->write_ptr, 2342e705c121SKalle Valo !!test_bit(cnt, trans_pcie->queue_used), 2343e705c121SKalle Valo !!test_bit(cnt, trans_pcie->queue_stopped), 2344e705c121SKalle Valo txq->need_update, txq->frozen, 2345e705c121SKalle Valo (cnt == trans_pcie->cmd_queue ? " HCMD" : "")); 2346e705c121SKalle Valo } 2347e705c121SKalle Valo ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2348e705c121SKalle Valo kfree(buf); 2349e705c121SKalle Valo return ret; 2350e705c121SKalle Valo } 2351e705c121SKalle Valo 2352e705c121SKalle Valo static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, 2353e705c121SKalle Valo char __user *user_buf, 2354e705c121SKalle Valo size_t count, loff_t *ppos) 2355e705c121SKalle Valo { 2356e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2357e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 235878485054SSara Sharon char *buf; 235978485054SSara Sharon int pos = 0, i, ret; 236078485054SSara Sharon size_t bufsz = sizeof(buf); 2361e705c121SKalle Valo 236278485054SSara Sharon bufsz = sizeof(char) * 121 * trans->num_rx_queues; 236378485054SSara Sharon 236478485054SSara Sharon if (!trans_pcie->rxq) 236578485054SSara Sharon return -EAGAIN; 236678485054SSara Sharon 236778485054SSara Sharon buf = kzalloc(bufsz, GFP_KERNEL); 236878485054SSara Sharon if (!buf) 236978485054SSara Sharon return -ENOMEM; 237078485054SSara Sharon 237178485054SSara Sharon for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { 237278485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 237378485054SSara Sharon 237478485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", 237578485054SSara Sharon i); 237678485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", 2377e705c121SKalle Valo rxq->read); 237878485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", 2379e705c121SKalle Valo rxq->write); 238078485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", 2381e705c121SKalle Valo rxq->write_actual); 238278485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", 2383e705c121SKalle Valo rxq->need_update); 238478485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", 2385e705c121SKalle Valo rxq->free_count); 2386e705c121SKalle Valo if (rxq->rb_stts) { 238778485054SSara Sharon pos += scnprintf(buf + pos, bufsz - pos, 238878485054SSara Sharon "\tclosed_rb_num: %u\n", 238978485054SSara Sharon le16_to_cpu(rxq->rb_stts->closed_rb_num) & 239078485054SSara Sharon 0x0FFF); 2391e705c121SKalle Valo } else { 2392e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 239378485054SSara Sharon "\tclosed_rb_num: Not Allocated\n"); 2394e705c121SKalle Valo } 239578485054SSara Sharon } 239678485054SSara Sharon ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 239778485054SSara Sharon kfree(buf); 239878485054SSara Sharon 239978485054SSara Sharon return ret; 2400e705c121SKalle Valo } 2401e705c121SKalle Valo 2402e705c121SKalle Valo static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 2403e705c121SKalle Valo char __user *user_buf, 2404e705c121SKalle Valo size_t count, loff_t *ppos) 2405e705c121SKalle Valo { 2406e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2407e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2408e705c121SKalle Valo struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2409e705c121SKalle Valo 2410e705c121SKalle Valo int pos = 0; 2411e705c121SKalle Valo char *buf; 2412e705c121SKalle Valo int bufsz = 24 * 64; /* 24 items * 64 char per item */ 2413e705c121SKalle Valo ssize_t ret; 2414e705c121SKalle Valo 2415e705c121SKalle Valo buf = kzalloc(bufsz, GFP_KERNEL); 2416e705c121SKalle Valo if (!buf) 2417e705c121SKalle Valo return -ENOMEM; 2418e705c121SKalle Valo 2419e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 2420e705c121SKalle Valo "Interrupt Statistics Report:\n"); 2421e705c121SKalle Valo 2422e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", 2423e705c121SKalle Valo isr_stats->hw); 2424e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", 2425e705c121SKalle Valo isr_stats->sw); 2426e705c121SKalle Valo if (isr_stats->sw || isr_stats->hw) { 2427e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 2428e705c121SKalle Valo "\tLast Restarting Code: 0x%X\n", 2429e705c121SKalle Valo isr_stats->err_code); 2430e705c121SKalle Valo } 2431e705c121SKalle Valo #ifdef CONFIG_IWLWIFI_DEBUG 2432e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", 2433e705c121SKalle Valo isr_stats->sch); 2434e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", 2435e705c121SKalle Valo isr_stats->alive); 2436e705c121SKalle Valo #endif 2437e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 2438e705c121SKalle Valo "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); 2439e705c121SKalle Valo 2440e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", 2441e705c121SKalle Valo isr_stats->ctkill); 2442e705c121SKalle Valo 2443e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", 2444e705c121SKalle Valo isr_stats->wakeup); 2445e705c121SKalle Valo 2446e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, 2447e705c121SKalle Valo "Rx command responses:\t\t %u\n", isr_stats->rx); 2448e705c121SKalle Valo 2449e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", 2450e705c121SKalle Valo isr_stats->tx); 2451e705c121SKalle Valo 2452e705c121SKalle Valo pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", 2453e705c121SKalle Valo isr_stats->unhandled); 2454e705c121SKalle Valo 2455e705c121SKalle Valo ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2456e705c121SKalle Valo kfree(buf); 2457e705c121SKalle Valo return ret; 2458e705c121SKalle Valo } 2459e705c121SKalle Valo 2460e705c121SKalle Valo static ssize_t iwl_dbgfs_interrupt_write(struct file *file, 2461e705c121SKalle Valo const char __user *user_buf, 2462e705c121SKalle Valo size_t count, loff_t *ppos) 2463e705c121SKalle Valo { 2464e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2465e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2466e705c121SKalle Valo struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2467e705c121SKalle Valo 2468e705c121SKalle Valo char buf[8]; 2469e705c121SKalle Valo int buf_size; 2470e705c121SKalle Valo u32 reset_flag; 2471e705c121SKalle Valo 2472e705c121SKalle Valo memset(buf, 0, sizeof(buf)); 2473e705c121SKalle Valo buf_size = min(count, sizeof(buf) - 1); 2474e705c121SKalle Valo if (copy_from_user(buf, user_buf, buf_size)) 2475e705c121SKalle Valo return -EFAULT; 2476e705c121SKalle Valo if (sscanf(buf, "%x", &reset_flag) != 1) 2477e705c121SKalle Valo return -EFAULT; 2478e705c121SKalle Valo if (reset_flag == 0) 2479e705c121SKalle Valo memset(isr_stats, 0, sizeof(*isr_stats)); 2480e705c121SKalle Valo 2481e705c121SKalle Valo return count; 2482e705c121SKalle Valo } 2483e705c121SKalle Valo 2484e705c121SKalle Valo static ssize_t iwl_dbgfs_csr_write(struct file *file, 2485e705c121SKalle Valo const char __user *user_buf, 2486e705c121SKalle Valo size_t count, loff_t *ppos) 2487e705c121SKalle Valo { 2488e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2489e705c121SKalle Valo char buf[8]; 2490e705c121SKalle Valo int buf_size; 2491e705c121SKalle Valo int csr; 2492e705c121SKalle Valo 2493e705c121SKalle Valo memset(buf, 0, sizeof(buf)); 2494e705c121SKalle Valo buf_size = min(count, sizeof(buf) - 1); 2495e705c121SKalle Valo if (copy_from_user(buf, user_buf, buf_size)) 2496e705c121SKalle Valo return -EFAULT; 2497e705c121SKalle Valo if (sscanf(buf, "%d", &csr) != 1) 2498e705c121SKalle Valo return -EFAULT; 2499e705c121SKalle Valo 2500e705c121SKalle Valo iwl_pcie_dump_csr(trans); 2501e705c121SKalle Valo 2502e705c121SKalle Valo return count; 2503e705c121SKalle Valo } 2504e705c121SKalle Valo 2505e705c121SKalle Valo static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 2506e705c121SKalle Valo char __user *user_buf, 2507e705c121SKalle Valo size_t count, loff_t *ppos) 2508e705c121SKalle Valo { 2509e705c121SKalle Valo struct iwl_trans *trans = file->private_data; 2510e705c121SKalle Valo char *buf = NULL; 2511e705c121SKalle Valo ssize_t ret; 2512e705c121SKalle Valo 2513e705c121SKalle Valo ret = iwl_dump_fh(trans, &buf); 2514e705c121SKalle Valo if (ret < 0) 2515e705c121SKalle Valo return ret; 2516e705c121SKalle Valo if (!buf) 2517e705c121SKalle Valo return -EINVAL; 2518e705c121SKalle Valo ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 2519e705c121SKalle Valo kfree(buf); 2520e705c121SKalle Valo return ret; 2521e705c121SKalle Valo } 2522e705c121SKalle Valo 2523e705c121SKalle Valo DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 2524e705c121SKalle Valo DEBUGFS_READ_FILE_OPS(fh_reg); 2525e705c121SKalle Valo DEBUGFS_READ_FILE_OPS(rx_queue); 2526e705c121SKalle Valo DEBUGFS_READ_FILE_OPS(tx_queue); 2527e705c121SKalle Valo DEBUGFS_WRITE_FILE_OPS(csr); 2528e705c121SKalle Valo 2529f8a1edb7SJohannes Berg /* Create the debugfs files and directories */ 2530f8a1edb7SJohannes Berg int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) 2531e705c121SKalle Valo { 2532f8a1edb7SJohannes Berg struct dentry *dir = trans->dbgfs_dir; 2533f8a1edb7SJohannes Berg 2534e705c121SKalle Valo DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); 2535e705c121SKalle Valo DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); 2536e705c121SKalle Valo DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); 2537e705c121SKalle Valo DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); 2538e705c121SKalle Valo DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); 2539e705c121SKalle Valo return 0; 2540e705c121SKalle Valo 2541e705c121SKalle Valo err: 2542e705c121SKalle Valo IWL_ERR(trans, "failed to create the trans debugfs entry\n"); 2543e705c121SKalle Valo return -ENOMEM; 2544e705c121SKalle Valo } 2545e705c121SKalle Valo #endif /*CONFIG_IWLWIFI_DEBUGFS */ 2546e705c121SKalle Valo 25476983ba69SSara Sharon static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) 2548e705c121SKalle Valo { 25493cd1980bSSara Sharon struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2550e705c121SKalle Valo u32 cmdlen = 0; 2551e705c121SKalle Valo int i; 2552e705c121SKalle Valo 25533cd1980bSSara Sharon for (i = 0; i < trans_pcie->max_tbs; i++) 25546983ba69SSara Sharon cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i); 2555e705c121SKalle Valo 2556e705c121SKalle Valo return cmdlen; 2557e705c121SKalle Valo } 2558e705c121SKalle Valo 2559e705c121SKalle Valo static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, 2560e705c121SKalle Valo struct iwl_fw_error_dump_data **data, 2561e705c121SKalle Valo int allocated_rb_nums) 2562e705c121SKalle Valo { 2563e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2564e705c121SKalle Valo int max_len = PAGE_SIZE << trans_pcie->rx_page_order; 256578485054SSara Sharon /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 256678485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 2567e705c121SKalle Valo u32 i, r, j, rb_len = 0; 2568e705c121SKalle Valo 2569e705c121SKalle Valo spin_lock(&rxq->lock); 2570e705c121SKalle Valo 2571e705c121SKalle Valo r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; 2572e705c121SKalle Valo 2573e705c121SKalle Valo for (i = rxq->read, j = 0; 2574e705c121SKalle Valo i != r && j < allocated_rb_nums; 2575e705c121SKalle Valo i = (i + 1) & RX_QUEUE_MASK, j++) { 2576e705c121SKalle Valo struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; 2577e705c121SKalle Valo struct iwl_fw_error_dump_rb *rb; 2578e705c121SKalle Valo 2579e705c121SKalle Valo dma_unmap_page(trans->dev, rxb->page_dma, max_len, 2580e705c121SKalle Valo DMA_FROM_DEVICE); 2581e705c121SKalle Valo 2582e705c121SKalle Valo rb_len += sizeof(**data) + sizeof(*rb) + max_len; 2583e705c121SKalle Valo 2584e705c121SKalle Valo (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); 2585e705c121SKalle Valo (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); 2586e705c121SKalle Valo rb = (void *)(*data)->data; 2587e705c121SKalle Valo rb->index = cpu_to_le32(i); 2588e705c121SKalle Valo memcpy(rb->data, page_address(rxb->page), max_len); 2589e705c121SKalle Valo /* remap the page for the free benefit */ 2590e705c121SKalle Valo rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0, 2591e705c121SKalle Valo max_len, 2592e705c121SKalle Valo DMA_FROM_DEVICE); 2593e705c121SKalle Valo 2594e705c121SKalle Valo *data = iwl_fw_error_next_data(*data); 2595e705c121SKalle Valo } 2596e705c121SKalle Valo 2597e705c121SKalle Valo spin_unlock(&rxq->lock); 2598e705c121SKalle Valo 2599e705c121SKalle Valo return rb_len; 2600e705c121SKalle Valo } 2601e705c121SKalle Valo #define IWL_CSR_TO_DUMP (0x250) 2602e705c121SKalle Valo 2603e705c121SKalle Valo static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, 2604e705c121SKalle Valo struct iwl_fw_error_dump_data **data) 2605e705c121SKalle Valo { 2606e705c121SKalle Valo u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; 2607e705c121SKalle Valo __le32 *val; 2608e705c121SKalle Valo int i; 2609e705c121SKalle Valo 2610e705c121SKalle Valo (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); 2611e705c121SKalle Valo (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); 2612e705c121SKalle Valo val = (void *)(*data)->data; 2613e705c121SKalle Valo 2614e705c121SKalle Valo for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) 2615e705c121SKalle Valo *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 2616e705c121SKalle Valo 2617e705c121SKalle Valo *data = iwl_fw_error_next_data(*data); 2618e705c121SKalle Valo 2619e705c121SKalle Valo return csr_len; 2620e705c121SKalle Valo } 2621e705c121SKalle Valo 2622e705c121SKalle Valo static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, 2623e705c121SKalle Valo struct iwl_fw_error_dump_data **data) 2624e705c121SKalle Valo { 2625e705c121SKalle Valo u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; 2626e705c121SKalle Valo unsigned long flags; 2627e705c121SKalle Valo __le32 *val; 2628e705c121SKalle Valo int i; 2629e705c121SKalle Valo 263023ba9340SEmmanuel Grumbach if (!iwl_trans_grab_nic_access(trans, &flags)) 2631e705c121SKalle Valo return 0; 2632e705c121SKalle Valo 2633e705c121SKalle Valo (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); 2634e705c121SKalle Valo (*data)->len = cpu_to_le32(fh_regs_len); 2635e705c121SKalle Valo val = (void *)(*data)->data; 2636e705c121SKalle Valo 2637e705c121SKalle Valo for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32)) 2638e705c121SKalle Valo *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 2639e705c121SKalle Valo 2640e705c121SKalle Valo iwl_trans_release_nic_access(trans, &flags); 2641e705c121SKalle Valo 2642e705c121SKalle Valo *data = iwl_fw_error_next_data(*data); 2643e705c121SKalle Valo 2644e705c121SKalle Valo return sizeof(**data) + fh_regs_len; 2645e705c121SKalle Valo } 2646e705c121SKalle Valo 2647e705c121SKalle Valo static u32 2648e705c121SKalle Valo iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, 2649e705c121SKalle Valo struct iwl_fw_error_dump_fw_mon *fw_mon_data, 2650e705c121SKalle Valo u32 monitor_len) 2651e705c121SKalle Valo { 2652e705c121SKalle Valo u32 buf_size_in_dwords = (monitor_len >> 2); 2653e705c121SKalle Valo u32 *buffer = (u32 *)fw_mon_data->data; 2654e705c121SKalle Valo unsigned long flags; 2655e705c121SKalle Valo u32 i; 2656e705c121SKalle Valo 265723ba9340SEmmanuel Grumbach if (!iwl_trans_grab_nic_access(trans, &flags)) 2658e705c121SKalle Valo return 0; 2659e705c121SKalle Valo 266014ef1b43SGolan Ben-Ami iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); 2661e705c121SKalle Valo for (i = 0; i < buf_size_in_dwords; i++) 266214ef1b43SGolan Ben-Ami buffer[i] = iwl_read_prph_no_grab(trans, 266314ef1b43SGolan Ben-Ami MON_DMARB_RD_DATA_ADDR); 266414ef1b43SGolan Ben-Ami iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); 2665e705c121SKalle Valo 2666e705c121SKalle Valo iwl_trans_release_nic_access(trans, &flags); 2667e705c121SKalle Valo 2668e705c121SKalle Valo return monitor_len; 2669e705c121SKalle Valo } 2670e705c121SKalle Valo 2671e705c121SKalle Valo static u32 2672e705c121SKalle Valo iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, 2673e705c121SKalle Valo struct iwl_fw_error_dump_data **data, 2674e705c121SKalle Valo u32 monitor_len) 2675e705c121SKalle Valo { 2676e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2677e705c121SKalle Valo u32 len = 0; 2678e705c121SKalle Valo 2679e705c121SKalle Valo if ((trans_pcie->fw_mon_page && 2680e705c121SKalle Valo trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || 2681e705c121SKalle Valo trans->dbg_dest_tlv) { 2682e705c121SKalle Valo struct iwl_fw_error_dump_fw_mon *fw_mon_data; 2683e705c121SKalle Valo u32 base, write_ptr, wrap_cnt; 2684e705c121SKalle Valo 2685e705c121SKalle Valo /* If there was a dest TLV - use the values from there */ 2686e705c121SKalle Valo if (trans->dbg_dest_tlv) { 2687e705c121SKalle Valo write_ptr = 2688e705c121SKalle Valo le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); 2689e705c121SKalle Valo wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); 2690e705c121SKalle Valo base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); 2691e705c121SKalle Valo } else { 2692e705c121SKalle Valo base = MON_BUFF_BASE_ADDR; 2693e705c121SKalle Valo write_ptr = MON_BUFF_WRPTR; 2694e705c121SKalle Valo wrap_cnt = MON_BUFF_CYCLE_CNT; 2695e705c121SKalle Valo } 2696e705c121SKalle Valo 2697e705c121SKalle Valo (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); 2698e705c121SKalle Valo fw_mon_data = (void *)(*data)->data; 2699e705c121SKalle Valo fw_mon_data->fw_mon_wr_ptr = 2700e705c121SKalle Valo cpu_to_le32(iwl_read_prph(trans, write_ptr)); 2701e705c121SKalle Valo fw_mon_data->fw_mon_cycle_cnt = 2702e705c121SKalle Valo cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); 2703e705c121SKalle Valo fw_mon_data->fw_mon_base_ptr = 2704e705c121SKalle Valo cpu_to_le32(iwl_read_prph(trans, base)); 2705e705c121SKalle Valo 2706e705c121SKalle Valo len += sizeof(**data) + sizeof(*fw_mon_data); 2707e705c121SKalle Valo if (trans_pcie->fw_mon_page) { 2708e705c121SKalle Valo /* 2709e705c121SKalle Valo * The firmware is now asserted, it won't write anything 2710e705c121SKalle Valo * to the buffer. CPU can take ownership to fetch the 2711e705c121SKalle Valo * data. The buffer will be handed back to the device 2712e705c121SKalle Valo * before the firmware will be restarted. 2713e705c121SKalle Valo */ 2714e705c121SKalle Valo dma_sync_single_for_cpu(trans->dev, 2715e705c121SKalle Valo trans_pcie->fw_mon_phys, 2716e705c121SKalle Valo trans_pcie->fw_mon_size, 2717e705c121SKalle Valo DMA_FROM_DEVICE); 2718e705c121SKalle Valo memcpy(fw_mon_data->data, 2719e705c121SKalle Valo page_address(trans_pcie->fw_mon_page), 2720e705c121SKalle Valo trans_pcie->fw_mon_size); 2721e705c121SKalle Valo 2722e705c121SKalle Valo monitor_len = trans_pcie->fw_mon_size; 2723e705c121SKalle Valo } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { 2724e705c121SKalle Valo /* 2725e705c121SKalle Valo * Update pointers to reflect actual values after 2726e705c121SKalle Valo * shifting 2727e705c121SKalle Valo */ 2728e705c121SKalle Valo base = iwl_read_prph(trans, base) << 2729e705c121SKalle Valo trans->dbg_dest_tlv->base_shift; 2730e705c121SKalle Valo iwl_trans_read_mem(trans, base, fw_mon_data->data, 2731e705c121SKalle Valo monitor_len / sizeof(u32)); 2732e705c121SKalle Valo } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) { 2733e705c121SKalle Valo monitor_len = 2734e705c121SKalle Valo iwl_trans_pci_dump_marbh_monitor(trans, 2735e705c121SKalle Valo fw_mon_data, 2736e705c121SKalle Valo monitor_len); 2737e705c121SKalle Valo } else { 2738e705c121SKalle Valo /* Didn't match anything - output no monitor data */ 2739e705c121SKalle Valo monitor_len = 0; 2740e705c121SKalle Valo } 2741e705c121SKalle Valo 2742e705c121SKalle Valo len += monitor_len; 2743e705c121SKalle Valo (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); 2744e705c121SKalle Valo } 2745e705c121SKalle Valo 2746e705c121SKalle Valo return len; 2747e705c121SKalle Valo } 2748e705c121SKalle Valo 2749e705c121SKalle Valo static struct iwl_trans_dump_data 2750e705c121SKalle Valo *iwl_trans_pcie_dump_data(struct iwl_trans *trans, 2751a80c7a69SEmmanuel Grumbach const struct iwl_fw_dbg_trigger_tlv *trigger) 2752e705c121SKalle Valo { 2753e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2754e705c121SKalle Valo struct iwl_fw_error_dump_data *data; 2755e705c121SKalle Valo struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue]; 2756e705c121SKalle Valo struct iwl_fw_error_dump_txcmd *txcmd; 2757e705c121SKalle Valo struct iwl_trans_dump_data *dump_data; 2758e705c121SKalle Valo u32 len, num_rbs; 2759e705c121SKalle Valo u32 monitor_len; 2760e705c121SKalle Valo int i, ptr; 276196a6497bSSara Sharon bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && 276296a6497bSSara Sharon !trans->cfg->mq_rx_supported; 2763e705c121SKalle Valo 2764e705c121SKalle Valo /* transport dump header */ 2765e705c121SKalle Valo len = sizeof(*dump_data); 2766e705c121SKalle Valo 2767e705c121SKalle Valo /* host commands */ 2768e705c121SKalle Valo len += sizeof(*data) + 2769bb98ecd4SSara Sharon cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); 2770e705c121SKalle Valo 2771e705c121SKalle Valo /* FW monitor */ 2772e705c121SKalle Valo if (trans_pcie->fw_mon_page) { 2773e705c121SKalle Valo len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + 2774e705c121SKalle Valo trans_pcie->fw_mon_size; 2775e705c121SKalle Valo monitor_len = trans_pcie->fw_mon_size; 2776e705c121SKalle Valo } else if (trans->dbg_dest_tlv) { 2777e705c121SKalle Valo u32 base, end; 2778e705c121SKalle Valo 2779e705c121SKalle Valo base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); 2780e705c121SKalle Valo end = le32_to_cpu(trans->dbg_dest_tlv->end_reg); 2781e705c121SKalle Valo 2782e705c121SKalle Valo base = iwl_read_prph(trans, base) << 2783e705c121SKalle Valo trans->dbg_dest_tlv->base_shift; 2784e705c121SKalle Valo end = iwl_read_prph(trans, end) << 2785e705c121SKalle Valo trans->dbg_dest_tlv->end_shift; 2786e705c121SKalle Valo 2787e705c121SKalle Valo /* Make "end" point to the actual end */ 2788e705c121SKalle Valo if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 || 2789e705c121SKalle Valo trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) 2790e705c121SKalle Valo end += (1 << trans->dbg_dest_tlv->end_shift); 2791e705c121SKalle Valo monitor_len = end - base; 2792e705c121SKalle Valo len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + 2793e705c121SKalle Valo monitor_len; 2794e705c121SKalle Valo } else { 2795e705c121SKalle Valo monitor_len = 0; 2796e705c121SKalle Valo } 2797e705c121SKalle Valo 2798e705c121SKalle Valo if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { 2799e705c121SKalle Valo dump_data = vzalloc(len); 2800e705c121SKalle Valo if (!dump_data) 2801e705c121SKalle Valo return NULL; 2802e705c121SKalle Valo 2803e705c121SKalle Valo data = (void *)dump_data->data; 2804e705c121SKalle Valo len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 2805e705c121SKalle Valo dump_data->len = len; 2806e705c121SKalle Valo 2807e705c121SKalle Valo return dump_data; 2808e705c121SKalle Valo } 2809e705c121SKalle Valo 2810e705c121SKalle Valo /* CSR registers */ 2811e705c121SKalle Valo len += sizeof(*data) + IWL_CSR_TO_DUMP; 2812e705c121SKalle Valo 2813e705c121SKalle Valo /* FH registers */ 2814e705c121SKalle Valo len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); 2815e705c121SKalle Valo 2816e705c121SKalle Valo if (dump_rbs) { 281778485054SSara Sharon /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 281878485054SSara Sharon struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 2819e705c121SKalle Valo /* RBs */ 282078485054SSara Sharon num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) 2821e705c121SKalle Valo & 0x0FFF; 282278485054SSara Sharon num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; 2823e705c121SKalle Valo len += num_rbs * (sizeof(*data) + 2824e705c121SKalle Valo sizeof(struct iwl_fw_error_dump_rb) + 2825e705c121SKalle Valo (PAGE_SIZE << trans_pcie->rx_page_order)); 2826e705c121SKalle Valo } 2827e705c121SKalle Valo 2828e705c121SKalle Valo dump_data = vzalloc(len); 2829e705c121SKalle Valo if (!dump_data) 2830e705c121SKalle Valo return NULL; 2831e705c121SKalle Valo 2832e705c121SKalle Valo len = 0; 2833e705c121SKalle Valo data = (void *)dump_data->data; 2834e705c121SKalle Valo data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); 2835e705c121SKalle Valo txcmd = (void *)data->data; 2836e705c121SKalle Valo spin_lock_bh(&cmdq->lock); 2837bb98ecd4SSara Sharon ptr = cmdq->write_ptr; 2838bb98ecd4SSara Sharon for (i = 0; i < cmdq->n_window; i++) { 2839bb98ecd4SSara Sharon u8 idx = get_cmd_index(cmdq, ptr); 2840e705c121SKalle Valo u32 caplen, cmdlen; 2841e705c121SKalle Valo 28426983ba69SSara Sharon cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds + 28436983ba69SSara Sharon trans_pcie->tfd_size * ptr); 2844e705c121SKalle Valo caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); 2845e705c121SKalle Valo 2846e705c121SKalle Valo if (cmdlen) { 2847e705c121SKalle Valo len += sizeof(*txcmd) + caplen; 2848e705c121SKalle Valo txcmd->cmdlen = cpu_to_le32(cmdlen); 2849e705c121SKalle Valo txcmd->caplen = cpu_to_le32(caplen); 2850e705c121SKalle Valo memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); 2851e705c121SKalle Valo txcmd = (void *)((u8 *)txcmd->data + caplen); 2852e705c121SKalle Valo } 2853e705c121SKalle Valo 2854e705c121SKalle Valo ptr = iwl_queue_dec_wrap(ptr); 2855e705c121SKalle Valo } 2856e705c121SKalle Valo spin_unlock_bh(&cmdq->lock); 2857e705c121SKalle Valo 2858e705c121SKalle Valo data->len = cpu_to_le32(len); 2859e705c121SKalle Valo len += sizeof(*data); 2860e705c121SKalle Valo data = iwl_fw_error_next_data(data); 2861e705c121SKalle Valo 2862e705c121SKalle Valo len += iwl_trans_pcie_dump_csr(trans, &data); 2863e705c121SKalle Valo len += iwl_trans_pcie_fh_regs_dump(trans, &data); 2864e705c121SKalle Valo if (dump_rbs) 2865e705c121SKalle Valo len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); 2866e705c121SKalle Valo 2867e705c121SKalle Valo len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 2868e705c121SKalle Valo 2869e705c121SKalle Valo dump_data->len = len; 2870e705c121SKalle Valo 2871e705c121SKalle Valo return dump_data; 2872e705c121SKalle Valo } 2873e705c121SKalle Valo 28744cbb8e50SLuciano Coelho #ifdef CONFIG_PM_SLEEP 28754cbb8e50SLuciano Coelho static int iwl_trans_pcie_suspend(struct iwl_trans *trans) 28764cbb8e50SLuciano Coelho { 28774cbb8e50SLuciano Coelho if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) 28784cbb8e50SLuciano Coelho return iwl_pci_fw_enter_d0i3(trans); 28794cbb8e50SLuciano Coelho 28804cbb8e50SLuciano Coelho return 0; 28814cbb8e50SLuciano Coelho } 28824cbb8e50SLuciano Coelho 28834cbb8e50SLuciano Coelho static void iwl_trans_pcie_resume(struct iwl_trans *trans) 28844cbb8e50SLuciano Coelho { 28854cbb8e50SLuciano Coelho if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) 28864cbb8e50SLuciano Coelho iwl_pci_fw_exit_d0i3(trans); 28874cbb8e50SLuciano Coelho } 28884cbb8e50SLuciano Coelho #endif /* CONFIG_PM_SLEEP */ 28894cbb8e50SLuciano Coelho 2890e705c121SKalle Valo static const struct iwl_trans_ops trans_ops_pcie = { 2891e705c121SKalle Valo .start_hw = iwl_trans_pcie_start_hw, 2892e705c121SKalle Valo .op_mode_leave = iwl_trans_pcie_op_mode_leave, 2893e705c121SKalle Valo .fw_alive = iwl_trans_pcie_fw_alive, 2894e705c121SKalle Valo .start_fw = iwl_trans_pcie_start_fw, 2895e705c121SKalle Valo .stop_device = iwl_trans_pcie_stop_device, 2896e705c121SKalle Valo 2897e705c121SKalle Valo .d3_suspend = iwl_trans_pcie_d3_suspend, 2898e705c121SKalle Valo .d3_resume = iwl_trans_pcie_d3_resume, 2899e705c121SKalle Valo 29004cbb8e50SLuciano Coelho #ifdef CONFIG_PM_SLEEP 29014cbb8e50SLuciano Coelho .suspend = iwl_trans_pcie_suspend, 29024cbb8e50SLuciano Coelho .resume = iwl_trans_pcie_resume, 29034cbb8e50SLuciano Coelho #endif /* CONFIG_PM_SLEEP */ 29044cbb8e50SLuciano Coelho 2905e705c121SKalle Valo .send_cmd = iwl_trans_pcie_send_hcmd, 2906e705c121SKalle Valo 2907e705c121SKalle Valo .tx = iwl_trans_pcie_tx, 2908e705c121SKalle Valo .reclaim = iwl_trans_pcie_reclaim, 2909e705c121SKalle Valo 2910e705c121SKalle Valo .txq_disable = iwl_trans_pcie_txq_disable, 2911e705c121SKalle Valo .txq_enable = iwl_trans_pcie_txq_enable, 2912e705c121SKalle Valo 291342db09c1SLiad Kaufman .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, 291442db09c1SLiad Kaufman 2915e705c121SKalle Valo .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, 2916e705c121SKalle Valo .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, 29170cd58eaaSEmmanuel Grumbach .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, 2918e705c121SKalle Valo 2919e705c121SKalle Valo .write8 = iwl_trans_pcie_write8, 2920e705c121SKalle Valo .write32 = iwl_trans_pcie_write32, 2921e705c121SKalle Valo .read32 = iwl_trans_pcie_read32, 2922e705c121SKalle Valo .read_prph = iwl_trans_pcie_read_prph, 2923e705c121SKalle Valo .write_prph = iwl_trans_pcie_write_prph, 2924e705c121SKalle Valo .read_mem = iwl_trans_pcie_read_mem, 2925e705c121SKalle Valo .write_mem = iwl_trans_pcie_write_mem, 2926e705c121SKalle Valo .configure = iwl_trans_pcie_configure, 2927e705c121SKalle Valo .set_pmi = iwl_trans_pcie_set_pmi, 2928e705c121SKalle Valo .grab_nic_access = iwl_trans_pcie_grab_nic_access, 2929e705c121SKalle Valo .release_nic_access = iwl_trans_pcie_release_nic_access, 2930e705c121SKalle Valo .set_bits_mask = iwl_trans_pcie_set_bits_mask, 2931e705c121SKalle Valo 2932e705c121SKalle Valo .ref = iwl_trans_pcie_ref, 2933e705c121SKalle Valo .unref = iwl_trans_pcie_unref, 2934e705c121SKalle Valo 2935e705c121SKalle Valo .dump_data = iwl_trans_pcie_dump_data, 2936e705c121SKalle Valo }; 2937e705c121SKalle Valo 2938e705c121SKalle Valo struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, 2939e705c121SKalle Valo const struct pci_device_id *ent, 2940e705c121SKalle Valo const struct iwl_cfg *cfg) 2941e705c121SKalle Valo { 2942e705c121SKalle Valo struct iwl_trans_pcie *trans_pcie; 2943e705c121SKalle Valo struct iwl_trans *trans; 294496a6497bSSara Sharon int ret, addr_size; 2945e705c121SKalle Valo 29465a41a86cSSharon Dvir ret = pcim_enable_device(pdev); 29475a41a86cSSharon Dvir if (ret) 29485a41a86cSSharon Dvir return ERR_PTR(ret); 29495a41a86cSSharon Dvir 2950e705c121SKalle Valo trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), 29511ea423b0SLuca Coelho &pdev->dev, cfg, &trans_ops_pcie); 2952e705c121SKalle Valo if (!trans) 2953e705c121SKalle Valo return ERR_PTR(-ENOMEM); 2954e705c121SKalle Valo 2955e705c121SKalle Valo trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2956e705c121SKalle Valo 2957e705c121SKalle Valo trans_pcie->trans = trans; 2958e705c121SKalle Valo spin_lock_init(&trans_pcie->irq_lock); 2959e705c121SKalle Valo spin_lock_init(&trans_pcie->reg_lock); 2960e705c121SKalle Valo mutex_init(&trans_pcie->mutex); 2961e705c121SKalle Valo init_waitqueue_head(&trans_pcie->ucode_write_waitq); 29626eb5e529SEmmanuel Grumbach trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); 29636eb5e529SEmmanuel Grumbach if (!trans_pcie->tso_hdr_page) { 29646eb5e529SEmmanuel Grumbach ret = -ENOMEM; 29656eb5e529SEmmanuel Grumbach goto out_no_pci; 29666eb5e529SEmmanuel Grumbach } 2967e705c121SKalle Valo 2968e705c121SKalle Valo 2969e705c121SKalle Valo if (!cfg->base_params->pcie_l1_allowed) { 2970e705c121SKalle Valo /* 2971e705c121SKalle Valo * W/A - seems to solve weird behavior. We need to remove this 2972e705c121SKalle Valo * if we don't want to stay in L1 all the time. This wastes a 2973e705c121SKalle Valo * lot of power. 2974e705c121SKalle Valo */ 2975e705c121SKalle Valo pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 2976e705c121SKalle Valo PCIE_LINK_STATE_L1 | 2977e705c121SKalle Valo PCIE_LINK_STATE_CLKPM); 2978e705c121SKalle Valo } 2979e705c121SKalle Valo 29806983ba69SSara Sharon if (cfg->use_tfh) { 29812c6262b7SSara Sharon addr_size = 64; 29823cd1980bSSara Sharon trans_pcie->max_tbs = IWL_TFH_NUM_TBS; 29838352e62aSSara Sharon trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd); 29846983ba69SSara Sharon } else { 29852c6262b7SSara Sharon addr_size = 36; 29863cd1980bSSara Sharon trans_pcie->max_tbs = IWL_NUM_OF_TBS; 29876983ba69SSara Sharon trans_pcie->tfd_size = sizeof(struct iwl_tfd); 29886983ba69SSara Sharon } 29893cd1980bSSara Sharon trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie); 29903cd1980bSSara Sharon 2991e705c121SKalle Valo pci_set_master(pdev); 2992e705c121SKalle Valo 299396a6497bSSara Sharon ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); 2994e705c121SKalle Valo if (!ret) 299596a6497bSSara Sharon ret = pci_set_consistent_dma_mask(pdev, 299696a6497bSSara Sharon DMA_BIT_MASK(addr_size)); 2997e705c121SKalle Valo if (ret) { 2998e705c121SKalle Valo ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2999e705c121SKalle Valo if (!ret) 3000e705c121SKalle Valo ret = pci_set_consistent_dma_mask(pdev, 3001e705c121SKalle Valo DMA_BIT_MASK(32)); 3002e705c121SKalle Valo /* both attempts failed: */ 3003e705c121SKalle Valo if (ret) { 3004e705c121SKalle Valo dev_err(&pdev->dev, "No suitable DMA available\n"); 30055a41a86cSSharon Dvir goto out_no_pci; 3006e705c121SKalle Valo } 3007e705c121SKalle Valo } 3008e705c121SKalle Valo 30095a41a86cSSharon Dvir ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); 3010e705c121SKalle Valo if (ret) { 30115a41a86cSSharon Dvir dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); 30125a41a86cSSharon Dvir goto out_no_pci; 3013e705c121SKalle Valo } 3014e705c121SKalle Valo 30155a41a86cSSharon Dvir trans_pcie->hw_base = pcim_iomap_table(pdev)[0]; 3016e705c121SKalle Valo if (!trans_pcie->hw_base) { 30175a41a86cSSharon Dvir dev_err(&pdev->dev, "pcim_iomap_table failed\n"); 3018e705c121SKalle Valo ret = -ENODEV; 30195a41a86cSSharon Dvir goto out_no_pci; 3020e705c121SKalle Valo } 3021e705c121SKalle Valo 3022e705c121SKalle Valo /* We disable the RETRY_TIMEOUT register (0x41) to keep 3023e705c121SKalle Valo * PCI Tx retries from interfering with C3 CPU state */ 3024e705c121SKalle Valo pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 3025e705c121SKalle Valo 3026e705c121SKalle Valo trans->dev = &pdev->dev; 3027e705c121SKalle Valo trans_pcie->pci_dev = pdev; 3028e705c121SKalle Valo iwl_disable_interrupts(trans); 3029e705c121SKalle Valo 3030e705c121SKalle Valo trans->hw_rev = iwl_read32(trans, CSR_HW_REV); 3031e705c121SKalle Valo /* 3032e705c121SKalle Valo * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have 3033e705c121SKalle Valo * changed, and now the revision step also includes bit 0-1 (no more 3034e705c121SKalle Valo * "dash" value). To keep hw_rev backwards compatible - we'll store it 3035e705c121SKalle Valo * in the old format. 3036e705c121SKalle Valo */ 3037e705c121SKalle Valo if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 3038e705c121SKalle Valo unsigned long flags; 3039e705c121SKalle Valo 3040e705c121SKalle Valo trans->hw_rev = (trans->hw_rev & 0xfff0) | 3041e705c121SKalle Valo (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); 3042e705c121SKalle Valo 3043e705c121SKalle Valo ret = iwl_pcie_prepare_card_hw(trans); 3044e705c121SKalle Valo if (ret) { 3045e705c121SKalle Valo IWL_WARN(trans, "Exit HW not ready\n"); 30465a41a86cSSharon Dvir goto out_no_pci; 3047e705c121SKalle Valo } 3048e705c121SKalle Valo 3049e705c121SKalle Valo /* 3050e705c121SKalle Valo * in-order to recognize C step driver should read chip version 3051e705c121SKalle Valo * id located at the AUX bus MISC address space. 3052e705c121SKalle Valo */ 3053e705c121SKalle Valo iwl_set_bit(trans, CSR_GP_CNTRL, 3054e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 3055e705c121SKalle Valo udelay(2); 3056e705c121SKalle Valo 3057e705c121SKalle Valo ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 3058e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 3059e705c121SKalle Valo CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 3060e705c121SKalle Valo 25000); 3061e705c121SKalle Valo if (ret < 0) { 3062e705c121SKalle Valo IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n"); 30635a41a86cSSharon Dvir goto out_no_pci; 3064e705c121SKalle Valo } 3065e705c121SKalle Valo 306623ba9340SEmmanuel Grumbach if (iwl_trans_grab_nic_access(trans, &flags)) { 3067e705c121SKalle Valo u32 hw_step; 3068e705c121SKalle Valo 306914ef1b43SGolan Ben-Ami hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG); 3070e705c121SKalle Valo hw_step |= ENABLE_WFPM; 307114ef1b43SGolan Ben-Ami iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step); 307214ef1b43SGolan Ben-Ami hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG); 3073e705c121SKalle Valo hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; 3074e705c121SKalle Valo if (hw_step == 0x3) 3075e705c121SKalle Valo trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) | 3076e705c121SKalle Valo (SILICON_C_STEP << 2); 3077e705c121SKalle Valo iwl_trans_release_nic_access(trans, &flags); 3078e705c121SKalle Valo } 3079e705c121SKalle Valo } 3080e705c121SKalle Valo 30811afb0ae4SHaim Dreyfuss trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); 30821afb0ae4SHaim Dreyfuss 30832e5d4a8fSHaim Dreyfuss iwl_pcie_set_interrupt_capa(pdev, trans); 3084e705c121SKalle Valo trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 3085e705c121SKalle Valo snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 3086e705c121SKalle Valo "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); 3087e705c121SKalle Valo 3088e705c121SKalle Valo /* Initialize the wait queue for commands */ 3089e705c121SKalle Valo init_waitqueue_head(&trans_pcie->wait_command_queue); 3090e705c121SKalle Valo 30914cbb8e50SLuciano Coelho init_waitqueue_head(&trans_pcie->d0i3_waitq); 30924cbb8e50SLuciano Coelho 30932e5d4a8fSHaim Dreyfuss if (trans_pcie->msix_enabled) { 30942e5d4a8fSHaim Dreyfuss if (iwl_pcie_init_msix_handler(pdev, trans_pcie)) 30955a41a86cSSharon Dvir goto out_no_pci; 30962e5d4a8fSHaim Dreyfuss } else { 3097e705c121SKalle Valo ret = iwl_pcie_alloc_ict(trans); 3098e705c121SKalle Valo if (ret) 30995a41a86cSSharon Dvir goto out_no_pci; 3100e705c121SKalle Valo 31015a41a86cSSharon Dvir ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, 31025a41a86cSSharon Dvir iwl_pcie_isr, 3103e705c121SKalle Valo iwl_pcie_irq_handler, 3104e705c121SKalle Valo IRQF_SHARED, DRV_NAME, trans); 3105e705c121SKalle Valo if (ret) { 3106e705c121SKalle Valo IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 3107e705c121SKalle Valo goto out_free_ict; 3108e705c121SKalle Valo } 3109e705c121SKalle Valo trans_pcie->inta_mask = CSR_INI_SET_MASK; 31102e5d4a8fSHaim Dreyfuss } 3111e705c121SKalle Valo 3112b3ff1270SLuca Coelho #ifdef CONFIG_IWLWIFI_PCIE_RTPM 3113b3ff1270SLuca Coelho trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; 3114b3ff1270SLuca Coelho #else 3115b3ff1270SLuca Coelho trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 3116b3ff1270SLuca Coelho #endif /* CONFIG_IWLWIFI_PCIE_RTPM */ 3117b3ff1270SLuca Coelho 3118e705c121SKalle Valo return trans; 3119e705c121SKalle Valo 3120e705c121SKalle Valo out_free_ict: 3121e705c121SKalle Valo iwl_pcie_free_ict(trans); 3122e705c121SKalle Valo out_no_pci: 31236eb5e529SEmmanuel Grumbach free_percpu(trans_pcie->tso_hdr_page); 3124e705c121SKalle Valo iwl_trans_free(trans); 3125e705c121SKalle Valo return ERR_PTR(ret); 3126e705c121SKalle Valo } 3127