// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
	}
	goto out;

err_read:
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}

static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(5000, 6000);
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power, u8 min_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size)
		return;

	for (power = max_power; power >= min_power; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}
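
/*
 * Sizing note (illustrative): the TLV value passed in below is an
 * exponent offset - a value v requests a 2^(v + 11) byte buffer, e.g.
 * v = 13 asks for 2^24 bytes (16 MB). Without a TLV (max_power == 0)
 * the default is 2^26 bytes (64 MB), and the block allocator above
 * falls back in halving steps down to the 2^11 byte (2 KB) minimum.
 */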

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans->dbg.fw_mon.size)
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}
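
/*
 * Indirect shared-register (SHR) access helpers. As used below, the
 * control word carries the target register in its low 16 bits and what
 * looks like an operation code in the top nibble (2 for read, 3 for
 * write); the data register holds the value read or to be written.
 */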

static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - this is needed for 7260 / 3160
		 * only, so we check host_interrupt_operation_mode even though
		 * what follows is not really related to it.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks. This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	iwl_trans_pcie_sw_reset(trans);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	iwl_trans_pcie_sw_reset(trans);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   100);
	} else {
		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

		ret = iwl_poll_bit(trans, CSR_RESET,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	}

	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
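
/*
 * Retry budget (for orientation): the t counter below is shared across
 * the outer iterations, so all inner polling together accounts for up
 * to 150 ms of waiting in 200 usec steps (usleep_range may stretch the
 * wall-clock time); the outer loop re-asserts PREPARE up to 10 times
 * with a 25 ms pause after each attempt before finally giving up.
 */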

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}
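
/*
 * Flow summary: the chunk has already been copied into a DMA-coherent
 * bounce buffer; here we grab NIC access, point the FH service channel
 * at that buffer, and then sleep until the FH_TX interrupt handler sets
 * ucode_write_complete (or the 5 second timeout expires).
 */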

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
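
/*
 * Progress reporting (worked example): after each section is loaded,
 * sec_num is shifted left and ORed with 1, so the value written into
 * FH_UCODE_LOAD_STATUS grows as 0x1, 0x3, 0x7, 0xF, ... - a ones-mask
 * with one bit per completed section, placed in the low half-word for
 * CPU1 (shift 0) or the high half-word for CPU2 (shift 16). Writing
 * 0xFFFF / 0xFFFFFFFF at the end marks the respective CPU as done.
 */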

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);

	return hw_rfkill;
}
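
/*
 * Each entry below ties one interrupt cause to the MSI-X IVAR table:
 * cause_num is the cause's bit in the given FH/HW mask register, and
 * addr is the cause's byte offset within the IVAR table.
 */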

struct iwl_causes_list {
	u32 cause_num;
	u32 mask_reg;
	u8 addr;
};

static struct iwl_causes_list causes_list[] = {
	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
	{MSIX_HW_INT_CAUSES_REG_WAKEUP,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
	{MSIX_HW_INT_CAUSES_REG_RESET_DONE,	CSR_MSIX_HW_INT_MASK_AD, 0x12},
	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
	{MSIX_HW_INT_CAUSES_REG_SW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x29},
	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	int i, arr_size = ARRAY_SIZE(causes_list);
	struct iwl_causes_list *causes = causes_list;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      causes[i].cause_num);
	}
}
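
/*
 * Illustration (assuming the usual layout): RX queue 0 - the fallback
 * queue - always stays on the first vector, marked non-auto-clear when
 * that vector also serves non-RX causes. The remaining queues spread
 * over the later vectors; when the first vector additionally serves the
 * first RSS queue (IWL_SHARED_IRQ_FIRST_RSS), the offset below shifts
 * each remaining queue down by one and queue 1 is then remapped onto
 * the first vector as well.
 */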

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	iwl_op_mode_time_point(trans->op_mode,
			       IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
			       NULL);

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->trans_cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans);
		else
			_iwl_trans_pcie_stop_device(trans);
	}
}

void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
				     bool reset)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!reset)
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_SUSPEND);

		ret = wait_event_timeout(trans_pcie->sx_waitq,
					 trans_pcie->sx_complete, 2 * HZ);
		/*
		 * Invalidate it toward resume.
		 */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout entering D3\n");
			return -ETIMEDOUT;
		}
	}
	iwl_pcie_d3_complete_suspend(trans, test, reset);

	return 0;
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		goto out;
	}

	iwl_set_bit(trans, CSR_GP_CNTRL,
		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	/*
	 * Reconfigure IVAR table in case of MSIX or reset ict table in
	 * MSI mode since HW reset erased it.
	 * Also enables interrupts - none will happen as
	 * the device doesn't know we're waking it up, only when
	 * the opmode actually tells it after this call.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

out:
	if (*status == IWL_D3_STATUS_ALIVE &&
	    trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		trans_pcie->sx_complete = false;
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_RESUME);

		ret = wait_event_timeout(trans_pcie->sx_waitq,
					 trans_pcie->sx_complete, 2 * HZ);
		/*
		 * Invalidate it toward next suspend.
		 */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D3\n");
			return -ETIMEDOUT;
		}
	}
	return 0;
}
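
/*
 * Vector accounting (illustrative): on a 4-CPU system we ask for
 * min(4 + 2, max_rx_queues) MSI-X vectors - the "+ 2" covers one vector
 * for non-RX causes and one for the fallback queue. If the OS grants
 * one vector less than requested, non-RX causes share the fallback
 * queue's vector; two less, and the first RSS queue shares it too; any
 * fewer and we simply run with fewer RSS queues.
 */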

static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!cfg_trans->mq_rx_supported)
		goto enable_msi;

	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= max_irqs - 2) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
		       trans_pcie->trans->num_rx_queues,
		       trans_pcie->shared_vec_mask);

	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}

static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q ; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				trans_pcie->msix_entries[i].vector);
	}
}

static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}

static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
	u32 hpm, wprot;

	switch (trans->trans_cfg->device_family) {
	case IWL_DEVICE_FAMILY_9000:
		wprot = PREG_PRPH_WPROT_9000;
		break;
	case IWL_DEVICE_FAMILY_22000:
		wprot = PREG_PRPH_WPROT_22000;
		break;
	default:
		return 0;
	}

	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
	if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);

		if (wprot_val & PREG_WFPM_ACCESS) {
			IWL_ERR(trans,
				"Error, can not clear persistence bit\n");
			return -EPERM;
		}
		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
					    hpm & ~PERSISTENCE_BIT);
	}

	return 0;
}

static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret < 0)
		return ret;

	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	udelay(20);
	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_PG_EN |
			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
	udelay(20);
	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);

	iwl_trans_pcie_sw_reset(trans);

	return 0;
}

static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	err = iwl_trans_pcie_clear_persistence_bit(trans);
	if (err)
		return err;

	iwl_trans_pcie_sw_reset(trans);

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
	    trans->trans_cfg->integrated) {
		err = iwl_pcie_gen2_force_power_gating(trans);
		if (err)
			return err;
	}

	err = iwl_pcie_apm_init(trans);
	if (err)
		return err;

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	trans_pcie->opmode_down = false;

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_pcie_check_hw_rf_kill(trans);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	iwl_disable_interrupts(trans);

	iwl_pcie_apm_stop(trans, true);

	iwl_disable_interrupts(trans);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	iwl_pcie_synchronize_irqs(trans);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
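
/*
 * Periphery (PRPH) registers are reached indirectly: the target address
 * (masked to 20 bits, or 24 bits on AX210 and later) is written to the
 * HBUS address register together with (3 << 24), which appears to select
 * a 4-byte access, and the value then moves through the data register.
 */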
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
	trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
	trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	trans->txqs.page_offs = trans_cfg->cb_data_offs;
	trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);

	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
	trans_pcie->rx_buf_bytes =
		iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
	trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);

	trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	trans->command_groups = trans_cfg->command_groups;
	trans->command_groups_size = trans_cfg->command_groups_size;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated. As this
	 * function may be called again in some corner cases, don't do
	 * anything if NAPI was already initialized.
	 */
	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
		init_dummy_netdev(&trans_pcie->napi_dev);

	trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	iwl_pcie_synchronize_irqs(trans);

	if (trans->trans_cfg->gen2)
		iwl_txq_gen2_tx_free(trans);
	else
		iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->rba.alloc_wq) {
		destroy_workqueue(trans_pcie->rba.alloc_wq);
		trans_pcie->rba.alloc_wq = NULL;
	}

	if (trans_pcie->msix_enabled) {
		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
			irq_set_affinity_hint(
				trans_pcie->msix_entries[i].vector,
				NULL);
		}

		trans_pcie->msix_enabled = false;
	} else {
		iwl_pcie_free_ict(trans);
	}

	iwl_pcie_free_fw_monitor(trans);

	if (trans_pcie->pnvm_dram.size)
		dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size,
				  trans_pcie->pnvm_dram.block,
				  trans_pcie->pnvm_dram.physical);

	if (trans_pcie->reduce_power_dram.size)
		dma_free_coherent(trans->dev,
				  trans_pcie->reduce_power_dram.size,
				  trans_pcie->reduce_power_dram.block,
				  trans_pcie->reduce_power_dram.physical);

	mutex_destroy(&trans_pcie->mutex);
	iwl_trans_free(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}

struct iwl_trans_pcie_removal {
	struct pci_dev *pdev;
	struct work_struct work;
};

static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
{
	struct iwl_trans_pcie_removal *removal =
		container_of(wk, struct iwl_trans_pcie_removal, work);
	struct pci_dev *pdev = removal->pdev;
	static char *prop[] = {"EVENT=INACCESSIBLE", NULL};

	dev_err(&pdev->dev, "Device gone - attempting removal\n");
	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
	pci_lock_rescan_remove();
	pci_dev_put(pdev);
	pci_stop_and_remove_bus_device(pdev);
	pci_unlock_rescan_remove();

	kfree(removal);
	module_put(THIS_MODULE);
}
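/*
 * Note on the lifetime dance above: the worker drops both references
 * taken on the scheduling side in __iwl_trans_pcie_grab_nic_access()
 * below - pci_dev_put() pairs with pci_dev_get() and module_put()
 * pairs with try_module_get() - so the module cannot be unloaded
 * while a removal work is still queued.
 */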
/*
 * This version doesn't disable BHs but rather assumes they're
 * already disabled.
 */
bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;
	u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		   CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
	u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;

	spin_lock(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
		mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
		poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
	}

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * HW with volatile SRAM must save/restore contents to/from
	 * host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses is expected (e.g. reading Event Log),
	 * to keep the device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored. We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP
	 * check is a good idea before accessing the SRAM of HW with
	 * volatile SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile
	 * SRAM, and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000);
	if (unlikely(ret < 0)) {
		u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);

		WARN_ONCE(1,
			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
			  cntrl);

		iwl_trans_pcie_dump_regs(trans);

		if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
			struct iwl_trans_pcie_removal *removal;

			if (test_bit(STATUS_TRANS_DEAD, &trans->status))
				goto err;

			IWL_ERR(trans, "Device gone - scheduling removal!\n");

			/*
			 * get a module reference to avoid doing this
			 * while unloading anyway and to avoid
			 * scheduling a work with code that's being
			 * removed.
			 */
			if (!try_module_get(THIS_MODULE)) {
				IWL_ERR(trans,
					"Module is being unloaded - abort\n");
				goto err;
			}

			removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
			if (!removal) {
				module_put(THIS_MODULE);
				goto err;
			}
			/*
			 * we don't need to clear this flag, because
			 * the trans will be freed and reallocated.
			 */
			set_bit(STATUS_TRANS_DEAD, &trans->status);

			removal->pdev = to_pci_dev(trans->dev);
			INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
			pci_dev_get(removal->pdev);
			schedule_work(&removal->work);
		} else {
			iwl_write32(trans, CSR_RESET,
				    CSR_RESET_REG_FLAG_FORCE_NMI);
		}

err:
		spin_unlock(&trans_pcie->reg_lock);
		return false;
	}

out:
	/*
	 * Fool sparse by pretending we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}
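/*
 * A minimal usage sketch (via the iwl_trans_grab_nic_access() /
 * iwl_trans_release_nic_access() wrappers from iwl-trans.h): a caller
 * that needs a burst of register reads holds MAC_ACCESS_REQ once for
 * the whole burst instead of waking the NIC per access:
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		for (i = 0; i < n; i++)
 *			vals[i] = iwl_read32(trans, base + 4 * i);
 *		iwl_trans_release_nic_access(trans);
 *	}
 */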
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
	bool ret;

	local_bh_disable();
	ret = __iwl_trans_pcie_grab_nic_access(trans);
	if (ret) {
		/* keep BHs disabled until iwl_trans_pcie_release_nic_access */
		return ret;
	}
	local_bh_enable();
	return false;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by pretending we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
out:
	spin_unlock_bh(&trans_pcie->reg_lock);
}

static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	int offs = 0;
	u32 *vals = buf;

	while (offs < dwords) {
		/* limit the time we spin here under lock to 1/2s */
		unsigned long end = jiffies + HZ / 2;
		bool resched = false;

		if (iwl_trans_grab_nic_access(trans)) {
			iwl_write32(trans, HBUS_TARG_MEM_RADDR,
				    addr + 4 * offs);

			while (offs < dwords) {
				vals[offs] = iwl_read32(trans,
							HBUS_TARG_MEM_RDAT);
				offs++;

				if (time_after(jiffies, end)) {
					resched = true;
					break;
				}
			}
			iwl_trans_release_nic_access(trans);

			if (resched)
				cond_resched();
		} else {
			return -EBUSY;
		}
	}

	return 0;
}
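/*
 * Sketch of the common single-dword case (assuming the
 * iwl_trans_read_mem32() helper in iwl-trans.h, which wraps the
 * read_mem op above with dwords == 1):
 *
 *	u32 val = iwl_trans_read_mem32(trans, addr);
 *
 * HBUS_TARG_MEM_RADDR auto-increments on every HBUS_TARG_MEM_RDAT
 * read, which is why the inner loop above only touches the data
 * register.
 */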
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
					u32 *val)
{
	return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
				     ofs, val);
}

static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (i == trans->txqs.cmd.q_id)
			continue;

		spin_lock_bh(&txq->lock);

		if (!block && !(WARN_ON_ONCE(!txq->block))) {
			txq->block--;
			if (!txq->block) {
				iwl_write32(trans, HBUS_TARG_WRPTR,
					    txq->write_ptr | (i << 8));
			}
		} else if (block) {
			txq->block++;
		}

		spin_unlock_bh(&txq->lock);
	}
}

#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
				       struct iwl_trans_rxq_dma_data *data)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
		return -EINVAL;

	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
	data->fr_bd_wid = 0;

	return 0;
}
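/*
 * The flush wait below polls in 1-2 ms steps (usleep_range(1000, 2000))
 * against the IWL_FLUSH_WAIT_MS budget, so a stuck queue is reported
 * after roughly 1000-2000 iterations, i.e. about two seconds.
 */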
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
	struct iwl_txq *txq;
	unsigned long now = jiffies;
	bool overflow_tx;
	u8 wr_ptr;

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!test_bit(txq_idx, trans->txqs.queue_used))
		return -EINVAL;

	IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
	txq = trans->txqs.txq[txq_idx];

	spin_lock_bh(&txq->lock);
	overflow_tx = txq->overflow_tx ||
		      !skb_queue_empty(&txq->overflow_q);
	spin_unlock_bh(&txq->lock);

	wr_ptr = READ_ONCE(txq->write_ptr);

	while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
		overflow_tx) &&
	       !time_after(jiffies,
			   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
		u8 write_ptr = READ_ONCE(txq->write_ptr);

		/*
		 * If the write pointer moved during the wait, warn only
		 * if the TX came from the op mode. If the TX came from
		 * the trans layer (overflow TX), don't warn.
		 */
		if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
			      "WR pointer moved while flushing %d -> %d\n",
			      wr_ptr, write_ptr))
			return -ETIMEDOUT;
		wr_ptr = write_ptr;

		usleep_range(1000, 2000);

		spin_lock_bh(&txq->lock);
		overflow_tx = txq->overflow_tx ||
			      !skb_queue_empty(&txq->overflow_q);
		spin_unlock_bh(&txq->lock);
	}

	if (txq->read_ptr != txq->write_ptr) {
		IWL_ERR(trans,
			"fail to flush all tx fifo queues Q %d\n", txq_idx);
		iwl_txq_log_scd_error(trans, txq);
		return -ETIMEDOUT;
	}

	IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);

	return 0;
}

static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
	int cnt;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0;
	     cnt < trans->trans_cfg->base_params->num_of_queues;
	     cnt++) {

		if (cnt == trans->txqs.cmd.q_id)
			continue;
		if (!test_bit(cnt, trans->txqs.queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
		if (ret)
			break;
	}

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->reg_lock);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_bh(&trans_pcie->reg_lock);
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
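/*
 * get_csr_string() above relies on the local IWL_CMD() stringification
 * macro; for example, IWL_CMD(CSR_RESET) expands to:
 *
 *	case CSR_RESET: return "CSR_RESET";
 */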
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	debugfs_create_file(#name, mode, parent, trans,			\
			    &iwl_dbgfs_##name##_ops);			\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

struct iwl_dbgfs_tx_queue_priv {
	struct iwl_trans *trans;
};

struct iwl_dbgfs_tx_queue_state {
	loff_t pos;
};

static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state;

	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->pos = *pos;
	return state;
}

static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
					 void *v, loff_t *pos)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state = v;

	*pos = ++state->pos;

	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
		return NULL;

	return state;
}

static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
{
	kfree(v);
}

static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state = v;
	struct iwl_trans *trans = priv->trans;
	struct iwl_txq *txq = trans->txqs.txq[state->pos];

	seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
		   (unsigned int)state->pos,
		   !!test_bit(state->pos, trans->txqs.queue_used),
		   !!test_bit(state->pos, trans->txqs.queue_stopped));
	if (txq)
		seq_printf(seq,
			   "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
			   txq->read_ptr, txq->write_ptr,
			   txq->need_update, txq->frozen,
			   txq->n_window, txq->ampdu);
	else
		seq_puts(seq, "(unallocated)");

	if (state->pos == trans->txqs.cmd.q_id)
		seq_puts(seq, " (HCMD)");
	seq_puts(seq, "\n");

	return 0;
}

static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
	.start = iwl_dbgfs_tx_queue_seq_start,
	.next = iwl_dbgfs_tx_queue_seq_next,
	.stop = iwl_dbgfs_tx_queue_seq_stop,
	.show = iwl_dbgfs_tx_queue_seq_show,
};

static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
{
	struct iwl_dbgfs_tx_queue_priv *priv;

	priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
				  sizeof(*priv));

	if (!priv)
		return -ENOMEM;

	priv->trans = inode->i_private;
	return 0;
}
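/*
 * The tx_queue file above follows the seq_file iterator contract: a
 * read() drives start() -> show() -> next() -> ... -> stop(), with the
 * cursor kept in iwl_dbgfs_tx_queue_state. One queue is printed per
 * show() call, so no single oversized buffer is ever needed.
 */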
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char *buf;
	int pos = 0, i, ret;
	size_t bufsz;

	bufsz = sizeof(char) * 121 * trans->num_rx_queues;

	if (!trans_pcie->rxq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
				 i);
		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
				 rxq->read);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
				 rxq->write);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
				 rxq->write_actual);
		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
				 rxq->need_update);
		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
				 rxq->free_count);
		if (rxq->rb_stts) {
			u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
								     rxq));
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: %u\n",
					 r & 0x0FFF);
		} else {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: Not Allocated\n");
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);

	return ret;
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
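/*
 * Writing hex 0 to the "interrupt" file clears the counters printed
 * above; see the write handler below. For example (the exact debugfs
 * path depends on where the op mode registered the directory):
 *
 *	echo 0 > /sys/kernel/debug/.../iwlwifi/interrupt
 */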
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 reset_flag;
	int ret;

	ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
	if (ret)
		return ret;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char buf[100];
	int pos;

	pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
			trans_pcie->debug_rfkill,
			!(iwl_read32(trans, CSR_GP_CNTRL) &
			  CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool new_value;
	int ret;

	ret = kstrtobool_from_user(user_buf, count, &new_value);
	if (ret)
		return ret;
	if (new_value == trans_pcie->debug_rfkill)
		return count;
	IWL_WARN(trans, "changing debug rfkill %d->%d\n",
		 trans_pcie->debug_rfkill, new_value);
	trans_pcie->debug_rfkill = new_value;
	iwl_pcie_handle_rfkill_irq(trans);

	return count;
}

static int iwl_dbgfs_monitor_data_open(struct inode *inode,
				       struct file *file)
{
	struct iwl_trans *trans = inode->i_private;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans->dbg.dest_tlv ||
	    trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
		IWL_ERR(trans, "Debug destination is not set to DRAM\n");
		return -ENOENT;
	}

	if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
		return -EBUSY;

	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
	return simple_open(inode, file);
}

static int iwl_dbgfs_monitor_data_release(struct inode *inode,
					  struct file *file)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(inode->i_private);

	if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
		trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	return 0;
}
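/*
 * iwl_write_to_user_buf() below rounds the available space down to a
 * whole number of dwords before copying: with count == 10 and nothing
 * copied yet, buf_size_left becomes 8 and at most 8 bytes move to user
 * space; returning true signals that the user buffer is now full.
 */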
static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
				  void *buf, ssize_t *size,
				  ssize_t *bytes_copied)
{
	int buf_size_left = count - *bytes_copied;

	buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
	if (*size > buf_size_left)
		*size = buf_size_left;

	*size -= copy_to_user(user_buf, buf, *size);
	*bytes_copied += *size;

	if (buf_size_left == *size)
		return true;
	return false;
}

static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
					   char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
	struct cont_rec *data = &trans_pcie->fw_mon_data;
	u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
	ssize_t size, bytes_copied = 0;
	bool b_full;

	if (trans->dbg.dest_tlv) {
		write_ptr_addr =
			le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
		wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
	} else {
		write_ptr_addr = MON_BUFF_WRPTR;
		wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
	}

	if (unlikely(!trans->dbg.rec_on))
		return 0;

	mutex_lock(&data->mutex);
	if (data->state ==
	    IWL_FW_MON_DBGFS_STATE_DISABLED) {
		mutex_unlock(&data->mutex);
		return 0;
	}

	/* write_ptr position in bytes rather than DW */
	write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
	wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);

	if (data->prev_wrap_cnt == wrap_cnt) {
		size = write_ptr - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

	} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		   write_ptr < data->prev_wr_ptr) {
		size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

		if (!b_full) {
			size = write_ptr;
			b_full = iwl_write_to_user_buf(user_buf, count,
						       cpu_addr, &size,
						       &bytes_copied);
			data->prev_wr_ptr = size;
			data->prev_wrap_cnt++;
		}
	} else {
		if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		    write_ptr > data->prev_wr_ptr)
			IWL_WARN(trans,
				 "write pointer passed previous write pointer, start copying from the beginning\n");
		else if (!unlikely(data->prev_wrap_cnt == 0 &&
				   data->prev_wr_ptr == 0))
			IWL_WARN(trans,
				 "monitor data is out of sync, start copying from the beginning\n");

		size = write_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       cpu_addr, &size,
					       &bytes_copied);
		data->prev_wr_ptr = size;
		data->prev_wrap_cnt = wrap_cnt;
	}

	mutex_unlock(&data->mutex);

	return bytes_copied;
}
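/*
 * The three branches above cover the possible monitor ring positions:
 * same wrap count (copy only the new bytes), exactly one wrap with the
 * write pointer behind the previous one (copy the tail, then the
 * head), and everything else (the reader lost sync, so copying
 * restarts from the beginning of the buffer).
 */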
static ssize_t iwl_dbgfs_rf_read(struct file *file,
				 char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->rf_name[0])
		return -ENODEV;

	return simple_read_from_buffer(user_buf, count, ppos,
				       trans_pcie->rf_name,
				       strlen(trans_pcie->rf_name));
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
DEBUGFS_READ_FILE_OPS(rf);

static const struct file_operations iwl_dbgfs_tx_queue_ops = {
	.owner = THIS_MODULE,
	.open = iwl_dbgfs_tx_queue_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations iwl_dbgfs_monitor_data_ops = {
	.read = iwl_dbgfs_monitor_data_read,
	.open = iwl_dbgfs_monitor_data_open,
	.release = iwl_dbgfs_monitor_data_release,
};

/* Create the debugfs files and directories */
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	struct dentry *dir = trans->dbgfs_dir;

	DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
	DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
	DEBUGFS_ADD_FILE(interrupt, dir, 0600);
	DEBUGFS_ADD_FILE(csr, dir, 0200);
	DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
	DEBUGFS_ADD_FILE(rfkill, dir, 0600);
	DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
	DEBUGFS_ADD_FILE(rf, dir, 0400);
}

static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct cont_rec *data = &trans_pcie->fw_mon_data;

	mutex_lock(&data->mutex);
	data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
	mutex_unlock(&data->mutex);
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
		cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);

	return cmdlen;
}

static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = trans_pcie->rx_buf_bytes;
	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
					max_len, DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock(&rxq->lock);

	return rb_len;
}

#define IWL_CSR_TO_DUMP (0x250)

static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}
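/*
 * Each dump helper in this area emits one TLV: it fills (*data)->type
 * and (*data)->len, writes the payload into (*data)->data and advances
 * the cursor with iwl_fw_error_next_data(), so callers can chain the
 * helpers and simply sum the returned lengths.
 */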
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	if (!trans->trans_cfg->gen2)
		for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
		     i += sizeof(u32))
			*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
	else
		for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
		     i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
		     i += sizeof(u32))
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      i));

	iwl_trans_release_nic_access(trans);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}

static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
				 u32 monitor_len)
{
	u32 buf_size_in_dwords = (monitor_len >> 2);
	u32 *buffer = (u32 *)fw_mon_data->data;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans))
		return 0;

	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
	for (i = 0; i < buf_size_in_dwords; i++)
		buffer[i] = iwl_read_umac_prph_no_grab(trans,
						       MON_DMARB_RD_DATA_ADDR);
	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

	iwl_trans_release_nic_access(trans);

	return monitor_len;
}

static void
iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
			     struct iwl_fw_error_dump_fw_mon *fw_mon_data)
{
	u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
		base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
		write_ptr = DBGC_CUR_DBGBUF_STATUS;
		wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
	} else if (trans->dbg.dest_tlv) {
		write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
		wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
		base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
	} else {
		base = MON_BUFF_BASE_ADDR;
		write_ptr = MON_BUFF_WRPTR;
		wrap_cnt = MON_BUFF_CYCLE_CNT;
	}

	write_ptr_val = iwl_read_prph(trans, write_ptr);
	fw_mon_data->fw_mon_cycle_cnt =
		cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
	fw_mon_data->fw_mon_base_ptr =
		cpu_to_le32(iwl_read_prph(trans, base));
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		fw_mon_data->fw_mon_base_high_ptr =
			cpu_to_le32(iwl_read_prph(trans, base_high));
		write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
		/* convert wrtPtr to DWs, to align with all HWs */
		write_ptr_val >>= 2;
	}
	fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
}
static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
			    struct iwl_fw_error_dump_data **data,
			    u32 monitor_len)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	u32 len = 0;

	if (trans->dbg.dest_tlv ||
	    (fw_mon->size &&
	     (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
	      trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)(*data)->data;

		iwl_trans_pcie_dump_pointers(trans, fw_mon_data);

		len += sizeof(**data) + sizeof(*fw_mon_data);
		if (fw_mon->size) {
			memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
			monitor_len = fw_mon->size;
		} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
			u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			if (trans->dbg.dest_tlv->version) {
				base = (iwl_read_prph(trans, base) &
					IWL_LDBG_M2S_BUF_BA_MSK) <<
				       trans->dbg.dest_tlv->base_shift;
				base *= IWL_M2S_UNIT_SIZE;
				base += trans->cfg->smem_offset;
			} else {
				base = iwl_read_prph(trans, base) <<
				       trans->dbg.dest_tlv->base_shift;
			}

			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	return len;
}

static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
	if (trans->dbg.fw_mon.size) {
		*len += sizeof(struct iwl_fw_error_dump_data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			trans->dbg.fw_mon.size;
		return trans->dbg.fw_mon.size;
	} else if (trans->dbg.dest_tlv) {
		u32 base, end, cfg_reg, monitor_len;

		if (trans->dbg.dest_tlv->version == 1) {
			cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
			cfg_reg = iwl_read_prph(trans, cfg_reg);
			base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
				trans->dbg.dest_tlv->base_shift;
			base *= IWL_M2S_UNIT_SIZE;
			base += trans->cfg->smem_offset;

			monitor_len =
				(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
				trans->dbg.dest_tlv->end_shift;
			monitor_len *= IWL_M2S_UNIT_SIZE;
		} else {
			base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
			end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);

			base = iwl_read_prph(trans, base) <<
			       trans->dbg.dest_tlv->base_shift;
			end = iwl_read_prph(trans, end) <<
			      trans->dbg.dest_tlv->end_shift;

			/* Make "end" point to the actual end */
			if (trans->trans_cfg->device_family >=
			    IWL_DEVICE_FAMILY_8000 ||
			    trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
				end += (1 << trans->dbg.dest_tlv->end_shift);
			monitor_len = end - base;
		}
		*len += sizeof(struct iwl_fw_error_dump_data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			monitor_len;
		return monitor_len;
	}
	return 0;
}
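/*
 * Worked example for the non-v1 branch above (shift values are
 * illustrative, not taken from any real TLV): with base_shift == 8, a
 * base register value of 0x1000 yields base = 0x1000 << 8 = 0x100000;
 * end is then rounded up by one unit (1 << end_shift) so that
 * monitor_len = end - base also covers the last block.
 */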
static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
			  u32 dump_mask)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs = 0, monitor_len = 0;
	int i, ptr;
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
			!trans->trans_cfg->mq_rx_supported &&
			dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);

	if (!dump_mask)
		return NULL;

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
		len += sizeof(*data) +
			cmdq->n_window * (sizeof(*txcmd) +
					  TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);

	/* CSR registers */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* FH registers */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
		if (trans->trans_cfg->gen2)
			len += sizeof(*data) +
			       (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
				iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
		else
			len += sizeof(*data) +
			       (FH_MEM_UPPER_BOUND -
				FH_MEM_LOWER_BOUND);
	}

	if (dump_rbs) {
		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
		/* RBs */
		num_rbs =
			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
			& 0x0FFF;
		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	/* Paged memory for gen2 HW */
	if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
		for (i = 0; i < trans->init_dram.paging_cnt; i++)
			len += sizeof(*data) +
			       sizeof(struct iwl_fw_error_dump_paging) +
			       trans->init_dram.paging[i].size;

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	len = 0;
	data = (void *)dump_data->data;

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
		u16 tfd_size = trans->txqs.tfd.size;

		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
		txcmd = (void *)data->data;
		spin_lock_bh(&cmdq->lock);
		ptr = cmdq->write_ptr;
		for (i = 0; i < cmdq->n_window; i++) {
			u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
			u8 tfdidx;
			u32 caplen, cmdlen;

			if (trans->trans_cfg->use_tfh)
				tfdidx = idx;
			else
				tfdidx = ptr;

			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
							   (u8 *)cmdq->tfds +
							   tfd_size * tfdidx);
			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

			if (cmdlen) {
				len += sizeof(*txcmd) + caplen;
				txcmd->cmdlen = cpu_to_le32(cmdlen);
				txcmd->caplen = cpu_to_le32(caplen);
				memcpy(txcmd->data, cmdq->entries[idx].cmd,
				       caplen);
				txcmd = (void *)((u8 *)txcmd->data + caplen);
			}

			ptr = iwl_txq_dec_wrap(trans, ptr);
		}
		spin_unlock_bh(&cmdq->lock);

		data->len = cpu_to_le32(len);
		len += sizeof(*data);
		data = iwl_fw_error_next_data(data);
	}

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += iwl_trans_pcie_dump_csr(trans, &data);
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	/* Paged memory for gen2 HW */
	if (trans->trans_cfg->gen2 &&
	    dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
		for (i = 0; i < trans->init_dram.paging_cnt; i++) {
			struct iwl_fw_error_dump_paging *paging;
			u32 page_len = trans->init_dram.paging[i].size;

			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			data->len = cpu_to_le32(sizeof(*paging) + page_len);
			paging = (void *)data->data;
			paging->index = cpu_to_le32(i);
			memcpy(paging->data,
			       trans->init_dram.paging[i].block, page_len);
			data = iwl_fw_error_next_data(data);

			len += sizeof(*data) + sizeof(*paging) + page_len;
		}
	}
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;

	return dump_data;
}

static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
{
	if (enable)
		iwl_enable_interrupts(trans);
	else
		iwl_disable_interrupts(trans);
}

static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
	u32 inta_addr, sw_err_bit;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
		sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
	} else {
		inta_addr = CSR_INT;
		sw_err_bit = CSR_INT_BIT_SW_ERR;
	}

	iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}

#define IWL_TRANS_COMMON_OPS						\
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,			\
	.write8 = iwl_trans_pcie_write8,				\
	.write32 = iwl_trans_pcie_write32,				\
	.read32 = iwl_trans_pcie_read32,				\
	.read_prph = iwl_trans_pcie_read_prph,				\
	.write_prph = iwl_trans_pcie_write_prph,			\
	.read_mem = iwl_trans_pcie_read_mem,				\
	.write_mem = iwl_trans_pcie_write_mem,				\
	.read_config32 = iwl_trans_pcie_read_config32,			\
	.configure = iwl_trans_pcie_configure,				\
	.set_pmi = iwl_trans_pcie_set_pmi,				\
	.sw_reset = iwl_trans_pcie_sw_reset,				\
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,		\
	.release_nic_access = iwl_trans_pcie_release_nic_access,	\
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,			\
	.dump_data = iwl_trans_pcie_dump_data,				\
	.d3_suspend = iwl_trans_pcie_d3_suspend,			\
	.d3_resume = iwl_trans_pcie_d3_resume,				\
	.interrupts = iwl_trans_pci_interrupts,				\
	.sync_nmi = iwl_trans_pcie_sync_nmi

static const struct iwl_trans_ops trans_ops_pcie = {
	IWL_TRANS_COMMON_OPS,
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.send_cmd = iwl_pcie_enqueue_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_txq_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,

	.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,

	.freeze_txq_timer = iwl_trans_txq_freeze_timer,
	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};

static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
	IWL_TRANS_COMMON_OPS,
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_gen2_fw_alive,
	.start_fw = iwl_trans_pcie_gen2_start_fw,
	.stop_device = iwl_trans_pcie_gen2_stop_device,

	.send_cmd = iwl_pcie_gen2_enqueue_hcmd,

	.tx = iwl_txq_gen2_tx,
	.reclaim = iwl_txq_reclaim,

	.set_q_ptrs = iwl_txq_set_q_ptrs,

	.txq_alloc = iwl_txq_dyn_alloc,
	.txq_free = iwl_txq_dyn_free,
	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
	.set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
	.set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};
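/*
 * The two ops tables above differ only in the firmware-facing
 * callbacks: iwl_trans_pcie_alloc() below picks trans_ops_pcie_gen2
 * when cfg_trans->gen2 is set and trans_ops_pcie otherwise, while
 * IWL_TRANS_COMMON_OPS supplies the register/memory accessors shared
 * by both generations.
 */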
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	int ret, addr_size;
	const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
	void __iomem * const *table;

	if (!cfg_trans->gen2)
		ops = &trans_ops_pcie;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ERR_PTR(ret);

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
				cfg_trans);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	trans_pcie->opmode_down = true;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->alloc_page_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
	init_waitqueue_head(&trans_pcie->fw_reset_waitq);

	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
						   WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!trans_pcie->rba.alloc_wq) {
		ret = -ENOMEM;
		goto out_free_trans;
	}
	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);

	trans_pcie->debug_rfkill = -1;

	if (!cfg_trans->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	trans_pcie->def_rx_queue = 0;

	pci_set_master(pdev);

	addr_size = trans->txqs.tfd.addr_size;
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_no_pci;
		}
	}

	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
		goto out_no_pci;
	}

	table = pcim_iomap_table(pdev);
	if (!table) {
		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto out_no_pci;
	}

	trans_pcie->hw_base = table[0];
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
		ret = -ENODEV;
		goto out_no_pci;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	if (trans->hw_rev == 0xffffffff) {
		dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
		ret = -EIO;
		goto out_no_pci;
	}

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

	IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);

	iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	init_waitqueue_head(&trans_pcie->sx_waitq);

	if (trans_pcie->msix_enabled) {
		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
		if (ret)
			goto out_no_pci;
	} else {
		ret = iwl_pcie_alloc_ict(trans);
		if (ret)
			goto out_no_pci;

		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
						iwl_pcie_isr,
						iwl_pcie_irq_handler,
						IRQF_SHARED, DRV_NAME, trans);
		if (ret) {
			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
			goto out_free_ict;
		}
	}

#ifdef CONFIG_IWLWIFI_DEBUGFS
	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif

	iwl_dbg_tlv_init(trans);

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_no_pci:
	destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_trans:
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}
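/*
 * Allocation sketch (a hedged example modelled on the PCI probe path
 * in pcie/drv.c, where these entry points are used; "cfg" stands in
 * for whatever configuration the probe matched):
 *
 *	trans = iwl_trans_pcie_alloc(pdev, ent, &cfg->trans);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	...
 *	iwl_trans_pcie_free(trans);
 */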