// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
	}
	goto out;

err_read:
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}
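/*
 * Note: within this file, iwl_trans_pcie_dump_regs() is invoked on fatal
 * access failures - e.g. a firmware-chunk DMA completion timeout or a
 * timeout waiting for hardware access in the nic_access path below - and
 * the pcie_dbg_dumped_once flag ensures only the first failure is dumped.
 */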
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(5000, 6000);
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power, u8 min_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size)
		return;

	for (power = max_power; power >= min_power; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans->dbg.fw_mon.size)
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}
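/*
 * Worked example of the sizing above (illustrative only): the TLV encodes
 * the buffer size as a power of two in 2 KiB units, hence "max_power += 11".
 * A TLV value of 10 requests BIT(10 + 11) = BIT(21) = 2 MiB, and the default
 * of 26 gives BIT(26) = 64 MiB. On allocation failure the block is halved
 * down to the BIT(11) = 2 KiB minimum; the error message reports sizes in
 * KiB, which is why it shifts by 10 (BIT(power - 10) KiB == BIT(power) bytes).
 */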
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}
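/*
 * The two helpers above access the shared (SHR) register block indirectly.
 * Judging from the code alone, the control word carries the 16-bit SHR
 * address in its low bits and an opcode in the top nibble (2 for read,
 * 3 for write), with the data itself moved through
 * HEEP_CTRL_WRD_PCIEX_DATA_REG.
 */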
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI workaround) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - it is needed for 7260 / 3160
		 * only, so we check host_interrupt_operation_mode even
		 * though this is not otherwise related to it.
		 *
		 * Enable the oscillator to count wake up time for L1 exit.
		 * This consumes slightly more power (100uA) - but allows us
		 * to be sure that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks. This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}
/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	iwl_trans_pcie_sw_reset(trans);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	iwl_trans_pcie_sw_reset(trans);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 on success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, return 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}
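/*
 * Rough worst-case timing of the loop above (illustrative): the inner loop
 * polls until the shared t counter reaches 150 ms (in nominal 200 us steps,
 * though each sleep may actually last up to 1 ms), and the outer loop
 * retries up to 10 times with a 25 ms pause between tries, so a card that
 * never becomes ready keeps the caller waiting on the order of a few hundred
 * milliseconds. Note that t is not reset between outer iterations, so the
 * 150 ms polling budget is shared across all 10 retries.
 */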
/*
 * ucode
 */
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans, &flags);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
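/*
 * Example of the extended-address handling above (illustrative): a section
 * with offset 0x48000 falls inside the 0x40000-0x57FFF window, so every
 * chunk DMA'd into it is bracketed by setting and clearing
 * LMPM_CHICK_EXTENDED_ADDR_SPACE; a section at 0x20000 is copied without
 * touching LMPM_CHICK at all.
 */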
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1
		 * from CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separates CPU2
		 * non-paged from CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}
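/*
 * Illustrative trace of the sec_num bookkeeping above: sec_num starts at
 * 0b1 and after each section becomes (sec_num << 1) | 1, so once k sections
 * have been loaded the bits OR'd into FH_UCODE_LOAD_STATUS form the mask
 * (BIT(k) - 1) << shift_param - e.g. 0x0007 after three CPU1 sections, or
 * 0x0007 << 16 for CPU2. The final 0xFFFF / 0xFFFFFFFF writes then tell the
 * ucode that CPU1 resp. both CPUs are fully loaded.
 */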
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1
		 * from CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separates CPU2
		 * non-paged from CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non-secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set a default value. On resume, reading back the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);

	return hw_rfkill;
}

struct iwl_causes_list {
	u32 cause_num;
	u32 mask_reg;
	u8 addr;
};

static struct iwl_causes_list causes_list[] = {
	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
	{MSIX_HW_INT_CAUSES_REG_WAKEUP,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
	{MSIX_HW_INT_CAUSES_REG_RESET_DONE,	CSR_MSIX_HW_INT_MASK_AD, 0x12},
	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
	{MSIX_HW_INT_CAUSES_REG_SW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x29},
	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
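/*
 * In the table above, cause_num is the bit to unmask in mask_reg and addr
 * is the byte offset of the cause's entry in the IVAR table (written
 * through CSR_MSIX_IVAR() below). The list covers only non-RX causes; RX
 * queues get their IVAR entries in iwl_pcie_map_rx_causes().
 */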
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	int i, arr_size = ARRAY_SIZE(causes_list);
	struct iwl_causes_list *causes = causes_list;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      causes[i].cause_num);
	}
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}
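/*
 * Example of the IVAR encoding described above (illustrative): with
 * def_irq = 0 and a shared vector, each non-RX cause byte is written as
 * 0x0 | MSIX_NON_AUTO_CLEAR_CAUSE - the bound vector in the low nibble
 * and the no-auto-clear flag in the high one - meaning the cause must be
 * ACKed explicitly rather than being cleared by the hardware, since the
 * shared handler has to inspect all of its possible sources.
 */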
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending the HW RF-kill interrupt
	 * continuously unless it is ACKed, even when the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupts besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->trans_cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans);
		else
			_iwl_trans_pcie_stop_device(trans);
	}
}

void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}
static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
				     bool reset)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!reset)
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_SUSPEND);

		ret = wait_event_timeout(trans_pcie->sx_waitq,
					 trans_pcie->sx_complete, 2 * HZ);
		/*
		 * Invalidate it toward resume.
		 */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout entering D3\n");
			return -ETIMEDOUT;
		}
	}
	iwl_pcie_d3_complete_suspend(trans, test, reset);

	return 0;
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		goto out;
	}

	iwl_set_bit(trans, CSR_GP_CNTRL,
		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	/*
	 * Reconfigure IVAR table in case of MSIX or reset ict table in
	 * MSI mode since HW reset erased it.
	 * Also enables interrupts - none will happen as
	 * the device doesn't know we're waking it up, only when
	 * the opmode actually tells it after this call.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

out:
	if (*status == IWL_D3_STATUS_ALIVE &&
	    trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		trans_pcie->sx_complete = false;
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_RESUME);

		ret = wait_event_timeout(trans_pcie->sx_waitq,
					 trans_pcie->sx_complete, 2 * HZ);
		/*
		 * Invalidate it toward next suspend.
		 */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D3\n");
			return -ETIMEDOUT;
		}
	}
	return 0;
}
static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!cfg_trans->mq_rx_supported)
		goto enable_msi;

	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= max_irqs - 2) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}
	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}
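/*
 * Worked example of the fallback ladder above (illustrative): on a 4-CPU
 * system, max_irqs = min(4 + 2, max_rx_queues) = 6 (assuming max_rx_queues
 * is at least that). If the OS grants all 6 vectors, the last one becomes
 * def_irq and 5 RX queues get private vectors. If it grants 5
 * (max_irqs - 1), vector 0 is shared between the non-RX causes and the FBQ;
 * if it grants 4 or fewer, vector 0 additionally serves the first RSS queue.
 */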
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q ; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				i);
	}
}

static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}
static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
	u32 hpm, wprot;

	switch (trans->trans_cfg->device_family) {
	case IWL_DEVICE_FAMILY_9000:
		wprot = PREG_PRPH_WPROT_9000;
		break;
	case IWL_DEVICE_FAMILY_22000:
		wprot = PREG_PRPH_WPROT_22000;
		break;
	default:
		return 0;
	}

	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
	if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);

		if (wprot_val & PREG_WFPM_ACCESS) {
			IWL_ERR(trans,
				"Error, can not clear persistence bit\n");
			return -EPERM;
		}
		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
					    hpm & ~PERSISTENCE_BIT);
	}

	return 0;
}

static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret < 0)
		return ret;

	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	udelay(20);
	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_PG_EN |
			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
	udelay(20);
	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);

	iwl_trans_pcie_sw_reset(trans);

	return 0;
}

static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	err = iwl_trans_pcie_clear_persistence_bit(trans);
	if (err)
		return err;

	iwl_trans_pcie_sw_reset(trans);

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
	    trans->trans_cfg->integrated) {
		err = iwl_pcie_gen2_force_power_gating(trans);
		if (err)
			return err;
	}

	err = iwl_pcie_apm_init(trans);
	if (err)
		return err;

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	trans_pcie->opmode_down = false;

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_pcie_check_hw_rf_kill(trans);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	iwl_disable_interrupts(trans);

	iwl_pcie_apm_stop(trans, true);

	iwl_disable_interrupts(trans);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	iwl_pcie_synchronize_irqs(trans);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return 0x00FFFFFF;
	else
		return 0x000FFFFF;
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & mask) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & mask) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
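/*
 * Periphery (PRPH) registers are reached indirectly through the HBUS
 * target-access window above: the address is latched via
 * HBUS_TARG_PRPH_RADDR/WADDR and the data moved via the RDAT/WDAT
 * registers. The mask helper reflects the larger PRPH address space on
 * AX210+ devices (24 address bits vs. 20 on older families).
 */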
1894 */ 1895 if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) 1896 init_dummy_netdev(&trans_pcie->napi_dev); 1897 1898 trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; 1899 } 1900 1901 void iwl_trans_pcie_free(struct iwl_trans *trans) 1902 { 1903 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1904 int i; 1905 1906 iwl_pcie_synchronize_irqs(trans); 1907 1908 if (trans->trans_cfg->gen2) 1909 iwl_txq_gen2_tx_free(trans); 1910 else 1911 iwl_pcie_tx_free(trans); 1912 iwl_pcie_rx_free(trans); 1913 1914 if (trans_pcie->rba.alloc_wq) { 1915 destroy_workqueue(trans_pcie->rba.alloc_wq); 1916 trans_pcie->rba.alloc_wq = NULL; 1917 } 1918 1919 if (trans_pcie->msix_enabled) { 1920 for (i = 0; i < trans_pcie->alloc_vecs; i++) { 1921 irq_set_affinity_hint( 1922 trans_pcie->msix_entries[i].vector, 1923 NULL); 1924 } 1925 1926 trans_pcie->msix_enabled = false; 1927 } else { 1928 iwl_pcie_free_ict(trans); 1929 } 1930 1931 iwl_pcie_free_fw_monitor(trans); 1932 1933 if (trans_pcie->pnvm_dram.size) 1934 dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size, 1935 trans_pcie->pnvm_dram.block, 1936 trans_pcie->pnvm_dram.physical); 1937 1938 mutex_destroy(&trans_pcie->mutex); 1939 iwl_trans_free(trans); 1940 } 1941 1942 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) 1943 { 1944 if (state) 1945 set_bit(STATUS_TPOWER_PMI, &trans->status); 1946 else 1947 clear_bit(STATUS_TPOWER_PMI, &trans->status); 1948 } 1949 1950 struct iwl_trans_pcie_removal { 1951 struct pci_dev *pdev; 1952 struct work_struct work; 1953 }; 1954 1955 static void iwl_trans_pcie_removal_wk(struct work_struct *wk) 1956 { 1957 struct iwl_trans_pcie_removal *removal = 1958 container_of(wk, struct iwl_trans_pcie_removal, work); 1959 struct pci_dev *pdev = removal->pdev; 1960 static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; 1961 1962 dev_err(&pdev->dev, "Device gone - attempting removal\n"); 1963 kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); 1964 pci_lock_rescan_remove(); 1965 pci_dev_put(pdev); 1966 pci_stop_and_remove_bus_device(pdev); 1967 pci_unlock_rescan_remove(); 1968 1969 kfree(removal); 1970 module_put(THIS_MODULE); 1971 } 1972 1973 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, 1974 unsigned long *flags) 1975 { 1976 int ret; 1977 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1978 1979 spin_lock_bh(&trans_pcie->reg_lock); 1980 1981 if (trans_pcie->cmd_hold_nic_awake) 1982 goto out; 1983 1984 /* this bit wakes up the NIC */ 1985 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, 1986 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1987 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) 1988 udelay(2); 1989 1990 /* 1991 * These bits say the device is running, and should keep running for 1992 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), 1993 * but they do not indicate that embedded SRAM is restored yet; 1994 * HW with volatile SRAM must save/restore contents to/from 1995 * host DRAM when sleeping/waking for power-saving. 1996 * Each direction takes approximately 1/4 millisecond; with this 1997 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a 1998 * series of register accesses are expected (e.g. reading Event Log), 1999 * to keep device from sleeping. 2000 * 2001 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that 2002 * SRAM is okay/restored. 
We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP
	 * check is a good idea before accessing the SRAM of HW with
	 * volatile SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);

		WARN_ONCE(1,
			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
			  cntrl);

		iwl_trans_pcie_dump_regs(trans);

		if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
			struct iwl_trans_pcie_removal *removal;

			if (test_bit(STATUS_TRANS_DEAD, &trans->status))
				goto err;

			IWL_ERR(trans, "Device gone - scheduling removal!\n");

			/*
			 * Take a module reference so the removal work
			 * cannot end up running code from a module
			 * that is already being unloaded.
			 */
			if (!try_module_get(THIS_MODULE)) {
				IWL_ERR(trans,
					"Module is being unloaded - abort\n");
				goto err;
			}

			removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
			if (!removal) {
				module_put(THIS_MODULE);
				goto err;
			}
			/*
			 * There's no need to clear this flag, because
			 * the trans will be freed and reallocated.
			 */
			set_bit(STATUS_TRANS_DEAD, &trans->status);

			removal->pdev = to_pci_dev(trans->dev);
			INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
			pci_dev_get(removal->pdev);
			schedule_work(&removal->work);
		} else {
			iwl_write32(trans, CSR_RESET,
				    CSR_RESET_REG_FLAG_FORCE_NMI);
		}

err:
		spin_unlock_bh(&trans_pcie->reg_lock);
		return false;
	}

out:
	/*
	 * Fool sparse by faking that we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking that we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
2100 */ 2101 out: 2102 spin_unlock_bh(&trans_pcie->reg_lock); 2103 } 2104 2105 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, 2106 void *buf, int dwords) 2107 { 2108 unsigned long flags; 2109 int offs = 0; 2110 u32 *vals = buf; 2111 2112 while (offs < dwords) { 2113 /* limit the time we spin here under lock to 1/2s */ 2114 unsigned long end = jiffies + HZ / 2; 2115 bool resched = false; 2116 2117 if (iwl_trans_grab_nic_access(trans, &flags)) { 2118 iwl_write32(trans, HBUS_TARG_MEM_RADDR, 2119 addr + 4 * offs); 2120 2121 while (offs < dwords) { 2122 vals[offs] = iwl_read32(trans, 2123 HBUS_TARG_MEM_RDAT); 2124 offs++; 2125 2126 if (time_after(jiffies, end)) { 2127 resched = true; 2128 break; 2129 } 2130 } 2131 iwl_trans_release_nic_access(trans, &flags); 2132 2133 if (resched) 2134 cond_resched(); 2135 } else { 2136 return -EBUSY; 2137 } 2138 } 2139 2140 return 0; 2141 } 2142 2143 static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, 2144 const void *buf, int dwords) 2145 { 2146 unsigned long flags; 2147 int offs, ret = 0; 2148 const u32 *vals = buf; 2149 2150 if (iwl_trans_grab_nic_access(trans, &flags)) { 2151 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); 2152 for (offs = 0; offs < dwords; offs++) 2153 iwl_write32(trans, HBUS_TARG_MEM_WDAT, 2154 vals ? vals[offs] : 0); 2155 iwl_trans_release_nic_access(trans, &flags); 2156 } else { 2157 ret = -EBUSY; 2158 } 2159 return ret; 2160 } 2161 2162 static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, 2163 u32 *val) 2164 { 2165 return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, 2166 ofs, val); 2167 } 2168 2169 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) 2170 { 2171 int i; 2172 2173 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 2174 struct iwl_txq *txq = trans->txqs.txq[i]; 2175 2176 if (i == trans->txqs.cmd.q_id) 2177 continue; 2178 2179 spin_lock_bh(&txq->lock); 2180 2181 if (!block && !(WARN_ON_ONCE(!txq->block))) { 2182 txq->block--; 2183 if (!txq->block) { 2184 iwl_write32(trans, HBUS_TARG_WRPTR, 2185 txq->write_ptr | (i << 8)); 2186 } 2187 } else if (block) { 2188 txq->block++; 2189 } 2190 2191 spin_unlock_bh(&txq->lock); 2192 } 2193 } 2194 2195 #define IWL_FLUSH_WAIT_MS 2000 2196 2197 static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, 2198 struct iwl_trans_rxq_dma_data *data) 2199 { 2200 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2201 2202 if (queue >= trans->num_rx_queues || !trans_pcie->rxq) 2203 return -EINVAL; 2204 2205 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; 2206 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; 2207 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; 2208 data->fr_bd_wid = 0; 2209 2210 return 0; 2211 } 2212 2213 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) 2214 { 2215 struct iwl_txq *txq; 2216 unsigned long now = jiffies; 2217 bool overflow_tx; 2218 u8 wr_ptr; 2219 2220 /* Make sure the NIC is still alive in the bus */ 2221 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2222 return -ENODEV; 2223 2224 if (!test_bit(txq_idx, trans->txqs.queue_used)) 2225 return -EINVAL; 2226 2227 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); 2228 txq = trans->txqs.txq[txq_idx]; 2229 2230 spin_lock_bh(&txq->lock); 2231 overflow_tx = txq->overflow_tx || 2232 !skb_queue_empty(&txq->overflow_q); 2233 spin_unlock_bh(&txq->lock); 2234 2235 wr_ptr = READ_ONCE(txq->write_ptr); 2236 2237 
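	/*
	 * Poll until the queue drains: keep waiting while the read
	 * pointer still trails the write pointer (or an overflow TX is
	 * pending), sleeping 1-2 ms per iteration, for at most
	 * IWL_FLUSH_WAIT_MS.
	 */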
	while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
		overflow_tx) &&
	       !time_after(jiffies,
			   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
		u8 write_ptr = READ_ONCE(txq->write_ptr);

		/*
		 * If the write pointer moved during the wait, warn only
		 * if the TX came from op mode. In case TX came from
		 * trans layer (overflow TX) don't warn.
		 */
		if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
			      "WR pointer moved while flushing %d -> %d\n",
			      wr_ptr, write_ptr))
			return -ETIMEDOUT;
		wr_ptr = write_ptr;

		usleep_range(1000, 2000);

		spin_lock_bh(&txq->lock);
		overflow_tx = txq->overflow_tx ||
			      !skb_queue_empty(&txq->overflow_q);
		spin_unlock_bh(&txq->lock);
	}

	if (txq->read_ptr != txq->write_ptr) {
		IWL_ERR(trans,
			"failed to flush all Tx FIFO queues, Q %d\n", txq_idx);
		iwl_txq_log_scd_error(trans, txq);
		return -ETIMEDOUT;
	}

	IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);

	return 0;
}

static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
	int cnt;
	int ret = 0;

	/* waiting for all the tx frames to complete might take a while */
	for (cnt = 0;
	     cnt < trans->trans_cfg->base_params->num_of_queues;
	     cnt++) {

		if (cnt == trans->txqs.cmd.q_id)
			continue;
		if (!test_bit(cnt, trans->txqs.queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
		if (ret)
			break;
	}

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->reg_lock);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_bh(&trans_pcie->reg_lock);
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
		debugfs_create_file(#name, mode, parent, trans,		\
				    &iwl_dbgfs_##name##_ops);		\
	} while (0)

/* file operations */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

struct iwl_dbgfs_tx_queue_priv {
	struct iwl_trans *trans;
};

struct iwl_dbgfs_tx_queue_state {
	loff_t pos;
};

static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state;

	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->pos = *pos;
	return state;
}

static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
					 void *v, loff_t *pos)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state = v;

	*pos = ++state->pos;

	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
		return NULL;

	return state;
}

static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
{
	kfree(v);
}

static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state = v;
	struct iwl_trans *trans = priv->trans;
	struct iwl_txq *txq = trans->txqs.txq[state->pos];

	seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
		   (unsigned int)state->pos,
		   !!test_bit(state->pos, trans->txqs.queue_used),
		   !!test_bit(state->pos, trans->txqs.queue_stopped));
	if (txq)
		seq_printf(seq,
			   "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
			   txq->read_ptr, txq->write_ptr,
			   txq->need_update, txq->frozen,
			   txq->n_window, txq->ampdu);
	else
		seq_puts(seq, "(unallocated)");

	if (state->pos == trans->txqs.cmd.q_id)
		seq_puts(seq, " (HCMD)");
	seq_puts(seq, "\n");

	return 0;
}

static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
	.start = iwl_dbgfs_tx_queue_seq_start,
	.next = iwl_dbgfs_tx_queue_seq_next,
	.stop = iwl_dbgfs_tx_queue_seq_stop,
	.show = iwl_dbgfs_tx_queue_seq_show,
};
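/*
 * The iterator above follows the usual seq_file contract: ->start
 * allocates a cursor for *pos (freed again in ->stop, even on error),
 * ->next advances it, and ->show emits one line per hardware queue.
 * The open handler below binds the iterator to the debugfs file via
 * __seq_open_private(), which also allocates the per-file
 * iwl_dbgfs_tx_queue_priv.
 */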
static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp) 2489 { 2490 struct iwl_dbgfs_tx_queue_priv *priv; 2491 2492 priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops, 2493 sizeof(*priv)); 2494 2495 if (!priv) 2496 return -ENOMEM; 2497 2498 priv->trans = inode->i_private; 2499 return 0; 2500 } 2501 2502 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, 2503 char __user *user_buf, 2504 size_t count, loff_t *ppos) 2505 { 2506 struct iwl_trans *trans = file->private_data; 2507 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2508 char *buf; 2509 int pos = 0, i, ret; 2510 size_t bufsz; 2511 2512 bufsz = sizeof(char) * 121 * trans->num_rx_queues; 2513 2514 if (!trans_pcie->rxq) 2515 return -EAGAIN; 2516 2517 buf = kzalloc(bufsz, GFP_KERNEL); 2518 if (!buf) 2519 return -ENOMEM; 2520 2521 for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { 2522 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 2523 2524 pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", 2525 i); 2526 pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", 2527 rxq->read); 2528 pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", 2529 rxq->write); 2530 pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", 2531 rxq->write_actual); 2532 pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", 2533 rxq->need_update); 2534 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", 2535 rxq->free_count); 2536 if (rxq->rb_stts) { 2537 u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans, 2538 rxq)); 2539 pos += scnprintf(buf + pos, bufsz - pos, 2540 "\tclosed_rb_num: %u\n", 2541 r & 0x0FFF); 2542 } else { 2543 pos += scnprintf(buf + pos, bufsz - pos, 2544 "\tclosed_rb_num: Not Allocated\n"); 2545 } 2546 } 2547 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2548 kfree(buf); 2549 2550 return ret; 2551 } 2552 2553 static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 2554 char __user *user_buf, 2555 size_t count, loff_t *ppos) 2556 { 2557 struct iwl_trans *trans = file->private_data; 2558 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2559 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2560 2561 int pos = 0; 2562 char *buf; 2563 int bufsz = 24 * 64; /* 24 items * 64 char per item */ 2564 ssize_t ret; 2565 2566 buf = kzalloc(bufsz, GFP_KERNEL); 2567 if (!buf) 2568 return -ENOMEM; 2569 2570 pos += scnprintf(buf + pos, bufsz - pos, 2571 "Interrupt Statistics Report:\n"); 2572 2573 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", 2574 isr_stats->hw); 2575 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", 2576 isr_stats->sw); 2577 if (isr_stats->sw || isr_stats->hw) { 2578 pos += scnprintf(buf + pos, bufsz - pos, 2579 "\tLast Restarting Code: 0x%X\n", 2580 isr_stats->err_code); 2581 } 2582 #ifdef CONFIG_IWLWIFI_DEBUG 2583 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", 2584 isr_stats->sch); 2585 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", 2586 isr_stats->alive); 2587 #endif 2588 pos += scnprintf(buf + pos, bufsz - pos, 2589 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); 2590 2591 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", 2592 isr_stats->ctkill); 2593 2594 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", 2595 isr_stats->wakeup); 2596 2597 pos += scnprintf(buf + pos, bufsz - pos, 2598 "Rx command responses:\t\t %u\n", isr_stats->rx); 2599 2600 pos += scnprintf(buf + 
pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", 2601 isr_stats->tx); 2602 2603 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", 2604 isr_stats->unhandled); 2605 2606 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2607 kfree(buf); 2608 return ret; 2609 } 2610 2611 static ssize_t iwl_dbgfs_interrupt_write(struct file *file, 2612 const char __user *user_buf, 2613 size_t count, loff_t *ppos) 2614 { 2615 struct iwl_trans *trans = file->private_data; 2616 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2617 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2618 u32 reset_flag; 2619 int ret; 2620 2621 ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag); 2622 if (ret) 2623 return ret; 2624 if (reset_flag == 0) 2625 memset(isr_stats, 0, sizeof(*isr_stats)); 2626 2627 return count; 2628 } 2629 2630 static ssize_t iwl_dbgfs_csr_write(struct file *file, 2631 const char __user *user_buf, 2632 size_t count, loff_t *ppos) 2633 { 2634 struct iwl_trans *trans = file->private_data; 2635 2636 iwl_pcie_dump_csr(trans); 2637 2638 return count; 2639 } 2640 2641 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 2642 char __user *user_buf, 2643 size_t count, loff_t *ppos) 2644 { 2645 struct iwl_trans *trans = file->private_data; 2646 char *buf = NULL; 2647 ssize_t ret; 2648 2649 ret = iwl_dump_fh(trans, &buf); 2650 if (ret < 0) 2651 return ret; 2652 if (!buf) 2653 return -EINVAL; 2654 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 2655 kfree(buf); 2656 return ret; 2657 } 2658 2659 static ssize_t iwl_dbgfs_rfkill_read(struct file *file, 2660 char __user *user_buf, 2661 size_t count, loff_t *ppos) 2662 { 2663 struct iwl_trans *trans = file->private_data; 2664 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2665 char buf[100]; 2666 int pos; 2667 2668 pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n", 2669 trans_pcie->debug_rfkill, 2670 !(iwl_read32(trans, CSR_GP_CNTRL) & 2671 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)); 2672 2673 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2674 } 2675 2676 static ssize_t iwl_dbgfs_rfkill_write(struct file *file, 2677 const char __user *user_buf, 2678 size_t count, loff_t *ppos) 2679 { 2680 struct iwl_trans *trans = file->private_data; 2681 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2682 bool new_value; 2683 int ret; 2684 2685 ret = kstrtobool_from_user(user_buf, count, &new_value); 2686 if (ret) 2687 return ret; 2688 if (new_value == trans_pcie->debug_rfkill) 2689 return count; 2690 IWL_WARN(trans, "changing debug rfkill %d->%d\n", 2691 trans_pcie->debug_rfkill, new_value); 2692 trans_pcie->debug_rfkill = new_value; 2693 iwl_pcie_handle_rfkill_irq(trans); 2694 2695 return count; 2696 } 2697 2698 static int iwl_dbgfs_monitor_data_open(struct inode *inode, 2699 struct file *file) 2700 { 2701 struct iwl_trans *trans = inode->i_private; 2702 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2703 2704 if (!trans->dbg.dest_tlv || 2705 trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) { 2706 IWL_ERR(trans, "Debug destination is not set to DRAM\n"); 2707 return -ENOENT; 2708 } 2709 2710 if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED) 2711 return -EBUSY; 2712 2713 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN; 2714 return simple_open(inode, file); 2715 } 2716 2717 static int iwl_dbgfs_monitor_data_release(struct inode *inode, 2718 struct file *file) 2719 { 2720 struct 
iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(inode->i_private);

	if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
		trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	return 0;
}

/*
 * Copy up to *size bytes from buf to the user buffer, rounding the
 * available room down to a whole number of DWs.  On return, *size is
 * the number of bytes actually copied and *bytes_copied is updated
 * accordingly; returns true when the user buffer is full.
 */
static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
				  void *buf, ssize_t *size,
				  ssize_t *bytes_copied)
{
	int buf_size_left = count - *bytes_copied;

	buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
	if (*size > buf_size_left)
		*size = buf_size_left;

	*size -= copy_to_user(user_buf, buf, *size);
	*bytes_copied += *size;

	if (buf_size_left == *size)
		return true;
	return false;
}

static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
					   char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
	struct cont_rec *data = &trans_pcie->fw_mon_data;
	u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
	ssize_t size, bytes_copied = 0;
	bool b_full;

	if (trans->dbg.dest_tlv) {
		write_ptr_addr =
			le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
		wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
	} else {
		write_ptr_addr = MON_BUFF_WRPTR;
		wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
	}

	if (unlikely(!trans->dbg.rec_on))
		return 0;

	mutex_lock(&data->mutex);
	if (data->state == IWL_FW_MON_DBGFS_STATE_DISABLED) {
		mutex_unlock(&data->mutex);
		return 0;
	}

	/* write_ptr position in bytes rather than DWs */
	write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
	wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);

	if (data->prev_wrap_cnt == wrap_cnt) {
		size = write_ptr - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

	} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		   write_ptr < data->prev_wr_ptr) {
		size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

		if (!b_full) {
			size = write_ptr;
			b_full = iwl_write_to_user_buf(user_buf, count,
						       cpu_addr, &size,
						       &bytes_copied);
			data->prev_wr_ptr = size;
			data->prev_wrap_cnt++;
		}
	} else {
		if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		    write_ptr > data->prev_wr_ptr)
			IWL_WARN(trans,
				 "write pointer passed previous write pointer, start copying from the beginning\n");
		else if (!unlikely(data->prev_wrap_cnt == 0 &&
				   data->prev_wr_ptr == 0))
			IWL_WARN(trans,
				 "monitor data is out of sync, start copying from the beginning\n");

		size = write_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       cpu_addr, &size,
					       &bytes_copied);
		data->prev_wr_ptr = size;
		data->prev_wrap_cnt = wrap_cnt;
	}

	mutex_unlock(&data->mutex);

	return bytes_copied;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill); 2834 static const struct file_operations iwl_dbgfs_tx_queue_ops = { 2835 .owner = THIS_MODULE, 2836 .open = iwl_dbgfs_tx_queue_open, 2837 .read = seq_read, 2838 .llseek = seq_lseek, 2839 .release = seq_release_private, 2840 }; 2841 2842 static const struct file_operations iwl_dbgfs_monitor_data_ops = { 2843 .read = iwl_dbgfs_monitor_data_read, 2844 .open = iwl_dbgfs_monitor_data_open, 2845 .release = iwl_dbgfs_monitor_data_release, 2846 }; 2847 2848 /* Create the debugfs files and directories */ 2849 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) 2850 { 2851 struct dentry *dir = trans->dbgfs_dir; 2852 2853 DEBUGFS_ADD_FILE(rx_queue, dir, 0400); 2854 DEBUGFS_ADD_FILE(tx_queue, dir, 0400); 2855 DEBUGFS_ADD_FILE(interrupt, dir, 0600); 2856 DEBUGFS_ADD_FILE(csr, dir, 0200); 2857 DEBUGFS_ADD_FILE(fh_reg, dir, 0400); 2858 DEBUGFS_ADD_FILE(rfkill, dir, 0600); 2859 DEBUGFS_ADD_FILE(monitor_data, dir, 0400); 2860 } 2861 2862 static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) 2863 { 2864 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2865 struct cont_rec *data = &trans_pcie->fw_mon_data; 2866 2867 mutex_lock(&data->mutex); 2868 data->state = IWL_FW_MON_DBGFS_STATE_DISABLED; 2869 mutex_unlock(&data->mutex); 2870 } 2871 #endif /*CONFIG_IWLWIFI_DEBUGFS */ 2872 2873 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) 2874 { 2875 u32 cmdlen = 0; 2876 int i; 2877 2878 for (i = 0; i < trans->txqs.tfd.max_tbs; i++) 2879 cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i); 2880 2881 return cmdlen; 2882 } 2883 2884 static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, 2885 struct iwl_fw_error_dump_data **data, 2886 int allocated_rb_nums) 2887 { 2888 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2889 int max_len = trans_pcie->rx_buf_bytes; 2890 /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 2891 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 2892 u32 i, r, j, rb_len = 0; 2893 2894 spin_lock(&rxq->lock); 2895 2896 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; 2897 2898 for (i = rxq->read, j = 0; 2899 i != r && j < allocated_rb_nums; 2900 i = (i + 1) & RX_QUEUE_MASK, j++) { 2901 struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; 2902 struct iwl_fw_error_dump_rb *rb; 2903 2904 dma_unmap_page(trans->dev, rxb->page_dma, max_len, 2905 DMA_FROM_DEVICE); 2906 2907 rb_len += sizeof(**data) + sizeof(*rb) + max_len; 2908 2909 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); 2910 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); 2911 rb = (void *)(*data)->data; 2912 rb->index = cpu_to_le32(i); 2913 memcpy(rb->data, page_address(rxb->page), max_len); 2914 /* remap the page for the free benefit */ 2915 rxb->page_dma = dma_map_page(trans->dev, rxb->page, 2916 rxb->offset, max_len, 2917 DMA_FROM_DEVICE); 2918 2919 *data = iwl_fw_error_next_data(*data); 2920 } 2921 2922 spin_unlock(&rxq->lock); 2923 2924 return rb_len; 2925 } 2926 #define IWL_CSR_TO_DUMP (0x250) 2927 2928 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, 2929 struct iwl_fw_error_dump_data **data) 2930 { 2931 u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; 2932 __le32 *val; 2933 int i; 2934 2935 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); 2936 (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); 2937 val = (void *)(*data)->data; 2938 2939 for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) 2940 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 2941 2942 *data = iwl_fw_error_next_data(*data); 
2943 2944 return csr_len; 2945 } 2946 2947 static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, 2948 struct iwl_fw_error_dump_data **data) 2949 { 2950 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; 2951 unsigned long flags; 2952 __le32 *val; 2953 int i; 2954 2955 if (!iwl_trans_grab_nic_access(trans, &flags)) 2956 return 0; 2957 2958 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); 2959 (*data)->len = cpu_to_le32(fh_regs_len); 2960 val = (void *)(*data)->data; 2961 2962 if (!trans->trans_cfg->gen2) 2963 for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; 2964 i += sizeof(u32)) 2965 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 2966 else 2967 for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2); 2968 i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2); 2969 i += sizeof(u32)) 2970 *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, 2971 i)); 2972 2973 iwl_trans_release_nic_access(trans, &flags); 2974 2975 *data = iwl_fw_error_next_data(*data); 2976 2977 return sizeof(**data) + fh_regs_len; 2978 } 2979 2980 static u32 2981 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, 2982 struct iwl_fw_error_dump_fw_mon *fw_mon_data, 2983 u32 monitor_len) 2984 { 2985 u32 buf_size_in_dwords = (monitor_len >> 2); 2986 u32 *buffer = (u32 *)fw_mon_data->data; 2987 unsigned long flags; 2988 u32 i; 2989 2990 if (!iwl_trans_grab_nic_access(trans, &flags)) 2991 return 0; 2992 2993 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); 2994 for (i = 0; i < buf_size_in_dwords; i++) 2995 buffer[i] = iwl_read_umac_prph_no_grab(trans, 2996 MON_DMARB_RD_DATA_ADDR); 2997 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); 2998 2999 iwl_trans_release_nic_access(trans, &flags); 3000 3001 return monitor_len; 3002 } 3003 3004 static void 3005 iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, 3006 struct iwl_fw_error_dump_fw_mon *fw_mon_data) 3007 { 3008 u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; 3009 3010 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 3011 base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; 3012 base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; 3013 write_ptr = DBGC_CUR_DBGBUF_STATUS; 3014 wrap_cnt = DBGC_DBGBUF_WRAP_AROUND; 3015 } else if (trans->dbg.dest_tlv) { 3016 write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); 3017 wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); 3018 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3019 } else { 3020 base = MON_BUFF_BASE_ADDR; 3021 write_ptr = MON_BUFF_WRPTR; 3022 wrap_cnt = MON_BUFF_CYCLE_CNT; 3023 } 3024 3025 write_ptr_val = iwl_read_prph(trans, write_ptr); 3026 fw_mon_data->fw_mon_cycle_cnt = 3027 cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); 3028 fw_mon_data->fw_mon_base_ptr = 3029 cpu_to_le32(iwl_read_prph(trans, base)); 3030 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 3031 fw_mon_data->fw_mon_base_high_ptr = 3032 cpu_to_le32(iwl_read_prph(trans, base_high)); 3033 write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; 3034 /* convert wrtPtr to DWs, to align with all HWs */ 3035 write_ptr_val >>= 2; 3036 } 3037 fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); 3038 } 3039 3040 static u32 3041 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, 3042 struct iwl_fw_error_dump_data **data, 3043 u32 monitor_len) 3044 { 3045 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; 3046 u32 len = 0; 3047 3048 if (trans->dbg.dest_tlv || 3049 (fw_mon->size && 3050 (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 || 3051 
trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { 3052 struct iwl_fw_error_dump_fw_mon *fw_mon_data; 3053 3054 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); 3055 fw_mon_data = (void *)(*data)->data; 3056 3057 iwl_trans_pcie_dump_pointers(trans, fw_mon_data); 3058 3059 len += sizeof(**data) + sizeof(*fw_mon_data); 3060 if (fw_mon->size) { 3061 memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size); 3062 monitor_len = fw_mon->size; 3063 } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) { 3064 u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr); 3065 /* 3066 * Update pointers to reflect actual values after 3067 * shifting 3068 */ 3069 if (trans->dbg.dest_tlv->version) { 3070 base = (iwl_read_prph(trans, base) & 3071 IWL_LDBG_M2S_BUF_BA_MSK) << 3072 trans->dbg.dest_tlv->base_shift; 3073 base *= IWL_M2S_UNIT_SIZE; 3074 base += trans->cfg->smem_offset; 3075 } else { 3076 base = iwl_read_prph(trans, base) << 3077 trans->dbg.dest_tlv->base_shift; 3078 } 3079 3080 iwl_trans_read_mem(trans, base, fw_mon_data->data, 3081 monitor_len / sizeof(u32)); 3082 } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) { 3083 monitor_len = 3084 iwl_trans_pci_dump_marbh_monitor(trans, 3085 fw_mon_data, 3086 monitor_len); 3087 } else { 3088 /* Didn't match anything - output no monitor data */ 3089 monitor_len = 0; 3090 } 3091 3092 len += monitor_len; 3093 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); 3094 } 3095 3096 return len; 3097 } 3098 3099 static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) 3100 { 3101 if (trans->dbg.fw_mon.size) { 3102 *len += sizeof(struct iwl_fw_error_dump_data) + 3103 sizeof(struct iwl_fw_error_dump_fw_mon) + 3104 trans->dbg.fw_mon.size; 3105 return trans->dbg.fw_mon.size; 3106 } else if (trans->dbg.dest_tlv) { 3107 u32 base, end, cfg_reg, monitor_len; 3108 3109 if (trans->dbg.dest_tlv->version == 1) { 3110 cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3111 cfg_reg = iwl_read_prph(trans, cfg_reg); 3112 base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << 3113 trans->dbg.dest_tlv->base_shift; 3114 base *= IWL_M2S_UNIT_SIZE; 3115 base += trans->cfg->smem_offset; 3116 3117 monitor_len = 3118 (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> 3119 trans->dbg.dest_tlv->end_shift; 3120 monitor_len *= IWL_M2S_UNIT_SIZE; 3121 } else { 3122 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3123 end = le32_to_cpu(trans->dbg.dest_tlv->end_reg); 3124 3125 base = iwl_read_prph(trans, base) << 3126 trans->dbg.dest_tlv->base_shift; 3127 end = iwl_read_prph(trans, end) << 3128 trans->dbg.dest_tlv->end_shift; 3129 3130 /* Make "end" point to the actual end */ 3131 if (trans->trans_cfg->device_family >= 3132 IWL_DEVICE_FAMILY_8000 || 3133 trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) 3134 end += (1 << trans->dbg.dest_tlv->end_shift); 3135 monitor_len = end - base; 3136 } 3137 *len += sizeof(struct iwl_fw_error_dump_data) + 3138 sizeof(struct iwl_fw_error_dump_fw_mon) + 3139 monitor_len; 3140 return monitor_len; 3141 } 3142 return 0; 3143 } 3144 3145 static struct iwl_trans_dump_data 3146 *iwl_trans_pcie_dump_data(struct iwl_trans *trans, 3147 u32 dump_mask) 3148 { 3149 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3150 struct iwl_fw_error_dump_data *data; 3151 struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id]; 3152 struct iwl_fw_error_dump_txcmd *txcmd; 3153 struct iwl_trans_dump_data *dump_data; 3154 u32 len, num_rbs = 0, monitor_len = 0; 3155 int i, ptr; 3156 bool dump_rbs = 
test_bit(STATUS_FW_ERROR, &trans->status) && 3157 !trans->trans_cfg->mq_rx_supported && 3158 dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); 3159 3160 if (!dump_mask) 3161 return NULL; 3162 3163 /* transport dump header */ 3164 len = sizeof(*dump_data); 3165 3166 /* host commands */ 3167 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) 3168 len += sizeof(*data) + 3169 cmdq->n_window * (sizeof(*txcmd) + 3170 TFD_MAX_PAYLOAD_SIZE); 3171 3172 /* FW monitor */ 3173 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) 3174 monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); 3175 3176 /* CSR registers */ 3177 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) 3178 len += sizeof(*data) + IWL_CSR_TO_DUMP; 3179 3180 /* FH registers */ 3181 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { 3182 if (trans->trans_cfg->gen2) 3183 len += sizeof(*data) + 3184 (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - 3185 iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); 3186 else 3187 len += sizeof(*data) + 3188 (FH_MEM_UPPER_BOUND - 3189 FH_MEM_LOWER_BOUND); 3190 } 3191 3192 if (dump_rbs) { 3193 /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 3194 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 3195 /* RBs */ 3196 num_rbs = 3197 le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) 3198 & 0x0FFF; 3199 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; 3200 len += num_rbs * (sizeof(*data) + 3201 sizeof(struct iwl_fw_error_dump_rb) + 3202 (PAGE_SIZE << trans_pcie->rx_page_order)); 3203 } 3204 3205 /* Paged memory for gen2 HW */ 3206 if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) 3207 for (i = 0; i < trans->init_dram.paging_cnt; i++) 3208 len += sizeof(*data) + 3209 sizeof(struct iwl_fw_error_dump_paging) + 3210 trans->init_dram.paging[i].size; 3211 3212 dump_data = vzalloc(len); 3213 if (!dump_data) 3214 return NULL; 3215 3216 len = 0; 3217 data = (void *)dump_data->data; 3218 3219 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { 3220 u16 tfd_size = trans->txqs.tfd.size; 3221 3222 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); 3223 txcmd = (void *)data->data; 3224 spin_lock_bh(&cmdq->lock); 3225 ptr = cmdq->write_ptr; 3226 for (i = 0; i < cmdq->n_window; i++) { 3227 u8 idx = iwl_txq_get_cmd_index(cmdq, ptr); 3228 u8 tfdidx; 3229 u32 caplen, cmdlen; 3230 3231 if (trans->trans_cfg->use_tfh) 3232 tfdidx = idx; 3233 else 3234 tfdidx = ptr; 3235 3236 cmdlen = iwl_trans_pcie_get_cmdlen(trans, 3237 (u8 *)cmdq->tfds + 3238 tfd_size * tfdidx); 3239 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); 3240 3241 if (cmdlen) { 3242 len += sizeof(*txcmd) + caplen; 3243 txcmd->cmdlen = cpu_to_le32(cmdlen); 3244 txcmd->caplen = cpu_to_le32(caplen); 3245 memcpy(txcmd->data, cmdq->entries[idx].cmd, 3246 caplen); 3247 txcmd = (void *)((u8 *)txcmd->data + caplen); 3248 } 3249 3250 ptr = iwl_txq_dec_wrap(trans, ptr); 3251 } 3252 spin_unlock_bh(&cmdq->lock); 3253 3254 data->len = cpu_to_le32(len); 3255 len += sizeof(*data); 3256 data = iwl_fw_error_next_data(data); 3257 } 3258 3259 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) 3260 len += iwl_trans_pcie_dump_csr(trans, &data); 3261 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) 3262 len += iwl_trans_pcie_fh_regs_dump(trans, &data); 3263 if (dump_rbs) 3264 len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); 3265 3266 /* Paged memory for gen2 HW */ 3267 if (trans->trans_cfg->gen2 && 3268 dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { 3269 for (i = 0; i < trans->init_dram.paging_cnt; i++) { 3270 struct iwl_fw_error_dump_paging *paging; 3271 u32 page_len = 
trans->init_dram.paging[i].size; 3272 3273 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); 3274 data->len = cpu_to_le32(sizeof(*paging) + page_len); 3275 paging = (void *)data->data; 3276 paging->index = cpu_to_le32(i); 3277 memcpy(paging->data, 3278 trans->init_dram.paging[i].block, page_len); 3279 data = iwl_fw_error_next_data(data); 3280 3281 len += sizeof(*data) + sizeof(*paging) + page_len; 3282 } 3283 } 3284 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) 3285 len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 3286 3287 dump_data->len = len; 3288 3289 return dump_data; 3290 } 3291 3292 static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) 3293 { 3294 if (enable) 3295 iwl_enable_interrupts(trans); 3296 else 3297 iwl_disable_interrupts(trans); 3298 } 3299 3300 static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) 3301 { 3302 u32 inta_addr, sw_err_bit; 3303 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3304 3305 if (trans_pcie->msix_enabled) { 3306 inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; 3307 sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; 3308 } else { 3309 inta_addr = CSR_INT; 3310 sw_err_bit = CSR_INT_BIT_SW_ERR; 3311 } 3312 3313 iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit); 3314 } 3315 3316 #define IWL_TRANS_COMMON_OPS \ 3317 .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ 3318 .write8 = iwl_trans_pcie_write8, \ 3319 .write32 = iwl_trans_pcie_write32, \ 3320 .read32 = iwl_trans_pcie_read32, \ 3321 .read_prph = iwl_trans_pcie_read_prph, \ 3322 .write_prph = iwl_trans_pcie_write_prph, \ 3323 .read_mem = iwl_trans_pcie_read_mem, \ 3324 .write_mem = iwl_trans_pcie_write_mem, \ 3325 .read_config32 = iwl_trans_pcie_read_config32, \ 3326 .configure = iwl_trans_pcie_configure, \ 3327 .set_pmi = iwl_trans_pcie_set_pmi, \ 3328 .sw_reset = iwl_trans_pcie_sw_reset, \ 3329 .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ 3330 .release_nic_access = iwl_trans_pcie_release_nic_access, \ 3331 .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ 3332 .dump_data = iwl_trans_pcie_dump_data, \ 3333 .d3_suspend = iwl_trans_pcie_d3_suspend, \ 3334 .d3_resume = iwl_trans_pcie_d3_resume, \ 3335 .interrupts = iwl_trans_pci_interrupts, \ 3336 .sync_nmi = iwl_trans_pcie_sync_nmi \ 3337 3338 static const struct iwl_trans_ops trans_ops_pcie = { 3339 IWL_TRANS_COMMON_OPS, 3340 .start_hw = iwl_trans_pcie_start_hw, 3341 .fw_alive = iwl_trans_pcie_fw_alive, 3342 .start_fw = iwl_trans_pcie_start_fw, 3343 .stop_device = iwl_trans_pcie_stop_device, 3344 3345 .send_cmd = iwl_pcie_enqueue_hcmd, 3346 3347 .tx = iwl_trans_pcie_tx, 3348 .reclaim = iwl_txq_reclaim, 3349 3350 .txq_disable = iwl_trans_pcie_txq_disable, 3351 .txq_enable = iwl_trans_pcie_txq_enable, 3352 3353 .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, 3354 3355 .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, 3356 3357 .freeze_txq_timer = iwl_trans_txq_freeze_timer, 3358 .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, 3359 #ifdef CONFIG_IWLWIFI_DEBUGFS 3360 .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, 3361 #endif 3362 }; 3363 3364 static const struct iwl_trans_ops trans_ops_pcie_gen2 = { 3365 IWL_TRANS_COMMON_OPS, 3366 .start_hw = iwl_trans_pcie_start_hw, 3367 .fw_alive = iwl_trans_pcie_gen2_fw_alive, 3368 .start_fw = iwl_trans_pcie_gen2_start_fw, 3369 .stop_device = iwl_trans_pcie_gen2_stop_device, 3370 3371 .send_cmd = iwl_pcie_gen2_enqueue_hcmd, 3372 3373 .tx = iwl_txq_gen2_tx, 3374 .reclaim = iwl_txq_reclaim, 3375 3376 .set_q_ptrs = 
iwl_txq_set_q_ptrs, 3377 3378 .txq_alloc = iwl_txq_dyn_alloc, 3379 .txq_free = iwl_txq_dyn_free, 3380 .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, 3381 .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, 3382 .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm, 3383 #ifdef CONFIG_IWLWIFI_DEBUGFS 3384 .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, 3385 #endif 3386 }; 3387 3388 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, 3389 const struct pci_device_id *ent, 3390 const struct iwl_cfg_trans_params *cfg_trans) 3391 { 3392 struct iwl_trans_pcie *trans_pcie; 3393 struct iwl_trans *trans; 3394 int ret, addr_size; 3395 const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; 3396 3397 if (!cfg_trans->gen2) 3398 ops = &trans_ops_pcie; 3399 3400 ret = pcim_enable_device(pdev); 3401 if (ret) 3402 return ERR_PTR(ret); 3403 3404 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, 3405 cfg_trans); 3406 if (!trans) 3407 return ERR_PTR(-ENOMEM); 3408 3409 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3410 3411 trans_pcie->trans = trans; 3412 trans_pcie->opmode_down = true; 3413 spin_lock_init(&trans_pcie->irq_lock); 3414 spin_lock_init(&trans_pcie->reg_lock); 3415 spin_lock_init(&trans_pcie->alloc_page_lock); 3416 mutex_init(&trans_pcie->mutex); 3417 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 3418 init_waitqueue_head(&trans_pcie->fw_reset_waitq); 3419 3420 trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", 3421 WQ_HIGHPRI | WQ_UNBOUND, 1); 3422 if (!trans_pcie->rba.alloc_wq) { 3423 ret = -ENOMEM; 3424 goto out_free_trans; 3425 } 3426 INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); 3427 3428 trans_pcie->debug_rfkill = -1; 3429 3430 if (!cfg_trans->base_params->pcie_l1_allowed) { 3431 /* 3432 * W/A - seems to solve weird behavior. We need to remove this 3433 * if we don't want to stay in L1 all the time. This wastes a 3434 * lot of power. 
3435 */ 3436 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 3437 PCIE_LINK_STATE_L1 | 3438 PCIE_LINK_STATE_CLKPM); 3439 } 3440 3441 trans_pcie->def_rx_queue = 0; 3442 3443 pci_set_master(pdev); 3444 3445 addr_size = trans->txqs.tfd.addr_size; 3446 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); 3447 if (!ret) 3448 ret = pci_set_consistent_dma_mask(pdev, 3449 DMA_BIT_MASK(addr_size)); 3450 if (ret) { 3451 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3452 if (!ret) 3453 ret = pci_set_consistent_dma_mask(pdev, 3454 DMA_BIT_MASK(32)); 3455 /* both attempts failed: */ 3456 if (ret) { 3457 dev_err(&pdev->dev, "No suitable DMA available\n"); 3458 goto out_no_pci; 3459 } 3460 } 3461 3462 ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); 3463 if (ret) { 3464 dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); 3465 goto out_no_pci; 3466 } 3467 3468 trans_pcie->hw_base = pcim_iomap_table(pdev)[0]; 3469 if (!trans_pcie->hw_base) { 3470 dev_err(&pdev->dev, "pcim_iomap_table failed\n"); 3471 ret = -ENODEV; 3472 goto out_no_pci; 3473 } 3474 3475 /* We disable the RETRY_TIMEOUT register (0x41) to keep 3476 * PCI Tx retries from interfering with C3 CPU state */ 3477 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 3478 3479 trans_pcie->pci_dev = pdev; 3480 iwl_disable_interrupts(trans); 3481 3482 trans->hw_rev = iwl_read32(trans, CSR_HW_REV); 3483 if (trans->hw_rev == 0xffffffff) { 3484 dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); 3485 ret = -EIO; 3486 goto out_no_pci; 3487 } 3488 3489 /* 3490 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have 3491 * changed, and now the revision step also includes bit 0-1 (no more 3492 * "dash" value). To keep hw_rev backwards compatible - we'll store it 3493 * in the old format. 3494 */ 3495 if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) 3496 trans->hw_rev = (trans->hw_rev & 0xfff0) | 3497 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); 3498 3499 IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); 3500 3501 iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans); 3502 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 3503 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 3504 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); 3505 3506 init_waitqueue_head(&trans_pcie->sx_waitq); 3507 3508 3509 if (trans_pcie->msix_enabled) { 3510 ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); 3511 if (ret) 3512 goto out_no_pci; 3513 } else { 3514 ret = iwl_pcie_alloc_ict(trans); 3515 if (ret) 3516 goto out_no_pci; 3517 3518 ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, 3519 iwl_pcie_isr, 3520 iwl_pcie_irq_handler, 3521 IRQF_SHARED, DRV_NAME, trans); 3522 if (ret) { 3523 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 3524 goto out_free_ict; 3525 } 3526 } 3527 3528 #ifdef CONFIG_IWLWIFI_DEBUGFS 3529 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; 3530 mutex_init(&trans_pcie->fw_mon_data.mutex); 3531 #endif 3532 3533 iwl_dbg_tlv_init(trans); 3534 3535 return trans; 3536 3537 out_free_ict: 3538 iwl_pcie_free_ict(trans); 3539 out_no_pci: 3540 destroy_workqueue(trans_pcie->rba.alloc_wq); 3541 out_free_trans: 3542 iwl_trans_free(trans); 3543 return ERR_PTR(ret); 3544 } 3545
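#if 0
/*
 * Illustrative sketch only, not driver code: roughly how a PCI probe
 * routine would use iwl_trans_pcie_alloc() above.  The way the
 * iwl_cfg_trans_params is recovered from ent->driver_data here is an
 * assumption for the example; the real lookup lives in drv.c.
 */
static int example_iwl_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	/* assumed for this sketch: driver_data points at the config */
	const struct iwl_cfg_trans_params *trans_cfg =
		(const void *)ent->driver_data;
	struct iwl_trans *trans;

	trans = iwl_trans_pcie_alloc(pdev, ent, trans_cfg);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* hand the transport to the higher layers */
	pci_set_drvdata(pdev, trans);
	return 0;
}
#endif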