// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2022 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
	}
	goto out;

err_read:
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}

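/*
 * Note that on BZ and later devices the SW reset request moved from
 * CSR_RESET into CSR_GP_CNTRL, so the helper below keys the register
 * (and the settle delay) off the device family.
 */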
static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
				   bool retake_ownership)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_SW_RESET);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET,
			    CSR_RESET_REG_FLAG_SW_RESET);
		usleep_range(5000, 6000);
	}

	if (retake_ownership)
		return iwl_pcie_prepare_card_hw(trans);

	return 0;
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power, u8 min_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size)
		return;

	for (power = max_power; power >= min_power; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans->dbg.fw_mon.size)
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}

static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

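/*
 * Select the APMG power source: VAUX is only usable when the device can
 * assert PME# from D3cold; otherwise fall back to VMAIN.
 */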
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - this workaround is needed for
		 * 7260 / 3160 only, so we key it off
		 * host_interrupt_operation_mode even though it is not
		 * otherwise related to that flag.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks. This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	ret = iwl_trans_pcie_sw_reset(trans, true);

	if (!ret)
		ret = iwl_finish_nic_init(trans);

	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	ret = iwl_trans_pcie_sw_reset(trans, true);
	if (ret)
		IWL_ERR(trans,
			"iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

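/*
 * As with the SW reset above, the bus-master disable request and its
 * status bit live in CSR_GP_CNTRL on BZ and later, and in CSR_RESET on
 * earlier families; both variants poll for up to 100 usec.
 */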
void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   100);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

		ret = iwl_poll_bit(trans, CSR_RESET,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	}

	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans, false);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

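/*
 * Bring the NIC to a state where firmware can be loaded: APM init under
 * the irq lock, power source selection, op-mode specific NIC config,
 * and then RX/TX queue setup.
 */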
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0) {
		trans->csme_own = false;
		return 0;
	}

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		int t = 0;

		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0) {
				trans->csme_own = false;
				return 0;
			}

			if (iwl_mei_is_connected()) {
				IWL_DEBUG_INFO(trans,
					       "Couldn't prepare the card but SAP is connected\n");
				trans->csme_own = true;
				if (trans->trans_cfg->device_family !=
				    IWL_DEVICE_FAMILY_9000)
					IWL_ERR(trans,
						"SAP not supported for this NIC family\n");

				return -EBUSY;
			}

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

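/*
 * The FH programming above kicks off the DMA; completion is signalled
 * from the interrupt path (the FH_TX interrupt handler is expected to
 * set ucode_write_complete and wake ucode_write_waitq), which the wait
 * below relies on.
 */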
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}

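/*
 * Pre-8000 variant of the above: sections are loaded the same way, but
 * there is no per-section FH_UCODE_LOAD_STATUS handshake with the ucode.
 */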
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

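/*
 * Debug monitor destination for the "ini" (TLV-based) debug config;
 * only the first DRAM fragment of the DBGC1 allocation is programmed
 * here.
 */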
static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);

	return hw_rfkill;
}

struct iwl_causes_list {
	u16 mask_reg;
	u8 bit;
	u8 addr;
};

#define IWL_CAUSE(reg, mask)						\
	{								\
		.mask_reg = reg,					\
		.bit = ilog2(mask),					\
		.addr = ilog2(mask) +					\
			((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 :	\
			 (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 :	\
			 0xffff),	/* causes overflow warning */	\
	}

static const struct iwl_causes_list causes_list_common[] = {
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
};

static const struct iwl_causes_list causes_list_pre_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
};

static const struct iwl_causes_list causes_list_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
};

static void iwl_pcie_map_list(struct iwl_trans *trans,
			      const struct iwl_causes_list *causes,
			      int arr_size, int val)
{
	int i;

	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      BIT(causes[i].bit));
	}
}

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	iwl_pcie_map_list(trans, causes_list_common,
			  ARRAY_SIZE(causes_list_common), val);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_pcie_map_list(trans, causes_list_bz,
				  ARRAY_SIZE(causes_list_bz), val);
	else
		iwl_pcie_map_list(trans, causes_list_pre_bz,
				  ARRAY_SIZE(causes_list_pre_bz), val);
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

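/*
 * Actually stop the device; the caller must hold trans_pcie->mutex.
 * After this, only the RF kill interrupt is left enabled so that the
 * op_mode keeps seeing rfkill state changes.
 */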
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_rx_napi_sync(trans);
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
	else
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_trans_pcie_sw_reset(trans, true);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

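/*
 * Called once the firmware has reported ALIVE: re-arm the ICT interrupt
 * table and start the TX scheduler from scd_addr.
 */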
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	iwl_op_mode_time_point(trans->op_mode,
			       IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
			       NULL);

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->trans_cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans);
		else
			_iwl_trans_pcie_stop_device(trans);
	}
}

void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
					      UREG_DOORBELL_TO_ISR6_RESUME);
	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
			    suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
				      CSR_IPC_SLEEP_CONTROL_RESUME);
	else
		return 0;

	ret = wait_event_timeout(trans_pcie->sx_waitq,
				 trans_pcie->sx_complete, 2 * HZ);

	/* Invalidate it toward next suspend or resume */
	trans_pcie->sx_complete = false;

	if (!ret) {
		IWL_ERR(trans, "Timeout %s D3\n",
			suspend ? "entering" : "exiting");
		return -ETIMEDOUT;
	}

	return 0;
}

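/*
 * D3 entry: unless a full reset is requested, persistence mode keeps
 * the device context across suspend; either way the firmware is told
 * about the transition via the handshake above before powering down.
 */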
"entering" : "exiting"); 1549 return -ETIMEDOUT; 1550 } 1551 1552 return 0; 1553 } 1554 1555 static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, 1556 bool reset) 1557 { 1558 int ret; 1559 1560 if (!reset) 1561 /* Enable persistence mode to avoid reset */ 1562 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 1563 CSR_HW_IF_CONFIG_REG_PERSIST_MODE); 1564 1565 ret = iwl_pcie_d3_handshake(trans, true); 1566 if (ret) 1567 return ret; 1568 1569 iwl_pcie_d3_complete_suspend(trans, test, reset); 1570 1571 return 0; 1572 } 1573 1574 static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, 1575 enum iwl_d3_status *status, 1576 bool test, bool reset) 1577 { 1578 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1579 u32 val; 1580 int ret; 1581 1582 if (test) { 1583 iwl_enable_interrupts(trans); 1584 *status = IWL_D3_STATUS_ALIVE; 1585 ret = 0; 1586 goto out; 1587 } 1588 1589 iwl_set_bit(trans, CSR_GP_CNTRL, 1590 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1591 1592 ret = iwl_finish_nic_init(trans); 1593 if (ret) 1594 return ret; 1595 1596 /* 1597 * Reconfigure IVAR table in case of MSIX or reset ict table in 1598 * MSI mode since HW reset erased it. 1599 * Also enables interrupts - none will happen as 1600 * the device doesn't know we're waking it up, only when 1601 * the opmode actually tells it after this call. 1602 */ 1603 iwl_pcie_conf_msix_hw(trans_pcie); 1604 if (!trans_pcie->msix_enabled) 1605 iwl_pcie_reset_ict(trans); 1606 iwl_enable_interrupts(trans); 1607 1608 iwl_pcie_set_pwr(trans, false); 1609 1610 if (!reset) { 1611 iwl_clear_bit(trans, CSR_GP_CNTRL, 1612 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1613 } else { 1614 iwl_trans_pcie_tx_reset(trans); 1615 1616 ret = iwl_pcie_rx_init(trans); 1617 if (ret) { 1618 IWL_ERR(trans, 1619 "Failed to resume the device (RX reset)\n"); 1620 return ret; 1621 } 1622 } 1623 1624 IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", 1625 iwl_read_umac_prph(trans, WFPM_GP2)); 1626 1627 val = iwl_read32(trans, CSR_RESET); 1628 if (val & CSR_RESET_REG_FLAG_NEVO_RESET) 1629 *status = IWL_D3_STATUS_RESET; 1630 else 1631 *status = IWL_D3_STATUS_ALIVE; 1632 1633 out: 1634 if (*status == IWL_D3_STATUS_ALIVE) 1635 ret = iwl_pcie_d3_handshake(trans, false); 1636 1637 return ret; 1638 } 1639 1640 static void 1641 iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, 1642 struct iwl_trans *trans, 1643 const struct iwl_cfg_trans_params *cfg_trans) 1644 { 1645 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1646 int max_irqs, num_irqs, i, ret; 1647 u16 pci_cmd; 1648 u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES; 1649 1650 if (!cfg_trans->mq_rx_supported) 1651 goto enable_msi; 1652 1653 if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000) 1654 max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES; 1655 1656 max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues); 1657 for (i = 0; i < max_irqs; i++) 1658 trans_pcie->msix_entries[i].entry = i; 1659 1660 num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries, 1661 MSIX_MIN_INTERRUPT_VECTORS, 1662 max_irqs); 1663 if (num_irqs < 0) { 1664 IWL_DEBUG_INFO(trans, 1665 "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n", 1666 num_irqs); 1667 goto enable_msi; 1668 } 1669 trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0; 1670 1671 IWL_DEBUG_INFO(trans, 1672 "MSI-X enabled. 
static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!cfg_trans->mq_rx_supported)
		goto enable_msi;

	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= max_irqs - 2) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
		       trans_pcie->trans->num_rx_queues,
		       trans_pcie->shared_vec_mask);

	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}

static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				trans_pcie->msix_entries[i].vector);
	}
}

static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}

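/*
 * On 9000/22000 family devices a stale persistence bit must be cleared
 * before init; the write is only possible while the PRPH write
 * protection is not asserted.
 */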
static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
	u32 hpm, wprot;

	switch (trans->trans_cfg->device_family) {
	case IWL_DEVICE_FAMILY_9000:
		wprot = PREG_PRPH_WPROT_9000;
		break;
	case IWL_DEVICE_FAMILY_22000:
		wprot = PREG_PRPH_WPROT_22000;
		break;
	default:
		return 0;
	}

	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
	if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);

		if (wprot_val & PREG_WFPM_ACCESS) {
			IWL_ERR(trans,
				"Error, can not clear persistence bit\n");
			return -EPERM;
		}
		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
					    hpm & ~PERSISTENCE_BIT);
	}

	return 0;
}

static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_finish_nic_init(trans);
	if (ret < 0)
		return ret;

	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	udelay(20);
	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_PG_EN |
			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
	udelay(20);
	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);

	return iwl_trans_pcie_sw_reset(trans, true);
}

static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	err = iwl_trans_pcie_clear_persistence_bit(trans);
	if (err)
		return err;

	err = iwl_trans_pcie_sw_reset(trans, true);
	if (err)
		return err;

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
	    trans->trans_cfg->integrated) {
		err = iwl_pcie_gen2_force_power_gating(trans);
		if (err)
			return err;
	}

	err = iwl_pcie_apm_init(trans);
	if (err)
		return err;

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	trans_pcie->opmode_down = false;

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_pcie_check_hw_rf_kill(trans);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	iwl_disable_interrupts(trans);

	iwl_pcie_apm_stop(trans, true);

	iwl_disable_interrupts(trans);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	iwl_pcie_synchronize_irqs(trans);
}

iwl_disable_interrupts(trans); 1895 1896 iwl_pcie_apm_stop(trans, true); 1897 1898 iwl_disable_interrupts(trans); 1899 1900 iwl_pcie_disable_ict(trans); 1901 1902 mutex_unlock(&trans_pcie->mutex); 1903 1904 iwl_pcie_synchronize_irqs(trans); 1905 } 1906 1907 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1908 { 1909 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1910 } 1911 1912 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) 1913 { 1914 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1915 } 1916 1917 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) 1918 { 1919 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1920 } 1921 1922 static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) 1923 { 1924 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 1925 return 0x00FFFFFF; 1926 else 1927 return 0x000FFFFF; 1928 } 1929 1930 static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) 1931 { 1932 u32 mask = iwl_trans_pcie_prph_msk(trans); 1933 1934 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, 1935 ((reg & mask) | (3 << 24))); 1936 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); 1937 } 1938 1939 static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, 1940 u32 val) 1941 { 1942 u32 mask = iwl_trans_pcie_prph_msk(trans); 1943 1944 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, 1945 ((addr & mask) | (3 << 24))); 1946 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); 1947 } 1948 1949 static void iwl_trans_pcie_configure(struct iwl_trans *trans, 1950 const struct iwl_trans_config *trans_cfg) 1951 { 1952 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1953 1954 /* free all first - we might be reconfigured for a different size */ 1955 iwl_pcie_free_rbs_pool(trans); 1956 1957 trans->txqs.cmd.q_id = trans_cfg->cmd_queue; 1958 trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; 1959 trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; 1960 trans->txqs.page_offs = trans_cfg->cb_data_offs; 1961 trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); 1962 trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; 1963 1964 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) 1965 trans_pcie->n_no_reclaim_cmds = 0; 1966 else 1967 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; 1968 if (trans_pcie->n_no_reclaim_cmds) 1969 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, 1970 trans_pcie->n_no_reclaim_cmds * sizeof(u8)); 1971 1972 trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; 1973 trans_pcie->rx_page_order = 1974 iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); 1975 trans_pcie->rx_buf_bytes = 1976 iwl_trans_get_rb_size(trans_pcie->rx_buf_size); 1977 trans_pcie->supported_dma_mask = DMA_BIT_MASK(12); 1978 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 1979 trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); 1980 1981 trans->txqs.bc_table_dword = trans_cfg->bc_table_dword; 1982 trans_pcie->scd_set_active = trans_cfg->scd_set_active; 1983 1984 trans->command_groups = trans_cfg->command_groups; 1985 trans->command_groups_size = trans_cfg->command_groups_size; 1986 1987 /* Initialize NAPI here - it should be before registering to mac80211 1988 * in the opmode but after the HW struct is allocated. 1989 * As this function may be called again in some corner cases don't 1990 * do anything if NAPI was already initialized. 
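* A dummy netdev is sufficient here: NAPI contexts only need some
* struct net_device to hang off, and init_dummy_netdev() sets one up
* without registering it (reg_state becomes NETREG_DUMMY, which is
* exactly what the check below tests to stay idempotent).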
1991 */ 1992 if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) 1993 init_dummy_netdev(&trans_pcie->napi_dev); 1994 1995 trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; 1996 } 1997 1998 void iwl_trans_pcie_free_pnvm_dram(struct iwl_trans_pcie *trans_pcie, 1999 struct device *dev) 2000 { 2001 u8 i; 2002 2003 for (i = 0; i < trans_pcie->n_pnvm_regions; i++) { 2004 dma_free_coherent(dev, trans_pcie->pnvm_dram[i].size, 2005 trans_pcie->pnvm_dram[i].block, 2006 trans_pcie->pnvm_dram[i].physical); 2007 } 2008 trans_pcie->n_pnvm_regions = 0; 2009 } 2010 2011 void iwl_trans_pcie_free(struct iwl_trans *trans) 2012 { 2013 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2014 int i; 2015 2016 iwl_pcie_synchronize_irqs(trans); 2017 2018 if (trans->trans_cfg->gen2) 2019 iwl_txq_gen2_tx_free(trans); 2020 else 2021 iwl_pcie_tx_free(trans); 2022 iwl_pcie_rx_free(trans); 2023 2024 if (trans_pcie->rba.alloc_wq) { 2025 destroy_workqueue(trans_pcie->rba.alloc_wq); 2026 trans_pcie->rba.alloc_wq = NULL; 2027 } 2028 2029 if (trans_pcie->msix_enabled) { 2030 for (i = 0; i < trans_pcie->alloc_vecs; i++) { 2031 irq_set_affinity_hint( 2032 trans_pcie->msix_entries[i].vector, 2033 NULL); 2034 } 2035 2036 trans_pcie->msix_enabled = false; 2037 } else { 2038 iwl_pcie_free_ict(trans); 2039 } 2040 2041 iwl_pcie_free_fw_monitor(trans); 2042 2043 iwl_trans_pcie_free_pnvm_dram(trans_pcie, trans->dev); 2044 2045 if (trans_pcie->reduce_power_dram.size) 2046 dma_free_coherent(trans->dev, 2047 trans_pcie->reduce_power_dram.size, 2048 trans_pcie->reduce_power_dram.block, 2049 trans_pcie->reduce_power_dram.physical); 2050 2051 mutex_destroy(&trans_pcie->mutex); 2052 iwl_trans_free(trans); 2053 } 2054 2055 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) 2056 { 2057 if (state) 2058 set_bit(STATUS_TPOWER_PMI, &trans->status); 2059 else 2060 clear_bit(STATUS_TPOWER_PMI, &trans->status); 2061 } 2062 2063 struct iwl_trans_pcie_removal { 2064 struct pci_dev *pdev; 2065 struct work_struct work; 2066 bool rescan; 2067 }; 2068 2069 static void iwl_trans_pcie_removal_wk(struct work_struct *wk) 2070 { 2071 struct iwl_trans_pcie_removal *removal = 2072 container_of(wk, struct iwl_trans_pcie_removal, work); 2073 struct pci_dev *pdev = removal->pdev; 2074 static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; 2075 struct pci_bus *bus = pdev->bus; 2076 2077 dev_err(&pdev->dev, "Device gone - attempting removal\n"); 2078 kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); 2079 pci_lock_rescan_remove(); 2080 pci_dev_put(pdev); 2081 pci_stop_and_remove_bus_device(pdev); 2082 if (removal->rescan) 2083 pci_rescan_bus(bus->parent); 2084 pci_unlock_rescan_remove(); 2085 2086 kfree(removal); 2087 module_put(THIS_MODULE); 2088 } 2089 2090 void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan) 2091 { 2092 struct iwl_trans_pcie_removal *removal; 2093 2094 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2095 return; 2096 2097 IWL_ERR(trans, "Device gone - scheduling removal!\n"); 2098 2099 /* 2100 * get a module reference to avoid doing this 2101 * while unloading anyway and to avoid 2102 * scheduling a work with code that's being 2103 * removed. 
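* try_module_get() only fails once the module exit path has already
* started, in which case scheduling the work would race with this
* very code being unloaded.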
2104 */ 2105 if (!try_module_get(THIS_MODULE)) { 2106 IWL_ERR(trans, 2107 "Module is being unloaded - abort\n"); 2108 return; 2109 } 2110 2111 removal = kzalloc(sizeof(*removal), GFP_ATOMIC); 2112 if (!removal) { 2113 module_put(THIS_MODULE); 2114 return; 2115 } 2116 /* 2117 * we don't need to clear this flag, because 2118 * the trans will be freed and reallocated. 2119 */ 2120 set_bit(STATUS_TRANS_DEAD, &trans->status); 2121 2122 removal->pdev = to_pci_dev(trans->dev); 2123 removal->rescan = rescan; 2124 INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); 2125 pci_dev_get(removal->pdev); 2126 schedule_work(&removal->work); 2127 } 2128 EXPORT_SYMBOL(iwl_trans_pcie_remove); 2129 2130 /* 2131 * This version doesn't disable BHs but rather assumes they're 2132 * already disabled. 2133 */ 2134 bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) 2135 { 2136 int ret; 2137 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2138 u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ; 2139 u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 2140 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP; 2141 u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN; 2142 2143 spin_lock(&trans_pcie->reg_lock); 2144 2145 if (trans_pcie->cmd_hold_nic_awake) 2146 goto out; 2147 2148 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { 2149 write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ; 2150 mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; 2151 poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; 2152 } 2153 2154 /* this bit wakes up the NIC */ 2155 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write); 2156 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) 2157 udelay(2); 2158 2159 /* 2160 * These bits say the device is running, and should keep running for 2161 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), 2162 * but they do not indicate that embedded SRAM is restored yet; 2163 * HW with volatile SRAM must save/restore contents to/from 2164 * host DRAM when sleeping/waking for power-saving. 2165 * Each direction takes approximately 1/4 millisecond; with this 2166 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a 2167 * series of register accesses are expected (e.g. reading Event Log), 2168 * to keep device from sleeping. 2169 * 2170 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that 2171 * SRAM is okay/restored. We don't check that here because this call 2172 * is just for hardware register access; but GP1 MAC_SLEEP 2173 * check is a good idea before accessing the SRAM of HW with 2174 * volatile SRAM (e.g. reading Event Log). 2175 * 2176 * 5000 series and later (including 1000 series) have non-volatile SRAM, 2177 * and do not save/restore SRAM when power cycling. 2178 */ 2179 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000); 2180 if (unlikely(ret < 0)) { 2181 u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); 2182 2183 WARN_ONCE(1, 2184 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", 2185 cntrl); 2186 2187 iwl_trans_pcie_dump_regs(trans); 2188 2189 if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) 2190 iwl_trans_pcie_remove(trans, false); 2191 else 2192 iwl_write32(trans, CSR_RESET, 2193 CSR_RESET_REG_FLAG_FORCE_NMI); 2194 2195 spin_unlock(&trans_pcie->reg_lock); 2196 return false; 2197 } 2198 2199 out: 2200 /* 2201 * Fool sparse by faking we release the lock - sparse will 2202 * track nic_access anyway. 
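* __release() below is a no-op at runtime; it only balances sparse's
* context tracking so that callers appear to hold "nic_access" rather
* than reg_lock itself. The expected calling pattern (as used e.g. by
* iwl_trans_pcie_read_mem() later in this file) is:
*
*	if (iwl_trans_grab_nic_access(trans)) {
*		... burst of register accesses ...
*		iwl_trans_release_nic_access(trans);
*	}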
2203 */ 2204 __release(&trans_pcie->reg_lock); 2205 return true; 2206 } 2207 2208 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) 2209 { 2210 bool ret; 2211 2212 local_bh_disable(); 2213 ret = __iwl_trans_pcie_grab_nic_access(trans); 2214 if (ret) { 2215 /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ 2216 return ret; 2217 } 2218 local_bh_enable(); 2219 return false; 2220 } 2221 2222 static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) 2223 { 2224 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2225 2226 lockdep_assert_held(&trans_pcie->reg_lock); 2227 2228 /* 2229 * Fool sparse by faking we acquire the lock - sparse will 2230 * track nic_access anyway. 2231 */ 2232 __acquire(&trans_pcie->reg_lock); 2233 2234 if (trans_pcie->cmd_hold_nic_awake) 2235 goto out; 2236 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 2237 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 2238 CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); 2239 else 2240 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 2241 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2242 /* 2243 * Above we read the CSR_GP_CNTRL register, which will flush 2244 * any previous writes, but we need the write that clears the 2245 * MAC_ACCESS_REQ bit to be performed before any other writes 2246 * scheduled on different CPUs (after we drop reg_lock). 2247 */ 2248 out: 2249 spin_unlock_bh(&trans_pcie->reg_lock); 2250 } 2251 2252 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, 2253 void *buf, int dwords) 2254 { 2255 int offs = 0; 2256 u32 *vals = buf; 2257 2258 while (offs < dwords) { 2259 /* limit the time we spin here under lock to 1/2s */ 2260 unsigned long end = jiffies + HZ / 2; 2261 bool resched = false; 2262 2263 if (iwl_trans_grab_nic_access(trans)) { 2264 iwl_write32(trans, HBUS_TARG_MEM_RADDR, 2265 addr + 4 * offs); 2266 2267 while (offs < dwords) { 2268 vals[offs] = iwl_read32(trans, 2269 HBUS_TARG_MEM_RDAT); 2270 offs++; 2271 2272 if (time_after(jiffies, end)) { 2273 resched = true; 2274 break; 2275 } 2276 } 2277 iwl_trans_release_nic_access(trans); 2278 2279 if (resched) 2280 cond_resched(); 2281 } else { 2282 return -EBUSY; 2283 } 2284 } 2285 2286 return 0; 2287 } 2288 2289 static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, 2290 const void *buf, int dwords) 2291 { 2292 int offs, ret = 0; 2293 const u32 *vals = buf; 2294 2295 if (iwl_trans_grab_nic_access(trans)) { 2296 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); 2297 for (offs = 0; offs < dwords; offs++) 2298 iwl_write32(trans, HBUS_TARG_MEM_WDAT, 2299 vals ?
vals[offs] : 0); 2300 iwl_trans_release_nic_access(trans); 2301 } else { 2302 ret = -EBUSY; 2303 } 2304 return ret; 2305 } 2306 2307 static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, 2308 u32 *val) 2309 { 2310 return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, 2311 ofs, val); 2312 } 2313 2314 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) 2315 { 2316 int i; 2317 2318 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 2319 struct iwl_txq *txq = trans->txqs.txq[i]; 2320 2321 if (i == trans->txqs.cmd.q_id) 2322 continue; 2323 2324 spin_lock_bh(&txq->lock); 2325 2326 if (!block && !(WARN_ON_ONCE(!txq->block))) { 2327 txq->block--; 2328 if (!txq->block) { 2329 iwl_write32(trans, HBUS_TARG_WRPTR, 2330 txq->write_ptr | (i << 8)); 2331 } 2332 } else if (block) { 2333 txq->block++; 2334 } 2335 2336 spin_unlock_bh(&txq->lock); 2337 } 2338 } 2339 2340 #define IWL_FLUSH_WAIT_MS 2000 2341 2342 static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, 2343 struct iwl_trans_rxq_dma_data *data) 2344 { 2345 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2346 2347 if (queue >= trans->num_rx_queues || !trans_pcie->rxq) 2348 return -EINVAL; 2349 2350 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; 2351 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; 2352 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; 2353 data->fr_bd_wid = 0; 2354 2355 return 0; 2356 } 2357 2358 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) 2359 { 2360 struct iwl_txq *txq; 2361 unsigned long now = jiffies; 2362 bool overflow_tx; 2363 u8 wr_ptr; 2364 2365 /* Make sure the NIC is still alive in the bus */ 2366 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2367 return -ENODEV; 2368 2369 if (!test_bit(txq_idx, trans->txqs.queue_used)) 2370 return -EINVAL; 2371 2372 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); 2373 txq = trans->txqs.txq[txq_idx]; 2374 2375 spin_lock_bh(&txq->lock); 2376 overflow_tx = txq->overflow_tx || 2377 !skb_queue_empty(&txq->overflow_q); 2378 spin_unlock_bh(&txq->lock); 2379 2380 wr_ptr = READ_ONCE(txq->write_ptr); 2381 2382 while ((txq->read_ptr != READ_ONCE(txq->write_ptr) || 2383 overflow_tx) && 2384 !time_after(jiffies, 2385 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { 2386 u8 write_ptr = READ_ONCE(txq->write_ptr); 2387 2388 /* 2389 * If write pointer moved during the wait, warn only 2390 * if the TX came from op mode. In case TX came from 2391 * trans layer (overflow TX) don't warn. 
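* ("Overflow TX" means the transport draining txq->overflow_q on its
*  own, which legitimately advances the write pointer.)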
2392 */ 2393 if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx, 2394 "WR pointer moved while flushing %d -> %d\n", 2395 wr_ptr, write_ptr)) 2396 return -ETIMEDOUT; 2397 wr_ptr = write_ptr; 2398 2399 usleep_range(1000, 2000); 2400 2401 spin_lock_bh(&txq->lock); 2402 overflow_tx = txq->overflow_tx || 2403 !skb_queue_empty(&txq->overflow_q); 2404 spin_unlock_bh(&txq->lock); 2405 } 2406 2407 if (txq->read_ptr != txq->write_ptr) { 2408 IWL_ERR(trans, 2409 "fail to flush all tx fifo queues Q %d\n", txq_idx); 2410 iwl_txq_log_scd_error(trans, txq); 2411 return -ETIMEDOUT; 2412 } 2413 2414 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx); 2415 2416 return 0; 2417 } 2418 2419 static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) 2420 { 2421 int cnt; 2422 int ret = 0; 2423 2424 /* waiting for all the tx frames complete might take a while */ 2425 for (cnt = 0; 2426 cnt < trans->trans_cfg->base_params->num_of_queues; 2427 cnt++) { 2428 2429 if (cnt == trans->txqs.cmd.q_id) 2430 continue; 2431 if (!test_bit(cnt, trans->txqs.queue_used)) 2432 continue; 2433 if (!(BIT(cnt) & txq_bm)) 2434 continue; 2435 2436 ret = iwl_trans_pcie_wait_txq_empty(trans, cnt); 2437 if (ret) 2438 break; 2439 } 2440 2441 return ret; 2442 } 2443 2444 static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, 2445 u32 mask, u32 value) 2446 { 2447 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2448 2449 spin_lock_bh(&trans_pcie->reg_lock); 2450 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); 2451 spin_unlock_bh(&trans_pcie->reg_lock); 2452 } 2453 2454 static const char *get_csr_string(int cmd) 2455 { 2456 #define IWL_CMD(x) case x: return #x 2457 switch (cmd) { 2458 IWL_CMD(CSR_HW_IF_CONFIG_REG); 2459 IWL_CMD(CSR_INT_COALESCING); 2460 IWL_CMD(CSR_INT); 2461 IWL_CMD(CSR_INT_MASK); 2462 IWL_CMD(CSR_FH_INT_STATUS); 2463 IWL_CMD(CSR_GPIO_IN); 2464 IWL_CMD(CSR_RESET); 2465 IWL_CMD(CSR_GP_CNTRL); 2466 IWL_CMD(CSR_HW_REV); 2467 IWL_CMD(CSR_EEPROM_REG); 2468 IWL_CMD(CSR_EEPROM_GP); 2469 IWL_CMD(CSR_OTP_GP_REG); 2470 IWL_CMD(CSR_GIO_REG); 2471 IWL_CMD(CSR_GP_UCODE_REG); 2472 IWL_CMD(CSR_GP_DRIVER_REG); 2473 IWL_CMD(CSR_UCODE_DRV_GP1); 2474 IWL_CMD(CSR_UCODE_DRV_GP2); 2475 IWL_CMD(CSR_LED_REG); 2476 IWL_CMD(CSR_DRAM_INT_TBL_REG); 2477 IWL_CMD(CSR_GIO_CHICKEN_BITS); 2478 IWL_CMD(CSR_ANA_PLL_CFG); 2479 IWL_CMD(CSR_HW_REV_WA_REG); 2480 IWL_CMD(CSR_MONITOR_STATUS_REG); 2481 IWL_CMD(CSR_DBG_HPET_MEM_REG); 2482 default: 2483 return "UNKNOWN"; 2484 } 2485 #undef IWL_CMD 2486 } 2487 2488 void iwl_pcie_dump_csr(struct iwl_trans *trans) 2489 { 2490 int i; 2491 static const u32 csr_tbl[] = { 2492 CSR_HW_IF_CONFIG_REG, 2493 CSR_INT_COALESCING, 2494 CSR_INT, 2495 CSR_INT_MASK, 2496 CSR_FH_INT_STATUS, 2497 CSR_GPIO_IN, 2498 CSR_RESET, 2499 CSR_GP_CNTRL, 2500 CSR_HW_REV, 2501 CSR_EEPROM_REG, 2502 CSR_EEPROM_GP, 2503 CSR_OTP_GP_REG, 2504 CSR_GIO_REG, 2505 CSR_GP_UCODE_REG, 2506 CSR_GP_DRIVER_REG, 2507 CSR_UCODE_DRV_GP1, 2508 CSR_UCODE_DRV_GP2, 2509 CSR_LED_REG, 2510 CSR_DRAM_INT_TBL_REG, 2511 CSR_GIO_CHICKEN_BITS, 2512 CSR_ANA_PLL_CFG, 2513 CSR_MONITOR_STATUS_REG, 2514 CSR_HW_REV_WA_REG, 2515 CSR_DBG_HPET_MEM_REG 2516 }; 2517 IWL_ERR(trans, "CSR values:\n"); 2518 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " 2519 "CSR_INT_PERIODIC_REG)\n"); 2520 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { 2521 IWL_ERR(trans, " %25s: 0X%08x\n", 2522 get_csr_string(csr_tbl[i]), 2523 iwl_read32(trans, csr_tbl[i])); 2524 } 2525 } 2526 2527 #ifdef CONFIG_IWLWIFI_DEBUGFS 2528 /* create 
and remove of files */ 2529 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 2530 debugfs_create_file(#name, mode, parent, trans, \ 2531 &iwl_dbgfs_##name##_ops); \ 2532 } while (0) 2533 2534 /* file operation */ 2535 #define DEBUGFS_READ_FILE_OPS(name) \ 2536 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2537 .read = iwl_dbgfs_##name##_read, \ 2538 .open = simple_open, \ 2539 .llseek = generic_file_llseek, \ 2540 }; 2541 2542 #define DEBUGFS_WRITE_FILE_OPS(name) \ 2543 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2544 .write = iwl_dbgfs_##name##_write, \ 2545 .open = simple_open, \ 2546 .llseek = generic_file_llseek, \ 2547 }; 2548 2549 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 2550 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2551 .write = iwl_dbgfs_##name##_write, \ 2552 .read = iwl_dbgfs_##name##_read, \ 2553 .open = simple_open, \ 2554 .llseek = generic_file_llseek, \ 2555 }; 2556 2557 struct iwl_dbgfs_tx_queue_priv { 2558 struct iwl_trans *trans; 2559 }; 2560 2561 struct iwl_dbgfs_tx_queue_state { 2562 loff_t pos; 2563 }; 2564 2565 static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos) 2566 { 2567 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2568 struct iwl_dbgfs_tx_queue_state *state; 2569 2570 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) 2571 return NULL; 2572 2573 state = kmalloc(sizeof(*state), GFP_KERNEL); 2574 if (!state) 2575 return NULL; 2576 state->pos = *pos; 2577 return state; 2578 } 2579 2580 static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq, 2581 void *v, loff_t *pos) 2582 { 2583 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2584 struct iwl_dbgfs_tx_queue_state *state = v; 2585 2586 *pos = ++state->pos; 2587 2588 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) 2589 return NULL; 2590 2591 return state; 2592 } 2593 2594 static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v) 2595 { 2596 kfree(v); 2597 } 2598 2599 static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) 2600 { 2601 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2602 struct iwl_dbgfs_tx_queue_state *state = v; 2603 struct iwl_trans *trans = priv->trans; 2604 struct iwl_txq *txq = trans->txqs.txq[state->pos]; 2605 2606 seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", 2607 (unsigned int)state->pos, 2608 !!test_bit(state->pos, trans->txqs.queue_used), 2609 !!test_bit(state->pos, trans->txqs.queue_stopped)); 2610 if (txq) 2611 seq_printf(seq, 2612 "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", 2613 txq->read_ptr, txq->write_ptr, 2614 txq->need_update, txq->frozen, 2615 txq->n_window, txq->ampdu); 2616 else 2617 seq_puts(seq, "(unallocated)"); 2618 2619 if (state->pos == trans->txqs.cmd.q_id) 2620 seq_puts(seq, " (HCMD)"); 2621 seq_puts(seq, "\n"); 2622 2623 return 0; 2624 } 2625 2626 static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = { 2627 .start = iwl_dbgfs_tx_queue_seq_start, 2628 .next = iwl_dbgfs_tx_queue_seq_next, 2629 .stop = iwl_dbgfs_tx_queue_seq_stop, 2630 .show = iwl_dbgfs_tx_queue_seq_show, 2631 }; 2632 2633 static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp) 2634 { 2635 struct iwl_dbgfs_tx_queue_priv *priv; 2636 2637 priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops, 2638 sizeof(*priv)); 2639 2640 if (!priv) 2641 return -ENOMEM; 2642 2643 priv->trans = inode->i_private; 2644 return 0; 2645 } 2646 2647 static ssize_t iwl_dbgfs_rx_queue_read(struct file 
*file, 2648 char __user *user_buf, 2649 size_t count, loff_t *ppos) 2650 { 2651 struct iwl_trans *trans = file->private_data; 2652 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2653 char *buf; 2654 int pos = 0, i, ret; 2655 size_t bufsz; 2656 2657 bufsz = sizeof(char) * 121 * trans->num_rx_queues; 2658 2659 if (!trans_pcie->rxq) 2660 return -EAGAIN; 2661 2662 buf = kzalloc(bufsz, GFP_KERNEL); 2663 if (!buf) 2664 return -ENOMEM; 2665 2666 for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { 2667 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 2668 2669 pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", 2670 i); 2671 pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", 2672 rxq->read); 2673 pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", 2674 rxq->write); 2675 pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", 2676 rxq->write_actual); 2677 pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", 2678 rxq->need_update); 2679 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", 2680 rxq->free_count); 2681 if (rxq->rb_stts) { 2682 u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans, 2683 rxq)); 2684 pos += scnprintf(buf + pos, bufsz - pos, 2685 "\tclosed_rb_num: %u\n", 2686 r & 0x0FFF); 2687 } else { 2688 pos += scnprintf(buf + pos, bufsz - pos, 2689 "\tclosed_rb_num: Not Allocated\n"); 2690 } 2691 } 2692 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2693 kfree(buf); 2694 2695 return ret; 2696 } 2697 2698 static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 2699 char __user *user_buf, 2700 size_t count, loff_t *ppos) 2701 { 2702 struct iwl_trans *trans = file->private_data; 2703 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2704 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2705 2706 int pos = 0; 2707 char *buf; 2708 int bufsz = 24 * 64; /* 24 items * 64 char per item */ 2709 ssize_t ret; 2710 2711 buf = kzalloc(bufsz, GFP_KERNEL); 2712 if (!buf) 2713 return -ENOMEM; 2714 2715 pos += scnprintf(buf + pos, bufsz - pos, 2716 "Interrupt Statistics Report:\n"); 2717 2718 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", 2719 isr_stats->hw); 2720 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", 2721 isr_stats->sw); 2722 if (isr_stats->sw || isr_stats->hw) { 2723 pos += scnprintf(buf + pos, bufsz - pos, 2724 "\tLast Restarting Code: 0x%X\n", 2725 isr_stats->err_code); 2726 } 2727 #ifdef CONFIG_IWLWIFI_DEBUG 2728 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", 2729 isr_stats->sch); 2730 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", 2731 isr_stats->alive); 2732 #endif 2733 pos += scnprintf(buf + pos, bufsz - pos, 2734 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); 2735 2736 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", 2737 isr_stats->ctkill); 2738 2739 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", 2740 isr_stats->wakeup); 2741 2742 pos += scnprintf(buf + pos, bufsz - pos, 2743 "Rx command responses:\t\t %u\n", isr_stats->rx); 2744 2745 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", 2746 isr_stats->tx); 2747 2748 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", 2749 isr_stats->unhandled); 2750 2751 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2752 kfree(buf); 2753 return ret; 2754 } 2755 2756 static ssize_t iwl_dbgfs_interrupt_write(struct file *file, 2757 const char __user 
*user_buf, 2758 size_t count, loff_t *ppos) 2759 { 2760 struct iwl_trans *trans = file->private_data; 2761 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2762 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2763 u32 reset_flag; 2764 int ret; 2765 2766 ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag); 2767 if (ret) 2768 return ret; 2769 if (reset_flag == 0) 2770 memset(isr_stats, 0, sizeof(*isr_stats)); 2771 2772 return count; 2773 } 2774 2775 static ssize_t iwl_dbgfs_csr_write(struct file *file, 2776 const char __user *user_buf, 2777 size_t count, loff_t *ppos) 2778 { 2779 struct iwl_trans *trans = file->private_data; 2780 2781 iwl_pcie_dump_csr(trans); 2782 2783 return count; 2784 } 2785 2786 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 2787 char __user *user_buf, 2788 size_t count, loff_t *ppos) 2789 { 2790 struct iwl_trans *trans = file->private_data; 2791 char *buf = NULL; 2792 ssize_t ret; 2793 2794 ret = iwl_dump_fh(trans, &buf); 2795 if (ret < 0) 2796 return ret; 2797 if (!buf) 2798 return -EINVAL; 2799 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 2800 kfree(buf); 2801 return ret; 2802 } 2803 2804 static ssize_t iwl_dbgfs_rfkill_read(struct file *file, 2805 char __user *user_buf, 2806 size_t count, loff_t *ppos) 2807 { 2808 struct iwl_trans *trans = file->private_data; 2809 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2810 char buf[100]; 2811 int pos; 2812 2813 pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n", 2814 trans_pcie->debug_rfkill, 2815 !(iwl_read32(trans, CSR_GP_CNTRL) & 2816 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)); 2817 2818 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2819 } 2820 2821 static ssize_t iwl_dbgfs_rfkill_write(struct file *file, 2822 const char __user *user_buf, 2823 size_t count, loff_t *ppos) 2824 { 2825 struct iwl_trans *trans = file->private_data; 2826 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2827 bool new_value; 2828 int ret; 2829 2830 ret = kstrtobool_from_user(user_buf, count, &new_value); 2831 if (ret) 2832 return ret; 2833 if (new_value == trans_pcie->debug_rfkill) 2834 return count; 2835 IWL_WARN(trans, "changing debug rfkill %d->%d\n", 2836 trans_pcie->debug_rfkill, new_value); 2837 trans_pcie->debug_rfkill = new_value; 2838 iwl_pcie_handle_rfkill_irq(trans); 2839 2840 return count; 2841 } 2842 2843 static int iwl_dbgfs_monitor_data_open(struct inode *inode, 2844 struct file *file) 2845 { 2846 struct iwl_trans *trans = inode->i_private; 2847 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2848 2849 if (!trans->dbg.dest_tlv || 2850 trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) { 2851 IWL_ERR(trans, "Debug destination is not set to DRAM\n"); 2852 return -ENOENT; 2853 } 2854 2855 if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED) 2856 return -EBUSY; 2857 2858 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN; 2859 return simple_open(inode, file); 2860 } 2861 2862 static int iwl_dbgfs_monitor_data_release(struct inode *inode, 2863 struct file *file) 2864 { 2865 struct iwl_trans_pcie *trans_pcie = 2866 IWL_TRANS_GET_PCIE_TRANS(inode->i_private); 2867 2868 if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN) 2869 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; 2870 return 0; 2871 } 2872 2873 static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count, 2874 void *buf, ssize_t *size, 2875 ssize_t *bytes_copied) 2876 
{ 2877 ssize_t buf_size_left = count - *bytes_copied; 2878 2879 buf_size_left = buf_size_left - (buf_size_left % sizeof(u32)); 2880 if (*size > buf_size_left) 2881 *size = buf_size_left; 2882 2883 *size -= copy_to_user(user_buf, buf, *size); 2884 *bytes_copied += *size; 2885 2886 if (buf_size_left == *size) 2887 return true; 2888 return false; 2889 } 2890 2891 static ssize_t iwl_dbgfs_monitor_data_read(struct file *file, 2892 char __user *user_buf, 2893 size_t count, loff_t *ppos) 2894 { 2895 struct iwl_trans *trans = file->private_data; 2896 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2897 u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf; 2898 struct cont_rec *data = &trans_pcie->fw_mon_data; 2899 u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt; 2900 ssize_t size, bytes_copied = 0; 2901 bool b_full; 2902 2903 if (trans->dbg.dest_tlv) { 2904 write_ptr_addr = 2905 le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); 2906 wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); 2907 } else { 2908 write_ptr_addr = MON_BUFF_WRPTR; 2909 wrap_cnt_addr = MON_BUFF_CYCLE_CNT; 2910 } 2911 2912 if (unlikely(!trans->dbg.rec_on)) 2913 return 0; 2914 2915 mutex_lock(&data->mutex); 2916 if (data->state == 2917 IWL_FW_MON_DBGFS_STATE_DISABLED) { 2918 mutex_unlock(&data->mutex); 2919 return 0; 2920 } 2921 2922 /* write_ptr position in bytes rather than DW */ 2923 write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32); 2924 wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr); 2925 2926 if (data->prev_wrap_cnt == wrap_cnt) { 2927 size = write_ptr - data->prev_wr_ptr; 2928 curr_buf = cpu_addr + data->prev_wr_ptr; 2929 b_full = iwl_write_to_user_buf(user_buf, count, 2930 curr_buf, &size, 2931 &bytes_copied); 2932 data->prev_wr_ptr += size; 2933 2934 } else if (data->prev_wrap_cnt == wrap_cnt - 1 && 2935 write_ptr < data->prev_wr_ptr) { 2936 size = trans->dbg.fw_mon.size - data->prev_wr_ptr; 2937 curr_buf = cpu_addr + data->prev_wr_ptr; 2938 b_full = iwl_write_to_user_buf(user_buf, count, 2939 curr_buf, &size, 2940 &bytes_copied); 2941 data->prev_wr_ptr += size; 2942 2943 if (!b_full) { 2944 size = write_ptr; 2945 b_full = iwl_write_to_user_buf(user_buf, count, 2946 cpu_addr, &size, 2947 &bytes_copied); 2948 data->prev_wr_ptr = size; 2949 data->prev_wrap_cnt++; 2950 } 2951 } else { 2952 if (data->prev_wrap_cnt == wrap_cnt - 1 && 2953 write_ptr > data->prev_wr_ptr) 2954 IWL_WARN(trans, 2955 "write pointer passed previous write pointer, start copying from the beginning\n"); 2956 else if (!unlikely(data->prev_wrap_cnt == 0 && 2957 data->prev_wr_ptr == 0)) 2958 IWL_WARN(trans, 2959 "monitor data is out of sync, start copying from the beginning\n"); 2960 2961 size = write_ptr; 2962 b_full = iwl_write_to_user_buf(user_buf, count, 2963 cpu_addr, &size, 2964 &bytes_copied); 2965 data->prev_wr_ptr = size; 2966 data->prev_wrap_cnt = wrap_cnt; 2967 } 2968 2969 mutex_unlock(&data->mutex); 2970 2971 return bytes_copied; 2972 } 2973 2974 static ssize_t iwl_dbgfs_rf_read(struct file *file, 2975 char __user *user_buf, 2976 size_t count, loff_t *ppos) 2977 { 2978 struct iwl_trans *trans = file->private_data; 2979 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2980 2981 if (!trans_pcie->rf_name[0]) 2982 return -ENODEV; 2983 2984 return simple_read_from_buffer(user_buf, count, ppos, 2985 trans_pcie->rf_name, 2986 strlen(trans_pcie->rf_name)); 2987 } 2988 2989 DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 2990 DEBUGFS_READ_FILE_OPS(fh_reg); 2991
DEBUGFS_READ_FILE_OPS(rx_queue); 2992 DEBUGFS_WRITE_FILE_OPS(csr); 2993 DEBUGFS_READ_WRITE_FILE_OPS(rfkill); 2994 DEBUGFS_READ_FILE_OPS(rf); 2995 2996 static const struct file_operations iwl_dbgfs_tx_queue_ops = { 2997 .owner = THIS_MODULE, 2998 .open = iwl_dbgfs_tx_queue_open, 2999 .read = seq_read, 3000 .llseek = seq_lseek, 3001 .release = seq_release_private, 3002 }; 3003 3004 static const struct file_operations iwl_dbgfs_monitor_data_ops = { 3005 .read = iwl_dbgfs_monitor_data_read, 3006 .open = iwl_dbgfs_monitor_data_open, 3007 .release = iwl_dbgfs_monitor_data_release, 3008 }; 3009 3010 /* Create the debugfs files and directories */ 3011 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) 3012 { 3013 struct dentry *dir = trans->dbgfs_dir; 3014 3015 DEBUGFS_ADD_FILE(rx_queue, dir, 0400); 3016 DEBUGFS_ADD_FILE(tx_queue, dir, 0400); 3017 DEBUGFS_ADD_FILE(interrupt, dir, 0600); 3018 DEBUGFS_ADD_FILE(csr, dir, 0200); 3019 DEBUGFS_ADD_FILE(fh_reg, dir, 0400); 3020 DEBUGFS_ADD_FILE(rfkill, dir, 0600); 3021 DEBUGFS_ADD_FILE(monitor_data, dir, 0400); 3022 DEBUGFS_ADD_FILE(rf, dir, 0400); 3023 } 3024 3025 static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) 3026 { 3027 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3028 struct cont_rec *data = &trans_pcie->fw_mon_data; 3029 3030 mutex_lock(&data->mutex); 3031 data->state = IWL_FW_MON_DBGFS_STATE_DISABLED; 3032 mutex_unlock(&data->mutex); 3033 } 3034 #endif /*CONFIG_IWLWIFI_DEBUGFS */ 3035 3036 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) 3037 { 3038 u32 cmdlen = 0; 3039 int i; 3040 3041 for (i = 0; i < trans->txqs.tfd.max_tbs; i++) 3042 cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i); 3043 3044 return cmdlen; 3045 } 3046 3047 static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, 3048 struct iwl_fw_error_dump_data **data, 3049 int allocated_rb_nums) 3050 { 3051 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3052 int max_len = trans_pcie->rx_buf_bytes; 3053 /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 3054 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 3055 u32 i, r, j, rb_len = 0; 3056 3057 spin_lock(&rxq->lock); 3058 3059 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; 3060 3061 for (i = rxq->read, j = 0; 3062 i != r && j < allocated_rb_nums; 3063 i = (i + 1) & RX_QUEUE_MASK, j++) { 3064 struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; 3065 struct iwl_fw_error_dump_rb *rb; 3066 3067 dma_sync_single_for_cpu(trans->dev, rxb->page_dma, 3068 max_len, DMA_FROM_DEVICE); 3069 3070 rb_len += sizeof(**data) + sizeof(*rb) + max_len; 3071 3072 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); 3073 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); 3074 rb = (void *)(*data)->data; 3075 rb->index = cpu_to_le32(i); 3076 memcpy(rb->data, page_address(rxb->page), max_len); 3077 3078 *data = iwl_fw_error_next_data(*data); 3079 } 3080 3081 spin_unlock(&rxq->lock); 3082 3083 return rb_len; 3084 } 3085 #define IWL_CSR_TO_DUMP (0x250) 3086 3087 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, 3088 struct iwl_fw_error_dump_data **data) 3089 { 3090 u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; 3091 __le32 *val; 3092 int i; 3093 3094 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); 3095 (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); 3096 val = (void *)(*data)->data; 3097 3098 for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) 3099 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 3100 3101 *data = 
iwl_fw_error_next_data(*data); 3102 3103 return csr_len; 3104 } 3105 3106 static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, 3107 struct iwl_fw_error_dump_data **data) 3108 { 3109 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; 3110 __le32 *val; 3111 int i; 3112 3113 if (!iwl_trans_grab_nic_access(trans)) 3114 return 0; 3115 3116 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); 3117 (*data)->len = cpu_to_le32(fh_regs_len); 3118 val = (void *)(*data)->data; 3119 3120 if (!trans->trans_cfg->gen2) 3121 for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; 3122 i += sizeof(u32)) 3123 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 3124 else 3125 for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2); 3126 i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2); 3127 i += sizeof(u32)) 3128 *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, 3129 i)); 3130 3131 iwl_trans_release_nic_access(trans); 3132 3133 *data = iwl_fw_error_next_data(*data); 3134 3135 return sizeof(**data) + fh_regs_len; 3136 } 3137 3138 static u32 3139 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, 3140 struct iwl_fw_error_dump_fw_mon *fw_mon_data, 3141 u32 monitor_len) 3142 { 3143 u32 buf_size_in_dwords = (monitor_len >> 2); 3144 u32 *buffer = (u32 *)fw_mon_data->data; 3145 u32 i; 3146 3147 if (!iwl_trans_grab_nic_access(trans)) 3148 return 0; 3149 3150 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); 3151 for (i = 0; i < buf_size_in_dwords; i++) 3152 buffer[i] = iwl_read_umac_prph_no_grab(trans, 3153 MON_DMARB_RD_DATA_ADDR); 3154 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); 3155 3156 iwl_trans_release_nic_access(trans); 3157 3158 return monitor_len; 3159 } 3160 3161 static void 3162 iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, 3163 struct iwl_fw_error_dump_fw_mon *fw_mon_data) 3164 { 3165 u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; 3166 3167 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 3168 base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; 3169 base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; 3170 write_ptr = DBGC_CUR_DBGBUF_STATUS; 3171 wrap_cnt = DBGC_DBGBUF_WRAP_AROUND; 3172 } else if (trans->dbg.dest_tlv) { 3173 write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); 3174 wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); 3175 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3176 } else { 3177 base = MON_BUFF_BASE_ADDR; 3178 write_ptr = MON_BUFF_WRPTR; 3179 wrap_cnt = MON_BUFF_CYCLE_CNT; 3180 } 3181 3182 write_ptr_val = iwl_read_prph(trans, write_ptr); 3183 fw_mon_data->fw_mon_cycle_cnt = 3184 cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); 3185 fw_mon_data->fw_mon_base_ptr = 3186 cpu_to_le32(iwl_read_prph(trans, base)); 3187 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 3188 fw_mon_data->fw_mon_base_high_ptr = 3189 cpu_to_le32(iwl_read_prph(trans, base_high)); 3190 write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; 3191 /* convert wrtPtr to DWs, to align with all HWs */ 3192 write_ptr_val >>= 2; 3193 } 3194 fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); 3195 } 3196 3197 static u32 3198 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, 3199 struct iwl_fw_error_dump_data **data, 3200 u32 monitor_len) 3201 { 3202 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; 3203 u32 len = 0; 3204 3205 if (trans->dbg.dest_tlv || 3206 (fw_mon->size && 3207 (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 || 3208 trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { 3209 
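/*
 * Resulting dump layout (sketch): a struct iwl_fw_error_dump_data
 * header (type/len), then a struct iwl_fw_error_dump_fw_mon with the
 * monitor pointers, then the monitor payload itself, copied below
 * from DRAM, SMEM or MARBH depending on the configured destination.
 */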
struct iwl_fw_error_dump_fw_mon *fw_mon_data; 3210 3211 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); 3212 fw_mon_data = (void *)(*data)->data; 3213 3214 iwl_trans_pcie_dump_pointers(trans, fw_mon_data); 3215 3216 len += sizeof(**data) + sizeof(*fw_mon_data); 3217 if (fw_mon->size) { 3218 memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size); 3219 monitor_len = fw_mon->size; 3220 } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) { 3221 u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr); 3222 /* 3223 * Update pointers to reflect actual values after 3224 * shifting 3225 */ 3226 if (trans->dbg.dest_tlv->version) { 3227 base = (iwl_read_prph(trans, base) & 3228 IWL_LDBG_M2S_BUF_BA_MSK) << 3229 trans->dbg.dest_tlv->base_shift; 3230 base *= IWL_M2S_UNIT_SIZE; 3231 base += trans->cfg->smem_offset; 3232 } else { 3233 base = iwl_read_prph(trans, base) << 3234 trans->dbg.dest_tlv->base_shift; 3235 } 3236 3237 iwl_trans_read_mem(trans, base, fw_mon_data->data, 3238 monitor_len / sizeof(u32)); 3239 } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) { 3240 monitor_len = 3241 iwl_trans_pci_dump_marbh_monitor(trans, 3242 fw_mon_data, 3243 monitor_len); 3244 } else { 3245 /* Didn't match anything - output no monitor data */ 3246 monitor_len = 0; 3247 } 3248 3249 len += monitor_len; 3250 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); 3251 } 3252 3253 return len; 3254 } 3255 3256 static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) 3257 { 3258 if (trans->dbg.fw_mon.size) { 3259 *len += sizeof(struct iwl_fw_error_dump_data) + 3260 sizeof(struct iwl_fw_error_dump_fw_mon) + 3261 trans->dbg.fw_mon.size; 3262 return trans->dbg.fw_mon.size; 3263 } else if (trans->dbg.dest_tlv) { 3264 u32 base, end, cfg_reg, monitor_len; 3265 3266 if (trans->dbg.dest_tlv->version == 1) { 3267 cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3268 cfg_reg = iwl_read_prph(trans, cfg_reg); 3269 base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << 3270 trans->dbg.dest_tlv->base_shift; 3271 base *= IWL_M2S_UNIT_SIZE; 3272 base += trans->cfg->smem_offset; 3273 3274 monitor_len = 3275 (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> 3276 trans->dbg.dest_tlv->end_shift; 3277 monitor_len *= IWL_M2S_UNIT_SIZE; 3278 } else { 3279 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3280 end = le32_to_cpu(trans->dbg.dest_tlv->end_reg); 3281 3282 base = iwl_read_prph(trans, base) << 3283 trans->dbg.dest_tlv->base_shift; 3284 end = iwl_read_prph(trans, end) << 3285 trans->dbg.dest_tlv->end_shift; 3286 3287 /* Make "end" point to the actual end */ 3288 if (trans->trans_cfg->device_family >= 3289 IWL_DEVICE_FAMILY_8000 || 3290 trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) 3291 end += (1 << trans->dbg.dest_tlv->end_shift); 3292 monitor_len = end - base; 3293 } 3294 *len += sizeof(struct iwl_fw_error_dump_data) + 3295 sizeof(struct iwl_fw_error_dump_fw_mon) + 3296 monitor_len; 3297 return monitor_len; 3298 } 3299 return 0; 3300 } 3301 3302 static struct iwl_trans_dump_data * 3303 iwl_trans_pcie_dump_data(struct iwl_trans *trans, 3304 u32 dump_mask, 3305 const struct iwl_dump_sanitize_ops *sanitize_ops, 3306 void *sanitize_ctx) 3307 { 3308 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3309 struct iwl_fw_error_dump_data *data; 3310 struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id]; 3311 struct iwl_fw_error_dump_txcmd *txcmd; 3312 struct iwl_trans_dump_data *dump_data; 3313 u32 len, num_rbs = 0, monitor_len = 0; 3314 int i, ptr; 3315 bool dump_rbs = 
test_bit(STATUS_FW_ERROR, &trans->status) && 3316 !trans->trans_cfg->mq_rx_supported && 3317 dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); 3318 3319 if (!dump_mask) 3320 return NULL; 3321 3322 /* transport dump header */ 3323 len = sizeof(*dump_data); 3324 3325 /* host commands */ 3326 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) 3327 len += sizeof(*data) + 3328 cmdq->n_window * (sizeof(*txcmd) + 3329 TFD_MAX_PAYLOAD_SIZE); 3330 3331 /* FW monitor */ 3332 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) 3333 monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); 3334 3335 /* CSR registers */ 3336 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) 3337 len += sizeof(*data) + IWL_CSR_TO_DUMP; 3338 3339 /* FH registers */ 3340 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { 3341 if (trans->trans_cfg->gen2) 3342 len += sizeof(*data) + 3343 (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - 3344 iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); 3345 else 3346 len += sizeof(*data) + 3347 (FH_MEM_UPPER_BOUND - 3348 FH_MEM_LOWER_BOUND); 3349 } 3350 3351 if (dump_rbs) { 3352 /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 3353 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 3354 /* RBs */ 3355 num_rbs = 3356 le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) 3357 & 0x0FFF; 3358 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; 3359 len += num_rbs * (sizeof(*data) + 3360 sizeof(struct iwl_fw_error_dump_rb) + 3361 (PAGE_SIZE << trans_pcie->rx_page_order)); 3362 } 3363 3364 /* Paged memory for gen2 HW */ 3365 if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) 3366 for (i = 0; i < trans->init_dram.paging_cnt; i++) 3367 len += sizeof(*data) + 3368 sizeof(struct iwl_fw_error_dump_paging) + 3369 trans->init_dram.paging[i].size; 3370 3371 dump_data = vzalloc(len); 3372 if (!dump_data) 3373 return NULL; 3374 3375 len = 0; 3376 data = (void *)dump_data->data; 3377 3378 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { 3379 u16 tfd_size = trans->txqs.tfd.size; 3380 3381 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); 3382 txcmd = (void *)data->data; 3383 spin_lock_bh(&cmdq->lock); 3384 ptr = cmdq->write_ptr; 3385 for (i = 0; i < cmdq->n_window; i++) { 3386 u8 idx = iwl_txq_get_cmd_index(cmdq, ptr); 3387 u8 tfdidx; 3388 u32 caplen, cmdlen; 3389 3390 if (trans->trans_cfg->use_tfh) 3391 tfdidx = idx; 3392 else 3393 tfdidx = ptr; 3394 3395 cmdlen = iwl_trans_pcie_get_cmdlen(trans, 3396 (u8 *)cmdq->tfds + 3397 tfd_size * tfdidx); 3398 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); 3399 3400 if (cmdlen) { 3401 len += sizeof(*txcmd) + caplen; 3402 txcmd->cmdlen = cpu_to_le32(cmdlen); 3403 txcmd->caplen = cpu_to_le32(caplen); 3404 memcpy(txcmd->data, cmdq->entries[idx].cmd, 3405 caplen); 3406 if (sanitize_ops && sanitize_ops->frob_hcmd) 3407 sanitize_ops->frob_hcmd(sanitize_ctx, 3408 txcmd->data, 3409 caplen); 3410 txcmd = (void *)((u8 *)txcmd->data + caplen); 3411 } 3412 3413 ptr = iwl_txq_dec_wrap(trans, ptr); 3414 } 3415 spin_unlock_bh(&cmdq->lock); 3416 3417 data->len = cpu_to_le32(len); 3418 len += sizeof(*data); 3419 data = iwl_fw_error_next_data(data); 3420 } 3421 3422 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) 3423 len += iwl_trans_pcie_dump_csr(trans, &data); 3424 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) 3425 len += iwl_trans_pcie_fh_regs_dump(trans, &data); 3426 if (dump_rbs) 3427 len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); 3428 3429 /* Paged memory for gen2 HW */ 3430 if (trans->trans_cfg->gen2 && 3431 dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { 
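/* one IWL_FW_ERROR_DUMP_PAGING chunk per init_dram paging block */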
3432 for (i = 0; i < trans->init_dram.paging_cnt; i++) { 3433 struct iwl_fw_error_dump_paging *paging; 3434 u32 page_len = trans->init_dram.paging[i].size; 3435 3436 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); 3437 data->len = cpu_to_le32(sizeof(*paging) + page_len); 3438 paging = (void *)data->data; 3439 paging->index = cpu_to_le32(i); 3440 memcpy(paging->data, 3441 trans->init_dram.paging[i].block, page_len); 3442 data = iwl_fw_error_next_data(data); 3443 3444 len += sizeof(*data) + sizeof(*paging) + page_len; 3445 } 3446 } 3447 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) 3448 len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 3449 3450 dump_data->len = len; 3451 3452 return dump_data; 3453 } 3454 3455 static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) 3456 { 3457 if (enable) 3458 iwl_enable_interrupts(trans); 3459 else 3460 iwl_disable_interrupts(trans); 3461 } 3462 3463 static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) 3464 { 3465 u32 inta_addr, sw_err_bit; 3466 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3467 3468 if (trans_pcie->msix_enabled) { 3469 inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; 3470 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 3471 sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; 3472 else 3473 sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; 3474 } else { 3475 inta_addr = CSR_INT; 3476 sw_err_bit = CSR_INT_BIT_SW_ERR; 3477 } 3478 3479 iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit); 3480 } 3481 3482 #define IWL_TRANS_COMMON_OPS \ 3483 .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ 3484 .write8 = iwl_trans_pcie_write8, \ 3485 .write32 = iwl_trans_pcie_write32, \ 3486 .read32 = iwl_trans_pcie_read32, \ 3487 .read_prph = iwl_trans_pcie_read_prph, \ 3488 .write_prph = iwl_trans_pcie_write_prph, \ 3489 .read_mem = iwl_trans_pcie_read_mem, \ 3490 .write_mem = iwl_trans_pcie_write_mem, \ 3491 .read_config32 = iwl_trans_pcie_read_config32, \ 3492 .configure = iwl_trans_pcie_configure, \ 3493 .set_pmi = iwl_trans_pcie_set_pmi, \ 3494 .sw_reset = iwl_trans_pcie_sw_reset, \ 3495 .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ 3496 .release_nic_access = iwl_trans_pcie_release_nic_access, \ 3497 .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ 3498 .dump_data = iwl_trans_pcie_dump_data, \ 3499 .d3_suspend = iwl_trans_pcie_d3_suspend, \ 3500 .d3_resume = iwl_trans_pcie_d3_resume, \ 3501 .interrupts = iwl_trans_pci_interrupts, \ 3502 .sync_nmi = iwl_trans_pcie_sync_nmi, \ 3503 .imr_dma_data = iwl_trans_pcie_copy_imr \ 3504 3505 static const struct iwl_trans_ops trans_ops_pcie = { 3506 IWL_TRANS_COMMON_OPS, 3507 .start_hw = iwl_trans_pcie_start_hw, 3508 .fw_alive = iwl_trans_pcie_fw_alive, 3509 .start_fw = iwl_trans_pcie_start_fw, 3510 .stop_device = iwl_trans_pcie_stop_device, 3511 3512 .send_cmd = iwl_pcie_enqueue_hcmd, 3513 3514 .tx = iwl_trans_pcie_tx, 3515 .reclaim = iwl_txq_reclaim, 3516 3517 .txq_disable = iwl_trans_pcie_txq_disable, 3518 .txq_enable = iwl_trans_pcie_txq_enable, 3519 3520 .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, 3521 3522 .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, 3523 3524 .freeze_txq_timer = iwl_trans_txq_freeze_timer, 3525 .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, 3526 #ifdef CONFIG_IWLWIFI_DEBUGFS 3527 .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, 3528 #endif 3529 }; 3530 3531 static const struct iwl_trans_ops trans_ops_pcie_gen2 = { 3532 IWL_TRANS_COMMON_OPS, 3533 .start_hw = iwl_trans_pcie_start_hw, 3534 
.fw_alive = iwl_trans_pcie_gen2_fw_alive, 3535 .start_fw = iwl_trans_pcie_gen2_start_fw, 3536 .stop_device = iwl_trans_pcie_gen2_stop_device, 3537 3538 .send_cmd = iwl_pcie_gen2_enqueue_hcmd, 3539 3540 .tx = iwl_txq_gen2_tx, 3541 .reclaim = iwl_txq_reclaim, 3542 3543 .set_q_ptrs = iwl_txq_set_q_ptrs, 3544 3545 .txq_alloc = iwl_txq_dyn_alloc, 3546 .txq_free = iwl_txq_dyn_free, 3547 .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, 3548 .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, 3549 .load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm, 3550 .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm, 3551 .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power, 3552 #ifdef CONFIG_IWLWIFI_DEBUGFS 3553 .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, 3554 #endif 3555 }; 3556 3557 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, 3558 const struct pci_device_id *ent, 3559 const struct iwl_cfg_trans_params *cfg_trans) 3560 { 3561 struct iwl_trans_pcie *trans_pcie; 3562 struct iwl_trans *trans; 3563 int ret, addr_size; 3564 const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; 3565 void __iomem * const *table; 3566 3567 if (!cfg_trans->gen2) 3568 ops = &trans_ops_pcie; 3569 3570 ret = pcim_enable_device(pdev); 3571 if (ret) 3572 return ERR_PTR(ret); 3573 3574 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, 3575 cfg_trans); 3576 if (!trans) 3577 return ERR_PTR(-ENOMEM); 3578 3579 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3580 3581 trans_pcie->trans = trans; 3582 trans_pcie->opmode_down = true; 3583 spin_lock_init(&trans_pcie->irq_lock); 3584 spin_lock_init(&trans_pcie->reg_lock); 3585 spin_lock_init(&trans_pcie->alloc_page_lock); 3586 mutex_init(&trans_pcie->mutex); 3587 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 3588 init_waitqueue_head(&trans_pcie->fw_reset_waitq); 3589 init_waitqueue_head(&trans_pcie->imr_waitq); 3590 3591 trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", 3592 WQ_HIGHPRI | WQ_UNBOUND, 1); 3593 if (!trans_pcie->rba.alloc_wq) { 3594 ret = -ENOMEM; 3595 goto out_free_trans; 3596 } 3597 INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); 3598 3599 trans_pcie->debug_rfkill = -1; 3600 3601 if (!cfg_trans->base_params->pcie_l1_allowed) { 3602 /* 3603 * W/A - seems to solve weird behavior. We need to remove this 3604 * if we don't want to stay in L1 all the time. This wastes a 3605 * lot of power. 
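* Disabling L0S/L1/CLKPM below effectively pins the link in the
* active L0 state; this is only done when the device configuration
* marks L1 as not allowed.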
3606 */ 3607 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 3608 PCIE_LINK_STATE_L1 | 3609 PCIE_LINK_STATE_CLKPM); 3610 } 3611 3612 trans_pcie->def_rx_queue = 0; 3613 3614 pci_set_master(pdev); 3615 3616 addr_size = trans->txqs.tfd.addr_size; 3617 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)); 3618 if (ret) { 3619 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3620 /* both attempts failed: */ 3621 if (ret) { 3622 dev_err(&pdev->dev, "No suitable DMA available\n"); 3623 goto out_no_pci; 3624 } 3625 } 3626 3627 ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); 3628 if (ret) { 3629 dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); 3630 goto out_no_pci; 3631 } 3632 3633 table = pcim_iomap_table(pdev); 3634 if (!table) { 3635 dev_err(&pdev->dev, "pcim_iomap_table failed\n"); 3636 ret = -ENOMEM; 3637 goto out_no_pci; 3638 } 3639 3640 trans_pcie->hw_base = table[0]; 3641 if (!trans_pcie->hw_base) { 3642 dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n"); 3643 ret = -ENODEV; 3644 goto out_no_pci; 3645 } 3646 3647 /* We disable the RETRY_TIMEOUT register (0x41) to keep 3648 * PCI Tx retries from interfering with C3 CPU state */ 3649 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 3650 3651 trans_pcie->pci_dev = pdev; 3652 iwl_disable_interrupts(trans); 3653 3654 trans->hw_rev = iwl_read32(trans, CSR_HW_REV); 3655 if (trans->hw_rev == 0xffffffff) { 3656 dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); 3657 ret = -EIO; 3658 goto out_no_pci; 3659 } 3660 3661 /* 3662 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have 3663 * changed, and now the revision step also includes bit 0-1 (no more 3664 * "dash" value). To keep hw_rev backwards compatible - we'll store it 3665 * in the old format. 
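* I.e. on 8000+ parts the step is the low nibble of CSR_HW_REV taken
* as-is, while older parts kept a 2-bit step in bits 2-3 - hence the
* two branches below.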
3666 */ 3667 if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) 3668 trans->hw_rev_step = trans->hw_rev & 0xF; 3669 else 3670 trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2; 3671 3672 IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); 3673 3674 iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans); 3675 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 3676 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 3677 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); 3678 3679 init_waitqueue_head(&trans_pcie->sx_waitq); 3680 3681 3682 if (trans_pcie->msix_enabled) { 3683 ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); 3684 if (ret) 3685 goto out_no_pci; 3686 } else { 3687 ret = iwl_pcie_alloc_ict(trans); 3688 if (ret) 3689 goto out_no_pci; 3690 3691 ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, 3692 iwl_pcie_isr, 3693 iwl_pcie_irq_handler, 3694 IRQF_SHARED, DRV_NAME, trans); 3695 if (ret) { 3696 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 3697 goto out_free_ict; 3698 } 3699 } 3700 3701 #ifdef CONFIG_IWLWIFI_DEBUGFS 3702 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; 3703 mutex_init(&trans_pcie->fw_mon_data.mutex); 3704 #endif 3705 3706 iwl_dbg_tlv_init(trans); 3707 3708 return trans; 3709 3710 out_free_ict: 3711 iwl_pcie_free_ict(trans); 3712 out_no_pci: 3713 destroy_workqueue(trans_pcie->rba.alloc_wq); 3714 out_free_trans: 3715 iwl_trans_free(trans); 3716 return ERR_PTR(ret); 3717 } 3718 3719 void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, 3720 u32 dst_addr, u64 src_addr, u32 byte_cnt) 3721 { 3722 iwl_write_prph(trans, IMR_UREG_CHICK, 3723 iwl_read_prph(trans, IMR_UREG_CHICK) | 3724 IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK); 3725 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr); 3726 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB, 3727 (u32)(src_addr & 0xFFFFFFFF)); 3728 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB, 3729 iwl_get_dma_hi_addr(src_addr)); 3730 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt); 3731 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL, 3732 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS | 3733 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS | 3734 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK); 3735 } 3736 3737 int iwl_trans_pcie_copy_imr(struct iwl_trans *trans, 3738 u32 dst_addr, u64 src_addr, u32 byte_cnt) 3739 { 3740 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3741 int ret = -1; 3742 3743 trans_pcie->imr_status = IMR_D2S_REQUESTED; 3744 iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt); 3745 ret = wait_event_timeout(trans_pcie->imr_waitq, 3746 trans_pcie->imr_status != 3747 IMR_D2S_REQUESTED, 5 * HZ); 3748 if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) { 3749 IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n"); 3750 iwl_trans_pcie_dump_regs(trans); 3751 return -ETIMEDOUT; 3752 } 3753 trans_pcie->imr_status = IMR_D2S_IDLE; 3754 return 0; 3755 } 3756
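/*
 * Usage sketch (illustrative only, not called anywhere in this file):
 * an op mode requesting an IMR copy ends up in
 * iwl_trans_pcie_copy_imr() above, e.g.:
 *
 *	if (iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, len))
 *		IWL_ERR(trans, "IMR copy failed\n");
 *
 * The helper kicks off the DMA via iwl_trans_pcie_copy_imr_fh() and
 * then sleeps on imr_waitq for up to five seconds until the IRQ path
 * moves imr_status away from IMR_D2S_REQUESTED.
 */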