// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

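	/*
	 * Note: on topologies with a PCIe switch, the root port is not
	 * necessarily the parent bridge dumped above, hence the separate
	 * pcie_find_root_port() lookup below.
	 */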
	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
	}
	goto out;

err_read:
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}

static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
				   bool retake_ownership)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_SW_RESET);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET,
			    CSR_RESET_REG_FLAG_SW_RESET);
		usleep_range(5000, 6000);
	}

	if (retake_ownership)
		return iwl_pcie_prepare_card_hw(trans);

	return 0;
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power, u8 min_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size) {
		memset(fw_mon->block, 0, fw_mon->size);
		return;
	}

	for (power = max_power; power >= min_power; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}

static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

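/*
 * The SHR (shared block) registers accessed by the two helpers above are
 * not memory mapped directly: bits 31:28 of the HEEP control word select
 * the operation (2 == read request, 3 == write request) and the low 16
 * bits carry the register offset.
 */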
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - this is needed for 7260 / 3160
		 * only, so we check host_interrupt_operation_mode even though
		 * this workaround is not related to it.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but lets us be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks. This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

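/*
 * Best-effort summary of the workaround implemented below: force XTAL ON,
 * reset and re-init the NIC, keep APMG XTAL active while reconfiguring
 * (via the shared registers), enable LP XTAL and persistence mode, and
 * finally release the XTAL ON requests again.
 */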
/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	ret = iwl_trans_pcie_sw_reset(trans, true);

	if (!ret)
		ret = iwl_finish_nic_init(trans);

	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	ret = iwl_trans_pcie_sw_reset(trans, true);
	if (ret)
		IWL_ERR(trans,
			"iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

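/*
 * Stop bus mastering: on BZ and later this is requested through
 * CSR_GP_CNTRL and confirmed by a status bit, on older families through
 * the CSR_RESET stop-master/master-disabled pair.
 */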
void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   100);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

		ret = iwl_poll_bit(trans, CSR_RESET,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	}

	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans, false);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

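/*
 * "HW ready" handshake: the driver sets the NIC_READY bit in
 * CSR_HW_IF_CONFIG_REG and polls for the device to reflect it. When that
 * fails (e.g. while ME/AMT owns the device), the PREPARE bit is set and
 * the handshake is retried, see iwl_pcie_prepare_card_hw() below.
 */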
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0) {
		trans->csme_own = false;
		return 0;
	}

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		int t = 0;

		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0) {
				trans->csme_own = false;
				return 0;
			}

			if (iwl_mei_is_connected()) {
				IWL_DEBUG_INFO(trans,
					       "Couldn't prepare the card but SAP is connected\n");
				trans->csme_own = true;
				if (trans->trans_cfg->device_family !=
				    IWL_DEVICE_FAMILY_9000)
					IWL_ERR(trans,
						"SAP not supported for this NIC family\n");

				return -EBUSY;
			}

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
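/*
 * Program one DMA transfer on the FH service channel: pause the channel,
 * set the SRAM destination and the source buffer address (low/high parts
 * plus byte count), mark the single TFD valid, then re-enable the channel
 * with an end-of-TFD interrupt; the completion is signalled back to
 * iwl_pcie_load_firmware_chunk() from the interrupt path via
 * ucode_write_waitq.
 */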
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates
		 * CPU1 from CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separates
		 * CPU2 non-paged from CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}

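/*
 * Pre-8000 section loader: the same walk over the image as above, but
 * without the per-section FH_UCODE_LOAD_STATUS notification used by the
 * 8000-series loader.
 */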
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates
		 * CPU1 from CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separates
		 * CPU2 non-paged from CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

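/*
 * Apply the ini-TLV debug destination for DBGC1: either route the monitor
 * to SMEM (internal buffer) or point the MON_BUFF registers at the first
 * DRAM fragment allocated for it.
 */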
static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

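/*
 * Load flow for pre-8000 devices: write the CPU1 sections, then (for
 * dual-CPU images) the CPU2 header address and sections, apply the debug
 * destination if needed, and finally release the CPU reset so the
 * firmware starts running.
 */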
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);

	return hw_rfkill;
}

struct iwl_causes_list {
	u16 mask_reg;
	u8 bit;
	u8 addr;
};

#define IWL_CAUSE(reg, mask)						\
	{								\
		.mask_reg = reg,					\
		.bit = ilog2(mask),					\
		.addr = ilog2(mask) +					\
			((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 :	\
			 (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 :	\
			 0xffff),	/* causes overflow warning */	\
	}

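/*
 * Illustration (not part of the tables below): for a HW cause such as
 * MSIX_HW_INT_CAUSES_REG_ALIVE, IWL_CAUSE() stores the cause's bit number
 * within CSR_MSIX_HW_INT_MASK_AD and an IVAR table offset of
 * ilog2(mask) + 16; FH causes get ilog2(mask) - 16 instead, so the two
 * cause types land in disjoint halves of the IVAR table. Any other
 * register yields 0xffff, which overflows the u8 and triggers the
 * compiler warning noted above.
 */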
static const struct iwl_causes_list causes_list_common[] = {
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
};

static const struct iwl_causes_list causes_list_pre_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
};

static const struct iwl_causes_list causes_list_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
};

static void iwl_pcie_map_list(struct iwl_trans *trans,
			      const struct iwl_causes_list *causes,
			      int arr_size, int val)
{
	int i;

	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      BIT(causes[i].bit));
	}
}

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	iwl_pcie_map_list(trans, causes_list_common,
			  ARRAY_SIZE(causes_list_common), val);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_pcie_map_list(trans, causes_list_bz,
				  ARRAY_SIZE(causes_list_bz), val);
	else
		iwl_pcie_map_list(trans, causes_list_pre_bz,
				  ARRAY_SIZE(causes_list_pre_bz), val);
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

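/*
 * (Re)configure the MSI-X hardware state. After any device reset the IVAR
 * table is lost, so this must run again before interrupts are relied
 * upon; when MSI-X is not in use it only re-enables MSI via the
 * UREG_CHICK workaround.
 */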
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

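/*
 * Bring the device fully down. Callers must hold trans_pcie->mutex
 * (asserted below); the is_down flag makes this idempotent for the
 * restart-during-load case described inside.
 */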
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_rx_napi_sync(trans);
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
	else
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_trans_pcie_sw_reset(trans, true);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

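/*
 * Rough start_fw flow: take ownership of the device, quiesce and re-arm
 * interrupts (only FH_TX is left enabled while loading), init the NIC,
 * push the firmware image, and re-check rfkill before and after since
 * the rfkill interrupt is masked during the load.
 */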
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupts besides
	 * the FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

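/*
 * Called once the firmware has reported alive: restart the ICT interrupt
 * table and hand the scheduler address to the TX path.
 */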
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	iwl_op_mode_time_point(trans->op_mode,
			       IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
			       NULL);

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->trans_cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans);
		else
			_iwl_trans_pcie_stop_device(trans);
	}
}

void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

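/*
 * Tell the firmware we are entering or leaving D3: a doorbell on AX210,
 * the IPC sleep-control register on BZ and later, nothing on older
 * families. The sx_complete flag is expected to be set from the
 * interrupt path once the firmware acknowledges.
 */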
"entering" : "exiting"); 1548 return -ETIMEDOUT; 1549 } 1550 1551 return 0; 1552 } 1553 1554 static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, 1555 bool reset) 1556 { 1557 int ret; 1558 1559 if (!reset) 1560 /* Enable persistence mode to avoid reset */ 1561 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 1562 CSR_HW_IF_CONFIG_REG_PERSIST_MODE); 1563 1564 ret = iwl_pcie_d3_handshake(trans, true); 1565 if (ret) 1566 return ret; 1567 1568 iwl_pcie_d3_complete_suspend(trans, test, reset); 1569 1570 return 0; 1571 } 1572 1573 static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, 1574 enum iwl_d3_status *status, 1575 bool test, bool reset) 1576 { 1577 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1578 u32 val; 1579 int ret; 1580 1581 if (test) { 1582 iwl_enable_interrupts(trans); 1583 *status = IWL_D3_STATUS_ALIVE; 1584 ret = 0; 1585 goto out; 1586 } 1587 1588 iwl_set_bit(trans, CSR_GP_CNTRL, 1589 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1590 1591 ret = iwl_finish_nic_init(trans); 1592 if (ret) 1593 return ret; 1594 1595 /* 1596 * Reconfigure IVAR table in case of MSIX or reset ict table in 1597 * MSI mode since HW reset erased it. 1598 * Also enables interrupts - none will happen as 1599 * the device doesn't know we're waking it up, only when 1600 * the opmode actually tells it after this call. 1601 */ 1602 iwl_pcie_conf_msix_hw(trans_pcie); 1603 if (!trans_pcie->msix_enabled) 1604 iwl_pcie_reset_ict(trans); 1605 iwl_enable_interrupts(trans); 1606 1607 iwl_pcie_set_pwr(trans, false); 1608 1609 if (!reset) { 1610 iwl_clear_bit(trans, CSR_GP_CNTRL, 1611 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1612 } else { 1613 iwl_trans_pcie_tx_reset(trans); 1614 1615 ret = iwl_pcie_rx_init(trans); 1616 if (ret) { 1617 IWL_ERR(trans, 1618 "Failed to resume the device (RX reset)\n"); 1619 return ret; 1620 } 1621 } 1622 1623 IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", 1624 iwl_read_umac_prph(trans, WFPM_GP2)); 1625 1626 val = iwl_read32(trans, CSR_RESET); 1627 if (val & CSR_RESET_REG_FLAG_NEVO_RESET) 1628 *status = IWL_D3_STATUS_RESET; 1629 else 1630 *status = IWL_D3_STATUS_ALIVE; 1631 1632 out: 1633 if (*status == IWL_D3_STATUS_ALIVE) 1634 ret = iwl_pcie_d3_handshake(trans, false); 1635 1636 return ret; 1637 } 1638 1639 static void 1640 iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, 1641 struct iwl_trans *trans, 1642 const struct iwl_cfg_trans_params *cfg_trans) 1643 { 1644 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1645 int max_irqs, num_irqs, i, ret; 1646 u16 pci_cmd; 1647 u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES; 1648 1649 if (!cfg_trans->mq_rx_supported) 1650 goto enable_msi; 1651 1652 if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000) 1653 max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES; 1654 1655 max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues); 1656 for (i = 0; i < max_irqs; i++) 1657 trans_pcie->msix_entries[i].entry = i; 1658 1659 num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries, 1660 MSIX_MIN_INTERRUPT_VECTORS, 1661 max_irqs); 1662 if (num_irqs < 0) { 1663 IWL_DEBUG_INFO(trans, 1664 "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n", 1665 num_irqs); 1666 goto enable_msi; 1667 } 1668 trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0; 1669 1670 IWL_DEBUG_INFO(trans, 1671 "MSI-X enabled. 
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q ; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				trans_pcie->msix_entries[i].vector);
	}
}

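/*
 * Request a threaded handler for each allocated vector: the default
 * vector gets the general MSI-X handler, every other vector the RX-only
 * handler. The queue names come from queue_name(), which is presumably
 * device-managed since they are never freed here.
 */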
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}

static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
	u32 hpm, wprot;

	switch (trans->trans_cfg->device_family) {
	case IWL_DEVICE_FAMILY_9000:
		wprot = PREG_PRPH_WPROT_9000;
		break;
	case IWL_DEVICE_FAMILY_22000:
		wprot = PREG_PRPH_WPROT_22000;
		break;
	default:
		return 0;
	}

	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
	if (!iwl_trans_is_hw_error_value(hpm) && (hpm & PERSISTENCE_BIT)) {
		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);

		if (wprot_val & PREG_WFPM_ACCESS) {
			IWL_ERR(trans,
				"Error, can not clear persistence bit\n");
			return -EPERM;
		}
		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
					    hpm & ~PERSISTENCE_BIT);
	}

	return 0;
}

static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_finish_nic_init(trans);
	if (ret < 0)
		return ret;

	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	udelay(20);
	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_PG_EN |
			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
	udelay(20);
	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);

	return iwl_trans_pcie_sw_reset(trans, true);
}

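/*
 * Bring-up order below: prepare the card, clear the persistence bit
 * where applicable, software reset, force power gating on integrated
 * 22000 parts, APM init, MSI-X configuration, then enable the rfkill
 * interrupt so the op_mode stays informed.
 */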
iwl_disable_interrupts(trans); 1894 1895 iwl_pcie_apm_stop(trans, true); 1896 1897 iwl_disable_interrupts(trans); 1898 1899 iwl_pcie_disable_ict(trans); 1900 1901 mutex_unlock(&trans_pcie->mutex); 1902 1903 iwl_pcie_synchronize_irqs(trans); 1904 } 1905 1906 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1907 { 1908 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1909 } 1910 1911 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) 1912 { 1913 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1914 } 1915 1916 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) 1917 { 1918 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1919 } 1920 1921 static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) 1922 { 1923 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 1924 return 0x00FFFFFF; 1925 else 1926 return 0x000FFFFF; 1927 } 1928 1929 static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) 1930 { 1931 u32 mask = iwl_trans_pcie_prph_msk(trans); 1932 1933 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, 1934 ((reg & mask) | (3 << 24))); 1935 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); 1936 } 1937 1938 static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, 1939 u32 val) 1940 { 1941 u32 mask = iwl_trans_pcie_prph_msk(trans); 1942 1943 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, 1944 ((addr & mask) | (3 << 24))); 1945 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); 1946 } 1947 1948 static void iwl_trans_pcie_configure(struct iwl_trans *trans, 1949 const struct iwl_trans_config *trans_cfg) 1950 { 1951 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1952 1953 /* free all first - we might be reconfigured for a different size */ 1954 iwl_pcie_free_rbs_pool(trans); 1955 1956 trans->txqs.cmd.q_id = trans_cfg->cmd_queue; 1957 trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; 1958 trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; 1959 trans->txqs.page_offs = trans_cfg->cb_data_offs; 1960 trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); 1961 trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; 1962 1963 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) 1964 trans_pcie->n_no_reclaim_cmds = 0; 1965 else 1966 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; 1967 if (trans_pcie->n_no_reclaim_cmds) 1968 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, 1969 trans_pcie->n_no_reclaim_cmds * sizeof(u8)); 1970 1971 trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; 1972 trans_pcie->rx_page_order = 1973 iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); 1974 trans_pcie->rx_buf_bytes = 1975 iwl_trans_get_rb_size(trans_pcie->rx_buf_size); 1976 trans_pcie->supported_dma_mask = DMA_BIT_MASK(12); 1977 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 1978 trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); 1979 1980 trans->txqs.bc_table_dword = trans_cfg->bc_table_dword; 1981 trans_pcie->scd_set_active = trans_cfg->scd_set_active; 1982 1983 trans->command_groups = trans_cfg->command_groups; 1984 trans->command_groups_size = trans_cfg->command_groups_size; 1985 1986 /* Initialize NAPI here - it should be before registering to mac80211 1987 * in the opmode but after the HW struct is allocated. 1988 * As this function may be called again in some corner cases don't 1989 * do anything if NAPI was already initialized. 
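 * (napi_dev is only a dummy netdev here: the PCIe transport needs a
 * net_device to hang its NAPI contexts on, but never registers it)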
1990 */ 1991 if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) 1992 init_dummy_netdev(&trans_pcie->napi_dev); 1993 1994 trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; 1995 } 1996 1997 void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, 1998 struct device *dev) 1999 { 2000 u8 i; 2001 struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc; 2002 2003 /* free DRAM payloads */ 2004 for (i = 0; i < dram_regions->n_regions; i++) { 2005 dma_free_coherent(dev, dram_regions->drams[i].size, 2006 dram_regions->drams[i].block, 2007 dram_regions->drams[i].physical); 2008 } 2009 dram_regions->n_regions = 0; 2010 2011 /* free DRAM addresses array */ 2012 if (desc_dram->block) { 2013 dma_free_coherent(dev, desc_dram->size, 2014 desc_dram->block, 2015 desc_dram->physical); 2016 } 2017 memset(desc_dram, 0, sizeof(*desc_dram)); 2018 } 2019 2020 void iwl_trans_pcie_free(struct iwl_trans *trans) 2021 { 2022 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2023 int i; 2024 2025 iwl_pcie_synchronize_irqs(trans); 2026 2027 if (trans->trans_cfg->gen2) 2028 iwl_txq_gen2_tx_free(trans); 2029 else 2030 iwl_pcie_tx_free(trans); 2031 iwl_pcie_rx_free(trans); 2032 2033 if (trans_pcie->rba.alloc_wq) { 2034 destroy_workqueue(trans_pcie->rba.alloc_wq); 2035 trans_pcie->rba.alloc_wq = NULL; 2036 } 2037 2038 if (trans_pcie->msix_enabled) { 2039 for (i = 0; i < trans_pcie->alloc_vecs; i++) { 2040 irq_set_affinity_hint( 2041 trans_pcie->msix_entries[i].vector, 2042 NULL); 2043 } 2044 2045 trans_pcie->msix_enabled = false; 2046 } else { 2047 iwl_pcie_free_ict(trans); 2048 } 2049 2050 iwl_pcie_free_fw_monitor(trans); 2051 2052 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data, 2053 trans->dev); 2054 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data, 2055 trans->dev); 2056 2057 mutex_destroy(&trans_pcie->mutex); 2058 iwl_trans_free(trans); 2059 } 2060 2061 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) 2062 { 2063 if (state) 2064 set_bit(STATUS_TPOWER_PMI, &trans->status); 2065 else 2066 clear_bit(STATUS_TPOWER_PMI, &trans->status); 2067 } 2068 2069 struct iwl_trans_pcie_removal { 2070 struct pci_dev *pdev; 2071 struct work_struct work; 2072 bool rescan; 2073 }; 2074 2075 static void iwl_trans_pcie_removal_wk(struct work_struct *wk) 2076 { 2077 struct iwl_trans_pcie_removal *removal = 2078 container_of(wk, struct iwl_trans_pcie_removal, work); 2079 struct pci_dev *pdev = removal->pdev; 2080 static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; 2081 struct pci_bus *bus = pdev->bus; 2082 2083 dev_err(&pdev->dev, "Device gone - attempting removal\n"); 2084 kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); 2085 pci_lock_rescan_remove(); 2086 pci_dev_put(pdev); 2087 pci_stop_and_remove_bus_device(pdev); 2088 if (removal->rescan) 2089 pci_rescan_bus(bus->parent); 2090 pci_unlock_rescan_remove(); 2091 2092 kfree(removal); 2093 module_put(THIS_MODULE); 2094 } 2095 2096 void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan) 2097 { 2098 struct iwl_trans_pcie_removal *removal; 2099 2100 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2101 return; 2102 2103 IWL_ERR(trans, "Device gone - scheduling removal!\n"); 2104 2105 /* 2106 * get a module reference to avoid doing this 2107 * while unloading anyway and to avoid 2108 * scheduling a work with code that's being 2109 * removed. 
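 * The work also takes its own reference on the pci_dev; both that
 * reference and the module reference are dropped again at the end of
 * iwl_trans_pcie_removal_wk().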
2110 */ 2111 if (!try_module_get(THIS_MODULE)) { 2112 IWL_ERR(trans, 2113 "Module is being unloaded - abort\n"); 2114 return; 2115 } 2116 2117 removal = kzalloc(sizeof(*removal), GFP_ATOMIC); 2118 if (!removal) { 2119 module_put(THIS_MODULE); 2120 return; 2121 } 2122 /* 2123 * we don't need to clear this flag, because 2124 * the trans will be freed and reallocated. 2125 */ 2126 set_bit(STATUS_TRANS_DEAD, &trans->status); 2127 2128 removal->pdev = to_pci_dev(trans->dev); 2129 removal->rescan = rescan; 2130 INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); 2131 pci_dev_get(removal->pdev); 2132 schedule_work(&removal->work); 2133 } 2134 EXPORT_SYMBOL(iwl_trans_pcie_remove); 2135 2136 /* 2137 * This version doesn't disable BHs but rather assumes they're 2138 * already disabled. 2139 */ 2140 bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) 2141 { 2142 int ret; 2143 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2144 u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ; 2145 u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 2146 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP; 2147 u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN; 2148 2149 spin_lock(&trans_pcie->reg_lock); 2150 2151 if (trans_pcie->cmd_hold_nic_awake) 2152 goto out; 2153 2154 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { 2155 write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ; 2156 mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; 2157 poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; 2158 } 2159 2160 /* this bit wakes up the NIC */ 2161 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write); 2162 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) 2163 udelay(2); 2164 2165 /* 2166 * These bits say the device is running, and should keep running for 2167 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), 2168 * but they do not indicate that embedded SRAM is restored yet; 2169 * HW with volatile SRAM must save/restore contents to/from 2170 * host DRAM when sleeping/waking for power-saving. 2171 * Each direction takes approximately 1/4 millisecond; with this 2172 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a 2173 * series of register accesses are expected (e.g. reading Event Log), 2174 * to keep device from sleeping. 2175 * 2176 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that 2177 * SRAM is okay/restored. We don't check that here because this call 2178 * is just for hardware register access; but GP1 MAC_SLEEP 2179 * check is a good idea before accessing the SRAM of HW with 2180 * volatile SRAM (e.g. reading Event Log). 2181 * 2182 * 5000 series and later (including 1000 series) have non-volatile SRAM, 2183 * and do not save/restore SRAM when power cycling. 2184 */ 2185 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000); 2186 if (unlikely(ret < 0)) { 2187 u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); 2188 2189 WARN_ONCE(1, 2190 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", 2191 cntrl); 2192 2193 iwl_trans_pcie_dump_regs(trans); 2194 2195 if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) 2196 iwl_trans_pcie_remove(trans, false); 2197 else 2198 iwl_write32(trans, CSR_RESET, 2199 CSR_RESET_REG_FLAG_FORCE_NMI); 2200 2201 spin_unlock(&trans_pcie->reg_lock); 2202 return false; 2203 } 2204 2205 out: 2206 /* 2207 * Fool sparse by faking we release the lock - sparse will 2208 * track nic_access anyway. 
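 * (reg_lock really is still held here; it is only dropped on the
 * timeout path above, or later in iwl_trans_pcie_release_nic_access())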
2209 */
2210 __release(&trans_pcie->reg_lock);
2211 return true;
2212 }
2213
2214 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
2215 {
2216 bool ret;
2217
2218 local_bh_disable();
2219 ret = __iwl_trans_pcie_grab_nic_access(trans);
2220 if (ret) {
2221 /* keep BHs disabled until iwl_trans_pcie_release_nic_access */
2222 return ret;
2223 }
2224 local_bh_enable();
2225 return false;
2226 }
2227
2228 static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
2229 {
2230 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2231
2232 lockdep_assert_held(&trans_pcie->reg_lock);
2233
2234 /*
2235  * Fool sparse by faking that we acquire the lock - sparse will
2236  * track nic_access anyway.
2237  */
2238 __acquire(&trans_pcie->reg_lock);
2239
2240 if (trans_pcie->cmd_hold_nic_awake)
2241 goto out;
2242 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2243 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2244 CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
2245 else
2246 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2247 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2248 /*
2249  * The bit-clear above read CSR_GP_CNTRL, which flushes any previous
2250  * writes, but we still need the write that clears the MAC_ACCESS_REQ
2251  * bit to reach the device before any other writes scheduled on
2252  * different CPUs (after we drop reg_lock).
2253  */
2254 out:
2255 spin_unlock_bh(&trans_pcie->reg_lock);
2256 }
2257
2258 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
2259 void *buf, int dwords)
2260 {
2261 int offs = 0;
2262 u32 *vals = buf;
2263
2264 while (offs < dwords) {
2265 /* limit the time we spin here under lock to 1/2s */
2266 unsigned long end = jiffies + HZ / 2;
2267 bool resched = false;
2268
2269 if (iwl_trans_grab_nic_access(trans)) {
2270 iwl_write32(trans, HBUS_TARG_MEM_RADDR,
2271 addr + 4 * offs);
2272
2273 while (offs < dwords) {
2274 vals[offs] = iwl_read32(trans,
2275 HBUS_TARG_MEM_RDAT);
2276 offs++;
2277
2278 if (time_after(jiffies, end)) {
2279 resched = true;
2280 break;
2281 }
2282 }
2283 iwl_trans_release_nic_access(trans);
2284
2285 if (resched)
2286 cond_resched();
2287 } else {
2288 return -EBUSY;
2289 }
2290 }
2291
2292 return 0;
2293 }
2294
2295 static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
2296 const void *buf, int dwords)
2297 {
2298 int offs, ret = 0;
2299 const u32 *vals = buf;
2300
2301 if (iwl_trans_grab_nic_access(trans)) {
2302 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
2303 for (offs = 0; offs < dwords; offs++)
2304 iwl_write32(trans, HBUS_TARG_MEM_WDAT,
2305 vals ?
vals[offs] : 0); 2306 iwl_trans_release_nic_access(trans); 2307 } else { 2308 ret = -EBUSY; 2309 } 2310 return ret; 2311 } 2312 2313 static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, 2314 u32 *val) 2315 { 2316 return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, 2317 ofs, val); 2318 } 2319 2320 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) 2321 { 2322 int i; 2323 2324 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 2325 struct iwl_txq *txq = trans->txqs.txq[i]; 2326 2327 if (i == trans->txqs.cmd.q_id) 2328 continue; 2329 2330 spin_lock_bh(&txq->lock); 2331 2332 if (!block && !(WARN_ON_ONCE(!txq->block))) { 2333 txq->block--; 2334 if (!txq->block) { 2335 iwl_write32(trans, HBUS_TARG_WRPTR, 2336 txq->write_ptr | (i << 8)); 2337 } 2338 } else if (block) { 2339 txq->block++; 2340 } 2341 2342 spin_unlock_bh(&txq->lock); 2343 } 2344 } 2345 2346 #define IWL_FLUSH_WAIT_MS 2000 2347 2348 static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, 2349 struct iwl_trans_rxq_dma_data *data) 2350 { 2351 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2352 2353 if (queue >= trans->num_rx_queues || !trans_pcie->rxq) 2354 return -EINVAL; 2355 2356 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; 2357 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; 2358 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; 2359 data->fr_bd_wid = 0; 2360 2361 return 0; 2362 } 2363 2364 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) 2365 { 2366 struct iwl_txq *txq; 2367 unsigned long now = jiffies; 2368 bool overflow_tx; 2369 u8 wr_ptr; 2370 2371 /* Make sure the NIC is still alive in the bus */ 2372 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2373 return -ENODEV; 2374 2375 if (!test_bit(txq_idx, trans->txqs.queue_used)) 2376 return -EINVAL; 2377 2378 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); 2379 txq = trans->txqs.txq[txq_idx]; 2380 2381 spin_lock_bh(&txq->lock); 2382 overflow_tx = txq->overflow_tx || 2383 !skb_queue_empty(&txq->overflow_q); 2384 spin_unlock_bh(&txq->lock); 2385 2386 wr_ptr = READ_ONCE(txq->write_ptr); 2387 2388 while ((txq->read_ptr != READ_ONCE(txq->write_ptr) || 2389 overflow_tx) && 2390 !time_after(jiffies, 2391 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { 2392 u8 write_ptr = READ_ONCE(txq->write_ptr); 2393 2394 /* 2395 * If write pointer moved during the wait, warn only 2396 * if the TX came from op mode. In case TX came from 2397 * trans layer (overflow TX) don't warn. 
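 * (i.e. an op-mode TX racing with the flush is a bug and triggers the
 * warning below, while transport-internal overflow TX legitimately
 * moves the write pointer and is tolerated)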
2398 */ 2399 if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx, 2400 "WR pointer moved while flushing %d -> %d\n", 2401 wr_ptr, write_ptr)) 2402 return -ETIMEDOUT; 2403 wr_ptr = write_ptr; 2404 2405 usleep_range(1000, 2000); 2406 2407 spin_lock_bh(&txq->lock); 2408 overflow_tx = txq->overflow_tx || 2409 !skb_queue_empty(&txq->overflow_q); 2410 spin_unlock_bh(&txq->lock); 2411 } 2412 2413 if (txq->read_ptr != txq->write_ptr) { 2414 IWL_ERR(trans, 2415 "fail to flush all tx fifo queues Q %d\n", txq_idx); 2416 iwl_txq_log_scd_error(trans, txq); 2417 return -ETIMEDOUT; 2418 } 2419 2420 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx); 2421 2422 return 0; 2423 } 2424 2425 static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) 2426 { 2427 int cnt; 2428 int ret = 0; 2429 2430 /* waiting for all the tx frames complete might take a while */ 2431 for (cnt = 0; 2432 cnt < trans->trans_cfg->base_params->num_of_queues; 2433 cnt++) { 2434 2435 if (cnt == trans->txqs.cmd.q_id) 2436 continue; 2437 if (!test_bit(cnt, trans->txqs.queue_used)) 2438 continue; 2439 if (!(BIT(cnt) & txq_bm)) 2440 continue; 2441 2442 ret = iwl_trans_pcie_wait_txq_empty(trans, cnt); 2443 if (ret) 2444 break; 2445 } 2446 2447 return ret; 2448 } 2449 2450 static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, 2451 u32 mask, u32 value) 2452 { 2453 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2454 2455 spin_lock_bh(&trans_pcie->reg_lock); 2456 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); 2457 spin_unlock_bh(&trans_pcie->reg_lock); 2458 } 2459 2460 static const char *get_csr_string(int cmd) 2461 { 2462 #define IWL_CMD(x) case x: return #x 2463 switch (cmd) { 2464 IWL_CMD(CSR_HW_IF_CONFIG_REG); 2465 IWL_CMD(CSR_INT_COALESCING); 2466 IWL_CMD(CSR_INT); 2467 IWL_CMD(CSR_INT_MASK); 2468 IWL_CMD(CSR_FH_INT_STATUS); 2469 IWL_CMD(CSR_GPIO_IN); 2470 IWL_CMD(CSR_RESET); 2471 IWL_CMD(CSR_GP_CNTRL); 2472 IWL_CMD(CSR_HW_REV); 2473 IWL_CMD(CSR_EEPROM_REG); 2474 IWL_CMD(CSR_EEPROM_GP); 2475 IWL_CMD(CSR_OTP_GP_REG); 2476 IWL_CMD(CSR_GIO_REG); 2477 IWL_CMD(CSR_GP_UCODE_REG); 2478 IWL_CMD(CSR_GP_DRIVER_REG); 2479 IWL_CMD(CSR_UCODE_DRV_GP1); 2480 IWL_CMD(CSR_UCODE_DRV_GP2); 2481 IWL_CMD(CSR_LED_REG); 2482 IWL_CMD(CSR_DRAM_INT_TBL_REG); 2483 IWL_CMD(CSR_GIO_CHICKEN_BITS); 2484 IWL_CMD(CSR_ANA_PLL_CFG); 2485 IWL_CMD(CSR_HW_REV_WA_REG); 2486 IWL_CMD(CSR_MONITOR_STATUS_REG); 2487 IWL_CMD(CSR_DBG_HPET_MEM_REG); 2488 default: 2489 return "UNKNOWN"; 2490 } 2491 #undef IWL_CMD 2492 } 2493 2494 void iwl_pcie_dump_csr(struct iwl_trans *trans) 2495 { 2496 int i; 2497 static const u32 csr_tbl[] = { 2498 CSR_HW_IF_CONFIG_REG, 2499 CSR_INT_COALESCING, 2500 CSR_INT, 2501 CSR_INT_MASK, 2502 CSR_FH_INT_STATUS, 2503 CSR_GPIO_IN, 2504 CSR_RESET, 2505 CSR_GP_CNTRL, 2506 CSR_HW_REV, 2507 CSR_EEPROM_REG, 2508 CSR_EEPROM_GP, 2509 CSR_OTP_GP_REG, 2510 CSR_GIO_REG, 2511 CSR_GP_UCODE_REG, 2512 CSR_GP_DRIVER_REG, 2513 CSR_UCODE_DRV_GP1, 2514 CSR_UCODE_DRV_GP2, 2515 CSR_LED_REG, 2516 CSR_DRAM_INT_TBL_REG, 2517 CSR_GIO_CHICKEN_BITS, 2518 CSR_ANA_PLL_CFG, 2519 CSR_MONITOR_STATUS_REG, 2520 CSR_HW_REV_WA_REG, 2521 CSR_DBG_HPET_MEM_REG 2522 }; 2523 IWL_ERR(trans, "CSR values:\n"); 2524 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " 2525 "CSR_INT_PERIODIC_REG)\n"); 2526 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { 2527 IWL_ERR(trans, " %25s: 0X%08x\n", 2528 get_csr_string(csr_tbl[i]), 2529 iwl_read32(trans, csr_tbl[i])); 2530 } 2531 } 2532 2533 #ifdef CONFIG_IWLWIFI_DEBUGFS 2534 /* create 
and remove of files */ 2535 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 2536 debugfs_create_file(#name, mode, parent, trans, \ 2537 &iwl_dbgfs_##name##_ops); \ 2538 } while (0) 2539 2540 /* file operation */ 2541 #define DEBUGFS_READ_FILE_OPS(name) \ 2542 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2543 .read = iwl_dbgfs_##name##_read, \ 2544 .open = simple_open, \ 2545 .llseek = generic_file_llseek, \ 2546 }; 2547 2548 #define DEBUGFS_WRITE_FILE_OPS(name) \ 2549 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2550 .write = iwl_dbgfs_##name##_write, \ 2551 .open = simple_open, \ 2552 .llseek = generic_file_llseek, \ 2553 }; 2554 2555 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 2556 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2557 .write = iwl_dbgfs_##name##_write, \ 2558 .read = iwl_dbgfs_##name##_read, \ 2559 .open = simple_open, \ 2560 .llseek = generic_file_llseek, \ 2561 }; 2562 2563 struct iwl_dbgfs_tx_queue_priv { 2564 struct iwl_trans *trans; 2565 }; 2566 2567 struct iwl_dbgfs_tx_queue_state { 2568 loff_t pos; 2569 }; 2570 2571 static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos) 2572 { 2573 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2574 struct iwl_dbgfs_tx_queue_state *state; 2575 2576 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) 2577 return NULL; 2578 2579 state = kmalloc(sizeof(*state), GFP_KERNEL); 2580 if (!state) 2581 return NULL; 2582 state->pos = *pos; 2583 return state; 2584 } 2585 2586 static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq, 2587 void *v, loff_t *pos) 2588 { 2589 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2590 struct iwl_dbgfs_tx_queue_state *state = v; 2591 2592 *pos = ++state->pos; 2593 2594 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) 2595 return NULL; 2596 2597 return state; 2598 } 2599 2600 static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v) 2601 { 2602 kfree(v); 2603 } 2604 2605 static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) 2606 { 2607 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2608 struct iwl_dbgfs_tx_queue_state *state = v; 2609 struct iwl_trans *trans = priv->trans; 2610 struct iwl_txq *txq = trans->txqs.txq[state->pos]; 2611 2612 seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", 2613 (unsigned int)state->pos, 2614 !!test_bit(state->pos, trans->txqs.queue_used), 2615 !!test_bit(state->pos, trans->txqs.queue_stopped)); 2616 if (txq) 2617 seq_printf(seq, 2618 "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", 2619 txq->read_ptr, txq->write_ptr, 2620 txq->need_update, txq->frozen, 2621 txq->n_window, txq->ampdu); 2622 else 2623 seq_puts(seq, "(unallocated)"); 2624 2625 if (state->pos == trans->txqs.cmd.q_id) 2626 seq_puts(seq, " (HCMD)"); 2627 seq_puts(seq, "\n"); 2628 2629 return 0; 2630 } 2631 2632 static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = { 2633 .start = iwl_dbgfs_tx_queue_seq_start, 2634 .next = iwl_dbgfs_tx_queue_seq_next, 2635 .stop = iwl_dbgfs_tx_queue_seq_stop, 2636 .show = iwl_dbgfs_tx_queue_seq_show, 2637 }; 2638 2639 static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp) 2640 { 2641 struct iwl_dbgfs_tx_queue_priv *priv; 2642 2643 priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops, 2644 sizeof(*priv)); 2645 2646 if (!priv) 2647 return -ENOMEM; 2648 2649 priv->trans = inode->i_private; 2650 return 0; 2651 } 2652 2653 static ssize_t iwl_dbgfs_rx_queue_read(struct file 
*file, 2654 char __user *user_buf, 2655 size_t count, loff_t *ppos) 2656 { 2657 struct iwl_trans *trans = file->private_data; 2658 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2659 char *buf; 2660 int pos = 0, i, ret; 2661 size_t bufsz; 2662 2663 bufsz = sizeof(char) * 121 * trans->num_rx_queues; 2664 2665 if (!trans_pcie->rxq) 2666 return -EAGAIN; 2667 2668 buf = kzalloc(bufsz, GFP_KERNEL); 2669 if (!buf) 2670 return -ENOMEM; 2671 2672 for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { 2673 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 2674 2675 pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", 2676 i); 2677 pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", 2678 rxq->read); 2679 pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", 2680 rxq->write); 2681 pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", 2682 rxq->write_actual); 2683 pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", 2684 rxq->need_update); 2685 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", 2686 rxq->free_count); 2687 if (rxq->rb_stts) { 2688 u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans, 2689 rxq)); 2690 pos += scnprintf(buf + pos, bufsz - pos, 2691 "\tclosed_rb_num: %u\n", 2692 r & 0x0FFF); 2693 } else { 2694 pos += scnprintf(buf + pos, bufsz - pos, 2695 "\tclosed_rb_num: Not Allocated\n"); 2696 } 2697 } 2698 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2699 kfree(buf); 2700 2701 return ret; 2702 } 2703 2704 static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 2705 char __user *user_buf, 2706 size_t count, loff_t *ppos) 2707 { 2708 struct iwl_trans *trans = file->private_data; 2709 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2710 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2711 2712 int pos = 0; 2713 char *buf; 2714 int bufsz = 24 * 64; /* 24 items * 64 char per item */ 2715 ssize_t ret; 2716 2717 buf = kzalloc(bufsz, GFP_KERNEL); 2718 if (!buf) 2719 return -ENOMEM; 2720 2721 pos += scnprintf(buf + pos, bufsz - pos, 2722 "Interrupt Statistics Report:\n"); 2723 2724 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", 2725 isr_stats->hw); 2726 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", 2727 isr_stats->sw); 2728 if (isr_stats->sw || isr_stats->hw) { 2729 pos += scnprintf(buf + pos, bufsz - pos, 2730 "\tLast Restarting Code: 0x%X\n", 2731 isr_stats->err_code); 2732 } 2733 #ifdef CONFIG_IWLWIFI_DEBUG 2734 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", 2735 isr_stats->sch); 2736 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", 2737 isr_stats->alive); 2738 #endif 2739 pos += scnprintf(buf + pos, bufsz - pos, 2740 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); 2741 2742 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", 2743 isr_stats->ctkill); 2744 2745 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", 2746 isr_stats->wakeup); 2747 2748 pos += scnprintf(buf + pos, bufsz - pos, 2749 "Rx command responses:\t\t %u\n", isr_stats->rx); 2750 2751 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", 2752 isr_stats->tx); 2753 2754 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", 2755 isr_stats->unhandled); 2756 2757 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2758 kfree(buf); 2759 return ret; 2760 } 2761 2762 static ssize_t iwl_dbgfs_interrupt_write(struct file *file, 2763 const char __user 
*user_buf, 2764 size_t count, loff_t *ppos) 2765 { 2766 struct iwl_trans *trans = file->private_data; 2767 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2768 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2769 u32 reset_flag; 2770 int ret; 2771 2772 ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag); 2773 if (ret) 2774 return ret; 2775 if (reset_flag == 0) 2776 memset(isr_stats, 0, sizeof(*isr_stats)); 2777 2778 return count; 2779 } 2780 2781 static ssize_t iwl_dbgfs_csr_write(struct file *file, 2782 const char __user *user_buf, 2783 size_t count, loff_t *ppos) 2784 { 2785 struct iwl_trans *trans = file->private_data; 2786 2787 iwl_pcie_dump_csr(trans); 2788 2789 return count; 2790 } 2791 2792 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 2793 char __user *user_buf, 2794 size_t count, loff_t *ppos) 2795 { 2796 struct iwl_trans *trans = file->private_data; 2797 char *buf = NULL; 2798 ssize_t ret; 2799 2800 ret = iwl_dump_fh(trans, &buf); 2801 if (ret < 0) 2802 return ret; 2803 if (!buf) 2804 return -EINVAL; 2805 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 2806 kfree(buf); 2807 return ret; 2808 } 2809 2810 static ssize_t iwl_dbgfs_rfkill_read(struct file *file, 2811 char __user *user_buf, 2812 size_t count, loff_t *ppos) 2813 { 2814 struct iwl_trans *trans = file->private_data; 2815 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2816 char buf[100]; 2817 int pos; 2818 2819 pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n", 2820 trans_pcie->debug_rfkill, 2821 !(iwl_read32(trans, CSR_GP_CNTRL) & 2822 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)); 2823 2824 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2825 } 2826 2827 static ssize_t iwl_dbgfs_rfkill_write(struct file *file, 2828 const char __user *user_buf, 2829 size_t count, loff_t *ppos) 2830 { 2831 struct iwl_trans *trans = file->private_data; 2832 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2833 bool new_value; 2834 int ret; 2835 2836 ret = kstrtobool_from_user(user_buf, count, &new_value); 2837 if (ret) 2838 return ret; 2839 if (new_value == trans_pcie->debug_rfkill) 2840 return count; 2841 IWL_WARN(trans, "changing debug rfkill %d->%d\n", 2842 trans_pcie->debug_rfkill, new_value); 2843 trans_pcie->debug_rfkill = new_value; 2844 iwl_pcie_handle_rfkill_irq(trans); 2845 2846 return count; 2847 } 2848 2849 static int iwl_dbgfs_monitor_data_open(struct inode *inode, 2850 struct file *file) 2851 { 2852 struct iwl_trans *trans = inode->i_private; 2853 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2854 2855 if (!trans->dbg.dest_tlv || 2856 trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) { 2857 IWL_ERR(trans, "Debug destination is not set to DRAM\n"); 2858 return -ENOENT; 2859 } 2860 2861 if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED) 2862 return -EBUSY; 2863 2864 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN; 2865 return simple_open(inode, file); 2866 } 2867 2868 static int iwl_dbgfs_monitor_data_release(struct inode *inode, 2869 struct file *file) 2870 { 2871 struct iwl_trans_pcie *trans_pcie = 2872 IWL_TRANS_GET_PCIE_TRANS(inode->i_private); 2873 2874 if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN) 2875 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; 2876 return 0; 2877 } 2878 2879 static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count, 2880 void *buf, ssize_t *size, 2881 ssize_t *bytes_copied) 2882 
{
2883 ssize_t buf_size_left = count - *bytes_copied;
2884
2885 buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
2886 if (*size > buf_size_left)
2887 *size = buf_size_left;
2888
2889 *size -= copy_to_user(user_buf, buf, *size);
2890 *bytes_copied += *size;
2891
2892 if (buf_size_left == *size)
2893 return true;
2894 return false;
2895 }
2896
2897 static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
2898 char __user *user_buf,
2899 size_t count, loff_t *ppos)
2900 {
2901 struct iwl_trans *trans = file->private_data;
2902 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2903 u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
2904 struct cont_rec *data = &trans_pcie->fw_mon_data;
2905 u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
2906 ssize_t size, bytes_copied = 0;
2907 bool b_full;
2908
2909 if (trans->dbg.dest_tlv) {
2910 write_ptr_addr =
2911 le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
2912 wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
2913 } else {
2914 write_ptr_addr = MON_BUFF_WRPTR;
2915 wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
2916 }
2917
2918 if (unlikely(!trans->dbg.rec_on))
2919 return 0;
2920
2921 mutex_lock(&data->mutex);
2922 if (data->state ==
2923 IWL_FW_MON_DBGFS_STATE_DISABLED) {
2924 mutex_unlock(&data->mutex);
2925 return 0;
2926 }
2927
2928 /* write_ptr position in bytes rather than DWs */
2929 write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
2930 wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
2931
2932 if (data->prev_wrap_cnt == wrap_cnt) {
2933 size = write_ptr - data->prev_wr_ptr;
2934 curr_buf = cpu_addr + data->prev_wr_ptr;
2935 b_full = iwl_write_to_user_buf(user_buf, count,
2936 curr_buf, &size,
2937 &bytes_copied);
2938 data->prev_wr_ptr += size;
2939
2940 } else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2941 write_ptr < data->prev_wr_ptr) {
2942 size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
2943 curr_buf = cpu_addr + data->prev_wr_ptr;
2944 b_full = iwl_write_to_user_buf(user_buf, count,
2945 curr_buf, &size,
2946 &bytes_copied);
2947 data->prev_wr_ptr += size;
2948
2949 if (!b_full) {
2950 size = write_ptr;
2951 b_full = iwl_write_to_user_buf(user_buf, count,
2952 cpu_addr, &size,
2953 &bytes_copied);
2954 data->prev_wr_ptr = size;
2955 data->prev_wrap_cnt++;
2956 }
2957 } else {
2958 if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2959 write_ptr > data->prev_wr_ptr)
2960 IWL_WARN(trans,
2961 "write pointer passed previous write pointer, start copying from the beginning\n");
2962 else if (!unlikely(data->prev_wrap_cnt == 0 &&
2963 data->prev_wr_ptr == 0))
2964 IWL_WARN(trans,
2965 "monitor data is out of sync, start copying from the beginning\n");
2966
2967 size = write_ptr;
2968 b_full = iwl_write_to_user_buf(user_buf, count,
2969 cpu_addr, &size,
2970 &bytes_copied);
2971 data->prev_wr_ptr = size;
2972 data->prev_wrap_cnt = wrap_cnt;
2973 }
2974
2975 mutex_unlock(&data->mutex);
2976
2977 return bytes_copied;
2978 }
2979
2980 static ssize_t iwl_dbgfs_rf_read(struct file *file,
2981 char __user *user_buf,
2982 size_t count, loff_t *ppos)
2983 {
2984 struct iwl_trans *trans = file->private_data;
2985 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2986
2987 if (!trans_pcie->rf_name[0])
2988 return -ENODEV;
2989
2990 return simple_read_from_buffer(user_buf, count, ppos,
2991 trans_pcie->rf_name,
2992 strlen(trans_pcie->rf_name));
2993 }
2994
2995 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
2996 DEBUGFS_READ_FILE_OPS(fh_reg);
2997
DEBUGFS_READ_FILE_OPS(rx_queue); 2998 DEBUGFS_WRITE_FILE_OPS(csr); 2999 DEBUGFS_READ_WRITE_FILE_OPS(rfkill); 3000 DEBUGFS_READ_FILE_OPS(rf); 3001 3002 static const struct file_operations iwl_dbgfs_tx_queue_ops = { 3003 .owner = THIS_MODULE, 3004 .open = iwl_dbgfs_tx_queue_open, 3005 .read = seq_read, 3006 .llseek = seq_lseek, 3007 .release = seq_release_private, 3008 }; 3009 3010 static const struct file_operations iwl_dbgfs_monitor_data_ops = { 3011 .read = iwl_dbgfs_monitor_data_read, 3012 .open = iwl_dbgfs_monitor_data_open, 3013 .release = iwl_dbgfs_monitor_data_release, 3014 }; 3015 3016 /* Create the debugfs files and directories */ 3017 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) 3018 { 3019 struct dentry *dir = trans->dbgfs_dir; 3020 3021 DEBUGFS_ADD_FILE(rx_queue, dir, 0400); 3022 DEBUGFS_ADD_FILE(tx_queue, dir, 0400); 3023 DEBUGFS_ADD_FILE(interrupt, dir, 0600); 3024 DEBUGFS_ADD_FILE(csr, dir, 0200); 3025 DEBUGFS_ADD_FILE(fh_reg, dir, 0400); 3026 DEBUGFS_ADD_FILE(rfkill, dir, 0600); 3027 DEBUGFS_ADD_FILE(monitor_data, dir, 0400); 3028 DEBUGFS_ADD_FILE(rf, dir, 0400); 3029 } 3030 3031 static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) 3032 { 3033 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3034 struct cont_rec *data = &trans_pcie->fw_mon_data; 3035 3036 mutex_lock(&data->mutex); 3037 data->state = IWL_FW_MON_DBGFS_STATE_DISABLED; 3038 mutex_unlock(&data->mutex); 3039 } 3040 #endif /*CONFIG_IWLWIFI_DEBUGFS */ 3041 3042 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) 3043 { 3044 u32 cmdlen = 0; 3045 int i; 3046 3047 for (i = 0; i < trans->txqs.tfd.max_tbs; i++) 3048 cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i); 3049 3050 return cmdlen; 3051 } 3052 3053 static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, 3054 struct iwl_fw_error_dump_data **data, 3055 int allocated_rb_nums) 3056 { 3057 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3058 int max_len = trans_pcie->rx_buf_bytes; 3059 /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 3060 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 3061 u32 i, r, j, rb_len = 0; 3062 3063 spin_lock(&rxq->lock); 3064 3065 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; 3066 3067 for (i = rxq->read, j = 0; 3068 i != r && j < allocated_rb_nums; 3069 i = (i + 1) & RX_QUEUE_MASK, j++) { 3070 struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; 3071 struct iwl_fw_error_dump_rb *rb; 3072 3073 dma_sync_single_for_cpu(trans->dev, rxb->page_dma, 3074 max_len, DMA_FROM_DEVICE); 3075 3076 rb_len += sizeof(**data) + sizeof(*rb) + max_len; 3077 3078 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); 3079 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); 3080 rb = (void *)(*data)->data; 3081 rb->index = cpu_to_le32(i); 3082 memcpy(rb->data, page_address(rxb->page), max_len); 3083 3084 *data = iwl_fw_error_next_data(*data); 3085 } 3086 3087 spin_unlock(&rxq->lock); 3088 3089 return rb_len; 3090 } 3091 #define IWL_CSR_TO_DUMP (0x250) 3092 3093 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, 3094 struct iwl_fw_error_dump_data **data) 3095 { 3096 u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; 3097 __le32 *val; 3098 int i; 3099 3100 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); 3101 (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); 3102 val = (void *)(*data)->data; 3103 3104 for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) 3105 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 3106 3107 *data = 
iwl_fw_error_next_data(*data); 3108 3109 return csr_len; 3110 } 3111 3112 static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, 3113 struct iwl_fw_error_dump_data **data) 3114 { 3115 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; 3116 __le32 *val; 3117 int i; 3118 3119 if (!iwl_trans_grab_nic_access(trans)) 3120 return 0; 3121 3122 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); 3123 (*data)->len = cpu_to_le32(fh_regs_len); 3124 val = (void *)(*data)->data; 3125 3126 if (!trans->trans_cfg->gen2) 3127 for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; 3128 i += sizeof(u32)) 3129 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 3130 else 3131 for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2); 3132 i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2); 3133 i += sizeof(u32)) 3134 *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, 3135 i)); 3136 3137 iwl_trans_release_nic_access(trans); 3138 3139 *data = iwl_fw_error_next_data(*data); 3140 3141 return sizeof(**data) + fh_regs_len; 3142 } 3143 3144 static u32 3145 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, 3146 struct iwl_fw_error_dump_fw_mon *fw_mon_data, 3147 u32 monitor_len) 3148 { 3149 u32 buf_size_in_dwords = (monitor_len >> 2); 3150 u32 *buffer = (u32 *)fw_mon_data->data; 3151 u32 i; 3152 3153 if (!iwl_trans_grab_nic_access(trans)) 3154 return 0; 3155 3156 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); 3157 for (i = 0; i < buf_size_in_dwords; i++) 3158 buffer[i] = iwl_read_umac_prph_no_grab(trans, 3159 MON_DMARB_RD_DATA_ADDR); 3160 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); 3161 3162 iwl_trans_release_nic_access(trans); 3163 3164 return monitor_len; 3165 } 3166 3167 static void 3168 iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, 3169 struct iwl_fw_error_dump_fw_mon *fw_mon_data) 3170 { 3171 u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; 3172 3173 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 3174 base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; 3175 base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; 3176 write_ptr = DBGC_CUR_DBGBUF_STATUS; 3177 wrap_cnt = DBGC_DBGBUF_WRAP_AROUND; 3178 } else if (trans->dbg.dest_tlv) { 3179 write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); 3180 wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); 3181 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3182 } else { 3183 base = MON_BUFF_BASE_ADDR; 3184 write_ptr = MON_BUFF_WRPTR; 3185 wrap_cnt = MON_BUFF_CYCLE_CNT; 3186 } 3187 3188 write_ptr_val = iwl_read_prph(trans, write_ptr); 3189 fw_mon_data->fw_mon_cycle_cnt = 3190 cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); 3191 fw_mon_data->fw_mon_base_ptr = 3192 cpu_to_le32(iwl_read_prph(trans, base)); 3193 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 3194 fw_mon_data->fw_mon_base_high_ptr = 3195 cpu_to_le32(iwl_read_prph(trans, base_high)); 3196 write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; 3197 /* convert wrtPtr to DWs, to align with all HWs */ 3198 write_ptr_val >>= 2; 3199 } 3200 fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); 3201 } 3202 3203 static u32 3204 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, 3205 struct iwl_fw_error_dump_data **data, 3206 u32 monitor_len) 3207 { 3208 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; 3209 u32 len = 0; 3210 3211 if (trans->dbg.dest_tlv || 3212 (fw_mon->size && 3213 (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 || 3214 trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { 3215 
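/*
 * Three monitor layouts are handled below: a host DRAM buffer that
 * can be memcpy()'d directly, an SMEM buffer read out via
 * iwl_trans_read_mem(), and a MARBH buffer drained register by
 * register. Anything else yields no monitor payload.
 */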
struct iwl_fw_error_dump_fw_mon *fw_mon_data; 3216 3217 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); 3218 fw_mon_data = (void *)(*data)->data; 3219 3220 iwl_trans_pcie_dump_pointers(trans, fw_mon_data); 3221 3222 len += sizeof(**data) + sizeof(*fw_mon_data); 3223 if (fw_mon->size) { 3224 memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size); 3225 monitor_len = fw_mon->size; 3226 } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) { 3227 u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr); 3228 /* 3229 * Update pointers to reflect actual values after 3230 * shifting 3231 */ 3232 if (trans->dbg.dest_tlv->version) { 3233 base = (iwl_read_prph(trans, base) & 3234 IWL_LDBG_M2S_BUF_BA_MSK) << 3235 trans->dbg.dest_tlv->base_shift; 3236 base *= IWL_M2S_UNIT_SIZE; 3237 base += trans->cfg->smem_offset; 3238 } else { 3239 base = iwl_read_prph(trans, base) << 3240 trans->dbg.dest_tlv->base_shift; 3241 } 3242 3243 iwl_trans_read_mem(trans, base, fw_mon_data->data, 3244 monitor_len / sizeof(u32)); 3245 } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) { 3246 monitor_len = 3247 iwl_trans_pci_dump_marbh_monitor(trans, 3248 fw_mon_data, 3249 monitor_len); 3250 } else { 3251 /* Didn't match anything - output no monitor data */ 3252 monitor_len = 0; 3253 } 3254 3255 len += monitor_len; 3256 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); 3257 } 3258 3259 return len; 3260 } 3261 3262 static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) 3263 { 3264 if (trans->dbg.fw_mon.size) { 3265 *len += sizeof(struct iwl_fw_error_dump_data) + 3266 sizeof(struct iwl_fw_error_dump_fw_mon) + 3267 trans->dbg.fw_mon.size; 3268 return trans->dbg.fw_mon.size; 3269 } else if (trans->dbg.dest_tlv) { 3270 u32 base, end, cfg_reg, monitor_len; 3271 3272 if (trans->dbg.dest_tlv->version == 1) { 3273 cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3274 cfg_reg = iwl_read_prph(trans, cfg_reg); 3275 base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << 3276 trans->dbg.dest_tlv->base_shift; 3277 base *= IWL_M2S_UNIT_SIZE; 3278 base += trans->cfg->smem_offset; 3279 3280 monitor_len = 3281 (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> 3282 trans->dbg.dest_tlv->end_shift; 3283 monitor_len *= IWL_M2S_UNIT_SIZE; 3284 } else { 3285 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3286 end = le32_to_cpu(trans->dbg.dest_tlv->end_reg); 3287 3288 base = iwl_read_prph(trans, base) << 3289 trans->dbg.dest_tlv->base_shift; 3290 end = iwl_read_prph(trans, end) << 3291 trans->dbg.dest_tlv->end_shift; 3292 3293 /* Make "end" point to the actual end */ 3294 if (trans->trans_cfg->device_family >= 3295 IWL_DEVICE_FAMILY_8000 || 3296 trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) 3297 end += (1 << trans->dbg.dest_tlv->end_shift); 3298 monitor_len = end - base; 3299 } 3300 *len += sizeof(struct iwl_fw_error_dump_data) + 3301 sizeof(struct iwl_fw_error_dump_fw_mon) + 3302 monitor_len; 3303 return monitor_len; 3304 } 3305 return 0; 3306 } 3307 3308 static struct iwl_trans_dump_data * 3309 iwl_trans_pcie_dump_data(struct iwl_trans *trans, 3310 u32 dump_mask, 3311 const struct iwl_dump_sanitize_ops *sanitize_ops, 3312 void *sanitize_ctx) 3313 { 3314 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3315 struct iwl_fw_error_dump_data *data; 3316 struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id]; 3317 struct iwl_fw_error_dump_txcmd *txcmd; 3318 struct iwl_trans_dump_data *dump_data; 3319 u32 len, num_rbs = 0, monitor_len = 0; 3320 int i, ptr; 3321 bool dump_rbs = 
test_bit(STATUS_FW_ERROR, &trans->status) && 3322 !trans->trans_cfg->mq_rx_supported && 3323 dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); 3324 3325 if (!dump_mask) 3326 return NULL; 3327 3328 /* transport dump header */ 3329 len = sizeof(*dump_data); 3330 3331 /* host commands */ 3332 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) 3333 len += sizeof(*data) + 3334 cmdq->n_window * (sizeof(*txcmd) + 3335 TFD_MAX_PAYLOAD_SIZE); 3336 3337 /* FW monitor */ 3338 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) 3339 monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); 3340 3341 /* CSR registers */ 3342 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) 3343 len += sizeof(*data) + IWL_CSR_TO_DUMP; 3344 3345 /* FH registers */ 3346 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { 3347 if (trans->trans_cfg->gen2) 3348 len += sizeof(*data) + 3349 (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - 3350 iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); 3351 else 3352 len += sizeof(*data) + 3353 (FH_MEM_UPPER_BOUND - 3354 FH_MEM_LOWER_BOUND); 3355 } 3356 3357 if (dump_rbs) { 3358 /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 3359 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 3360 /* RBs */ 3361 num_rbs = 3362 le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) 3363 & 0x0FFF; 3364 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; 3365 len += num_rbs * (sizeof(*data) + 3366 sizeof(struct iwl_fw_error_dump_rb) + 3367 (PAGE_SIZE << trans_pcie->rx_page_order)); 3368 } 3369 3370 /* Paged memory for gen2 HW */ 3371 if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) 3372 for (i = 0; i < trans->init_dram.paging_cnt; i++) 3373 len += sizeof(*data) + 3374 sizeof(struct iwl_fw_error_dump_paging) + 3375 trans->init_dram.paging[i].size; 3376 3377 dump_data = vzalloc(len); 3378 if (!dump_data) 3379 return NULL; 3380 3381 len = 0; 3382 data = (void *)dump_data->data; 3383 3384 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { 3385 u16 tfd_size = trans->txqs.tfd.size; 3386 3387 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); 3388 txcmd = (void *)data->data; 3389 spin_lock_bh(&cmdq->lock); 3390 ptr = cmdq->write_ptr; 3391 for (i = 0; i < cmdq->n_window; i++) { 3392 u8 idx = iwl_txq_get_cmd_index(cmdq, ptr); 3393 u8 tfdidx; 3394 u32 caplen, cmdlen; 3395 3396 if (trans->trans_cfg->use_tfh) 3397 tfdidx = idx; 3398 else 3399 tfdidx = ptr; 3400 3401 cmdlen = iwl_trans_pcie_get_cmdlen(trans, 3402 (u8 *)cmdq->tfds + 3403 tfd_size * tfdidx); 3404 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); 3405 3406 if (cmdlen) { 3407 len += sizeof(*txcmd) + caplen; 3408 txcmd->cmdlen = cpu_to_le32(cmdlen); 3409 txcmd->caplen = cpu_to_le32(caplen); 3410 memcpy(txcmd->data, cmdq->entries[idx].cmd, 3411 caplen); 3412 if (sanitize_ops && sanitize_ops->frob_hcmd) 3413 sanitize_ops->frob_hcmd(sanitize_ctx, 3414 txcmd->data, 3415 caplen); 3416 txcmd = (void *)((u8 *)txcmd->data + caplen); 3417 } 3418 3419 ptr = iwl_txq_dec_wrap(trans, ptr); 3420 } 3421 spin_unlock_bh(&cmdq->lock); 3422 3423 data->len = cpu_to_le32(len); 3424 len += sizeof(*data); 3425 data = iwl_fw_error_next_data(data); 3426 } 3427 3428 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) 3429 len += iwl_trans_pcie_dump_csr(trans, &data); 3430 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) 3431 len += iwl_trans_pcie_fh_regs_dump(trans, &data); 3432 if (dump_rbs) 3433 len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); 3434 3435 /* Paged memory for gen2 HW */ 3436 if (trans->trans_cfg->gen2 && 3437 dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { 
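/*
 * Second pass over the paging blocks: their sizes were already
 * accounted for in the length computation above; here the data
 * itself is copied out.
 */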
3438 for (i = 0; i < trans->init_dram.paging_cnt; i++) { 3439 struct iwl_fw_error_dump_paging *paging; 3440 u32 page_len = trans->init_dram.paging[i].size; 3441 3442 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); 3443 data->len = cpu_to_le32(sizeof(*paging) + page_len); 3444 paging = (void *)data->data; 3445 paging->index = cpu_to_le32(i); 3446 memcpy(paging->data, 3447 trans->init_dram.paging[i].block, page_len); 3448 data = iwl_fw_error_next_data(data); 3449 3450 len += sizeof(*data) + sizeof(*paging) + page_len; 3451 } 3452 } 3453 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) 3454 len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 3455 3456 dump_data->len = len; 3457 3458 return dump_data; 3459 } 3460 3461 static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) 3462 { 3463 if (enable) 3464 iwl_enable_interrupts(trans); 3465 else 3466 iwl_disable_interrupts(trans); 3467 } 3468 3469 static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) 3470 { 3471 u32 inta_addr, sw_err_bit; 3472 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3473 3474 if (trans_pcie->msix_enabled) { 3475 inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; 3476 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 3477 sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; 3478 else 3479 sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; 3480 } else { 3481 inta_addr = CSR_INT; 3482 sw_err_bit = CSR_INT_BIT_SW_ERR; 3483 } 3484 3485 iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit); 3486 } 3487 3488 #define IWL_TRANS_COMMON_OPS \ 3489 .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ 3490 .write8 = iwl_trans_pcie_write8, \ 3491 .write32 = iwl_trans_pcie_write32, \ 3492 .read32 = iwl_trans_pcie_read32, \ 3493 .read_prph = iwl_trans_pcie_read_prph, \ 3494 .write_prph = iwl_trans_pcie_write_prph, \ 3495 .read_mem = iwl_trans_pcie_read_mem, \ 3496 .write_mem = iwl_trans_pcie_write_mem, \ 3497 .read_config32 = iwl_trans_pcie_read_config32, \ 3498 .configure = iwl_trans_pcie_configure, \ 3499 .set_pmi = iwl_trans_pcie_set_pmi, \ 3500 .sw_reset = iwl_trans_pcie_sw_reset, \ 3501 .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ 3502 .release_nic_access = iwl_trans_pcie_release_nic_access, \ 3503 .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ 3504 .dump_data = iwl_trans_pcie_dump_data, \ 3505 .d3_suspend = iwl_trans_pcie_d3_suspend, \ 3506 .d3_resume = iwl_trans_pcie_d3_resume, \ 3507 .interrupts = iwl_trans_pci_interrupts, \ 3508 .sync_nmi = iwl_trans_pcie_sync_nmi, \ 3509 .imr_dma_data = iwl_trans_pcie_copy_imr \ 3510 3511 static const struct iwl_trans_ops trans_ops_pcie = { 3512 IWL_TRANS_COMMON_OPS, 3513 .start_hw = iwl_trans_pcie_start_hw, 3514 .fw_alive = iwl_trans_pcie_fw_alive, 3515 .start_fw = iwl_trans_pcie_start_fw, 3516 .stop_device = iwl_trans_pcie_stop_device, 3517 3518 .send_cmd = iwl_pcie_enqueue_hcmd, 3519 3520 .tx = iwl_trans_pcie_tx, 3521 .reclaim = iwl_txq_reclaim, 3522 3523 .txq_disable = iwl_trans_pcie_txq_disable, 3524 .txq_enable = iwl_trans_pcie_txq_enable, 3525 3526 .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, 3527 3528 .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, 3529 3530 .freeze_txq_timer = iwl_trans_txq_freeze_timer, 3531 .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, 3532 #ifdef CONFIG_IWLWIFI_DEBUGFS 3533 .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, 3534 #endif 3535 }; 3536 3537 static const struct iwl_trans_ops trans_ops_pcie_gen2 = { 3538 IWL_TRANS_COMMON_OPS, 3539 .start_hw = iwl_trans_pcie_start_hw, 3540 
.fw_alive = iwl_trans_pcie_gen2_fw_alive,
3541 .start_fw = iwl_trans_pcie_gen2_start_fw,
3542 .stop_device = iwl_trans_pcie_gen2_stop_device,
3543
3544 .send_cmd = iwl_pcie_gen2_enqueue_hcmd,
3545
3546 .tx = iwl_txq_gen2_tx,
3547 .reclaim = iwl_txq_reclaim,
3548
3549 .set_q_ptrs = iwl_txq_set_q_ptrs,
3550
3551 .txq_alloc = iwl_txq_dyn_alloc,
3552 .txq_free = iwl_txq_dyn_free,
3553 .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
3554 .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
3555 .load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm,
3556 .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
3557 .load_reduce_power = iwl_trans_pcie_ctx_info_gen3_load_reduce_power,
3558 .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
3559 #ifdef CONFIG_IWLWIFI_DEBUGFS
3560 .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3561 #endif
3562 };
3563
3564 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3565 const struct pci_device_id *ent,
3566 const struct iwl_cfg_trans_params *cfg_trans)
3567 {
3568 struct iwl_trans_pcie *trans_pcie;
3569 struct iwl_trans *trans;
3570 int ret, addr_size;
3571 const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
3572 void __iomem * const *table;
3573
3574 if (!cfg_trans->gen2)
3575 ops = &trans_ops_pcie;
3576
3577 ret = pcim_enable_device(pdev);
3578 if (ret)
3579 return ERR_PTR(ret);
3580
3581 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
3582 cfg_trans);
3583 if (!trans)
3584 return ERR_PTR(-ENOMEM);
3585
3586 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3587
3588 trans_pcie->trans = trans;
3589 trans_pcie->opmode_down = true;
3590 spin_lock_init(&trans_pcie->irq_lock);
3591 spin_lock_init(&trans_pcie->reg_lock);
3592 spin_lock_init(&trans_pcie->alloc_page_lock);
3593 mutex_init(&trans_pcie->mutex);
3594 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
3595 init_waitqueue_head(&trans_pcie->fw_reset_waitq);
3596 init_waitqueue_head(&trans_pcie->imr_waitq);
3597
3598 trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3599 WQ_HIGHPRI | WQ_UNBOUND, 1);
3600 if (!trans_pcie->rba.alloc_wq) {
3601 ret = -ENOMEM;
3602 goto out_free_trans;
3603 }
3604 INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3605
3606 trans_pcie->debug_rfkill = -1;
3607
3608 if (!cfg_trans->base_params->pcie_l1_allowed) {
3609 /*
3610  * W/A - seems to solve weird device behavior. Disabling these link
3611  * states keeps the link in L0 the whole time, which wastes a lot
3612  * of power; remove this once the underlying issue is understood.
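 * (note that clock power management, PCIE_LINK_STATE_CLKPM, is
 * disabled below as well)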
3613 */ 3614 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 3615 PCIE_LINK_STATE_L1 | 3616 PCIE_LINK_STATE_CLKPM); 3617 } 3618 3619 trans_pcie->def_rx_queue = 0; 3620 3621 pci_set_master(pdev); 3622 3623 addr_size = trans->txqs.tfd.addr_size; 3624 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)); 3625 if (ret) { 3626 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3627 /* both attempts failed: */ 3628 if (ret) { 3629 dev_err(&pdev->dev, "No suitable DMA available\n"); 3630 goto out_no_pci; 3631 } 3632 } 3633 3634 ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); 3635 if (ret) { 3636 dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); 3637 goto out_no_pci; 3638 } 3639 3640 table = pcim_iomap_table(pdev); 3641 if (!table) { 3642 dev_err(&pdev->dev, "pcim_iomap_table failed\n"); 3643 ret = -ENOMEM; 3644 goto out_no_pci; 3645 } 3646 3647 trans_pcie->hw_base = table[0]; 3648 if (!trans_pcie->hw_base) { 3649 dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n"); 3650 ret = -ENODEV; 3651 goto out_no_pci; 3652 } 3653 3654 /* We disable the RETRY_TIMEOUT register (0x41) to keep 3655 * PCI Tx retries from interfering with C3 CPU state */ 3656 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 3657 3658 trans_pcie->pci_dev = pdev; 3659 iwl_disable_interrupts(trans); 3660 3661 trans->hw_rev = iwl_read32(trans, CSR_HW_REV); 3662 if (trans->hw_rev == 0xffffffff) { 3663 dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); 3664 ret = -EIO; 3665 goto out_no_pci; 3666 } 3667 3668 /* 3669 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have 3670 * changed, and now the revision step also includes bit 0-1 (no more 3671 * "dash" value). To keep hw_rev backwards compatible - we'll store it 3672 * in the old format. 
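 * (see the conditional below: on the 8000+ families the step is
 * hw_rev & 0xF, while earlier families keep it in bits 2-3)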
3673 */ 3674 if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) 3675 trans->hw_rev_step = trans->hw_rev & 0xF; 3676 else 3677 trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2; 3678 3679 IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); 3680 3681 iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans); 3682 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 3683 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 3684 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); 3685 3686 init_waitqueue_head(&trans_pcie->sx_waitq); 3687 3688 3689 if (trans_pcie->msix_enabled) { 3690 ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); 3691 if (ret) 3692 goto out_no_pci; 3693 } else { 3694 ret = iwl_pcie_alloc_ict(trans); 3695 if (ret) 3696 goto out_no_pci; 3697 3698 ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, 3699 iwl_pcie_isr, 3700 iwl_pcie_irq_handler, 3701 IRQF_SHARED, DRV_NAME, trans); 3702 if (ret) { 3703 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 3704 goto out_free_ict; 3705 } 3706 } 3707 3708 #ifdef CONFIG_IWLWIFI_DEBUGFS 3709 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; 3710 mutex_init(&trans_pcie->fw_mon_data.mutex); 3711 #endif 3712 3713 iwl_dbg_tlv_init(trans); 3714 3715 return trans; 3716 3717 out_free_ict: 3718 iwl_pcie_free_ict(trans); 3719 out_no_pci: 3720 destroy_workqueue(trans_pcie->rba.alloc_wq); 3721 out_free_trans: 3722 iwl_trans_free(trans); 3723 return ERR_PTR(ret); 3724 } 3725 3726 void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, 3727 u32 dst_addr, u64 src_addr, u32 byte_cnt) 3728 { 3729 iwl_write_prph(trans, IMR_UREG_CHICK, 3730 iwl_read_prph(trans, IMR_UREG_CHICK) | 3731 IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK); 3732 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr); 3733 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB, 3734 (u32)(src_addr & 0xFFFFFFFF)); 3735 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB, 3736 iwl_get_dma_hi_addr(src_addr)); 3737 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt); 3738 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL, 3739 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS | 3740 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS | 3741 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK); 3742 } 3743 3744 int iwl_trans_pcie_copy_imr(struct iwl_trans *trans, 3745 u32 dst_addr, u64 src_addr, u32 byte_cnt) 3746 { 3747 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3748 int ret = -1; 3749 3750 trans_pcie->imr_status = IMR_D2S_REQUESTED; 3751 iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt); 3752 ret = wait_event_timeout(trans_pcie->imr_waitq, 3753 trans_pcie->imr_status != 3754 IMR_D2S_REQUESTED, 5 * HZ); 3755 if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) { 3756 IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n"); 3757 iwl_trans_pcie_dump_regs(trans); 3758 return -ETIMEDOUT; 3759 } 3760 trans_pcie->imr_status = IMR_D2S_IDLE; 3761 return 0; 3762 } 3763