/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2003-2015, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "queue/tx.h"
#include "iwl-context-info.h"

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer
 */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @list: list entry for the membuffer
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
	u32 offset;
	u16 vid;
	bool invalid;
};

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;

/**
 * struct iwl_rx_completion_desc_bz - Bz completion descriptor
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved: reserved
 */
struct iwl_rx_completion_desc_bz {
	__le16 rbid;
	u8 flags;
	u8 reserved[1];
} __packed;
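
/*
 * The three RX descriptor layouts above are DMA'd verbatim to/from the
 * device, so their packed sizes matter: 16 bytes for the transfer
 * descriptor, 32 for the pre-Bz completion descriptor and 4 for the Bz
 * one. An illustrative compile-time check (a sketch, not part of this
 * header) could pin those layouts down:
 *
 *	#include <linux/build_bug.h>
 *
 *	static_assert(sizeof(struct iwl_rx_transfer_desc) == 16);
 *	static_assert(sizeof(struct iwl_rx_completion_desc) == 32);
 *	static_assert(sizeof(struct iwl_rx_completion_desc_bz) == 4);
 */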

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to allocator to use for allocation
 * @write_actual: last write pointer actually written to the device,
 *	rounded down to a multiple of 8
 * @queue_size: size of this queue, in entries
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's lists and indices
 * @napi: NAPI context for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 * @next_rb_is_fragment: indicates that the previous RB that we handled set
 *	the fragmented flag, so the next one is still another fragment
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	void *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator hasn't processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}
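
/*
 * Illustrative sketch (assumed caller, not part of this header): the RX
 * path reads the closed-RB count through the helper above and masks it
 * down to the 12 valid bits before comparing it with the driver's own
 * read index, to see how many RBs the device has finished with:
 *
 *	u32 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 *
 *	while (rxq->read != r) {
 *		... hand the next RB to the op-mode, advance rxq->read ...
 *	}
 */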

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec - continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8 state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

enum iwl_pcie_fw_reset_state {
	FW_RESET_IDLE,
	FW_RESET_REQUESTED,
	FW_RESET_OK,
	FW_RESET_ERROR,
};

/**
 * enum iwl_pcie_imr_status - imr dma transfer state
 * @IMR_D2S_IDLE: default value of the dma transfer
 * @IMR_D2S_REQUESTED: dma transfer requested
 * @IMR_D2S_COMPLETED: dma transfer completed
 * @IMR_D2S_ERROR: dma transfer error
 */
enum iwl_pcie_imr_status {
	IMR_D2S_IDLE,
	IMR_D2S_REQUESTED,
	IMR_D2S_COMPLETED,
	IMR_D2S_ERROR,
};
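
/*
 * Illustrative sketch (assumed flow, not a verbatim copy of the driver):
 * the IMR state machine above pairs with the imr_waitq member of struct
 * iwl_trans_pcie below. The requester marks the transfer as requested,
 * kicks the DMA engine and sleeps until the ISR moves the state to
 * completed or error:
 *
 *	trans_pcie->imr_status = IMR_D2S_REQUESTED;
 *	iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
 *	ret = wait_event_timeout(trans_pcie->imr_waitq,
 *				 trans_pcie->imr_status !=
 *					IMR_D2S_REQUESTED, 5 * HZ);
 *	if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR)
 *		... handle the failed transfer ...
 */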

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @iml: image loader image virtual address
 * @iml_dma_addr: image loader image DMA address
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @kw: keep warm address
 * @pnvm_data: holds info about pnvm payloads allocated in DRAM
 * @reduced_tables_data: holds info about power reduced tables
 *	payloads allocated in DRAM
 * @pci_dev: PCI device handle
 * @hw_base: mapped MMIO base address of the device
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_hold_nic_awake: whether the NIC is held awake while a host
 *	command is in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 * @imr_status: imr dma state machine
 * @imr_waitq: imr wait queue for dma completion
 * @rf_name: name/version of the CRF, if any
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	void *iml;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr kw;

	/* pnvm data */
	struct iwl_dram_regions pnvm_data;
	struct iwl_dram_regions reduced_tables_data;

	struct iwl_txq *txq_memory;

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	u8 __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t sx_waitq;

	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool scd_set_active;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;

	bool fw_reset_handshake;
	enum iwl_pcie_fw_reset_state fw_reset_state;
	wait_queue_head_t fw_reset_waitq;
	enum iwl_pcie_imr_status imr_status;
	wait_queue_head_t imr_waitq;
	char rf_name[32];
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that it's cleared by writing 1
	 * to the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
}
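
/*
 * Illustrative sketch (assumed handler shape, not part of this header):
 * because of the auto-mask behaviour described above, a per-queue MSI-X
 * RX handler typically does its work and then re-arms its own vector as
 * the very last step:
 *
 *	static irqreturn_t example_rx_msix_handler(int irq, void *dev_id)
 *	{
 *		struct msix_entry *entry = dev_id;
 *
 *		... poll the RX queue matching entry->entry ...
 *
 *		iwl_pcie_clear_irq(trans, entry->entry);
 *		return IRQ_HANDLED;
 *	}
 */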

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 * Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);
void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
					   struct device *dev);

bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
#define _iwl_trans_pcie_grab_nic_access(trans)			\
	__cond_lock(nic_access_nobh,				\
		    likely(__iwl_trans_pcie_grab_nic_access(trans)))

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
void iwl_pcie_rx_napi_sync(struct iwl_trans *trans);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);
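
/*
 * Illustrative sketch (assumed caller, not part of this header): the
 * __cond_lock() wrapper above tells sparse that a successful call
 * acquires "nic_access_nobh", so callers take the conditional form and
 * pair it with a release path that drops the annotation again (via
 * __release(nic_access_nobh)) when it lets the NIC sleep:
 *
 *	if (_iwl_trans_pcie_grab_nic_access(trans)) {
 *		... read/write device registers while the NIC is awake ...
 *		... release nic access, which does
 *		    __release(nic_access_nobh) internally ...
 *	}
 */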

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
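
/*
 * Worked example (hypothetical image layout): for a fw_img with
 * num_sec == 4 whose sections are laid out as
 *
 *	sec[0].offset = 0x0	(CPU1 code)
 *	sec[1].offset = 0x4000	(CPU1 data)
 *	sec[2].offset = CPU1_CPU2_SEPARATOR_SECTION
 *	sec[3].offset = 0x8000	(CPU2 code)
 *
 * iwl_pcie_get_num_sections(fw, 0) returns 2 (the CPU1 sections,
 * stopping at the separator) and iwl_pcie_get_num_sections(fw, 3)
 * returns 1 (the CPU2 section, stopping at num_sec).
 */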

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}
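
/*
 * Worked example (illustrative values): with MSI-X, the mask registers
 * are inverted relative to the software bookkeeping. If the HW causes
 * we want enabled are
 *
 *	msk = MSIX_HW_INT_CAUSES_REG_ALIVE | MSIX_HW_INT_CAUSES_REG_RF_KILL
 *
 * then iwl_enable_hw_int_msk_msix() writes ~msk to
 * CSR_MSIX_HW_INT_MASK_AD (every bit set except the two causes, i.e.
 * everything else stays masked), while trans_pcie->hw_mask records msk,
 * the causes that are currently unmasked.
 */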

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ":shared_IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ":queue_%d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ":default_queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ":exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ":queue_%d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit
		 * to allow it to wake up the PCI-E bus for RF-kill
		 * interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
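
/*
 * Worked example (illustrative register value): the helpers above do a
 * read-modify-write on an MMIO register; only bits inside the mask are
 * touched. For instance, with a register currently reading 0xF0, the
 * call
 *
 *	__iwl_trans_pcie_set_bits_mask(trans, reg, 0x30, 0x10);
 *
 * writes back (0xF0 & ~0x30) | 0x10 == 0xD0: bit 4 is set, bit 5 is
 * cleared, and bits outside the 0x30 mask are preserved.
 */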

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
			       struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
				u32 dst_addr, u64 src_addr, u32 byte_cnt);
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr, u32 byte_cnt);

#endif /* __iwl_trans_int_pcie_h__ */