/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
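
/*
 * Illustrative sketch (not part of the driver): for a hypothetical device
 * whose TFDs hold max_tbs == 25 buffer descriptors, the budget above works
 * out as follows:
 *
 *	struct iwl_trans_pcie *trans_pcie = ...;	// max_tbs == 25
 *	int max_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);	// 25 - 3 == 22
 *
 * i.e. two TBs are reserved for the TX command and 802.11 header, one for
 * possible data in the SKB head, and up to 22 remain for paged fragments.
 */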

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer
 */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @list: list entry, used to link the rxb into the rx_free/rx_used lists
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 * @hw: number of hardware error interrupts
 * @sw: number of software (uCode) error interrupts
 * @err_code: last error code reported with a software error
 * @sch: number of scheduler interrupts
 * @alive: number of alive interrupts
 * @rfkill: number of RF-kill toggle interrupts
 * @ctkill: number of critical temperature interrupts
 * @wakeup: number of wakeup interrupts
 * @rx: number of Rx interrupts
 * @tx: number of Tx interrupts
 * @unhandled: number of unhandled interrupts
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors
 *	(rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @write_actual: actual write pointer written to the device, rounded down
 *	to a multiple of 8
 * @queue_size: size of this queue, in RBDs
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: per-queue lock protecting the lists and indices
 * @napi: NAPI context for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	__le32 *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not yet processed
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
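
/*
 * Illustrative sketch (not part of the driver): because TFD_QUEUE_SIZE_MAX
 * (256) is a power of two, the bitwise AND above implements modular ring
 * arithmetic in both directions:
 *
 *	iwl_queue_inc_wrap(255);	// (255 + 1) & 255 == 0, wraps forward
 *	iwl_queue_dec_wrap(0);		// (0 - 1) & 255 == 255, wraps backward
 *
 * A non-power-of-two queue size would need an explicit comparison against
 * the queue length instead.
 */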

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};


#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN occupies 8 bytes at offset 12, the first TB must cover
 * 12 + 8 = 20 bytes.
 * Making it bigger would only make the allocations bigger and the copies
 * slower, so that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @overflow_q: overflow queue for frames that could not be sent while the
 *	queue was full
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (unless there are HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *	HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *	SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;
};
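
/*
 * Illustrative sketch (not part of the driver): the SW window described
 * above means a HW index on the command queue is reduced modulo n_window
 * (see get_cmd_index() below). For a hypothetical HW index of 100 on the
 * command queue (n_window == TFD_CMD_SLOTS == 32):
 *
 *	100 & (32 - 1) == 4	// 100 == 3 * 32 + 4, i.e. N == 3
 *
 * so HW entry 100 maps to SW entry 4 of the 32-entry window.
 */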

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
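
/*
 * Illustrative sketch (not part of the driver): since
 * sizeof(struct iwl_pcie_first_tb_buf) == IWL_FIRST_TB_SIZE_ALIGN == 64,
 * the scratch buffer of entry idx lives at a fixed 64-byte stride:
 *
 *	dma_addr_t tb0 = iwl_pcie_get_first_tb_dma(txq, 5);
 *	// tb0 == txq->first_tb_dma + 5 * 64
 */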

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * struct iwl_dram_data
 * @physical: bus (DMA) address of the block/page
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware
 *	monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	struct iwl_context_info *ctxt_info;
	dma_addr_t ctxt_info_dma_addr;
	struct iwl_self_init_dram init_dram;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
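
/*
 * Usage sketch (illustrative, not part of the driver): the PCIe data lives
 * in the trans_specific tail area of struct iwl_trans, so the two accessors
 * above are inverses of each other by construction:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *	struct iwl_trans *same = iwl_trans_pcie_get_trans(trans_pcie);
 *	// same == trans
 */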

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
			     struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
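
/*
 * Illustrative sketch (not part of the driver): in the legacy TFD format,
 * hi_n_len packs 4 bits of the high DMA address [35:32] in bits 0-3 and
 * the 12-bit TB length in bits 4-15, hence the ">> 4" above. For a
 * hypothetical TB of 1500 bytes whose address has high bits 0x2:
 *
 *	u16 hi_n_len = (1500 << 4) | 0x2;	// pack
 *	u16 len = hi_n_len >> 4;		// unpack: 1500
 */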

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, in MSI-X a cause is enabled when its bit is
		 * unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
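
/*
 * Illustrative sketch (not part of the driver): the MSI-X mask registers
 * use inverted polarity, so writing ~mask enables exactly the causes whose
 * bits are set in the driver-side mask. For a hypothetical fh_mask that
 * enables causes 0 and 3:
 *
 *	u32 fh_mask = BIT(0) | BIT(3);		// 0x00000009
 *	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~fh_mask);
 *	// register value 0xfffffff6: bits 0 and 3 clear -> causes enabled
 */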

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
				     struct iwl_txq *txq, int idx)
{
	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
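
/*
 * Illustrative sketch (not part of the driver): iwl_queue_used() handles
 * the wrapped case by inverting the test for the unused region. For
 * hypothetical pointers read_ptr == 250 and write_ptr == 5 (the queue has
 * wrapped), the used indices are 250..255 and 0..4:
 *
 *	iwl_queue_used(q, 252);	// true:  !(252 < 250 && 252 >= 5)
 *	iwl_queue_used(q, 10);	// false: 10 < 250 and 10 >= 5
 */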

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->mutex);

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);

/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(const struct iwl_txq *q);
int iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				      bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */