/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
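
/*
 * Worked example (illustrative; assumes IWL_NUM_OF_TBS is 20, the value
 * iwl-fh.h defines for these devices): 2 TBs are reserved for the TX
 * command and header, 1 more may carry data from the skb head, which
 * leaves IWL_PCIE_MAX_FRAGS == 17 TBs for page fragments.
 */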

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @list: list entry for the rx_free/rx_used lists in struct iwl_rxq
 */
struct iwl_rx_mem_buffer {
        dma_addr_t page_dma;
        struct page *page;
        u16 vid;
        bool invalid;
        struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 * @hw: HW error interrupts
 * @sw: uCode (SW) error interrupts
 * @err_code: last uCode error code
 * @sch: scheduler interrupts
 * @alive: alive notifications
 * @rfkill: RF kill toggle interrupts
 * @ctkill: critical temperature interrupts
 * @wakeup: wakeup interrupts
 * @rx: Rx interrupts
 * @tx: Tx interrupts
 * @unhandled: interrupts with no handler
 */
struct isr_statistics {
        u32 hw;
        u32 sw;
        u32 err_code;
        u32 sch;
        u32 alive;
        u32 rfkill;
        u32 ctkill;
        u32 wakeup;
        u32 rx;
        u32 tx;
        u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *      Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @write_actual: actual write pointer written to the device, rounded down
 *      to a multiple of 8
 * @queue_size: size of this queue, in entries
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue contents
 * @napi: NAPI context for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
        int id;
        void *bd;
        dma_addr_t bd_dma;
        __le32 *used_bd;
        dma_addr_t used_bd_dma;
        u32 read;
        u32 write;
        u32 free_count;
        u32 used_count;
        u32 write_actual;
        u32 queue_size;
        struct list_head rx_free;
        struct list_head rx_used;
        bool need_update;
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
        struct napi_struct napi;
        struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *      the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *      of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
        atomic_t req_pending;
        atomic_t req_ready;
        struct list_head rbd_allocated;
        struct list_head rbd_empty;
        spinlock_t lock;
        struct workqueue_struct *alloc_wq;
        struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
        dma_addr_t dma;
        void *addr;
        size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
        return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
        return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
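
/*
 * Illustrative sketch (not used by the driver): TFD_QUEUE_SIZE_MAX is a
 * power of two, so the masking in the helpers above is equivalent to a
 * modulo operation. The same trick yields the queue occupancy between two
 * wrapped indices:
 */
static inline int iwl_queue_count_sketch(int read_ptr, int write_ptr)
{
        /* entries written by the host and not yet reclaimed */
        return (write_ptr - read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
}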

struct iwl_cmd_meta {
        /* only for SYNC commands, iff the reply skb is wanted */
        struct iwl_host_cmd *source;
        u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (barring future HW changes). For the normal TX queues, n_window, which is
 * the size of the software queue data, is also 256; for the command queue,
 * however, n_window is only 32 since we don't need so many commands pending.
 * Since the HW still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays
 * 256. As a result, the software buffers (@entries in struct iwl_txq) only
 * have 32 entries, while the HW buffers (@tfds in the same struct) have 256.
 * This means that we end up with the following:
 *   HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *   SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid onto the HW queue.
 */
struct iwl_queue {
        int write_ptr;       /* first empty entry (index), host write ptr */
        int read_ptr;        /* last used entry (index), host read ptr */
        /* use for monitoring and recovering the stuck queue */
        dma_addr_t dma_addr; /* physical addr for BDs */
        int n_window;        /* safe queue window */
        u32 id;
        int low_mark;        /* low watermark, resume queue if free
                              * space is more than this */
        int high_mark;       /* high watermark, stop queue if free
                              * space is less than this */
};

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
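
/*
 * Worked example for the window scheme above: on the command queue,
 * n_window is TFD_CMD_SLOTS == 32, so HW indices 2, 34, 66, ..., 226 all
 * map to SW entry 2, because get_cmd_index() (defined below) computes
 * 'index & (n_window - 1)' and, e.g., 226 & 31 == 2.
 */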

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 16 bytes at offset 24, the first TB must be 40 bytes.
 * Making it bigger would make allocations bigger and copies slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE       40
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
        struct iwl_device_cmd *cmd;
        struct sk_buff *skb;
        /* buffer to free after command completes */
        const void *free_buf;
        struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
        u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *      the writeback -- this is DMA memory and an array holding one buffer
 *      for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @block: queue is blocked - don't TX from this queue
 * @overflow_q: overflow queue for frames that couldn't be sent while the
 *      queue was full
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
struct iwl_txq {
        struct iwl_queue q;
        struct iwl_tfd *tfds;
        struct iwl_pcie_first_tb_buf *first_tb_bufs;
        dma_addr_t first_tb_dma;
        struct iwl_pcie_txq_entry *entries;
        spinlock_t lock;
        unsigned long frozen_expiry_remainder;
        struct timer_list stuck_timer;
        struct iwl_trans_pcie *trans_pcie;
        bool need_update;
        bool frozen;
        u8 active;
        bool ampdu;
        bool block;
        unsigned long wd_timeout;
        struct sk_buff_head overflow_q;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
        return txq->first_tb_dma +
               sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
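
/*
 * Worked example: IWL_FIRST_TB_SIZE_ALIGN is ALIGN(40, 64) == 64, so each
 * scratch buffer starts on a 64-byte boundary and the helper above maps,
 * e.g., idx 3 to first_tb_dma + 3 * 64.
 */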

struct iwl_tso_hdr_page {
        struct page *page;
        u8 *pos;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: true when ucode supports wide command header format
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *      frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @allocated_vector: the number of interrupt vectors allocated by the OS
 * @default_irq_num: default irq for non rx interrupt
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 */
struct iwl_trans_pcie {
        struct iwl_rxq *rxq;
        struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
        struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
        struct iwl_rb_allocator rba;
        struct iwl_trans *trans;

        struct net_device napi_dev;

        struct iwl_tso_hdr_page __percpu *tso_hdr_page;

        /* INT ICT Table */
        __le32 *ict_tbl;
        dma_addr_t ict_tbl_dma;
        int ict_index;
        bool use_ict;
        bool is_down;
        struct isr_statistics isr_stats;

        spinlock_t irq_lock;
        struct mutex mutex;
        u32 inta_mask;
        u32 scd_base_addr;
        struct iwl_dma_ptr scd_bc_tbls;
        struct iwl_dma_ptr kw;

        struct iwl_txq *txq;
        unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
        unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

        /* PCI bus related data */
        struct pci_dev *pci_dev;
        void __iomem *hw_base;

        bool ucode_write_complete;
        wait_queue_head_t ucode_write_waitq;
        wait_queue_head_t wait_command_queue;
        wait_queue_head_t d0i3_waitq;

        u8 page_offs, dev_cmd_offs;

        u8 cmd_queue;
        u8 cmd_fifo;
        unsigned int cmd_q_wdg_timeout;
        u8 n_no_reclaim_cmds;
        u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

        enum iwl_amsdu_size rx_buf_size;
        bool bc_table_dword;
        bool scd_set_active;
        bool wide_cmd_header;
        bool sw_csum_tx;
        u32 rx_page_order;

        /* protect hw register access */
        spinlock_t reg_lock;
        bool cmd_hold_nic_awake;
        bool ref_cmd_in_flight;

        dma_addr_t fw_mon_phys;
        struct page *fw_mon_page;
        u32 fw_mon_size;

        struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
        bool msix_enabled;
        u32 allocated_vector;
        u32 default_irq_num;
        u32 fh_init_mask;
        u32 hw_init_mask;
        u32 fh_mask;
        u32 hw_mask;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
        return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
        return container_of((void *)trans_pcie, struct iwl_trans,
                            trans_specific);
}
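
/*
 * The two helpers above are inverses of each other: the PCIe-specific
 * state lives in the trans_specific area at the end of struct iwl_trans,
 * so both directions are plain pointer arithmetic. Illustrative round
 * trip (sketch):
 *
 *      struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *      WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 */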

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *      other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                       const struct pci_device_id *ent,
                                       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
                               const struct iwl_trans_txq_scd_cfg *cfg,
                               unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
                                bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
                                        bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
                                  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                            struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];

        return le16_to_cpu(tb->hi_n_len) >> 4;
}
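
/*
 * Layout note for the helper above: in a TB descriptor, hi_n_len packs
 * the upper bits of the DMA address into bits 0..3 and the TB length into
 * bits 4..15, hence the '>> 4' to extract the length.
 */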

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        clear_bit(STATUS_INT_ENABLED, &trans->status);
        if (!trans_pcie->msix_enabled) {
                /* disable interrupts from uCode/NIC to host */
                iwl_write32(trans, CSR_INT_MASK, 0x00000000);

                /* acknowledge/clear/reset any interrupts still pending
                 * from uCode or flow handler (Rx/Tx DMA) */
                iwl_write32(trans, CSR_INT, 0xffffffff);
                iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
        } else {
                /* disable all the interrupts we might use */
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
                            trans_pcie->fh_init_mask);
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
                            trans_pcie->hw_init_mask);
        }
        IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock(&trans_pcie->irq_lock);
        _iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
        set_bit(STATUS_INT_ENABLED, &trans->status);
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INI_SET_MASK;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                /*
                 * fh/hw_mask keep all the unmasked causes.
                 * Unlike MSI, with MSI-X a cause is enabled when its mask
                 * bit is unset.
                 */
                trans_pcie->hw_mask = trans_pcie->hw_init_mask;
                trans_pcie->fh_mask = trans_pcie->fh_init_mask;
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
                            ~trans_pcie->fh_mask);
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
                            ~trans_pcie->hw_mask);
        }
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock(&trans_pcie->irq_lock);
        _iwl_enable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
        trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
        trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
                            trans_pcie->hw_init_mask);
                iwl_enable_fh_int_msk_msix(trans,
                                           MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
        }
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
                            trans_pcie->fh_init_mask);
                iwl_enable_hw_int_msk_msix(trans,
                                           MSIX_HW_INT_CAUSES_REG_RF_KILL);
        }
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
                                  struct iwl_txq *txq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
                IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
                iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
        }
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
                                  struct iwl_txq *txq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
                iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
                IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
        } else {
                IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
                                    txq->q.id);
        }
}

static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
        return q->write_ptr >= q->read_ptr ?
                (i >= q->read_ptr && i < q->write_ptr) :
                !(i < q->read_ptr && i >= q->write_ptr);
}
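
/*
 * Worked example: with read_ptr == 250 and write_ptr == 10 the queue has
 * wrapped, so entries 250..255 and 0..9 are in use: iwl_queue_used()
 * returns true for i == 253 or i == 5, and false for i == 100. In the
 * non-wrapped case, only read_ptr <= i < write_ptr counts as used.
 */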
%d\n", txq->q.id); 634 } else 635 IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", 636 txq->q.id); 637 } 638 639 static inline bool iwl_queue_used(const struct iwl_queue *q, int i) 640 { 641 return q->write_ptr >= q->read_ptr ? 642 (i >= q->read_ptr && i < q->write_ptr) : 643 !(i < q->read_ptr && i >= q->write_ptr); 644 } 645 646 static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) 647 { 648 return index & (q->n_window - 1); 649 } 650 651 static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) 652 { 653 return !(iwl_read32(trans, CSR_GP_CNTRL) & 654 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); 655 } 656 657 static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, 658 u32 reg, u32 mask, u32 value) 659 { 660 u32 v; 661 662 #ifdef CONFIG_IWLWIFI_DEBUG 663 WARN_ON_ONCE(value & ~mask); 664 #endif 665 666 v = iwl_read32(trans, reg); 667 v &= ~mask; 668 v |= value; 669 iwl_write32(trans, reg, v); 670 } 671 672 static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans, 673 u32 reg, u32 mask) 674 { 675 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0); 676 } 677 678 static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, 679 u32 reg, u32 mask) 680 { 681 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); 682 } 683 684 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); 685 686 #ifdef CONFIG_IWLWIFI_DEBUGFS 687 int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); 688 #else 689 static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) 690 { 691 return 0; 692 } 693 #endif 694 695 int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans); 696 int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); 697 698 void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); 699 700 #endif /* __iwl_trans_int_pcie_h__ */ 701