/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
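/*
 * Illustrative sketch (compiled out, not part of the driver): how the
 * "max_tbs - 3" budget above is typically consumed for a TX skb. The
 * helper name iwl_pcie_skb_fits() is hypothetical.
 */
#if 0
static bool iwl_pcie_skb_fits(struct iwl_trans_pcie *trans_pcie,
			      struct sk_buff *skb)
{
	/* 3 TBs are reserved (cmd + header + head), the rest hold frags */
	return skb_shinfo(skb)->nr_frags <= IWL_PCIE_MAX_FRAGS(trans_pcie);
}
#endif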
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @list: list entry in the rx_free/rx_used (or allocator) lists
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for allocation
 * @write_actual: last write index actually written to the device
 * @queue_size: size of this queue
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue
 * @napi: NAPI context for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	__le32 *used_bd;
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
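/*
 * Illustrative sketch (compiled out): the rx_free/rx_used FIFO flow the
 * NOTE above describes. The helper name is hypothetical and error
 * handling is elided; this is not the driver's actual restock code.
 */
#if 0
static struct iwl_rx_mem_buffer *iwl_pcie_rxq_take_free(struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	lockdep_assert_held(&rxq->lock);

	if (list_empty(&rxq->rx_free))
		return NULL;

	/* pop an RBD that already has a page attached */
	rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list);
	list_del(&rxb->list);
	rxq->free_count--;

	return rxb;
}
#endif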
/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
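/*
 * Worked example (compiled out): the wrap helpers above rely on
 * TFD_QUEUE_SIZE_MAX being a power of two (256), so masking with 0xff
 * implements the modulo. The function name is hypothetical.
 */
#if 0
static inline void iwl_queue_wrap_example(void)
{
	/* the power-of-two size is what makes the mask equal to modulo */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* with TFD_QUEUE_SIZE_MAX == 256: */
	WARN_ON(iwl_queue_inc_wrap(255) != 0);	/* 256 & 0xff == 0 */
	WARN_ON(iwl_queue_dec_wrap(0) != 255);	/* -1 & 0xff == 255 */
}
#endif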
264 * 265 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware 266 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless 267 * there might be HW changes in the future). For the normal TX 268 * queues, n_window, which is the size of the software queue data 269 * is also 256; however, for the command queue, n_window is only 270 * 32 since we don't need so many commands pending. Since the HW 271 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. 272 * This means that we end up with the following: 273 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | 274 * SW entries: | 0 | ... | 31 | 275 * where N is a number between 0 and 7. This means that the SW 276 * data is a window overlayed over the HW queue. 277 */ 278 struct iwl_txq { 279 void *tfds; 280 struct iwl_pcie_first_tb_buf *first_tb_bufs; 281 dma_addr_t first_tb_dma; 282 struct iwl_pcie_txq_entry *entries; 283 spinlock_t lock; 284 unsigned long frozen_expiry_remainder; 285 struct timer_list stuck_timer; 286 struct iwl_trans_pcie *trans_pcie; 287 bool need_update; 288 bool frozen; 289 bool ampdu; 290 int block; 291 unsigned long wd_timeout; 292 struct sk_buff_head overflow_q; 293 struct iwl_dma_ptr bc_tbl; 294 295 int write_ptr; 296 int read_ptr; 297 dma_addr_t dma_addr; 298 int n_window; 299 u32 id; 300 int low_mark; 301 int high_mark; 302 }; 303 304 static inline dma_addr_t 305 iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx) 306 { 307 return txq->first_tb_dma + 308 sizeof(struct iwl_pcie_first_tb_buf) * idx; 309 } 310 311 struct iwl_tso_hdr_page { 312 struct page *page; 313 u8 *pos; 314 }; 315 316 /** 317 * enum iwl_shared_irq_flags - level of sharing for irq 318 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes. 319 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue. 
320 */ 321 enum iwl_shared_irq_flags { 322 IWL_SHARED_IRQ_NON_RX = BIT(0), 323 IWL_SHARED_IRQ_FIRST_RSS = BIT(1), 324 }; 325 326 /** 327 * enum iwl_image_response_code - image response values 328 * @IWL_IMAGE_RESP_DEF: the default value of the register 329 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully 330 * @IWL_IMAGE_RESP_FAIL: iml reading failed 331 */ 332 enum iwl_image_response_code { 333 IWL_IMAGE_RESP_DEF = 0, 334 IWL_IMAGE_RESP_SUCCESS = 1, 335 IWL_IMAGE_RESP_FAIL = 2, 336 }; 337 338 /** 339 * struct iwl_dram_data 340 * @physical: page phy pointer 341 * @block: pointer to the allocated block/page 342 * @size: size of the block/page 343 */ 344 struct iwl_dram_data { 345 dma_addr_t physical; 346 void *block; 347 int size; 348 }; 349 350 /** 351 * struct iwl_self_init_dram - dram data used by self init process 352 * @fw: lmac and umac dram data 353 * @fw_cnt: total number of items in array 354 * @paging: paging dram data 355 * @paging_cnt: total number of items in array 356 */ 357 struct iwl_self_init_dram { 358 struct iwl_dram_data *fw; 359 int fw_cnt; 360 struct iwl_dram_data *paging; 361 int paging_cnt; 362 }; 363 364 /** 365 * struct iwl_trans_pcie - PCIe transport specific data 366 * @rxq: all the RX queue data 367 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues 368 * @global_table: table mapping received VID from hw to rxb 369 * @rba: allocator for RX replenishing 370 * @ctxt_info: context information for FW self init 371 * @ctxt_info_gen3: context information for gen3 devices 372 * @prph_info: prph info for self init 373 * @prph_scratch: prph scratch for self init 374 * @ctxt_info_dma_addr: dma addr of context information 375 * @prph_info_dma_addr: dma addr of prph info 376 * @prph_scratch_dma_addr: dma addr of prph scratch 377 * @ctxt_info_dma_addr: dma addr of context information 378 * @init_dram: DRAM data of firmware image (including paging). 379 * Context information addresses will be taken from here. 380 * This is driver's local copy for keeping track of size and 381 * count for allocating and freeing the memory. 382 * @trans: pointer to the generic transport area 383 * @scd_base_addr: scheduler sram base address in SRAM 384 * @scd_bc_tbls: pointer to the byte count table of the scheduler 385 * @kw: keep warm address 386 * @pci_dev: basic pci-network driver stuff 387 * @hw_base: pci hardware address support 388 * @ucode_write_complete: indicates that the ucode has been copied. 389 * @ucode_write_waitq: wait queue for uCode load 390 * @cmd_queue - command queue number 391 * @rx_buf_size: Rx buffer size 392 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) 393 * @scd_set_active: should the transport configure the SCD for HCMD queue 394 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed 395 * frame. 396 * @rx_page_order: page order for receive buffer size 397 * @reg_lock: protect hw register access 398 * @mutex: to protect stop_device / start_fw / start_hw 399 * @cmd_in_flight: true when we have a host command in flight 400 * @fw_mon_phys: physical address of the buffer for the firmware monitor 401 * @fw_mon_page: points to the first page of the buffer for the firmware monitor 402 * @fw_mon_size: size of the buffer for the firmware monitor 403 * @msix_entries: array of MSI-X entries 404 * @msix_enabled: true if managed to enable MSI-X 405 * @shared_vec_mask: the type of causes the shared vector handles 406 * (see iwl_shared_irq_flags). 
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: dma addr of the image loader image
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @scheduled_for_removal: true if we have scheduled a device removal
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_self_init_dram init_dram;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	bool debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;
	bool scheduled_for_removal;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);
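/*
 * Illustrative sketch (compiled out): the two helpers above are inverse
 * mappings between the generic transport and its PCIe-specific data,
 * which lives in trans->trans_specific. The function name is
 * hypothetical.
 */
#if 0
static inline void iwl_trans_mapping_example(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* container_of() recovers the enclosing iwl_trans */
	WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
}
#endif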
/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
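/*
 * Illustrative sketch (compiled out): in the legacy TFD format,
 * hi_n_len packs the upper 4 address bits (bits 32..35) in bits 0..3
 * and the 12-bit length in bits 4..15, hence the ">> 4" above; the TFH
 * format carries the length in its own 16-bit field. The helper name
 * is hypothetical.
 */
#if 0
static inline __le16 iwl_tfd_pack_hi_n_len(dma_addr_t addr, u16 len)
{
	/* bits 4..15: length, bits 0..3: address bits 32..35 */
	return cpu_to_le16((len << 4) | (upper_32_bits(addr) & 0xf));
}
#endif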
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}
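/*
 * Illustrative sketch (compiled out): how the two helpers above might
 * combine during self init -- count the contiguous fw sections,
 * allocate a DMA block per section, and rely on
 * iwl_pcie_ctxt_info_free_fw_img() for teardown. This is not the
 * driver's actual context-info code; the function name is hypothetical.
 */
#if 0
static int example_load_fw_sections(struct iwl_trans *trans,
				    const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	int i, ret, cnt = iwl_pcie_get_num_sections(fw, 0);

	dram->fw = kcalloc(cnt, sizeof(*dram->fw), GFP_KERNEL);
	if (!dram->fw)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
						   &dram->fw[i]);
		if (ret) {
			/* frees the blocks allocated so far */
			iwl_pcie_ctxt_info_free_fw_img(trans);
			return ret;
		}
		dram->fw_cnt++;
	}

	return 0;
}
#endif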
702 */ 703 trans_pcie->hw_mask = trans_pcie->hw_init_mask; 704 trans_pcie->fh_mask = trans_pcie->fh_init_mask; 705 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, 706 ~trans_pcie->fh_mask); 707 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, 708 ~trans_pcie->hw_mask); 709 } 710 } 711 712 static inline void iwl_enable_interrupts(struct iwl_trans *trans) 713 { 714 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 715 716 spin_lock(&trans_pcie->irq_lock); 717 _iwl_enable_interrupts(trans); 718 spin_unlock(&trans_pcie->irq_lock); 719 } 720 static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk) 721 { 722 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 723 724 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk); 725 trans_pcie->hw_mask = msk; 726 } 727 728 static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk) 729 { 730 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 731 732 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk); 733 trans_pcie->fh_mask = msk; 734 } 735 736 static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) 737 { 738 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 739 740 IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n"); 741 if (!trans_pcie->msix_enabled) { 742 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX; 743 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); 744 } else { 745 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, 746 trans_pcie->hw_init_mask); 747 iwl_enable_fh_int_msk_msix(trans, 748 MSIX_FH_INT_CAUSES_D2S_CH0_NUM); 749 } 750 } 751 752 static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index) 753 { 754 return index & (q->n_window - 1); 755 } 756 757 static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans, 758 struct iwl_txq *txq, int idx) 759 { 760 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 761 762 if (trans->cfg->use_tfh) 763 idx = iwl_pcie_get_cmd_index(txq, idx); 764 765 return txq->tfds + trans_pcie->tfd_size * idx; 766 } 767 768 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 769 { 770 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 771 772 IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n"); 773 if (!trans_pcie->msix_enabled) { 774 trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; 775 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); 776 } else { 777 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, 778 trans_pcie->fh_init_mask); 779 iwl_enable_hw_int_msk_msix(trans, 780 MSIX_HW_INT_CAUSES_REG_RF_KILL); 781 } 782 783 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) { 784 /* 785 * On 9000-series devices this bit isn't enabled by default, so 786 * when we power down the device we need set the bit to allow it 787 * to wake up the PCI-E bus for RF-kill interrupts. 
788 */ 789 iwl_set_bit(trans, CSR_GP_CNTRL, 790 CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN); 791 } 792 } 793 794 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans); 795 796 static inline void iwl_wake_queue(struct iwl_trans *trans, 797 struct iwl_txq *txq) 798 { 799 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 800 801 if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) { 802 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); 803 iwl_op_mode_queue_not_full(trans->op_mode, txq->id); 804 } 805 } 806 807 static inline void iwl_stop_queue(struct iwl_trans *trans, 808 struct iwl_txq *txq) 809 { 810 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 811 812 if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) { 813 iwl_op_mode_queue_full(trans->op_mode, txq->id); 814 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); 815 } else 816 IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", 817 txq->id); 818 } 819 820 static inline bool iwl_queue_used(const struct iwl_txq *q, int i) 821 { 822 return q->write_ptr >= q->read_ptr ? 823 (i >= q->read_ptr && i < q->write_ptr) : 824 !(i < q->read_ptr && i >= q->write_ptr); 825 } 826 827 static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) 828 { 829 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 830 831 lockdep_assert_held(&trans_pcie->mutex); 832 833 if (trans_pcie->debug_rfkill) 834 return true; 835 836 return !(iwl_read32(trans, CSR_GP_CNTRL) & 837 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); 838 } 839 840 static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, 841 u32 reg, u32 mask, u32 value) 842 { 843 u32 v; 844 845 #ifdef CONFIG_IWLWIFI_DEBUG 846 WARN_ON_ONCE(value & ~mask); 847 #endif 848 849 v = iwl_read32(trans, reg); 850 v &= ~mask; 851 v |= value; 852 iwl_write32(trans, reg, v); 853 } 854 855 static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans, 856 u32 reg, u32 mask) 857 { 858 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0); 859 } 860 861 static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, 862 u32 reg, u32 mask) 863 { 864 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); 865 } 866 867 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); 868 869 #ifdef CONFIG_IWLWIFI_DEBUGFS 870 int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); 871 #else 872 static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) 873 { 874 return 0; 875 } 876 #endif 877 878 int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans); 879 int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); 880 881 void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); 882 883 void iwl_pcie_rx_allocator_work(struct work_struct *data); 884 885 /* common functions that are used by gen2 transport */ 886 void iwl_pcie_apm_config(struct iwl_trans *trans); 887 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); 888 void iwl_pcie_synchronize_irqs(struct iwl_trans *trans); 889 bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans); 890 void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, 891 bool was_in_rfkill); 892 void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); 893 int iwl_queue_space(const struct iwl_txq *q); 894 void iwl_pcie_apm_stop_master(struct iwl_trans *trans); 895 void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie); 896 int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 897 int slots_num, bool cmd_queue); 898 int 
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */