/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
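/*
 * Usage sketch (illustrative only, not upstream code): a TFD builder
 * would compare an skb's paged fragments against this budget before
 * mapping them, e.g.
 *
 *	if (skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie))
 *		...linearize or split the skb first...
 */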
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer
 */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @list: list entry for the rx_free/rx_used lists
 * @size: size used from the buffer
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 size;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

#define IWL_RX_TD_TYPE_MSK	0xff000000
#define IWL_RX_TD_SIZE_MSK	0x00ffffff
#define IWL_RX_TD_SIZE_2K	BIT(11)
#define IWL_RX_TD_TYPE		0

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @type_n_size: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 *	and buffer size
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le32 type_n_size;
	__le64 addr;
	__le16 rbid;
	__le16 reserved;
} __packed;

#define IWL_RX_CD_SIZE		0xffffff00

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @type: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 * @status: status of the completion
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @size: buffer size, masked by IWL_RX_CD_SIZE
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	u8 type;
	u8 status;
	__le16 reserved1;
	__le16 rbid;
	__le32 size;
	u8 reserved2[22];
} __packed;
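/*
 * Illustrative accessors (a sketch, not part of the upstream API): the
 * size fields above are masked sub-fields, so readers extract them with
 * the corresponding masks. Note the completion descriptor works out to
 * 32 bytes (1 + 1 + 2 + 2 + 4 + 22).
 */
static inline u32 iwl_rx_td_get_size(const struct iwl_rx_transfer_desc *td)
{
	/* the low 24 bits of type_n_size carry the buffer size */
	return le32_to_cpu(td->type_n_size) & IWL_RX_TD_SIZE_MSK;
}

static inline u32 iwl_rx_cd_get_size(const struct iwl_rx_completion_desc *cd)
{
	/* the size field is masked by IWL_RX_CD_SIZE per the kernel-doc */
	return le32_to_cpu(cd->size) & IWL_RX_CD_SIZE;
}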
/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer
 *	descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @write_actual: last write pointer value actually written to the device
 * @queue_size: size of this rx queue
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue state and the rx_free/rx_used lists
 * @napi: NAPI context for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
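/*
 * Illustrative helper (a sketch, not an upstream function): per the NOTE
 * above, rx_used and rx_free act as a FIFO of iwl_rx_mem_buffers; once a
 * fresh page has been attached to an RBD, it moves to the tail of rx_free:
 */
static inline void iwl_rxq_mark_rbd_ready(struct iwl_rxq *rxq,
					  struct iwl_rx_mem_buffer *rxb)
{
	lockdep_assert_held(&rxq->lock);
	list_move_tail(&rxb->list, &rxq->rx_free);
	rxq->free_count++;
}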
/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
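/*
 * Illustrative helper (a sketch, not an upstream function): because the
 * queue size is a power of two, the same masking used by the wrap
 * helpers above also yields the number of entries currently in use:
 */
static inline int iwl_queue_used_count(struct iwl_trans *trans,
				       int read_ptr, int write_ptr)
{
	return (write_ptr - read_ptr) &
	       (trans->cfg->base_params->max_tfd_queue_size - 1);
}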
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};


#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes at offset 12, the first TB must be 20 bytes.
 * If we made it bigger then allocations would be bigger and copies slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};
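/*
 * Compile-time sanity sketch (illustrative, not upstream code): the PN
 * occupies 8 bytes at offset 12, so the first TB covers 12 + 8 = 20
 * bytes, and each per-entry buffer is padded out to a 64-byte slot.
 */
static inline void iwl_pcie_first_tb_size_check(void)
{
	BUILD_BUG_ON(IWL_FIRST_TB_SIZE < 12 + 8);
	BUILD_BUG_ON(sizeof(struct iwl_pcie_first_tb_buf) !=
		     IWL_FIRST_TB_SIZE_ALIGN);
}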
/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @overflow_q: overflow queue for frames that arrived while the HW queue
 *	was full
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index); host write pointer (host_w)
 * @read_ptr: last used entry (index); host read pointer (host_r)
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (barring future HW changes). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
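/*
 * Illustrative helper (a sketch, not upstream): the window mapping
 * described in the struct iwl_txq comment - a HW ring index lands in the
 * smaller SW window by masking with the (power-of-two) window size. This
 * is the same arithmetic iwl_pcie_get_cmd_index() below performs.
 */
static inline int iwl_txq_hw_to_sw_idx(const struct iwl_txq *txq, int hw_idx)
{
	return hw_idx & (txq->n_window - 1);
}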
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct iwl_dram_data
 * @physical: physical (bus) address of the block/page
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware
 *	monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @scheduled_for_removal: true if we have scheduled a device removal
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_self_init_dram init_dram;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	bool debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 def_rx_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;
	bool scheduled_for_removal;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that each bit is cleared
	 * by writing 1 to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
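/*
 * Usage sketch (illustrative): the two helpers above are inverses of one
 * another, so code holding either pointer can always recover the other:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *
 *	WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 */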
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);
int iwl_pcie_rx_alloc(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
				   struct iwl_txq *txq, u16 byte_cnt,
				   int num_tbs);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
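/*
 * Illustrative helper (a sketch, not an upstream function): total
 * payload bytes referenced by a TFD, summed over its TBs with the
 * format-agnostic accessor above.
 */
static inline u32 iwl_pcie_tfd_total_len(struct iwl_trans *trans, void *tfd,
					 u8 num_tbs)
{
	u32 len = 0;
	u8 i;

	for (i = 0; i < num_tbs; i++)
		len += iwl_pcie_tfd_tb_get_len(trans, tfd, i);

	return len;
}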
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix a cause is enabled when it is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}
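/*
 * Usage sketch (illustrative): reclaim and dump paths typically pair the
 * two helpers above to reach both the HW descriptor and the SW entry for
 * the same ring position:
 *
 *	int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
 *	void *tfd = iwl_pcie_get_tfd(trans, txq, txq->read_ptr);
 *	struct iwl_pcie_txq_entry *entry = &txq->entries[idx];
 */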
static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
}
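/*
 * Usage sketch (illustrative): a reclaim path would mirror the fill path
 * by waking the queue once enough entries have been freed, e.g.
 *
 *	if (iwl_queue_space(trans, txq) > txq->low_mark)
 *		iwl_wake_queue(trans, txq);
 *
 * (iwl_queue_space() is declared further below.)
 */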
static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
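/*
 * Usage sketch (illustrative): the read-modify-write helpers above set
 * or clear individual register bits, e.g. around a MAC access window:
 *
 *	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 *				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 *	...
 *	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
 *				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 */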
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */