/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer
 */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @vid: index of this rxb in the global table
 * @invalid: rxb is in driver ownership - not owned by HW
 * @list: list entry for the RBD FIFOs (rx_free/rx_used/allocator lists)
 * @size: size used from the buffer
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 size;
};

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

#define IWL_CD_STTS_OPTIMIZED_POS 0
#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
#define IWL_CD_STTS_WIFI_STATUS_POS 4
#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
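
/*
 * Illustrative helpers (not part of the original driver; names are
 * hypothetical): a minimal sketch of how the completion descriptor
 * status byte decomposes into the fields defined by the masks above.
 */
static inline u8 iwl_cd_stts_get_transfer_status(u8 status)
{
	/* bits 1-3: see enum iwl_completion_desc_transfer_status below */
	return (status & IWL_CD_STTS_TRANSFER_STATUS_MSK) >>
		IWL_CD_STTS_TRANSFER_STATUS_POS;
}

static inline u8 iwl_cd_stts_get_wifi_status(u8 status)
{
	/* bits 4-7: see enum iwl_completion_desc_wifi_status below */
	return (status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
		IWL_CD_STTS_WIFI_STATUS_POS;
}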

/**
 * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
 * @IWL_CD_STTS_UNUSED: unused
 * @IWL_CD_STTS_UNUSED_2: unused
 * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
 *	In sniffer mode, when split is used, set in last CD completion. (RX)
 * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
 *	all CD completion. (RX)
 * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
 * @IWL_CD_STTS_ERROR: general error (RX)
 */
enum iwl_completion_desc_transfer_status {
	IWL_CD_STTS_UNUSED,
	IWL_CD_STTS_UNUSED_2,
	IWL_CD_STTS_END_TRANSFER,
	IWL_CD_STTS_OVERFLOW,
	IWL_CD_STTS_ABORTED,
	IWL_CD_STTS_ERROR,
};

/**
 * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
 * @IWL_CD_STTS_VALID: the packet is valid (RX)
 * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
 * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
 * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
 * @IWL_CD_STTS_DUP: duplicate packet (RX)
 * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
 * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
 * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
 * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
 * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
 * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
 * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
 * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
 * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
 * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
 */
enum iwl_completion_desc_wifi_status {
	IWL_CD_STTS_VALID,
	IWL_CD_STTS_FCS_ERR,
	IWL_CD_STTS_SEC_KEY_ERR,
	IWL_CD_STTS_DECRYPTION_ERR,
	IWL_CD_STTS_DUP,
	IWL_CD_STTS_ICV_MIC_ERR,
	IWL_CD_STTS_INTERNAL_SNAP_ERR,
	IWL_CD_STTS_SEC_PORT_FAIL,
	IWL_CD_STTS_BA_OLD_SN,
	IWL_CD_STTS_QOS_NULL,
	IWL_CD_STTS_MAC_HDR_ERR,
	IWL_CD_STTS_MAX_RETRANS,
	IWL_CD_STTS_EX_LIFETIME,
	IWL_CD_STTS_NOT_USED,
	IWL_CD_STTS_REPLAY_ERR,
};

#define IWL_RX_TD_TYPE_MSK 0xff000000
#define IWL_RX_TD_SIZE_MSK 0x00ffffff
#define IWL_RX_TD_SIZE_2K BIT(11)
#define IWL_RX_TD_TYPE 0

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @type_n_size: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 *	and buffer size
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le32 type_n_size;
	__le64 addr;
	__le16 rbid;
	__le16 reserved;
} __packed;

#define IWL_RX_CD_SIZE 0xffffff00

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @type: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 * @status: status of the completion
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @size: buffer size, masked by IWL_RX_CD_SIZE
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	u8 type;
	u8 status;
	__le16 reserved1;
	__le16 rbid;
	__le32 size;
	u8 reserved2[22];
} __packed;
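
/*
 * Minimal sketch (not the driver's actual restock path; the function name
 * is hypothetical): how a transfer descriptor could be filled from an RBD
 * using the fields and masks above. The size encoding (2K buffer, shifted
 * into the low 24 bits) is an assumption following the IWL_RX_TD_*
 * definitions.
 */
static inline void iwl_pcie_example_fill_td(struct iwl_rx_transfer_desc *td,
					    struct iwl_rx_mem_buffer *rxb)
{
	td->type_n_size = cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
				      ((IWL_RX_TD_SIZE_2K >> 8) &
				       IWL_RX_TD_SIZE_MSK));
	td->addr = cpu_to_le64(rxb->page_dma);
	td->rbid = cpu_to_le16(rxb->vid);
}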

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer
 *	descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to allocator to use for allocation
 * @write_actual: last write pointer value written to the device
 * @queue_size: size of this queue
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue data
 * @napi: NAPI context for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	void *used_bd;
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not yet processed
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
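
/*
 * Rough sketch of the allocator handshake (illustrative; the real
 * implementation lives in rx.c and may differ in detail): the RX path
 * batches empty RBDs and posts an allocation request, the worker refills
 * pages in the background, and the RX path later claims a batch of
 * RX_CLAIM_REQ_ALLOC ready RBDs:
 *
 *	atomic_inc(&rba->req_pending);			// post a request
 *	queue_work(rba->alloc_wq, &rba->rx_alloc);
 *	...
 *	// worker: attach pages to RBDs on rbd_empty, move them to
 *	// rbd_allocated, then:
 *	atomic_dec(&rba->req_pending);
 *	atomic_inc(&rba->req_ready);
 *	...
 *	// claim side:
 *	if (atomic_dec_if_positive(&rba->req_ready) >= 0)
 *		// splice RX_CLAIM_REQ_ALLOC RBDs from rba->rbd_allocated
 */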

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
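
/*
 * Example (illustrative): the masking in iwl_queue_{inc,dec}_wrap relies
 * on max_tfd_queue_size being a power of two. With the default of 256:
 *
 *	iwl_queue_inc_wrap(trans, 255) == 0
 *	iwl_queue_dec_wrap(trans, 0)   == 255
 */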

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};


#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger, allocations will be bigger and copying will be
 * slower, so that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};
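
/*
 * Worked example of the sizing above (following the IWL_FIRST_TB_SIZE
 * comment): the PN occupies 8 bytes at offset 12, so the first TB must
 * cover 12 + 8 = 20 bytes; each per-entry buffer is then padded to
 * ALIGN(20, 64) = 64 bytes, presumably so consecutive entries in the
 * first_tb_bufs array don't share a cache line.
 */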

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @overflow_q: overflow queue for skbs that couldn't be sent while the
 *	queue was full
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1-st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
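
/*
 * Worked example of the HW/SW window described above (illustrative): on
 * the command queue n_window is 32, and a HW ring index is mapped into
 * the SW window by masking with (n_window - 1) -- see
 * iwl_pcie_get_cmd_index() below. E.g. HW index 37 maps to SW entry
 * 37 & 31 == 5, as do HW indices 69, 101, ... (N * 32 + 5 for N in 0..7).
 */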

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX = BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF = 0,
	IWL_IMAGE_RESP_SUCCESS = 1,
	IWL_IMAGE_RESP_FAIL = 2,
};

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: dma addr of the image loader (IML)
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware
 *	monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @scheduled_for_removal: true if we have scheduled a device removal
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_self_init_dram init_dram;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	bool debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;
	bool scheduled_for_removal;
};
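
/*
 * Note (illustrative): the ctxt_info/ctxt_info_gen3 union above is
 * interpreted according to the device family, along the lines of the
 * check used elsewhere in this header:
 *
 *	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
 *		// use trans_pcie->ctxt_info_gen3
 *	else
 *		// use trans_pcie->ctxt_info
 */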

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		/* bits 4-15 of hi_n_len carry the TB length */
		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
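
/*
 * Illustrative sketch (not the driver's actual code path): how
 * iwl_pcie_get_num_sections() and iwl_pcie_ctxt_info_alloc_dma() above
 * could combine when a firmware image is loaded into DRAM for the
 * self-init flow. Error handling is omitted for brevity.
 *
 *	int cnt = iwl_pcie_get_num_sections(fw, 0);
 *	struct iwl_dram_data *blocks;
 *	int i;
 *
 *	blocks = kcalloc(cnt, sizeof(*blocks), GFP_KERNEL);
 *	for (i = 0; i < cnt; i++)
 *		iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i], &blocks[i]);
 */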

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh_mask/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its bit
		 * is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
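
/*
 * Example of the MSI-X mask polarity noted above (illustrative): to enable
 * only the RF-kill cause, the complement of the cause bit is written, so
 * every other cause stays masked:
 *
 *	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
 *		    ~MSIX_HW_INT_CAUSES_REG_RF_KILL);
 */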

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
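
/*
 * Worked example for iwl_queue_used() (illustrative): with n_window == 256,
 * read_ptr == 250 and write_ptr == 4, the ring has wrapped (w < r), so
 * entries 250..255 and 0..3 are considered used; e.g. indices 252 and 2
 * return true, while index 100 returns false.
 */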

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */