/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
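/*
 * Illustrative note (not from the original source): with IWL_NUM_OF_TBS
 * transmit buffers per TFD (20 in iwl-fh.h at the time of writing), the
 * budget above works out roughly as follows, assuming the usual layout:
 *
 *	TB0: scratch buffer / start of the TX command (always copied)
 *	TB1: remainder of the TX command and the 802.11 header
 *	TB2: data from the SKB's linear head, if any
 *	TB3..: up to IWL_PCIE_MAX_FRAGS (20 - 3 = 17) page fragments
 */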
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
#define RX_LOW_WATERMARK 8

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @write_actual: last write index actually written to the device
 *	(rounded down to a multiple of 8)
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue contents
 * @pool: initial pool of iwl_rx_mem_buffer for the queue
 * @queue: actual rx queue
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	__le32 *bd;
	dma_addr_t bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @pool: initial pool of allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
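/*
 * Illustrative sketch (not part of the original header) of how the fields
 * and macros above fit together on the Rx path, as a rough guide:
 *
 *	- Once RX_POST_REQ_ALLOC (2) used RBDs accumulate on a queue's
 *	  rx_used list, they are handed to the allocator (rbd_empty),
 *	  req_pending is incremented and the rx_alloc work is queued on
 *	  alloc_wq.
 *	- The worker allocates pages for RX_CLAIM_REQ_ALLOC (8) RBDs, the
 *	  extra RBDs coming from the allocator's own pool, moves them to
 *	  rbd_allocated and increments req_ready.
 *	- The Rx path later claims a completed request and splices the
 *	  filled RBDs back onto its rx_free list.
 *
 * This is also why RX_POOL_SIZE is
 * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES = (8 - 2) * 1 = 6:
 * the allocator needs 6 spare RBDs per queue to complete a request.
 */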
struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (unless the HW changes in the future). For the normal TX queues, n_window,
 * which is the size of the software queue data, is also 256; however, for
 * the command queue, n_window is only 32 since we don't need so many commands
 * pending. Since the HW still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX
 * stays 256. As a result, the software buffers (@entries in struct iwl_txq)
 * only have 32 entries, while the HW buffers (@tfds in the same struct)
 * have 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0 |      ...     | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int write_ptr;       /* first empty entry (index), host_w */
	int read_ptr;        /* last used entry (index), host_r */
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr; /* physical addr for BD's */
	int n_window;        /* safe queue window */
	u32 id;
	int low_mark;        /* low watermark, resume queue if free
			      * space more than this */
	int high_mark;       /* high watermark, stop queue if free
			      * space less than this */
};

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
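/*
 * Illustrative example (not from the original source): for the command
 * queue, n_window is TFD_CMD_SLOTS (32) while the HW ring still has
 * TFD_QUEUE_SIZE_MAX (256) TFDs, so a HW index is mapped to a SW entry
 * by masking with (n_window - 1), exactly as get_cmd_index() does later
 * in this file:
 *
 *	hw index 37  -> sw entry 37 & 31 = 5
 *	hw index 250 -> sw entry 250 & 31 = 26
 *
 * For normal TX queues n_window == TFD_QUEUE_SIZE_MAX, so the mapping is
 * the identity.
 */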
/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer.
 * Since the scratch is 4 bytes at offset 12, it's 16 now. If we make it
 * bigger, then allocations will be bigger and copies will be slower, so
 * that's probably not useful.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE 16

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	u8 active;
	bool ampdu;
	bool block;
	unsigned long wd_timeout;
};

static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
	return txq->scratchbufs_dma +
	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}
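/*
 * Illustrative check (not present in the original header): the scratch
 * buffer layout described above gives, assuming a 4-byte struct
 * iwl_cmd_header,
 *
 *	sizeof(hdr) + sizeof(buf) + sizeof(scratch) = 4 + 8 + 4 = 16
 *
 * which is what IWL_HCMD_SCRATCHBUF_SIZE encodes (scratch at offset 12,
 * 4 bytes long). If one wanted to enforce this at build time, a check
 * along these lines could be added in the .c code:
 *
 *	BUILD_BUG_ON(sizeof(struct iwl_pcie_txq_scratch_buf) !=
 *		     IWL_HCMD_SCRATCHBUF_SIZE);
 */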
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rba: allocator for RX replenishing
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: true when ucode supports wide command header format
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 */
struct iwl_trans_pcie {
	struct iwl_rxq rxq;
	struct iwl_rb_allocator rba;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	struct net_device napi_dev;
	struct napi_struct napi;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	bool sw_csum_tx;
	u32 rx_page_order;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	/* protect ref counter */
	spinlock_t ref_lock;
	u32 ref_count;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
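/*
 * Illustrative usage (not from the original source): the two helpers above
 * convert between the generic and the PCIe-specific transport in both
 * directions, since the PCIe data lives inline in trans_specific at the end
 * of struct iwl_trans:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *	struct iwl_trans *same_trans = iwl_trans_pcie_get_trans(trans_pcie);
 *	// same_trans == trans
 */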
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

void iwl_trans_pcie_ref(struct iwl_trans *trans);
void iwl_trans_pcie_unref(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
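/*
 * Illustrative note (not from the original source): in the TFD's TB entry
 * (struct iwl_tfd_tb in iwl-fh.h), hi_n_len packs the high bits of the DMA
 * address in bits 0-3 and the buffer length in bits 4-15, which is why the
 * helper above shifts right by 4:
 *
 *	hi_n_len = 0x1234  ->  length = 0x1234 >> 4 = 0x123 (291 bytes)
 */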
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	clear_bit(STATUS_INT_ENABLED, &trans->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
}

static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

#endif /* __iwl_trans_int_pcie_h__ */