/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
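/*
 * Worked example (hypothetical max_tbs value, for illustration only):
 * a device exposing 25 TBs per TFD leaves IWL_PCIE_MAX_FRAGS = 22 TBs
 * for skb fragments, after the two TBs reserved for the TX command and
 * header and the one reserved for data in the skb head.
 */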
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 offset;
};

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;
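/*
 * Illustrative sketch (helper name is hypothetical, not part of the
 * driver): testing a completion descriptor's flags, e.g. to tell
 * whether the received buffer was fragmented across RBs.
 */
static inline bool iwl_pcie_example_rb_fragmented(
				const struct iwl_rx_completion_desc *cd)
{
	return cd->flags & IWL_RX_CD_FLAGS_FRAGMENTED;
}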

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @write_actual: last write pointer value actually written to the device
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue contents
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
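/*
 * Illustrative sketch (helper name is hypothetical): the RX path and the
 * allocator worker form a producer/consumer pair around this struct. The
 * RX queue batches page-less RBDs onto rbd_empty and bumps req_pending;
 * the worker allocates pages, moves the RBDs to rbd_allocated and bumps
 * req_ready; the RX queue may then claim a batch of RX_CLAIM_REQ_ALLOC
 * RBDs, e.g. after checking:
 */
static inline bool iwl_pcie_example_batch_ready(struct iwl_rb_allocator *rba)
{
	/* a batch is claimable once the worker has honored a request */
	return atomic_read(&rba->req_ready) > 0;
}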
struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq - the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
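/*
 * Illustrative sketch (helper name is hypothetical): because
 * max_tfd_queue_size is a power of two, the wrapping helpers above
 * reduce to a mask, e.g. with 256 entries inc(255) -> 0 and
 * dec(0) -> 255. Counting the entries between two ring pointers can
 * then be done by walking with iwl_queue_inc_wrap():
 */
static inline int iwl_pcie_example_entries_used(struct iwl_trans *trans,
						int read_ptr, int write_ptr)
{
	int n = 0, i;

	for (i = read_ptr; i != write_ptr; i = iwl_queue_inc_wrap(trans, i))
		n++;

	return n;
}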
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *	HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *	SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlayed over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};
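/*
 * Worked example (helper name is hypothetical): since n_window is a
 * power of two, masking with (n_window - 1) folds the 256-entry HW ring
 * onto the SW window. For the command queue (n_window == 32), HW
 * indices 5, 37, 69, ... all land on SW slot 5, which is exactly what
 * iwl_pcie_get_cmd_index(), defined later in this file, computes.
 */
static inline int iwl_pcie_example_hw_to_sw_slot(int hw_idx, int n_window)
{
	return hw_idx & (n_window - 1);
}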
static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8 state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue - command queue number
 * @def_rx_queue - default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_hold_nic_awake: true when the NIC is held awake for a host command
 *	in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t sx_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 def_rx_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared
	 * by writing 1 to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}
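/*
 * Illustrative sketch (helper name is hypothetical, error unwinding
 * elided): copying all sections of one firmware image into
 * DMA-coherent memory with the helpers above, as the context-info
 * init path does.
 */
static inline int iwl_pcie_example_copy_fw(struct iwl_trans *trans,
					   const struct fw_img *fw,
					   struct iwl_dram_data *dram,
					   int start)
{
	int num = iwl_pcie_get_num_sections(fw, start);
	int i, ret;

	for (i = 0; i < num; i++) {
		/* each section gets its own coherent DMA block */
		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[start + i],
						   &dram[i]);
		if (ret)
			return ret;
	}

	return 0;
}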
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->trans_cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default, so
		 * when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
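/*
 * Worked example (illustration only): with n_window = 256, a wrapped
 * queue with r = 250 and w = 4 has entries 250..255 and 0..3 in use;
 * iwl_queue_used() is then true for index 252 or 2, but false for
 * index 100, matching the second (w < r) branch above.
 */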

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */