/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
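/*
 * Illustration (hypothetical numbers, not taken from any particular device
 * config): a device whose TFDs expose max_tbs == 20 would leave
 * IWL_PCIE_MAX_FRAGS == 17 TBs for page fragments, after the 3 TBs reserved
 * above for the command, the header and possible data in the SKB's head.
 */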
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 */
struct iwl_rx_mem_buffer {
        dma_addr_t page_dma;
        struct page *page;
        u16 vid;
        bool invalid;
        struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
        u32 hw;
        u32 sw;
        u32 err_code;
        u32 sch;
        u32 alive;
        u32 rfkill;
        u32 ctkill;
        u32 wakeup;
        u32 rx;
        u32 tx;
        u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
        __le16 rbid;
        __le16 reserved[3];
        __le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED      BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
        __le32 reserved1;
        __le16 rbid;
        u8 flags;
        u8 reserved2[25];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *      Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *      In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @write_actual:
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock:
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
        int id;
        void *bd;
        dma_addr_t bd_dma;
        union {
                void *used_bd;
                __le32 *bd_32;
                struct iwl_rx_completion_desc *cd;
        };
        dma_addr_t used_bd_dma;
        __le16 *tr_tail;
        dma_addr_t tr_tail_dma;
        __le16 *cr_tail;
        dma_addr_t cr_tail_dma;
        u32 read;
        u32 write;
        u32 free_count;
        u32 used_count;
        u32 write_actual;
        u32 queue_size;
        struct list_head rx_free;
        struct list_head rx_used;
        bool need_update;
        void *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
        struct napi_struct napi;
        struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *      the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *      of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
        atomic_t req_pending;
        atomic_t req_ready;
        struct list_head rbd_allocated;
        struct list_head rbd_empty;
        spinlock_t lock;
        struct workqueue_struct *alloc_wq;
        struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
        dma_addr_t dma;
        void *addr;
        size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
        return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq - the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
                                            struct iwl_rxq *rxq)
{
        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
                __le16 *rb_stts = rxq->rb_stts;

                return READ_ONCE(*rb_stts);
        } else {
                struct iwl_rb_status *rb_stts = rxq->rb_stts;

                return READ_ONCE(rb_stts->closed_rb_num);
        }
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
        return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
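/*
 * Note on iwl_queue_inc_wrap()/iwl_queue_dec_wrap() above: both rely on
 * max_tfd_queue_size being a power of two, so the bitwise AND acts as a
 * cheap modulo.  For example, on a 256-entry queue incrementing index 255
 * yields 256 & 0xff == 0, and decrementing index 0 yields -1 & 0xff == 255.
 */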
struct iwl_cmd_meta {
        /* only for SYNC commands, iff the reply skb is wanted */
        struct iwl_host_cmd *source;
        u32 flags;
        u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE       20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
        struct iwl_device_cmd *cmd;
        struct sk_buff *skb;
        /* buffer to free after command completes */
        const void *free_buf;
        struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
        u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *      the writeback -- this is DMA memory and an array holding one buffer
 *      for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1-st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
        void *tfds;
        struct iwl_pcie_first_tb_buf *first_tb_bufs;
        dma_addr_t first_tb_dma;
        struct iwl_pcie_txq_entry *entries;
        spinlock_t lock;
        unsigned long frozen_expiry_remainder;
        struct timer_list stuck_timer;
        struct iwl_trans_pcie *trans_pcie;
        bool need_update;
        bool frozen;
        bool ampdu;
        int block;
        unsigned long wd_timeout;
        struct sk_buff_head overflow_q;
        struct iwl_dma_ptr bc_tbl;

        int write_ptr;
        int read_ptr;
        dma_addr_t dma_addr;
        int n_window;
        u32 id;
        int low_mark;
        int high_mark;

        bool overflow_tx;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
        return txq->first_tb_dma +
               sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

struct iwl_tso_hdr_page {
        struct page *page;
        u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *      set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
        IWL_FW_MON_DBGFS_STATE_CLOSED,
        IWL_FW_MON_DBGFS_STATE_OPEN,
        IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif
/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
        IWL_SHARED_IRQ_NON_RX           = BIT(0),
        IWL_SHARED_IRQ_FIRST_RSS        = BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
        IWL_IMAGE_RESP_DEF              = 0,
        IWL_IMAGE_RESP_SUCCESS          = 1,
        IWL_IMAGE_RESP_FAIL             = 2,
};

/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *      debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *      monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *      in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
        u32 prev_wr_ptr;
        u32 prev_wrap_cnt;
        u8 state;
        /* Used to sync monitor_data debugfs file with driver unload flow */
        struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *      Context information addresses will be taken from here.
 *      This is driver's local copy for keeping track of size and
 *      count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *      frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *      (see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 */
struct iwl_trans_pcie {
        struct iwl_rxq *rxq;
        struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
        struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
        struct iwl_rb_allocator rba;
        union {
                struct iwl_context_info *ctxt_info;
                struct iwl_context_info_gen3 *ctxt_info_gen3;
        };
        struct iwl_prph_info *prph_info;
        struct iwl_prph_scratch *prph_scratch;
        dma_addr_t ctxt_info_dma_addr;
        dma_addr_t prph_info_dma_addr;
        dma_addr_t prph_scratch_dma_addr;
        dma_addr_t iml_dma_addr;
        struct iwl_trans *trans;

        struct net_device napi_dev;

        struct __percpu iwl_tso_hdr_page *tso_hdr_page;

        /* INT ICT Table */
        __le32 *ict_tbl;
        dma_addr_t ict_tbl_dma;
        int ict_index;
        bool use_ict;
        bool is_down, opmode_down;
        s8 debug_rfkill;
        struct isr_statistics isr_stats;

        spinlock_t irq_lock;
        struct mutex mutex;
        u32 inta_mask;
        u32 scd_base_addr;
        struct iwl_dma_ptr scd_bc_tbls;
        struct iwl_dma_ptr kw;

        struct iwl_txq *txq_memory;
        struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
        unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
        unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

        /* PCI bus related data */
        struct pci_dev *pci_dev;
        void __iomem *hw_base;

        bool ucode_write_complete;
        wait_queue_head_t ucode_write_waitq;
        wait_queue_head_t wait_command_queue;
        wait_queue_head_t d0i3_waitq;

        u8 page_offs, dev_cmd_offs;

        u8 cmd_queue;
        u8 def_rx_queue;
        u8 cmd_fifo;
        unsigned int cmd_q_wdg_timeout;
        u8 n_no_reclaim_cmds;
        u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
        u8 max_tbs;
        u16 tfd_size;

        enum iwl_amsdu_size rx_buf_size;
        bool bc_table_dword;
        bool scd_set_active;
        bool sw_csum_tx;
        bool pcie_dbg_dumped_once;
        u32 rx_page_order;

        /*protect hw register */
        spinlock_t reg_lock;
        bool cmd_hold_nic_awake;
        bool ref_cmd_in_flight;

#ifdef CONFIG_IWLWIFI_DEBUGFS
        struct cont_rec fw_mon_data;
#endif

        struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
        bool msix_enabled;
        u8 shared_vec_mask;
        u32 alloc_vecs;
        u32 def_irq;
        u32 fh_init_mask;
        u32 hw_init_mask;
        u32 fh_mask;
        u32 hw_mask;
        cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
        u16 tx_cmd_queue_size;
        bool in_rescan;

        void *base_rb_stts;
        dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
        return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
                                      struct msix_entry *entry)
{
        /*
         * Before sending the interrupt the HW disables it to prevent
         * a nested interrupt. This is done by writing 1 to the corresponding
         * bit in the mask register. After handling the interrupt, it should be
         * re-enabled by clearing this bit. This register is defined as
         * write 1 clear (W1C), meaning that it is cleared by writing 1
         * to the bit.
         */
        iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
        return container_of((void *)trans_pcie, struct iwl_trans,
                            trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *      Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                       const struct pci_device_id *ent,
                                       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
                            struct iwl_rxq *rxq);
int iwl_pcie_rx_alloc(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
                          int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
                               const struct iwl_trans_txq_scd_cfg *cfg,
                               unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
                                bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
                                        bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
                                  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
                                  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                            struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
                                   struct iwl_txq *txq, u16 byte_cnt,
                                   int num_tbs);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
                                          u8 idx)
{
        if (trans->cfg->use_tfh) {
                struct iwl_tfh_tfd *tfd = _tfd;
                struct iwl_tfh_tb *tb = &tfd->tbs[idx];

                return le16_to_cpu(tb->tb_len);
        } else {
                struct iwl_tfd *tfd = _tfd;
                struct iwl_tfd_tb *tb = &tfd->tbs[idx];

                return le16_to_cpu(tb->hi_n_len) >> 4;
        }
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        clear_bit(STATUS_INT_ENABLED, &trans->status);
        if (!trans_pcie->msix_enabled) {
                /* disable interrupts from uCode/NIC to host */
                iwl_write32(trans, CSR_INT_MASK, 0x00000000);

                /* acknowledge/clear/reset any interrupts still pending
                 * from uCode or flow handler (Rx/Tx DMA) */
                iwl_write32(trans, CSR_INT, 0xffffffff);
                iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
        } else {
                /* disable all the interrupts we might use */
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
                            trans_pcie->fh_init_mask);
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
                            trans_pcie->hw_init_mask);
        }
        IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS     31
#define IWL_NUM_OF_TRANSFER_RINGS       527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
                                            int start)
{
        int i = 0;

        while (start < fw->num_sec &&
               fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
               fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
                start++;
                i++;
        }

        return i;
}

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
                                               const struct fw_desc *sec,
                                               struct iwl_dram_data *dram)
{
        dram->block = dma_alloc_coherent(trans->dev, sec->len,
                                         &dram->physical,
                                         GFP_KERNEL);
        if (!dram->block)
                return -ENOMEM;

        dram->size = sec->len;
        memcpy(dram->block, sec->data, sec->len);

        return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
        struct iwl_self_init_dram *dram = &trans->init_dram;
        int i;

        if (!dram->fw) {
                WARN_ON(dram->fw_cnt);
                return;
        }

        for (i = 0; i < dram->fw_cnt; i++)
                dma_free_coherent(trans->dev, dram->fw[i].size,
                                  dram->fw[i].block, dram->fw[i].physical);

        kfree(dram->fw);
        dram->fw_cnt = 0;
        dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock(&trans_pcie->irq_lock);
        _iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
        set_bit(STATUS_INT_ENABLED, &trans->status);
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INI_SET_MASK;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                /*
                 * fh/hw_mask keep all the unmasked causes.
                 * Unlike MSI, with MSI-X a cause is enabled when its bit in
                 * the mask register is unset.
                 */
                trans_pcie->hw_mask = trans_pcie->hw_init_mask;
                trans_pcie->fh_mask = trans_pcie->fh_init_mask;
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
                            ~trans_pcie->fh_mask);
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
                            ~trans_pcie->hw_mask);
        }
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock(&trans_pcie->irq_lock);
        _iwl_enable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
        trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
        trans_pcie->fh_mask = msk;
}
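/*
 * Note on the MSI-X mask handling above: a bit set in CSR_MSIX_FH_INT_MASK_AD
 * or CSR_MSIX_HW_INT_MASK_AD masks the corresponding cause, so writing ~msk
 * leaves exactly the causes in msk enabled.  The helpers cache msk in
 * fh_mask/hw_mask so later callers know which causes are currently unmasked.
 */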
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
                            trans_pcie->hw_init_mask);
                iwl_enable_fh_int_msk_msix(trans,
                                           MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
        }
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

        if (!trans_pcie->msix_enabled) {
                /*
                 * When we receive the ALIVE interrupt, the ISR will call
                 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
                 * interrupt (which is not really needed anymore) but also the
                 * RX interrupt which will allow us to receive the ALIVE
                 * notification (which is Rx) and continue the flow.
                 */
                trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                iwl_enable_hw_int_msk_msix(trans,
                                           MSIX_HW_INT_CAUSES_REG_ALIVE);
                /*
                 * Leave all the FH causes enabled to get the ALIVE
                 * notification.
                 */
                iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
        }
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
        return index & (q->n_window - 1);
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
                                     struct iwl_txq *txq, int idx)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans->cfg->use_tfh)
                idx = iwl_pcie_get_cmd_index(txq, idx);

        return txq->tfds + trans_pcie->tfd_size * idx;
}
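/*
 * Worked example for the two helpers above: n_window is a power of two
 * (256 for data queues, 32 for the command queue, see the iwl_txq
 * documentation), so iwl_pcie_get_cmd_index() reduces to a cheap modulo --
 * on the 32-entry command queue, HW index 40 maps to SW slot 40 & 31 == 8.
 * iwl_pcie_get_tfd() then locates a TFD by striding tfd_size bytes per
 * entry into the tfds DMA block.
 */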
static inline const char *queue_name(struct device *dev,
                                     struct iwl_trans_pcie *trans_p, int i)
{
        if (trans_p->shared_vec_mask) {
                int vec = trans_p->shared_vec_mask &
                          IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

                if (i == 0)
                        return DRV_NAME ": shared IRQ";

                return devm_kasprintf(dev, GFP_KERNEL,
                                      DRV_NAME ": queue %d", i + vec);
        }
        if (i == 0)
                return DRV_NAME ": default queue";

        if (i == trans_p->alloc_vecs - 1)
                return DRV_NAME ": exception";

        return devm_kasprintf(dev, GFP_KERNEL,
                              DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
                            trans_pcie->fh_init_mask);
                iwl_enable_hw_int_msk_msix(trans,
                                           MSIX_HW_INT_CAUSES_REG_RF_KILL);
        }

        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
                /*
                 * On 9000-series devices this bit isn't enabled by default,
                 * so when we power down the device we need to set the bit to
                 * allow it to wake up the PCI-E bus for RF-kill interrupts.
                 */
                iwl_set_bit(trans, CSR_GP_CNTRL,
                            CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
        }
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
                                  struct iwl_txq *txq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
                IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
                iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
        }
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
                                  struct iwl_txq *txq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
                iwl_op_mode_queue_full(trans->op_mode, txq->id);
                IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
        } else
                IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
                                    txq->id);
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
        int index = iwl_pcie_get_cmd_index(q, i);
        int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
        int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

        return w >= r ?
                (index >= r && index < w) :
                !(index < r && index >= w);
}
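/*
 * Illustration of the wrap-around case above: with r == 250 and w == 5 on a
 * 256-entry queue, the used window is [250..255] plus [0..4], i.e. exactly
 * the complement of (index < 250 && index >= 5) -- hence the negated test
 * when w < r.
 */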
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        lockdep_assert_held(&trans_pcie->mutex);

        if (trans_pcie->debug_rfkill == 1)
                return true;

        return !(iwl_read32(trans, CSR_GP_CNTRL) &
                CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
                                                  u32 reg, u32 mask, u32 value)
{
        u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
        WARN_ON_ONCE(value & ~mask);
#endif

        v = iwl_read32(trans, reg);
        v &= ~mask;
        v |= value;
        iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
                                              u32 reg, u32 mask)
{
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
                                            u32 reg, u32 mask)
{
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
        return (trans->dbg.dest_tlv || trans->dbg.ini_valid);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
                                       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
                      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
                       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
                           struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
                            struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
                                 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
                                   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
                                     struct iwl_txq **intxq, int size,
                                     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
                                      struct iwl_txq *txq,
                                      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                                 __le16 flags, u8 sta_id, u8 tid,
                                 int cmd_id, int size,
                                 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
                           struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
                                  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
                                     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */