/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @list: list entry for the rx_free/rx_used buffer lists
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 offset;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 * Counters for the various interrupt causes handled by the ISR.
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;
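/*
 * Illustrative sketch only, not the driver's actual RX path: the rbid tag
 * in a completion descriptor is what ties it back to the iwl_rx_mem_buffer
 * that was handed to the HW, conceptually:
 *
 *	u16 vid = le16_to_cpu(cd->rbid);
 *	struct iwl_rx_mem_buffer *rxb = trans_pcie->global_table[vid];
 *	bool fragmented = cd->flags & IWL_RX_CD_FLAGS_FRAGMENTED;
 *
 * The real code additionally masks and range-checks the vid before using
 * it to index the global table.
 */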
/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer
 *	descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to allocator to use for allocation
 * @write_actual: actual write pointer written to the device, aligned down
 *	to a multiple of 8
 * @queue_size: size of this queue
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's data
 * @napi: NAPI struct for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
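/*
 * Illustrative sketch of the rx_free/rx_used FIFO convention noted above
 * (assumes rxq->lock is held; this is not the driver's actual replenish
 * code). An empty RBD is pulled from rx_used, given a fresh page, and
 * queued on rx_free for the HW restock:
 *
 *	struct iwl_rx_mem_buffer *rxb =
 *		list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
 *				 list);
 *	... allocate and DMA-map a page into rxb->page/rxb->page_dma ...
 *	list_move_tail(&rxb->list, &rxq->rx_free);
 *	rxq->free_count++;
 */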
/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not yet processed
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport (for configuration data)
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
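/*
 * Worked example for the wrap helpers above: with max_tfd_queue_size == 256
 * the mask is 0xff, so iwl_queue_inc_wrap(trans, 255) == 0 and
 * iwl_queue_dec_wrap(trans, 0) == 255. This relies on max_tfd_queue_size
 * being a power of two.
 */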
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes at offset 12, the first TB has to be 20 bytes.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index); host write pointer (host_w)
 * @read_ptr: last used entry (index); host read pointer (host_r)
 * @dma_addr: physical addr for BD's
 * @n_window: size of the software queue window (see the note below)
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (unless the HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};
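/*
 * Worked example of the HW/SW window note above, for the command queue
 * (n_window == 32): HW descriptor index 40 maps to SW entry
 * 40 & (32 - 1) == 8. This is exactly the mapping implemented later in
 * this file by iwl_pcie_get_cmd_index().
 */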
363 */ 364 struct iwl_txq { 365 void *tfds; 366 struct iwl_pcie_first_tb_buf *first_tb_bufs; 367 dma_addr_t first_tb_dma; 368 struct iwl_pcie_txq_entry *entries; 369 spinlock_t lock; 370 unsigned long frozen_expiry_remainder; 371 struct timer_list stuck_timer; 372 struct iwl_trans_pcie *trans_pcie; 373 bool need_update; 374 bool frozen; 375 bool ampdu; 376 int block; 377 unsigned long wd_timeout; 378 struct sk_buff_head overflow_q; 379 struct iwl_dma_ptr bc_tbl; 380 381 int write_ptr; 382 int read_ptr; 383 dma_addr_t dma_addr; 384 int n_window; 385 u32 id; 386 int low_mark; 387 int high_mark; 388 389 bool overflow_tx; 390 }; 391 392 static inline dma_addr_t 393 iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx) 394 { 395 return txq->first_tb_dma + 396 sizeof(struct iwl_pcie_first_tb_buf) * idx; 397 } 398 399 struct iwl_tso_hdr_page { 400 struct page *page; 401 u8 *pos; 402 }; 403 404 #ifdef CONFIG_IWLWIFI_DEBUGFS 405 /** 406 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data 407 * debugfs file 408 * 409 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed. 410 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open. 411 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is 412 * set the file can no longer be used. 413 */ 414 enum iwl_fw_mon_dbgfs_state { 415 IWL_FW_MON_DBGFS_STATE_CLOSED, 416 IWL_FW_MON_DBGFS_STATE_OPEN, 417 IWL_FW_MON_DBGFS_STATE_DISABLED, 418 }; 419 #endif 420 421 /** 422 * enum iwl_shared_irq_flags - level of sharing for irq 423 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes. 424 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue. 425 */ 426 enum iwl_shared_irq_flags { 427 IWL_SHARED_IRQ_NON_RX = BIT(0), 428 IWL_SHARED_IRQ_FIRST_RSS = BIT(1), 429 }; 430 431 /** 432 * enum iwl_image_response_code - image response values 433 * @IWL_IMAGE_RESP_DEF: the default value of the register 434 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully 435 * @IWL_IMAGE_RESP_FAIL: iml reading failed 436 */ 437 enum iwl_image_response_code { 438 IWL_IMAGE_RESP_DEF = 0, 439 IWL_IMAGE_RESP_SUCCESS = 1, 440 IWL_IMAGE_RESP_FAIL = 2, 441 }; 442 443 /** 444 * struct cont_rec: continuous recording data structure 445 * @prev_wr_ptr: the last address that was read in monitor_data 446 * debugfs file 447 * @prev_wrap_cnt: the wrap count that was used during the last read in 448 * monitor_data debugfs file 449 * @state: the state of monitor_data debugfs file as described 450 * in &iwl_fw_mon_dbgfs_state enum 451 * @mutex: locked while reading from monitor_data debugfs file 452 */ 453 #ifdef CONFIG_IWLWIFI_DEBUGFS 454 struct cont_rec { 455 u32 prev_wr_ptr; 456 u32 prev_wrap_cnt; 457 u8 state; 458 /* Used to sync monitor_data debugfs file with driver unload flow */ 459 struct mutex mutex; 460 }; 461 #endif 462 463 /** 464 * struct iwl_trans_pcie - PCIe transport specific data 465 * @rxq: all the RX queue data 466 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues 467 * @global_table: table mapping received VID from hw to rxb 468 * @rba: allocator for RX replenishing 469 * @ctxt_info: context information for FW self init 470 * @ctxt_info_gen3: context information for gen3 devices 471 * @prph_info: prph info for self init 472 * @prph_scratch: prph scratch for self init 473 * @ctxt_info_dma_addr: dma addr of context information 474 * @prph_info_dma_addr: dma addr of prph info 475 * @prph_scratch_dma_addr: dma addr of prph scratch 476 * @ctxt_info_dma_addr: dma addr of context information 477 * @init_dram: DRAM data 
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: dma addr of the image loader
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_hold_nic_awake: true when the NIC is held awake for a host command
 *	in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t sx_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 def_rx_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared by writing 1
	 * to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
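/*
 * Illustrative round trip between the two accessors above (assumes a valid
 * trans allocated by iwl_trans_pcie_alloc(); not real driver code):
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *	WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 *
 * This works because the PCIe-specific data lives in the trans_specific
 * tail of struct iwl_trans, so container_of() can recover the outer struct.
 */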
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}
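/*
 * Worked example: phys == 0xfffff000 and len == 0x2000 gives
 * upper_32_bits(0xfffff000) == 0 but upper_32_bits(0x100001000) == 1, so
 * the buffer crosses a 4 GiB boundary. Had 'phys' been a 32-bit
 * dma_addr_t, the addition would have wrapped and hidden the crossing.
 */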
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
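/*
 * Worked example for the legacy (non-TFH) branch above: hi_n_len packs the
 * high 4 bits of the TB address in its low nibble and the 12-bit length
 * above that, so hi_n_len == 0x1234 yields a length of 0x123 after the
 * right shift by 4.
 */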
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	512

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}
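/*
 * Worked example of the inverted MSI-X mask convention used above: to leave
 * only MSIX_FH_INT_CAUSES_D2S_CH0_NUM enabled, iwl_enable_fh_int_msk_msix()
 * writes ~MSIX_FH_INT_CAUSES_D2S_CH0_NUM to CSR_MSIX_FH_INT_MASK_AD - every
 * cause whose mask bit is set is disabled, so only the D2S channel 0 cause
 * remains unmasked.
 */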
static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->trans_cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}
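/*
 * Worked example for queue_name() above: with a shared vector handling the
 * first RSS queue (shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS), vec is 1,
 * so vector 0 is named "shared IRQ" and vector 1 becomes "queue 2"; without
 * sharing, vector 0 is the "default queue", the last vector is "exception",
 * and vector 1 is simply "queue 1".
 */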
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
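/*
 * Worked example for iwl_queue_used() above, with n_window == 256: if
 * r == 10 and w == 200 (no wrap), entry 100 is used and entry 250 is not;
 * if the write pointer has wrapped so that r == 200 and w == 8, the used
 * region is 200..255 together with 0..7, hence the inverted test in the
 * second branch.
 */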
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */