/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
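
/*
 * Illustrative sketch (not part of the driver): since three TBs per TFD
 * are reserved (two for the TX command and header, one for data that may
 * remain in the SKB head), a hypothetical TX path would only linearize a
 * frame whose page fragments exceed the IWL_PCIE_MAX_FRAGS() budget:
 *
 *	if (skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie))
 *		if (skb_linearize(skb))
 *			return -ENOMEM;
 */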

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer
 */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @list: list entry for the rx_free/rx_used or allocator lists
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;
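
/*
 * Illustrative sketch (not part of the driver): on devices that use
 * completion descriptors, the rbid the HW writes back serves as the vid
 * to find the buffer again in the global table (defined further below in
 * struct iwl_trans_pcie); a hypothetical lookup, ignoring the extra
 * masking the real RX path does:
 *
 *	static struct iwl_rx_mem_buffer *
 *	rxb_from_cd(struct iwl_trans_pcie *trans_pcie,
 *		    struct iwl_rx_completion_desc *cd)
 *	{
 *		u16 vid = le16_to_cpu(cd->rbid);
 *
 *		if (vid >= ARRAY_SIZE(trans_pcie->global_table))
 *			return NULL;
 *		return trans_pcie->global_table[vid];
 *	}
 */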

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer
 *	descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to allocator to use for allocation
 * @write_actual: last write pointer value actually written to the device
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue data
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
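
/*
 * Illustrative sketch (not part of the driver): a queue hands its used
 * RBDs to the allocator and posts a request; the allocator attaches
 * pages in the background and the queue later claims RX_CLAIM_REQ_ALLOC
 * (8) ready RBDs in one batch. A hypothetical request path, simplified
 * from the real accounting, could look like:
 *
 *	if (rxq->used_count >= RX_POST_REQ_ALLOC) {
 *		atomic_inc(&rba->req_pending);
 *		queue_work(rba->alloc_wq, &rba->rx_alloc);
 *	}
 */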

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport (for configuration data)
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is located at offset 12 and takes 8 bytes, the first TB must
 * be 20 bytes.
 * If we make it bigger then allocations will be bigger and copying slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
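
/*
 * Worked example (illustrative): the PN occupies bytes 12..19 of the
 * first TB, hence IWL_FIRST_TB_SIZE = 12 + 8 = 20; the per-entry buffer
 * (struct iwl_pcie_first_tb_buf below) is padded to ALIGN(20, 64) = 64
 * bytes, so an array of them stays 64-byte aligned:
 *
 *	BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != 64);
 */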

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there are HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};
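
/*
 * Worked example (illustrative): with n_window = 32 on the command
 * queue, a HW index is mapped into the SW window by masking with
 * (n_window - 1), which is exactly what iwl_pcie_get_cmd_index() below
 * does: HW indices 3, 35, 67, ..., 227 all map to SW entry 3, so the
 * 32 SW entries tile the 256-entry HW ring eight times.
 */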

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8  state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: dma addr of the image loader image
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 def_rx_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared
	 * by writing 1 to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
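
/*
 * Illustrative sketch (not part of the driver): the two accessors above
 * are inverses of each other, since trans_specific is the trailing
 * storage of struct iwl_trans that holds the PCIe-specific state:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *
 *	WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 */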

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);
int iwl_pcie_rx_alloc(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
				   struct iwl_txq *txq, u16 byte_cnt,
				   int num_tbs);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
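
/*
 * Worked example (illustrative): in the legacy TFD format, hi_n_len
 * packs the high 4 bits of the 36-bit TB address in its low nibble and
 * the 12-bit TB length above it, hence the >> 4 when extracting the
 * length. E.g. hi_n_len == 0x1402 decodes to length 0x140 (320 bytes)
 * with address high bits 0x2.
 */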

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
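
/*
 * Worked example (illustrative): for a hypothetical image laid out as
 *
 *	sec[0] sec[1] CPU1_CPU2_SEPARATOR_SECTION sec[3] sec[4]
 *
 * iwl_pcie_get_num_sections(fw, 0) returns 2 (the CPU1 sections) and
 * iwl_pcie_get_num_sections(fw, 3) returns 2 (the CPU2 sections),
 * since counting stops at a separator or at fw->num_sec.
 */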

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}
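
/*
 * Worked example (illustrative): the MSI-X mask registers hold the
 * *masked* causes, so the complement of the enabled set is written.
 * With fh_mask == 0x3 (causes 0 and 1 enabled), ~0x3 == 0xfffffffc is
 * written to CSR_MSIX_FH_INT_MASK_AD, leaving only bits 0 and 1
 * unmasked; iwl_enable_fh_int_msk_msix() below follows the same rule.
 */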

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}
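
/*
 * Worked example (illustrative): with a vector shared with the first
 * RSS queue (vec == 1), vector 0 is named "<DRV_NAME>: shared IRQ" and
 * vector i > 0 is named "<DRV_NAME>: queue <i + 1>"; in the non-shared
 * case vector 0 is the default queue and the last allocated vector is
 * the exception vector.
 */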

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
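
/*
 * Worked example (illustrative): with n_window == 256, r == 250 and
 * w == 4 (the write pointer has wrapped), entries 250..255 and 0..3
 * are in use: iwl_queue_used(q, 252) and iwl_queue_used(q, 2) are
 * true, while iwl_queue_used(q, 100) is false because 100 lies in the
 * free region [w, r).
 */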

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
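
/*
 * Worked example (illustrative): __iwl_trans_pcie_set_bits_mask() does
 * a read-modify-write, clearing the mask bits and then OR-ing in the
 * value. With a register holding 0xf0, mask 0x0c and value 0x04, the
 * result is (0xf0 & ~0x0c) | 0x04 == 0xf4; the set_bit/clear_bit
 * helpers above are the value == mask and value == 0 special cases.
 */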

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || trans->dbg.ini_valid);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */