/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
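/*
 * Illustrative example only (the max_tbs value below is assumed, not taken
 * from this header): on a device whose TFD supports 25 transfer buffers
 * (x->max_tbs == 25), IWL_PCIE_MAX_FRAGS(x) evaluates to 22. Two TBs are
 * reserved for the TX command and the 802.11 header, and one more for
 * possible data in the skb's linear area, as the comment above explains.
 */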
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @list: list entry used to chain this RBD on the RX lists
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 offset;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for page allocation
 * @write_actual: last write pointer value actually written to the device,
 *	rounded down to a multiple of 8
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the members of this queue
 * @queue_size: size of this queue
 * @napi: NAPI struct for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 * @next_rb_is_fragment: indicates that the previous RB that we handled set
 *	the fragmented flag, so the next one is still another fragment
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
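/*
 * The wrap helpers above rely on max_tfd_queue_size being a power of two.
 * Worked example (illustrative values only), with max_tfd_queue_size == 256:
 *
 *	iwl_queue_inc_wrap(trans, 255) == (255 + 1) & 255 == 0
 *	iwl_queue_dec_wrap(trans, 0)   == (0 - 1) & 255   == 255
 *
 * i.e. incrementing past the last entry wraps to 0 and decrementing from 0
 * wraps to the last entry, without any conditional branch.
 */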
static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8  state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_hold_nic_awake: true when the NIC is held awake for a host command
 *	in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;
	struct dma_pool *bc_pool;

	struct iwl_txq *txq_memory;

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t sx_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 def_rx_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared by writing 1
	 * to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}
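/*
 * Usage sketch (assumed, simplified): a per-vector MSI-X handler typically
 * services all pending causes first and only then re-enables its vector by
 * writing the W1C automask bit, e.g.:
 *
 *	... handle all pending causes for this vector ...
 *	iwl_pcie_clear_irq(trans, entry);	// W1C: write 1 to re-enable
 *	return IRQ_HANDLED;
 */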
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}
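/*
 * Worked example (illustrative numbers): a buffer at DMA address
 * 0xfffff000 with len 0x2000 ends at 0x100001000, so upper_32_bits() of
 * start and end differ and the function returns true; the same length at
 * 0xffff0000 returns false. Callers can use this to avoid handing the HW a
 * transfer buffer that straddles a 4 GB boundary.
 */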
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
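/*
 * Legacy TFD layout note (worked example, assuming the usual iwl-fh.h
 * encoding where bits 0-3 of hi_n_len hold the high address bits and bits
 * 4-15 hold the length): hi_n_len == cpu_to_le16(0x0a25) decodes to
 * len = 0x0a25 >> 4 = 0xa2 bytes, with 0x5 being the upper bits of the DMA
 * address. TFH (use_tfh) devices carry the length in a plain tb_len field
 * instead, which is why the helper above branches on use_tfh.
 */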
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh_mask/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
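/*
 * Note on the MSI-X mask polarity used above (sketch with made-up values):
 * fh_mask/hw_mask track the causes that should be *enabled*, while the
 * CSR_MSIX_*_INT_MASK_AD registers expect a 1 bit for every cause that is
 * *masked*. Hence the ~msk writes: for instance, if
 * MSIX_FH_INT_CAUSES_D2S_CH0_NUM is bit 0, enabling only that cause writes
 * 0xfffffffe to the FH mask register, leaving just that cause unmasked.
 */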
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When the ALIVE interrupt is received, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt, which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->trans_cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
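/*
 * Worked example for iwl_queue_used() (illustrative values): with
 * n_window-masked pointers r = 5 (read) and w = 9 (write), entries 5..8 are
 * in use, so the function returns true for index 7 and false for index 9.
 * In the wrapped case w = 2, r = 250 (w < r), every index except 2..249 is
 * considered used, which is exactly what the negated range check expresses.
 */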
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */