1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2003-2014, 2018-2022 Intel Corporation 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 */ 7 #include <linux/sched.h> 8 #include <linux/wait.h> 9 #include <linux/gfp.h> 10 11 #include "iwl-prph.h" 12 #include "iwl-io.h" 13 #include "internal.h" 14 #include "iwl-op-mode.h" 15 #include "iwl-context-info-gen3.h" 16 17 /****************************************************************************** 18 * 19 * RX path functions 20 * 21 ******************************************************************************/ 22 23 /* 24 * Rx theory of operation 25 * 26 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), 27 * each of which point to Receive Buffers to be filled by the NIC. These get 28 * used not only for Rx frames, but for any command response or notification 29 * from the NIC. The driver and NIC manage the Rx buffers by means 30 * of indexes into the circular buffer. 31 * 32 * Rx Queue Indexes 33 * The host/firmware share two index registers for managing the Rx buffers. 34 * 35 * The READ index maps to the first position that the firmware may be writing 36 * to -- the driver can read up to (but not including) this position and get 37 * good data. 38 * The READ index is managed by the firmware once the card is enabled. 39 * 40 * The WRITE index maps to the last position the driver has read from -- the 41 * position preceding WRITE is the last slot the firmware can place a packet. 42 * 43 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 44 * WRITE = READ. 45 * 46 * During initialization, the host sets up the READ queue position to the first 47 * INDEX position, and WRITE to the last (READ - 1 wrapped) 48 * 49 * When the firmware places a packet in a buffer, it will advance the READ index 50 * and fire the RX interrupt. The driver can then query the READ index and 51 * process as many packets as possible, moving the WRITE index forward as it 52 * resets the Rx queue buffers with new memory. 53 * 54 * The management in the driver is as follows: 55 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. 56 * When the interrupt handler is called, the request is processed. 57 * The page is either stolen - transferred to the upper layer 58 * or reused - added immediately to the iwl->rxq->rx_free list. 59 * + When the page is stolen - the driver updates the matching queue's used 60 * count, detaches the RBD and transfers it to the queue used list. 61 * When there are two used RBDs - they are transferred to the allocator empty 62 * list. Work is then scheduled for the allocator to start allocating 63 * eight buffers. 64 * When there are another 6 used RBDs - they are transferred to the allocator 65 * empty list and the driver tries to claim the pre-allocated buffers and 66 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them 67 * until ready. 68 * When there are 8+ buffers in the free list - either from allocation or from 69 * 8 reused unstolen pages - restock is called to update the FW and indexes. 70 * + In order to make sure the allocator always has RBDs to use for allocation 71 * the allocator has initial pool in the size of num_queues*(8-2) - the 72 * maximum missing RBDs per allocation request (request posted with 2 73 * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). 74 * The queues supplies the recycle of the rest of the RBDs. 75 * + A received packet is processed and handed to the kernel network stack, 76 * detached from the iwl->rxq. The driver 'processed' index is updated. 77 * + If there are no allocated buffers in iwl->rxq->rx_free, 78 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. 79 * If there were enough free buffers and RX_STALLED is set it is cleared. 80 * 81 * 82 * Driver sequence: 83 * 84 * iwl_rxq_alloc() Allocates rx_free 85 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls 86 * iwl_pcie_rxq_restock. 87 * Used only during initialization. 88 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx 89 * queue, updates firmware pointers, and updates 90 * the WRITE index. 91 * iwl_pcie_rx_allocator() Background work for allocating pages. 92 * 93 * -- enable interrupts -- 94 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the 95 * READ INDEX, detaching the SKB from the pool. 96 * Moves the packet buffer from queue to rx_used. 97 * Posts and claims requests to the allocator. 98 * Calls iwl_pcie_rxq_restock to refill any empty 99 * slots. 100 * 101 * RBD life-cycle: 102 * 103 * Init: 104 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue 105 * 106 * Regular Receive interrupt: 107 * Page Stolen: 108 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> 109 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue 110 * Page not Stolen: 111 * rxq.queue -> rxq.rx_free -> rxq.queue 112 * ... 113 * 114 */ 115 116 /* 117 * iwl_rxq_space - Return number of free slots available in queue. 118 */ 119 static int iwl_rxq_space(const struct iwl_rxq *rxq) 120 { 121 /* Make sure rx queue size is a power of 2 */ 122 WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); 123 124 /* 125 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity 126 * between empty and completely full queues. 127 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well 128 * defined for negative dividends. 129 */ 130 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); 131 } 132 133 /* 134 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 135 */ 136 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) 137 { 138 return cpu_to_le32((u32)(dma_addr >> 8)); 139 } 140 141 /* 142 * iwl_pcie_rx_stop - stops the Rx DMA 143 */ 144 int iwl_pcie_rx_stop(struct iwl_trans *trans) 145 { 146 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 147 /* TODO: remove this once fw does it */ 148 iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0); 149 return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3, 150 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); 151 } else if (trans->trans_cfg->mq_rx_supported) { 152 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); 153 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, 154 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); 155 } else { 156 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 157 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, 158 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 159 1000); 160 } 161 } 162 163 /* 164 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue 165 */ 166 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, 167 struct iwl_rxq *rxq) 168 { 169 u32 reg; 170 171 lockdep_assert_held(&rxq->lock); 172 173 /* 174 * explicitly wake up the NIC if: 175 * 1. shadow registers aren't enabled 176 * 2. there is a chance that the NIC is asleep 177 */ 178 if (!trans->trans_cfg->base_params->shadow_reg_enable && 179 test_bit(STATUS_TPOWER_PMI, &trans->status)) { 180 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 181 182 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 183 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", 184 reg); 185 iwl_set_bit(trans, CSR_GP_CNTRL, 186 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 187 rxq->need_update = true; 188 return; 189 } 190 } 191 192 rxq->write_actual = round_down(rxq->write, 8); 193 if (!trans->trans_cfg->mq_rx_supported) 194 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); 195 else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 196 iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual | 197 HBUS_TARG_WRPTR_RX_Q(rxq->id)); 198 else 199 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), 200 rxq->write_actual); 201 } 202 203 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) 204 { 205 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 206 int i; 207 208 for (i = 0; i < trans->num_rx_queues; i++) { 209 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 210 211 if (!rxq->need_update) 212 continue; 213 spin_lock_bh(&rxq->lock); 214 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 215 rxq->need_update = false; 216 spin_unlock_bh(&rxq->lock); 217 } 218 } 219 220 static void iwl_pcie_restock_bd(struct iwl_trans *trans, 221 struct iwl_rxq *rxq, 222 struct iwl_rx_mem_buffer *rxb) 223 { 224 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 225 struct iwl_rx_transfer_desc *bd = rxq->bd; 226 227 BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64)); 228 229 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma); 230 bd[rxq->write].rbid = cpu_to_le16(rxb->vid); 231 } else { 232 __le64 *bd = rxq->bd; 233 234 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); 235 } 236 237 IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n", 238 (u32)rxb->vid, rxq->id, rxq->write); 239 } 240 241 /* 242 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx 243 */ 244 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, 245 struct iwl_rxq *rxq) 246 { 247 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 248 struct iwl_rx_mem_buffer *rxb; 249 250 /* 251 * If the device isn't enabled - no need to try to add buffers... 252 * This can happen when we stop the device and still have an interrupt 253 * pending. We stop the APM before we sync the interrupts because we 254 * have to (see comment there). On the other hand, since the APM is 255 * stopped, we cannot access the HW (in particular not prph). 256 * So don't try to restock if the APM has been already stopped. 257 */ 258 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 259 return; 260 261 spin_lock_bh(&rxq->lock); 262 while (rxq->free_count) { 263 /* Get next free Rx buffer, remove from free list */ 264 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, 265 list); 266 list_del(&rxb->list); 267 rxb->invalid = false; 268 /* some low bits are expected to be unset (depending on hw) */ 269 WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask); 270 /* Point to Rx buffer via next RBD in circular buffer */ 271 iwl_pcie_restock_bd(trans, rxq, rxb); 272 rxq->write = (rxq->write + 1) & (rxq->queue_size - 1); 273 rxq->free_count--; 274 } 275 spin_unlock_bh(&rxq->lock); 276 277 /* 278 * If we've added more space for the firmware to place data, tell it. 279 * Increment device's write pointer in multiples of 8. 280 */ 281 if (rxq->write_actual != (rxq->write & ~0x7)) { 282 spin_lock_bh(&rxq->lock); 283 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 284 spin_unlock_bh(&rxq->lock); 285 } 286 } 287 288 /* 289 * iwl_pcie_rxsq_restock - restock implementation for single queue rx 290 */ 291 static void iwl_pcie_rxsq_restock(struct iwl_trans *trans, 292 struct iwl_rxq *rxq) 293 { 294 struct iwl_rx_mem_buffer *rxb; 295 296 /* 297 * If the device isn't enabled - not need to try to add buffers... 298 * This can happen when we stop the device and still have an interrupt 299 * pending. We stop the APM before we sync the interrupts because we 300 * have to (see comment there). On the other hand, since the APM is 301 * stopped, we cannot access the HW (in particular not prph). 302 * So don't try to restock if the APM has been already stopped. 303 */ 304 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 305 return; 306 307 spin_lock_bh(&rxq->lock); 308 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { 309 __le32 *bd = (__le32 *)rxq->bd; 310 /* The overwritten rxb must be a used one */ 311 rxb = rxq->queue[rxq->write]; 312 BUG_ON(rxb && rxb->page); 313 314 /* Get next free Rx buffer, remove from free list */ 315 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, 316 list); 317 list_del(&rxb->list); 318 rxb->invalid = false; 319 320 /* Point to Rx buffer via next RBD in circular buffer */ 321 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); 322 rxq->queue[rxq->write] = rxb; 323 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 324 rxq->free_count--; 325 } 326 spin_unlock_bh(&rxq->lock); 327 328 /* If we've added more space for the firmware to place data, tell it. 329 * Increment device's write pointer in multiples of 8. */ 330 if (rxq->write_actual != (rxq->write & ~0x7)) { 331 spin_lock_bh(&rxq->lock); 332 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 333 spin_unlock_bh(&rxq->lock); 334 } 335 } 336 337 /* 338 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool 339 * 340 * If there are slots in the RX queue that need to be restocked, 341 * and we have free pre-allocated buffers, fill the ranks as much 342 * as we can, pulling from rx_free. 343 * 344 * This moves the 'write' index forward to catch up with 'processed', and 345 * also updates the memory address in the firmware to reference the new 346 * target buffer. 347 */ 348 static 349 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) 350 { 351 if (trans->trans_cfg->mq_rx_supported) 352 iwl_pcie_rxmq_restock(trans, rxq); 353 else 354 iwl_pcie_rxsq_restock(trans, rxq); 355 } 356 357 /* 358 * iwl_pcie_rx_alloc_page - allocates and returns a page. 359 * 360 */ 361 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, 362 u32 *offset, gfp_t priority) 363 { 364 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 365 unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size); 366 unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order; 367 struct page *page; 368 gfp_t gfp_mask = priority; 369 370 if (trans_pcie->rx_page_order > 0) 371 gfp_mask |= __GFP_COMP; 372 373 if (trans_pcie->alloc_page) { 374 spin_lock_bh(&trans_pcie->alloc_page_lock); 375 /* recheck */ 376 if (trans_pcie->alloc_page) { 377 *offset = trans_pcie->alloc_page_used; 378 page = trans_pcie->alloc_page; 379 trans_pcie->alloc_page_used += rbsize; 380 if (trans_pcie->alloc_page_used >= allocsize) 381 trans_pcie->alloc_page = NULL; 382 else 383 get_page(page); 384 spin_unlock_bh(&trans_pcie->alloc_page_lock); 385 return page; 386 } 387 spin_unlock_bh(&trans_pcie->alloc_page_lock); 388 } 389 390 /* Alloc a new receive buffer */ 391 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); 392 if (!page) { 393 if (net_ratelimit()) 394 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", 395 trans_pcie->rx_page_order); 396 /* 397 * Issue an error if we don't have enough pre-allocated 398 * buffers. 399 */ 400 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) 401 IWL_CRIT(trans, 402 "Failed to alloc_pages\n"); 403 return NULL; 404 } 405 406 if (2 * rbsize <= allocsize) { 407 spin_lock_bh(&trans_pcie->alloc_page_lock); 408 if (!trans_pcie->alloc_page) { 409 get_page(page); 410 trans_pcie->alloc_page = page; 411 trans_pcie->alloc_page_used = rbsize; 412 } 413 spin_unlock_bh(&trans_pcie->alloc_page_lock); 414 } 415 416 *offset = 0; 417 return page; 418 } 419 420 /* 421 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD 422 * 423 * A used RBD is an Rx buffer that has been given to the stack. To use it again 424 * a page must be allocated and the RBD must point to the page. This function 425 * doesn't change the HW pointer but handles the list of pages that is used by 426 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 427 * allocated buffers. 428 */ 429 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, 430 struct iwl_rxq *rxq) 431 { 432 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 433 struct iwl_rx_mem_buffer *rxb; 434 struct page *page; 435 436 while (1) { 437 unsigned int offset; 438 439 spin_lock_bh(&rxq->lock); 440 if (list_empty(&rxq->rx_used)) { 441 spin_unlock_bh(&rxq->lock); 442 return; 443 } 444 spin_unlock_bh(&rxq->lock); 445 446 page = iwl_pcie_rx_alloc_page(trans, &offset, priority); 447 if (!page) 448 return; 449 450 spin_lock_bh(&rxq->lock); 451 452 if (list_empty(&rxq->rx_used)) { 453 spin_unlock_bh(&rxq->lock); 454 __free_pages(page, trans_pcie->rx_page_order); 455 return; 456 } 457 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, 458 list); 459 list_del(&rxb->list); 460 spin_unlock_bh(&rxq->lock); 461 462 BUG_ON(rxb->page); 463 rxb->page = page; 464 rxb->offset = offset; 465 /* Get physical address of the RB */ 466 rxb->page_dma = 467 dma_map_page(trans->dev, page, rxb->offset, 468 trans_pcie->rx_buf_bytes, 469 DMA_FROM_DEVICE); 470 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 471 rxb->page = NULL; 472 spin_lock_bh(&rxq->lock); 473 list_add(&rxb->list, &rxq->rx_used); 474 spin_unlock_bh(&rxq->lock); 475 __free_pages(page, trans_pcie->rx_page_order); 476 return; 477 } 478 479 spin_lock_bh(&rxq->lock); 480 481 list_add_tail(&rxb->list, &rxq->rx_free); 482 rxq->free_count++; 483 484 spin_unlock_bh(&rxq->lock); 485 } 486 } 487 488 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) 489 { 490 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 491 int i; 492 493 if (!trans_pcie->rx_pool) 494 return; 495 496 for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) { 497 if (!trans_pcie->rx_pool[i].page) 498 continue; 499 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, 500 trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE); 501 __free_pages(trans_pcie->rx_pool[i].page, 502 trans_pcie->rx_page_order); 503 trans_pcie->rx_pool[i].page = NULL; 504 } 505 } 506 507 /* 508 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues 509 * 510 * Allocates for each received request 8 pages 511 * Called as a scheduled work item. 512 */ 513 static void iwl_pcie_rx_allocator(struct iwl_trans *trans) 514 { 515 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 516 struct iwl_rb_allocator *rba = &trans_pcie->rba; 517 struct list_head local_empty; 518 int pending = atomic_read(&rba->req_pending); 519 520 IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending); 521 522 /* If we were scheduled - there is at least one request */ 523 spin_lock_bh(&rba->lock); 524 /* swap out the rba->rbd_empty to a local list */ 525 list_replace_init(&rba->rbd_empty, &local_empty); 526 spin_unlock_bh(&rba->lock); 527 528 while (pending) { 529 int i; 530 LIST_HEAD(local_allocated); 531 gfp_t gfp_mask = GFP_KERNEL; 532 533 /* Do not post a warning if there are only a few requests */ 534 if (pending < RX_PENDING_WATERMARK) 535 gfp_mask |= __GFP_NOWARN; 536 537 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { 538 struct iwl_rx_mem_buffer *rxb; 539 struct page *page; 540 541 /* List should never be empty - each reused RBD is 542 * returned to the list, and initial pool covers any 543 * possible gap between the time the page is allocated 544 * to the time the RBD is added. 545 */ 546 BUG_ON(list_empty(&local_empty)); 547 /* Get the first rxb from the rbd list */ 548 rxb = list_first_entry(&local_empty, 549 struct iwl_rx_mem_buffer, list); 550 BUG_ON(rxb->page); 551 552 /* Alloc a new receive buffer */ 553 page = iwl_pcie_rx_alloc_page(trans, &rxb->offset, 554 gfp_mask); 555 if (!page) 556 continue; 557 rxb->page = page; 558 559 /* Get physical address of the RB */ 560 rxb->page_dma = dma_map_page(trans->dev, page, 561 rxb->offset, 562 trans_pcie->rx_buf_bytes, 563 DMA_FROM_DEVICE); 564 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 565 rxb->page = NULL; 566 __free_pages(page, trans_pcie->rx_page_order); 567 continue; 568 } 569 570 /* move the allocated entry to the out list */ 571 list_move(&rxb->list, &local_allocated); 572 i++; 573 } 574 575 atomic_dec(&rba->req_pending); 576 pending--; 577 578 if (!pending) { 579 pending = atomic_read(&rba->req_pending); 580 if (pending) 581 IWL_DEBUG_TPT(trans, 582 "Got more pending allocation requests = %d\n", 583 pending); 584 } 585 586 spin_lock_bh(&rba->lock); 587 /* add the allocated rbds to the allocator allocated list */ 588 list_splice_tail(&local_allocated, &rba->rbd_allocated); 589 /* get more empty RBDs for current pending requests */ 590 list_splice_tail_init(&rba->rbd_empty, &local_empty); 591 spin_unlock_bh(&rba->lock); 592 593 atomic_inc(&rba->req_ready); 594 595 } 596 597 spin_lock_bh(&rba->lock); 598 /* return unused rbds to the allocator empty list */ 599 list_splice_tail(&local_empty, &rba->rbd_empty); 600 spin_unlock_bh(&rba->lock); 601 602 IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__); 603 } 604 605 /* 606 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages 607 .* 608 .* Called by queue when the queue posted allocation request and 609 * has freed 8 RBDs in order to restock itself. 610 * This function directly moves the allocated RBs to the queue's ownership 611 * and updates the relevant counters. 612 */ 613 static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, 614 struct iwl_rxq *rxq) 615 { 616 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 617 struct iwl_rb_allocator *rba = &trans_pcie->rba; 618 int i; 619 620 lockdep_assert_held(&rxq->lock); 621 622 /* 623 * atomic_dec_if_positive returns req_ready - 1 for any scenario. 624 * If req_ready is 0 atomic_dec_if_positive will return -1 and this 625 * function will return early, as there are no ready requests. 626 * atomic_dec_if_positive will perofrm the *actual* decrement only if 627 * req_ready > 0, i.e. - there are ready requests and the function 628 * hands one request to the caller. 629 */ 630 if (atomic_dec_if_positive(&rba->req_ready) < 0) 631 return; 632 633 spin_lock(&rba->lock); 634 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { 635 /* Get next free Rx buffer, remove it from free list */ 636 struct iwl_rx_mem_buffer *rxb = 637 list_first_entry(&rba->rbd_allocated, 638 struct iwl_rx_mem_buffer, list); 639 640 list_move(&rxb->list, &rxq->rx_free); 641 } 642 spin_unlock(&rba->lock); 643 644 rxq->used_count -= RX_CLAIM_REQ_ALLOC; 645 rxq->free_count += RX_CLAIM_REQ_ALLOC; 646 } 647 648 void iwl_pcie_rx_allocator_work(struct work_struct *data) 649 { 650 struct iwl_rb_allocator *rba_p = 651 container_of(data, struct iwl_rb_allocator, rx_alloc); 652 struct iwl_trans_pcie *trans_pcie = 653 container_of(rba_p, struct iwl_trans_pcie, rba); 654 655 iwl_pcie_rx_allocator(trans_pcie->trans); 656 } 657 658 static int iwl_pcie_free_bd_size(struct iwl_trans *trans) 659 { 660 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 661 return sizeof(struct iwl_rx_transfer_desc); 662 663 return trans->trans_cfg->mq_rx_supported ? 664 sizeof(__le64) : sizeof(__le32); 665 } 666 667 static int iwl_pcie_used_bd_size(struct iwl_trans *trans) 668 { 669 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 670 return sizeof(struct iwl_rx_completion_desc_bz); 671 672 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 673 return sizeof(struct iwl_rx_completion_desc); 674 675 return sizeof(__le32); 676 } 677 678 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, 679 struct iwl_rxq *rxq) 680 { 681 int free_size = iwl_pcie_free_bd_size(trans); 682 683 if (rxq->bd) 684 dma_free_coherent(trans->dev, 685 free_size * rxq->queue_size, 686 rxq->bd, rxq->bd_dma); 687 rxq->bd_dma = 0; 688 rxq->bd = NULL; 689 690 rxq->rb_stts_dma = 0; 691 rxq->rb_stts = NULL; 692 693 if (rxq->used_bd) 694 dma_free_coherent(trans->dev, 695 iwl_pcie_used_bd_size(trans) * 696 rxq->queue_size, 697 rxq->used_bd, rxq->used_bd_dma); 698 rxq->used_bd_dma = 0; 699 rxq->used_bd = NULL; 700 } 701 702 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, 703 struct iwl_rxq *rxq) 704 { 705 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 706 struct device *dev = trans->dev; 707 int i; 708 int free_size; 709 bool use_rx_td = (trans->trans_cfg->device_family >= 710 IWL_DEVICE_FAMILY_AX210); 711 size_t rb_stts_size = use_rx_td ? sizeof(__le16) : 712 sizeof(struct iwl_rb_status); 713 714 spin_lock_init(&rxq->lock); 715 if (trans->trans_cfg->mq_rx_supported) 716 rxq->queue_size = trans->cfg->num_rbds; 717 else 718 rxq->queue_size = RX_QUEUE_SIZE; 719 720 free_size = iwl_pcie_free_bd_size(trans); 721 722 /* 723 * Allocate the circular buffer of Read Buffer Descriptors 724 * (RBDs) 725 */ 726 rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size, 727 &rxq->bd_dma, GFP_KERNEL); 728 if (!rxq->bd) 729 goto err; 730 731 if (trans->trans_cfg->mq_rx_supported) { 732 rxq->used_bd = dma_alloc_coherent(dev, 733 iwl_pcie_used_bd_size(trans) * 734 rxq->queue_size, 735 &rxq->used_bd_dma, 736 GFP_KERNEL); 737 if (!rxq->used_bd) 738 goto err; 739 } 740 741 rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size; 742 rxq->rb_stts_dma = 743 trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size; 744 745 return 0; 746 747 err: 748 for (i = 0; i < trans->num_rx_queues; i++) { 749 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 750 751 iwl_pcie_free_rxq_dma(trans, rxq); 752 } 753 754 return -ENOMEM; 755 } 756 757 static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 758 { 759 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 760 struct iwl_rb_allocator *rba = &trans_pcie->rba; 761 int i, ret; 762 size_t rb_stts_size = trans->trans_cfg->device_family >= 763 IWL_DEVICE_FAMILY_AX210 ? 764 sizeof(__le16) : sizeof(struct iwl_rb_status); 765 766 if (WARN_ON(trans_pcie->rxq)) 767 return -EINVAL; 768 769 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), 770 GFP_KERNEL); 771 trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs), 772 sizeof(trans_pcie->rx_pool[0]), 773 GFP_KERNEL); 774 trans_pcie->global_table = 775 kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs), 776 sizeof(trans_pcie->global_table[0]), 777 GFP_KERNEL); 778 if (!trans_pcie->rxq || !trans_pcie->rx_pool || 779 !trans_pcie->global_table) { 780 ret = -ENOMEM; 781 goto err; 782 } 783 784 spin_lock_init(&rba->lock); 785 786 /* 787 * Allocate the driver's pointer to receive buffer status. 788 * Allocate for all queues continuously (HW requirement). 789 */ 790 trans_pcie->base_rb_stts = 791 dma_alloc_coherent(trans->dev, 792 rb_stts_size * trans->num_rx_queues, 793 &trans_pcie->base_rb_stts_dma, 794 GFP_KERNEL); 795 if (!trans_pcie->base_rb_stts) { 796 ret = -ENOMEM; 797 goto err; 798 } 799 800 for (i = 0; i < trans->num_rx_queues; i++) { 801 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 802 803 rxq->id = i; 804 ret = iwl_pcie_alloc_rxq_dma(trans, rxq); 805 if (ret) 806 goto err; 807 } 808 return 0; 809 810 err: 811 if (trans_pcie->base_rb_stts) { 812 dma_free_coherent(trans->dev, 813 rb_stts_size * trans->num_rx_queues, 814 trans_pcie->base_rb_stts, 815 trans_pcie->base_rb_stts_dma); 816 trans_pcie->base_rb_stts = NULL; 817 trans_pcie->base_rb_stts_dma = 0; 818 } 819 kfree(trans_pcie->rx_pool); 820 trans_pcie->rx_pool = NULL; 821 kfree(trans_pcie->global_table); 822 trans_pcie->global_table = NULL; 823 kfree(trans_pcie->rxq); 824 trans_pcie->rxq = NULL; 825 826 return ret; 827 } 828 829 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) 830 { 831 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 832 u32 rb_size; 833 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ 834 835 switch (trans_pcie->rx_buf_size) { 836 case IWL_AMSDU_4K: 837 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 838 break; 839 case IWL_AMSDU_8K: 840 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; 841 break; 842 case IWL_AMSDU_12K: 843 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K; 844 break; 845 default: 846 WARN_ON(1); 847 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 848 } 849 850 if (!iwl_trans_grab_nic_access(trans)) 851 return; 852 853 /* Stop Rx DMA */ 854 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 855 /* reset and flush pointers */ 856 iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); 857 iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); 858 iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0); 859 860 /* Reset driver's Rx queue write index */ 861 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 862 863 /* Tell device where to find RBD circular buffer in DRAM */ 864 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 865 (u32)(rxq->bd_dma >> 8)); 866 867 /* Tell device where in DRAM to update its Rx status */ 868 iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, 869 rxq->rb_stts_dma >> 4); 870 871 /* Enable Rx DMA 872 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in 873 * the credit mechanism in 5000 HW RX FIFO 874 * Direct rx interrupts to hosts 875 * Rx buffer size 4 or 8k or 12k 876 * RB timeout 0x10 877 * 256 RBDs 878 */ 879 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 880 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 881 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | 882 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 883 rb_size | 884 (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | 885 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 886 887 iwl_trans_release_nic_access(trans); 888 889 /* Set interrupt coalescing timer to default (2048 usecs) */ 890 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 891 892 /* W/A for interrupt coalescing bug in 7260 and 3160 */ 893 if (trans->cfg->host_interrupt_operation_mode) 894 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); 895 } 896 897 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) 898 { 899 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 900 u32 rb_size, enabled = 0; 901 int i; 902 903 switch (trans_pcie->rx_buf_size) { 904 case IWL_AMSDU_2K: 905 rb_size = RFH_RXF_DMA_RB_SIZE_2K; 906 break; 907 case IWL_AMSDU_4K: 908 rb_size = RFH_RXF_DMA_RB_SIZE_4K; 909 break; 910 case IWL_AMSDU_8K: 911 rb_size = RFH_RXF_DMA_RB_SIZE_8K; 912 break; 913 case IWL_AMSDU_12K: 914 rb_size = RFH_RXF_DMA_RB_SIZE_12K; 915 break; 916 default: 917 WARN_ON(1); 918 rb_size = RFH_RXF_DMA_RB_SIZE_4K; 919 } 920 921 if (!iwl_trans_grab_nic_access(trans)) 922 return; 923 924 /* Stop Rx DMA */ 925 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0); 926 /* disable free amd used rx queue operation */ 927 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0); 928 929 for (i = 0; i < trans->num_rx_queues; i++) { 930 /* Tell device where to find RBD free table in DRAM */ 931 iwl_write_prph64_no_grab(trans, 932 RFH_Q_FRBDCB_BA_LSB(i), 933 trans_pcie->rxq[i].bd_dma); 934 /* Tell device where to find RBD used table in DRAM */ 935 iwl_write_prph64_no_grab(trans, 936 RFH_Q_URBDCB_BA_LSB(i), 937 trans_pcie->rxq[i].used_bd_dma); 938 /* Tell device where in DRAM to update its Rx status */ 939 iwl_write_prph64_no_grab(trans, 940 RFH_Q_URBD_STTS_WPTR_LSB(i), 941 trans_pcie->rxq[i].rb_stts_dma); 942 /* Reset device indice tables */ 943 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0); 944 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0); 945 iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0); 946 947 enabled |= BIT(i) | BIT(i + 16); 948 } 949 950 /* 951 * Enable Rx DMA 952 * Rx buffer size 4 or 8k or 12k 953 * Min RB size 4 or 8 954 * Drop frames that exceed RB size 955 * 512 RBDs 956 */ 957 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 958 RFH_DMA_EN_ENABLE_VAL | rb_size | 959 RFH_RXF_DMA_MIN_RB_4_8 | 960 RFH_RXF_DMA_DROP_TOO_LARGE_MASK | 961 RFH_RXF_DMA_RBDCB_SIZE_512); 962 963 /* 964 * Activate DMA snooping. 965 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe 966 * Default queue is 0 967 */ 968 iwl_write_prph_no_grab(trans, RFH_GEN_CFG, 969 RFH_GEN_CFG_RFH_DMA_SNOOP | 970 RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) | 971 RFH_GEN_CFG_SERVICE_DMA_SNOOP | 972 RFH_GEN_CFG_VAL(RB_CHUNK_SIZE, 973 trans->trans_cfg->integrated ? 974 RFH_GEN_CFG_RB_CHUNK_SIZE_64 : 975 RFH_GEN_CFG_RB_CHUNK_SIZE_128)); 976 /* Enable the relevant rx queues */ 977 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled); 978 979 iwl_trans_release_nic_access(trans); 980 981 /* Set interrupt coalescing timer to default (2048 usecs) */ 982 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 983 } 984 985 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) 986 { 987 lockdep_assert_held(&rxq->lock); 988 989 INIT_LIST_HEAD(&rxq->rx_free); 990 INIT_LIST_HEAD(&rxq->rx_used); 991 rxq->free_count = 0; 992 rxq->used_count = 0; 993 } 994 995 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget); 996 997 static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget) 998 { 999 struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); 1000 struct iwl_trans_pcie *trans_pcie; 1001 struct iwl_trans *trans; 1002 int ret; 1003 1004 trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); 1005 trans = trans_pcie->trans; 1006 1007 ret = iwl_pcie_rx_handle(trans, rxq->id, budget); 1008 1009 IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", 1010 rxq->id, ret, budget); 1011 1012 if (ret < budget) { 1013 spin_lock(&trans_pcie->irq_lock); 1014 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1015 _iwl_enable_interrupts(trans); 1016 spin_unlock(&trans_pcie->irq_lock); 1017 1018 napi_complete_done(&rxq->napi, ret); 1019 } 1020 1021 return ret; 1022 } 1023 1024 static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget) 1025 { 1026 struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); 1027 struct iwl_trans_pcie *trans_pcie; 1028 struct iwl_trans *trans; 1029 int ret; 1030 1031 trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); 1032 trans = trans_pcie->trans; 1033 1034 ret = iwl_pcie_rx_handle(trans, rxq->id, budget); 1035 IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret, 1036 budget); 1037 1038 if (ret < budget) { 1039 int irq_line = rxq->id; 1040 1041 /* FIRST_RSS is shared with line 0 */ 1042 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS && 1043 rxq->id == 1) 1044 irq_line = 0; 1045 1046 spin_lock(&trans_pcie->irq_lock); 1047 iwl_pcie_clear_irq(trans, irq_line); 1048 spin_unlock(&trans_pcie->irq_lock); 1049 1050 napi_complete_done(&rxq->napi, ret); 1051 } 1052 1053 return ret; 1054 } 1055 1056 static int _iwl_pcie_rx_init(struct iwl_trans *trans) 1057 { 1058 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1059 struct iwl_rxq *def_rxq; 1060 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1061 int i, err, queue_size, allocator_pool_size, num_alloc; 1062 1063 if (!trans_pcie->rxq) { 1064 err = iwl_pcie_rx_alloc(trans); 1065 if (err) 1066 return err; 1067 } 1068 def_rxq = trans_pcie->rxq; 1069 1070 cancel_work_sync(&rba->rx_alloc); 1071 1072 spin_lock_bh(&rba->lock); 1073 atomic_set(&rba->req_pending, 0); 1074 atomic_set(&rba->req_ready, 0); 1075 INIT_LIST_HEAD(&rba->rbd_allocated); 1076 INIT_LIST_HEAD(&rba->rbd_empty); 1077 spin_unlock_bh(&rba->lock); 1078 1079 /* free all first - we overwrite everything here */ 1080 iwl_pcie_free_rbs_pool(trans); 1081 1082 for (i = 0; i < RX_QUEUE_SIZE; i++) 1083 def_rxq->queue[i] = NULL; 1084 1085 for (i = 0; i < trans->num_rx_queues; i++) { 1086 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 1087 1088 spin_lock_bh(&rxq->lock); 1089 /* 1090 * Set read write pointer to reflect that we have processed 1091 * and used all buffers, but have not restocked the Rx queue 1092 * with fresh buffers 1093 */ 1094 rxq->read = 0; 1095 rxq->write = 0; 1096 rxq->write_actual = 0; 1097 memset(rxq->rb_stts, 0, 1098 (trans->trans_cfg->device_family >= 1099 IWL_DEVICE_FAMILY_AX210) ? 1100 sizeof(__le16) : sizeof(struct iwl_rb_status)); 1101 1102 iwl_pcie_rx_init_rxb_lists(rxq); 1103 1104 spin_unlock_bh(&rxq->lock); 1105 1106 if (!rxq->napi.poll) { 1107 int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll; 1108 1109 if (trans_pcie->msix_enabled) 1110 poll = iwl_pcie_napi_poll_msix; 1111 1112 netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, 1113 poll); 1114 napi_enable(&rxq->napi); 1115 } 1116 1117 } 1118 1119 /* move the pool to the default queue and allocator ownerships */ 1120 queue_size = trans->trans_cfg->mq_rx_supported ? 1121 trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE; 1122 allocator_pool_size = trans->num_rx_queues * 1123 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); 1124 num_alloc = queue_size + allocator_pool_size; 1125 1126 for (i = 0; i < num_alloc; i++) { 1127 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; 1128 1129 if (i < allocator_pool_size) 1130 list_add(&rxb->list, &rba->rbd_empty); 1131 else 1132 list_add(&rxb->list, &def_rxq->rx_used); 1133 trans_pcie->global_table[i] = rxb; 1134 rxb->vid = (u16)(i + 1); 1135 rxb->invalid = true; 1136 } 1137 1138 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); 1139 1140 return 0; 1141 } 1142 1143 int iwl_pcie_rx_init(struct iwl_trans *trans) 1144 { 1145 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1146 int ret = _iwl_pcie_rx_init(trans); 1147 1148 if (ret) 1149 return ret; 1150 1151 if (trans->trans_cfg->mq_rx_supported) 1152 iwl_pcie_rx_mq_hw_init(trans); 1153 else 1154 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); 1155 1156 iwl_pcie_rxq_restock(trans, trans_pcie->rxq); 1157 1158 spin_lock_bh(&trans_pcie->rxq->lock); 1159 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq); 1160 spin_unlock_bh(&trans_pcie->rxq->lock); 1161 1162 return 0; 1163 } 1164 1165 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans) 1166 { 1167 /* Set interrupt coalescing timer to default (2048 usecs) */ 1168 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 1169 1170 /* 1171 * We don't configure the RFH. 1172 * Restock will be done at alive, after firmware configured the RFH. 1173 */ 1174 return _iwl_pcie_rx_init(trans); 1175 } 1176 1177 void iwl_pcie_rx_free(struct iwl_trans *trans) 1178 { 1179 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1180 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1181 int i; 1182 size_t rb_stts_size = trans->trans_cfg->device_family >= 1183 IWL_DEVICE_FAMILY_AX210 ? 1184 sizeof(__le16) : sizeof(struct iwl_rb_status); 1185 1186 /* 1187 * if rxq is NULL, it means that nothing has been allocated, 1188 * exit now 1189 */ 1190 if (!trans_pcie->rxq) { 1191 IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); 1192 return; 1193 } 1194 1195 cancel_work_sync(&rba->rx_alloc); 1196 1197 iwl_pcie_free_rbs_pool(trans); 1198 1199 if (trans_pcie->base_rb_stts) { 1200 dma_free_coherent(trans->dev, 1201 rb_stts_size * trans->num_rx_queues, 1202 trans_pcie->base_rb_stts, 1203 trans_pcie->base_rb_stts_dma); 1204 trans_pcie->base_rb_stts = NULL; 1205 trans_pcie->base_rb_stts_dma = 0; 1206 } 1207 1208 for (i = 0; i < trans->num_rx_queues; i++) { 1209 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 1210 1211 iwl_pcie_free_rxq_dma(trans, rxq); 1212 1213 if (rxq->napi.poll) { 1214 napi_disable(&rxq->napi); 1215 netif_napi_del(&rxq->napi); 1216 } 1217 } 1218 kfree(trans_pcie->rx_pool); 1219 kfree(trans_pcie->global_table); 1220 kfree(trans_pcie->rxq); 1221 1222 if (trans_pcie->alloc_page) 1223 __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order); 1224 } 1225 1226 static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, 1227 struct iwl_rb_allocator *rba) 1228 { 1229 spin_lock(&rba->lock); 1230 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 1231 spin_unlock(&rba->lock); 1232 } 1233 1234 /* 1235 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs 1236 * 1237 * Called when a RBD can be reused. The RBD is transferred to the allocator. 1238 * When there are 2 empty RBDs - a request for allocation is posted 1239 */ 1240 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, 1241 struct iwl_rx_mem_buffer *rxb, 1242 struct iwl_rxq *rxq, bool emergency) 1243 { 1244 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1245 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1246 1247 /* Move the RBD to the used list, will be moved to allocator in batches 1248 * before claiming or posting a request*/ 1249 list_add_tail(&rxb->list, &rxq->rx_used); 1250 1251 if (unlikely(emergency)) 1252 return; 1253 1254 /* Count the allocator owned RBDs */ 1255 rxq->used_count++; 1256 1257 /* If we have RX_POST_REQ_ALLOC new released rx buffers - 1258 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is 1259 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, 1260 * after but we still need to post another request. 1261 */ 1262 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { 1263 /* Move the 2 RBDs to the allocator ownership. 1264 Allocator has another 6 from pool for the request completion*/ 1265 iwl_pcie_rx_move_to_allocator(rxq, rba); 1266 1267 atomic_inc(&rba->req_pending); 1268 queue_work(rba->alloc_wq, &rba->rx_alloc); 1269 } 1270 } 1271 1272 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, 1273 struct iwl_rxq *rxq, 1274 struct iwl_rx_mem_buffer *rxb, 1275 bool emergency, 1276 int i) 1277 { 1278 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1279 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; 1280 bool page_stolen = false; 1281 int max_len = trans_pcie->rx_buf_bytes; 1282 u32 offset = 0; 1283 1284 if (WARN_ON(!rxb)) 1285 return; 1286 1287 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); 1288 1289 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { 1290 struct iwl_rx_packet *pkt; 1291 bool reclaim; 1292 int len; 1293 struct iwl_rx_cmd_buffer rxcb = { 1294 ._offset = rxb->offset + offset, 1295 ._rx_page_order = trans_pcie->rx_page_order, 1296 ._page = rxb->page, 1297 ._page_stolen = false, 1298 .truesize = max_len, 1299 }; 1300 1301 pkt = rxb_addr(&rxcb); 1302 1303 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { 1304 IWL_DEBUG_RX(trans, 1305 "Q %d: RB end marker at offset %d\n", 1306 rxq->id, offset); 1307 break; 1308 } 1309 1310 WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> 1311 FH_RSCSR_RXQ_POS != rxq->id, 1312 "frame on invalid queue - is on %d and indicates %d\n", 1313 rxq->id, 1314 (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> 1315 FH_RSCSR_RXQ_POS); 1316 1317 IWL_DEBUG_RX(trans, 1318 "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", 1319 rxq->id, offset, 1320 iwl_get_cmd_string(trans, 1321 WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)), 1322 pkt->hdr.group_id, pkt->hdr.cmd, 1323 le16_to_cpu(pkt->hdr.sequence)); 1324 1325 len = iwl_rx_packet_len(pkt); 1326 len += sizeof(u32); /* account for status word */ 1327 1328 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); 1329 1330 /* check that what the device tells us made sense */ 1331 if (len < sizeof(*pkt) || offset > max_len) 1332 break; 1333 1334 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); 1335 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); 1336 1337 /* Reclaim a command buffer only if this packet is a response 1338 * to a (driver-originated) command. 1339 * If the packet (e.g. Rx frame) originated from uCode, 1340 * there is no command buffer to reclaim. 1341 * Ucode should set SEQ_RX_FRAME bit if ucode-originated, 1342 * but apparently a few don't get set; catch them here. */ 1343 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); 1344 if (reclaim && !pkt->hdr.group_id) { 1345 int i; 1346 1347 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { 1348 if (trans_pcie->no_reclaim_cmds[i] == 1349 pkt->hdr.cmd) { 1350 reclaim = false; 1351 break; 1352 } 1353 } 1354 } 1355 1356 if (rxq->id == trans_pcie->def_rx_queue) 1357 iwl_op_mode_rx(trans->op_mode, &rxq->napi, 1358 &rxcb); 1359 else 1360 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, 1361 &rxcb, rxq->id); 1362 1363 /* 1364 * After here, we should always check rxcb._page_stolen, 1365 * if it is true then one of the handlers took the page. 1366 */ 1367 1368 if (reclaim) { 1369 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1370 int index = SEQ_TO_INDEX(sequence); 1371 int cmd_index = iwl_txq_get_cmd_index(txq, index); 1372 1373 kfree_sensitive(txq->entries[cmd_index].free_buf); 1374 txq->entries[cmd_index].free_buf = NULL; 1375 1376 /* Invoke any callbacks, transfer the buffer to caller, 1377 * and fire off the (possibly) blocking 1378 * iwl_trans_send_cmd() 1379 * as we reclaim the driver command queue */ 1380 if (!rxcb._page_stolen) 1381 iwl_pcie_hcmd_complete(trans, &rxcb); 1382 else 1383 IWL_WARN(trans, "Claim null rxb?\n"); 1384 } 1385 1386 page_stolen |= rxcb._page_stolen; 1387 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 1388 break; 1389 } 1390 1391 /* page was stolen from us -- free our reference */ 1392 if (page_stolen) { 1393 __free_pages(rxb->page, trans_pcie->rx_page_order); 1394 rxb->page = NULL; 1395 } 1396 1397 /* Reuse the page if possible. For notification packets and 1398 * SKBs that fail to Rx correctly, add them back into the 1399 * rx_free list for reuse later. */ 1400 if (rxb->page != NULL) { 1401 rxb->page_dma = 1402 dma_map_page(trans->dev, rxb->page, rxb->offset, 1403 trans_pcie->rx_buf_bytes, 1404 DMA_FROM_DEVICE); 1405 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 1406 /* 1407 * free the page(s) as well to not break 1408 * the invariant that the items on the used 1409 * list have no page(s) 1410 */ 1411 __free_pages(rxb->page, trans_pcie->rx_page_order); 1412 rxb->page = NULL; 1413 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1414 } else { 1415 list_add_tail(&rxb->list, &rxq->rx_free); 1416 rxq->free_count++; 1417 } 1418 } else 1419 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1420 } 1421 1422 static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, 1423 struct iwl_rxq *rxq, int i, 1424 bool *join) 1425 { 1426 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1427 struct iwl_rx_mem_buffer *rxb; 1428 u16 vid; 1429 1430 BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); 1431 BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4); 1432 1433 if (!trans->trans_cfg->mq_rx_supported) { 1434 rxb = rxq->queue[i]; 1435 rxq->queue[i] = NULL; 1436 return rxb; 1437 } 1438 1439 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { 1440 struct iwl_rx_completion_desc_bz *cd = rxq->used_bd; 1441 1442 vid = le16_to_cpu(cd[i].rbid); 1443 *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; 1444 } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 1445 struct iwl_rx_completion_desc *cd = rxq->used_bd; 1446 1447 vid = le16_to_cpu(cd[i].rbid); 1448 *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; 1449 } else { 1450 __le32 *cd = rxq->used_bd; 1451 1452 vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */ 1453 } 1454 1455 if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs)) 1456 goto out_err; 1457 1458 rxb = trans_pcie->global_table[vid - 1]; 1459 if (rxb->invalid) 1460 goto out_err; 1461 1462 IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); 1463 1464 rxb->invalid = true; 1465 1466 return rxb; 1467 1468 out_err: 1469 WARN(1, "Invalid rxb from HW %u\n", (u32)vid); 1470 iwl_force_nmi(trans); 1471 return NULL; 1472 } 1473 1474 /* 1475 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw 1476 */ 1477 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget) 1478 { 1479 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1480 struct iwl_rxq *rxq; 1481 u32 r, i, count = 0, handled = 0; 1482 bool emergency = false; 1483 1484 if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd)) 1485 return budget; 1486 1487 rxq = &trans_pcie->rxq[queue]; 1488 1489 restart: 1490 spin_lock(&rxq->lock); 1491 /* uCode's read index (stored in shared DRAM) indicates the last Rx 1492 * buffer that the driver may process (last buffer filled by ucode). */ 1493 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; 1494 i = rxq->read; 1495 1496 /* W/A 9000 device step A0 wrap-around bug */ 1497 r &= (rxq->queue_size - 1); 1498 1499 /* Rx interrupt, but nothing sent from uCode */ 1500 if (i == r) 1501 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); 1502 1503 while (i != r && ++handled < budget) { 1504 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1505 struct iwl_rx_mem_buffer *rxb; 1506 /* number of RBDs still waiting for page allocation */ 1507 u32 rb_pending_alloc = 1508 atomic_read(&trans_pcie->rba.req_pending) * 1509 RX_CLAIM_REQ_ALLOC; 1510 bool join = false; 1511 1512 if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && 1513 !emergency)) { 1514 iwl_pcie_rx_move_to_allocator(rxq, rba); 1515 emergency = true; 1516 IWL_DEBUG_TPT(trans, 1517 "RX path is in emergency. Pending allocations %d\n", 1518 rb_pending_alloc); 1519 } 1520 1521 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); 1522 1523 rxb = iwl_pcie_get_rxb(trans, rxq, i, &join); 1524 if (!rxb) 1525 goto out; 1526 1527 if (unlikely(join || rxq->next_rb_is_fragment)) { 1528 rxq->next_rb_is_fragment = join; 1529 /* 1530 * We can only get a multi-RB in the following cases: 1531 * - firmware issue, sending a too big notification 1532 * - sniffer mode with a large A-MSDU 1533 * - large MTU frames (>2k) 1534 * since the multi-RB functionality is limited to newer 1535 * hardware that cannot put multiple entries into a 1536 * single RB. 1537 * 1538 * Right now, the higher layers aren't set up to deal 1539 * with that, so discard all of these. 1540 */ 1541 list_add_tail(&rxb->list, &rxq->rx_free); 1542 rxq->free_count++; 1543 } else { 1544 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); 1545 } 1546 1547 i = (i + 1) & (rxq->queue_size - 1); 1548 1549 /* 1550 * If we have RX_CLAIM_REQ_ALLOC released rx buffers - 1551 * try to claim the pre-allocated buffers from the allocator. 1552 * If not ready - will try to reclaim next time. 1553 * There is no need to reschedule work - allocator exits only 1554 * on success 1555 */ 1556 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) 1557 iwl_pcie_rx_allocator_get(trans, rxq); 1558 1559 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { 1560 /* Add the remaining empty RBDs for allocator use */ 1561 iwl_pcie_rx_move_to_allocator(rxq, rba); 1562 } else if (emergency) { 1563 count++; 1564 if (count == 8) { 1565 count = 0; 1566 if (rb_pending_alloc < rxq->queue_size / 3) { 1567 IWL_DEBUG_TPT(trans, 1568 "RX path exited emergency. Pending allocations %d\n", 1569 rb_pending_alloc); 1570 emergency = false; 1571 } 1572 1573 rxq->read = i; 1574 spin_unlock(&rxq->lock); 1575 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1576 iwl_pcie_rxq_restock(trans, rxq); 1577 goto restart; 1578 } 1579 } 1580 } 1581 out: 1582 /* Backtrack one entry */ 1583 rxq->read = i; 1584 spin_unlock(&rxq->lock); 1585 1586 /* 1587 * handle a case where in emergency there are some unallocated RBDs. 1588 * those RBDs are in the used list, but are not tracked by the queue's 1589 * used_count which counts allocator owned RBDs. 1590 * unallocated emergency RBDs must be allocated on exit, otherwise 1591 * when called again the function may not be in emergency mode and 1592 * they will be handed to the allocator with no tracking in the RBD 1593 * allocator counters, which will lead to them never being claimed back 1594 * by the queue. 1595 * by allocating them here, they are now in the queue free list, and 1596 * will be restocked by the next call of iwl_pcie_rxq_restock. 1597 */ 1598 if (unlikely(emergency && count)) 1599 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1600 1601 iwl_pcie_rxq_restock(trans, rxq); 1602 1603 return handled; 1604 } 1605 1606 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) 1607 { 1608 u8 queue = entry->entry; 1609 struct msix_entry *entries = entry - queue; 1610 1611 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); 1612 } 1613 1614 /* 1615 * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw 1616 * This interrupt handler should be used with RSS queue only. 1617 */ 1618 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) 1619 { 1620 struct msix_entry *entry = dev_id; 1621 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); 1622 struct iwl_trans *trans = trans_pcie->trans; 1623 struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry]; 1624 1625 trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); 1626 1627 if (WARN_ON(entry->entry >= trans->num_rx_queues)) 1628 return IRQ_NONE; 1629 1630 if (!rxq) { 1631 if (net_ratelimit()) 1632 IWL_ERR(trans, 1633 "[%d] Got MSI-X interrupt before we have Rx queues\n", 1634 entry->entry); 1635 return IRQ_NONE; 1636 } 1637 1638 lock_map_acquire(&trans->sync_cmd_lockdep_map); 1639 IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); 1640 1641 local_bh_disable(); 1642 if (napi_schedule_prep(&rxq->napi)) 1643 __napi_schedule(&rxq->napi); 1644 else 1645 iwl_pcie_clear_irq(trans, entry->entry); 1646 local_bh_enable(); 1647 1648 lock_map_release(&trans->sync_cmd_lockdep_map); 1649 1650 return IRQ_HANDLED; 1651 } 1652 1653 /* 1654 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card 1655 */ 1656 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) 1657 { 1658 int i; 1659 1660 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 1661 if (trans->cfg->internal_wimax_coex && 1662 !trans->cfg->apmg_not_supported && 1663 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 1664 APMS_CLK_VAL_MRB_FUNC_MODE) || 1665 (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 1666 APMG_PS_CTRL_VAL_RESET_REQ))) { 1667 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1668 iwl_op_mode_wimax_active(trans->op_mode); 1669 wake_up(&trans->wait_command_queue); 1670 return; 1671 } 1672 1673 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 1674 if (!trans->txqs.txq[i]) 1675 continue; 1676 del_timer(&trans->txqs.txq[i]->stuck_timer); 1677 } 1678 1679 /* The STATUS_FW_ERROR bit is set in this function. This must happen 1680 * before we wake up the command caller, to ensure a proper cleanup. */ 1681 iwl_trans_fw_error(trans, false); 1682 1683 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1684 wake_up(&trans->wait_command_queue); 1685 } 1686 1687 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) 1688 { 1689 u32 inta; 1690 1691 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); 1692 1693 trace_iwlwifi_dev_irq(trans->dev); 1694 1695 /* Discover which interrupts are active/pending */ 1696 inta = iwl_read32(trans, CSR_INT); 1697 1698 /* the thread will service interrupts and re-enable them */ 1699 return inta; 1700 } 1701 1702 /* a device (PCI-E) page is 4096 bytes long */ 1703 #define ICT_SHIFT 12 1704 #define ICT_SIZE (1 << ICT_SHIFT) 1705 #define ICT_COUNT (ICT_SIZE / sizeof(u32)) 1706 1707 /* interrupt handler using ict table, with this interrupt driver will 1708 * stop using INTA register to get device's interrupt, reading this register 1709 * is expensive, device will write interrupts in ICT dram table, increment 1710 * index then will fire interrupt to driver, driver will OR all ICT table 1711 * entries from current index up to table entry with 0 value. the result is 1712 * the interrupt we need to service, driver will set the entries back to 0 and 1713 * set index. 1714 */ 1715 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) 1716 { 1717 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1718 u32 inta; 1719 u32 val = 0; 1720 u32 read; 1721 1722 trace_iwlwifi_dev_irq(trans->dev); 1723 1724 /* Ignore interrupt if there's nothing in NIC to service. 1725 * This may be due to IRQ shared with another device, 1726 * or due to sporadic interrupts thrown from our NIC. */ 1727 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); 1728 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); 1729 if (!read) 1730 return 0; 1731 1732 /* 1733 * Collect all entries up to the first 0, starting from ict_index; 1734 * note we already read at ict_index. 1735 */ 1736 do { 1737 val |= read; 1738 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", 1739 trans_pcie->ict_index, read); 1740 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; 1741 trans_pcie->ict_index = 1742 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); 1743 1744 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); 1745 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, 1746 read); 1747 } while (read); 1748 1749 /* We should not get this value, just ignore it. */ 1750 if (val == 0xffffffff) 1751 val = 0; 1752 1753 /* 1754 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit 1755 * (bit 15 before shifting it to 31) to clear when using interrupt 1756 * coalescing. fortunately, bits 18 and 19 stay set when this happens 1757 * so we use them to decide on the real state of the Rx bit. 1758 * In order words, bit 15 is set if bit 18 or bit 19 are set. 1759 */ 1760 if (val & 0xC0000) 1761 val |= 0x8000; 1762 1763 inta = (0xff & val) | ((0xff00 & val) << 16); 1764 return inta; 1765 } 1766 1767 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) 1768 { 1769 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1770 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1771 bool hw_rfkill, prev, report; 1772 1773 mutex_lock(&trans_pcie->mutex); 1774 prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); 1775 hw_rfkill = iwl_is_rfkill_set(trans); 1776 if (hw_rfkill) { 1777 set_bit(STATUS_RFKILL_OPMODE, &trans->status); 1778 set_bit(STATUS_RFKILL_HW, &trans->status); 1779 } 1780 if (trans_pcie->opmode_down) 1781 report = hw_rfkill; 1782 else 1783 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); 1784 1785 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", 1786 hw_rfkill ? "disable radio" : "enable radio"); 1787 1788 isr_stats->rfkill++; 1789 1790 if (prev != report) 1791 iwl_trans_pcie_rf_kill(trans, report); 1792 mutex_unlock(&trans_pcie->mutex); 1793 1794 if (hw_rfkill) { 1795 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, 1796 &trans->status)) 1797 IWL_DEBUG_RF_KILL(trans, 1798 "Rfkill while SYNC HCMD in flight\n"); 1799 wake_up(&trans->wait_command_queue); 1800 } else { 1801 clear_bit(STATUS_RFKILL_HW, &trans->status); 1802 if (trans_pcie->opmode_down) 1803 clear_bit(STATUS_RFKILL_OPMODE, &trans->status); 1804 } 1805 } 1806 1807 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) 1808 { 1809 struct iwl_trans *trans = dev_id; 1810 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1811 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1812 u32 inta = 0; 1813 u32 handled = 0; 1814 bool polling = false; 1815 1816 lock_map_acquire(&trans->sync_cmd_lockdep_map); 1817 1818 spin_lock_bh(&trans_pcie->irq_lock); 1819 1820 /* dram interrupt table not set yet, 1821 * use legacy interrupt. 1822 */ 1823 if (likely(trans_pcie->use_ict)) 1824 inta = iwl_pcie_int_cause_ict(trans); 1825 else 1826 inta = iwl_pcie_int_cause_non_ict(trans); 1827 1828 if (iwl_have_debug_level(IWL_DL_ISR)) { 1829 IWL_DEBUG_ISR(trans, 1830 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", 1831 inta, trans_pcie->inta_mask, 1832 iwl_read32(trans, CSR_INT_MASK), 1833 iwl_read32(trans, CSR_FH_INT_STATUS)); 1834 if (inta & (~trans_pcie->inta_mask)) 1835 IWL_DEBUG_ISR(trans, 1836 "We got a masked interrupt (0x%08x)\n", 1837 inta & (~trans_pcie->inta_mask)); 1838 } 1839 1840 inta &= trans_pcie->inta_mask; 1841 1842 /* 1843 * Ignore interrupt if there's nothing in NIC to service. 1844 * This may be due to IRQ shared with another device, 1845 * or due to sporadic interrupts thrown from our NIC. 1846 */ 1847 if (unlikely(!inta)) { 1848 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 1849 /* 1850 * Re-enable interrupts here since we don't 1851 * have anything to service 1852 */ 1853 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1854 _iwl_enable_interrupts(trans); 1855 spin_unlock_bh(&trans_pcie->irq_lock); 1856 lock_map_release(&trans->sync_cmd_lockdep_map); 1857 return IRQ_NONE; 1858 } 1859 1860 if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 1861 /* 1862 * Hardware disappeared. It might have 1863 * already raised an interrupt. 1864 */ 1865 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); 1866 spin_unlock_bh(&trans_pcie->irq_lock); 1867 goto out; 1868 } 1869 1870 /* Ack/clear/reset pending uCode interrupts. 1871 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1872 */ 1873 /* There is a hardware bug in the interrupt mask function that some 1874 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if 1875 * they are disabled in the CSR_INT_MASK register. Furthermore the 1876 * ICT interrupt handling mechanism has another bug that might cause 1877 * these unmasked interrupts fail to be detected. We workaround the 1878 * hardware bugs here by ACKing all the possible interrupts so that 1879 * interrupt coalescing can still be achieved. 1880 */ 1881 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); 1882 1883 if (iwl_have_debug_level(IWL_DL_ISR)) 1884 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", 1885 inta, iwl_read32(trans, CSR_INT_MASK)); 1886 1887 spin_unlock_bh(&trans_pcie->irq_lock); 1888 1889 /* Now service all interrupt bits discovered above. */ 1890 if (inta & CSR_INT_BIT_HW_ERR) { 1891 IWL_ERR(trans, "Hardware error detected. Restarting.\n"); 1892 1893 /* Tell the device to stop sending interrupts */ 1894 iwl_disable_interrupts(trans); 1895 1896 isr_stats->hw++; 1897 iwl_pcie_irq_handle_error(trans); 1898 1899 handled |= CSR_INT_BIT_HW_ERR; 1900 1901 goto out; 1902 } 1903 1904 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1905 if (inta & CSR_INT_BIT_SCD) { 1906 IWL_DEBUG_ISR(trans, 1907 "Scheduler finished to transmit the frame/frames.\n"); 1908 isr_stats->sch++; 1909 } 1910 1911 /* Alive notification via Rx interrupt will do the real work */ 1912 if (inta & CSR_INT_BIT_ALIVE) { 1913 IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 1914 isr_stats->alive++; 1915 if (trans->trans_cfg->gen2) { 1916 /* 1917 * We can restock, since firmware configured 1918 * the RFH 1919 */ 1920 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); 1921 } 1922 1923 handled |= CSR_INT_BIT_ALIVE; 1924 } 1925 1926 /* Safely ignore these bits for debug checks below */ 1927 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1928 1929 /* HW RF KILL switch toggled */ 1930 if (inta & CSR_INT_BIT_RF_KILL) { 1931 iwl_pcie_handle_rfkill_irq(trans); 1932 handled |= CSR_INT_BIT_RF_KILL; 1933 } 1934 1935 /* Chip got too hot and stopped itself */ 1936 if (inta & CSR_INT_BIT_CT_KILL) { 1937 IWL_ERR(trans, "Microcode CT kill error detected.\n"); 1938 isr_stats->ctkill++; 1939 handled |= CSR_INT_BIT_CT_KILL; 1940 } 1941 1942 /* Error detected by uCode */ 1943 if (inta & CSR_INT_BIT_SW_ERR) { 1944 IWL_ERR(trans, "Microcode SW error detected. " 1945 " Restarting 0x%X.\n", inta); 1946 isr_stats->sw++; 1947 iwl_pcie_irq_handle_error(trans); 1948 handled |= CSR_INT_BIT_SW_ERR; 1949 } 1950 1951 /* uCode wakes up after power-down sleep */ 1952 if (inta & CSR_INT_BIT_WAKEUP) { 1953 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 1954 iwl_pcie_rxq_check_wrptr(trans); 1955 iwl_pcie_txq_check_wrptrs(trans); 1956 1957 isr_stats->wakeup++; 1958 1959 handled |= CSR_INT_BIT_WAKEUP; 1960 } 1961 1962 /* All uCode command responses, including Tx command responses, 1963 * Rx "responses" (frame-received notification), and other 1964 * notifications from uCode come through here*/ 1965 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 1966 CSR_INT_BIT_RX_PERIODIC)) { 1967 IWL_DEBUG_ISR(trans, "Rx interrupt\n"); 1968 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1969 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1970 iwl_write32(trans, CSR_FH_INT_STATUS, 1971 CSR_FH_INT_RX_MASK); 1972 } 1973 if (inta & CSR_INT_BIT_RX_PERIODIC) { 1974 handled |= CSR_INT_BIT_RX_PERIODIC; 1975 iwl_write32(trans, 1976 CSR_INT, CSR_INT_BIT_RX_PERIODIC); 1977 } 1978 /* Sending RX interrupt require many steps to be done in the 1979 * device: 1980 * 1- write interrupt to current index in ICT table. 1981 * 2- dma RX frame. 1982 * 3- update RX shared data to indicate last write index. 1983 * 4- send interrupt. 1984 * This could lead to RX race, driver could receive RX interrupt 1985 * but the shared data changes does not reflect this; 1986 * periodic interrupt will detect any dangling Rx activity. 1987 */ 1988 1989 /* Disable periodic interrupt; we use it as just a one-shot. */ 1990 iwl_write8(trans, CSR_INT_PERIODIC_REG, 1991 CSR_INT_PERIODIC_DIS); 1992 1993 /* 1994 * Enable periodic interrupt in 8 msec only if we received 1995 * real RX interrupt (instead of just periodic int), to catch 1996 * any dangling Rx interrupt. If it was just the periodic 1997 * interrupt, there was no dangling Rx activity, and no need 1998 * to extend the periodic interrupt; one-shot is enough. 1999 */ 2000 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 2001 iwl_write8(trans, CSR_INT_PERIODIC_REG, 2002 CSR_INT_PERIODIC_ENA); 2003 2004 isr_stats->rx++; 2005 2006 local_bh_disable(); 2007 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) { 2008 polling = true; 2009 __napi_schedule(&trans_pcie->rxq[0].napi); 2010 } 2011 local_bh_enable(); 2012 } 2013 2014 /* This "Tx" DMA channel is used only for loading uCode */ 2015 if (inta & CSR_INT_BIT_FH_TX) { 2016 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); 2017 IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 2018 isr_stats->tx++; 2019 handled |= CSR_INT_BIT_FH_TX; 2020 /* Wake up uCode load routine, now that load is complete */ 2021 trans_pcie->ucode_write_complete = true; 2022 wake_up(&trans_pcie->ucode_write_waitq); 2023 /* Wake up IMR write routine, now that write to SRAM is complete */ 2024 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { 2025 trans_pcie->imr_status = IMR_D2S_COMPLETED; 2026 wake_up(&trans_pcie->ucode_write_waitq); 2027 } 2028 } 2029 2030 if (inta & ~handled) { 2031 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 2032 isr_stats->unhandled++; 2033 } 2034 2035 if (inta & ~(trans_pcie->inta_mask)) { 2036 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", 2037 inta & ~trans_pcie->inta_mask); 2038 } 2039 2040 if (!polling) { 2041 spin_lock_bh(&trans_pcie->irq_lock); 2042 /* only Re-enable all interrupt if disabled by irq */ 2043 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 2044 _iwl_enable_interrupts(trans); 2045 /* we are loading the firmware, enable FH_TX interrupt only */ 2046 else if (handled & CSR_INT_BIT_FH_TX) 2047 iwl_enable_fw_load_int(trans); 2048 /* Re-enable RF_KILL if it occurred */ 2049 else if (handled & CSR_INT_BIT_RF_KILL) 2050 iwl_enable_rfkill_int(trans); 2051 /* Re-enable the ALIVE / Rx interrupt if it occurred */ 2052 else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) 2053 iwl_enable_fw_load_int_ctx_info(trans); 2054 spin_unlock_bh(&trans_pcie->irq_lock); 2055 } 2056 2057 out: 2058 lock_map_release(&trans->sync_cmd_lockdep_map); 2059 return IRQ_HANDLED; 2060 } 2061 2062 /****************************************************************************** 2063 * 2064 * ICT functions 2065 * 2066 ******************************************************************************/ 2067 2068 /* Free dram table */ 2069 void iwl_pcie_free_ict(struct iwl_trans *trans) 2070 { 2071 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2072 2073 if (trans_pcie->ict_tbl) { 2074 dma_free_coherent(trans->dev, ICT_SIZE, 2075 trans_pcie->ict_tbl, 2076 trans_pcie->ict_tbl_dma); 2077 trans_pcie->ict_tbl = NULL; 2078 trans_pcie->ict_tbl_dma = 0; 2079 } 2080 } 2081 2082 /* 2083 * allocate dram shared table, it is an aligned memory 2084 * block of ICT_SIZE. 2085 * also reset all data related to ICT table interrupt. 2086 */ 2087 int iwl_pcie_alloc_ict(struct iwl_trans *trans) 2088 { 2089 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2090 2091 trans_pcie->ict_tbl = 2092 dma_alloc_coherent(trans->dev, ICT_SIZE, 2093 &trans_pcie->ict_tbl_dma, GFP_KERNEL); 2094 if (!trans_pcie->ict_tbl) 2095 return -ENOMEM; 2096 2097 /* just an API sanity check ... it is guaranteed to be aligned */ 2098 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { 2099 iwl_pcie_free_ict(trans); 2100 return -EINVAL; 2101 } 2102 2103 return 0; 2104 } 2105 2106 /* Device is going up inform it about using ICT interrupt table, 2107 * also we need to tell the driver to start using ICT interrupt. 2108 */ 2109 void iwl_pcie_reset_ict(struct iwl_trans *trans) 2110 { 2111 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2112 u32 val; 2113 2114 if (!trans_pcie->ict_tbl) 2115 return; 2116 2117 spin_lock_bh(&trans_pcie->irq_lock); 2118 _iwl_disable_interrupts(trans); 2119 2120 memset(trans_pcie->ict_tbl, 0, ICT_SIZE); 2121 2122 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; 2123 2124 val |= CSR_DRAM_INT_TBL_ENABLE | 2125 CSR_DRAM_INIT_TBL_WRAP_CHECK | 2126 CSR_DRAM_INIT_TBL_WRITE_POINTER; 2127 2128 IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); 2129 2130 iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); 2131 trans_pcie->use_ict = true; 2132 trans_pcie->ict_index = 0; 2133 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); 2134 _iwl_enable_interrupts(trans); 2135 spin_unlock_bh(&trans_pcie->irq_lock); 2136 } 2137 2138 /* Device is going down disable ict interrupt usage */ 2139 void iwl_pcie_disable_ict(struct iwl_trans *trans) 2140 { 2141 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2142 2143 spin_lock_bh(&trans_pcie->irq_lock); 2144 trans_pcie->use_ict = false; 2145 spin_unlock_bh(&trans_pcie->irq_lock); 2146 } 2147 2148 irqreturn_t iwl_pcie_isr(int irq, void *data) 2149 { 2150 struct iwl_trans *trans = data; 2151 2152 if (!trans) 2153 return IRQ_NONE; 2154 2155 /* Disable (but don't clear!) interrupts here to avoid 2156 * back-to-back ISRs and sporadic interrupts from our NIC. 2157 * If we have something to service, the tasklet will re-enable ints. 2158 * If we *don't* have something, we'll re-enable before leaving here. 2159 */ 2160 iwl_write32(trans, CSR_INT_MASK, 0x00000000); 2161 2162 return IRQ_WAKE_THREAD; 2163 } 2164 2165 irqreturn_t iwl_pcie_msix_isr(int irq, void *data) 2166 { 2167 return IRQ_WAKE_THREAD; 2168 } 2169 2170 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) 2171 { 2172 struct msix_entry *entry = dev_id; 2173 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); 2174 struct iwl_trans *trans = trans_pcie->trans; 2175 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2176 u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE; 2177 u32 inta_fh, inta_hw; 2178 bool polling = false; 2179 bool sw_err; 2180 2181 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) 2182 inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0; 2183 2184 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) 2185 inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1; 2186 2187 lock_map_acquire(&trans->sync_cmd_lockdep_map); 2188 2189 spin_lock_bh(&trans_pcie->irq_lock); 2190 inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD); 2191 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); 2192 /* 2193 * Clear causes registers to avoid being handling the same cause. 2194 */ 2195 iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk); 2196 iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); 2197 spin_unlock_bh(&trans_pcie->irq_lock); 2198 2199 trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw); 2200 2201 if (unlikely(!(inta_fh | inta_hw))) { 2202 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 2203 lock_map_release(&trans->sync_cmd_lockdep_map); 2204 return IRQ_NONE; 2205 } 2206 2207 if (iwl_have_debug_level(IWL_DL_ISR)) { 2208 IWL_DEBUG_ISR(trans, 2209 "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", 2210 entry->entry, inta_fh, trans_pcie->fh_mask, 2211 iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); 2212 if (inta_fh & ~trans_pcie->fh_mask) 2213 IWL_DEBUG_ISR(trans, 2214 "We got a masked interrupt (0x%08x)\n", 2215 inta_fh & ~trans_pcie->fh_mask); 2216 } 2217 2218 inta_fh &= trans_pcie->fh_mask; 2219 2220 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && 2221 inta_fh & MSIX_FH_INT_CAUSES_Q0) { 2222 local_bh_disable(); 2223 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) { 2224 polling = true; 2225 __napi_schedule(&trans_pcie->rxq[0].napi); 2226 } 2227 local_bh_enable(); 2228 } 2229 2230 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && 2231 inta_fh & MSIX_FH_INT_CAUSES_Q1) { 2232 local_bh_disable(); 2233 if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) { 2234 polling = true; 2235 __napi_schedule(&trans_pcie->rxq[1].napi); 2236 } 2237 local_bh_enable(); 2238 } 2239 2240 /* This "Tx" DMA channel is used only for loading uCode */ 2241 if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM && 2242 trans_pcie->imr_status == IMR_D2S_REQUESTED) { 2243 IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n"); 2244 isr_stats->tx++; 2245 2246 /* Wake up IMR routine once write to SRAM is complete */ 2247 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { 2248 trans_pcie->imr_status = IMR_D2S_COMPLETED; 2249 wake_up(&trans_pcie->ucode_write_waitq); 2250 } 2251 } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { 2252 IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 2253 isr_stats->tx++; 2254 /* 2255 * Wake up uCode load routine, 2256 * now that load is complete 2257 */ 2258 trans_pcie->ucode_write_complete = true; 2259 wake_up(&trans_pcie->ucode_write_waitq); 2260 2261 /* Wake up IMR routine once write to SRAM is complete */ 2262 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { 2263 trans_pcie->imr_status = IMR_D2S_COMPLETED; 2264 wake_up(&trans_pcie->ucode_write_waitq); 2265 } 2266 } 2267 2268 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 2269 sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; 2270 else 2271 sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR; 2272 2273 /* Error detected by uCode */ 2274 if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) { 2275 IWL_ERR(trans, 2276 "Microcode SW error detected. Restarting 0x%X.\n", 2277 inta_fh); 2278 isr_stats->sw++; 2279 /* during FW reset flow report errors from there */ 2280 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { 2281 trans_pcie->imr_status = IMR_D2S_ERROR; 2282 wake_up(&trans_pcie->imr_waitq); 2283 } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { 2284 trans_pcie->fw_reset_state = FW_RESET_ERROR; 2285 wake_up(&trans_pcie->fw_reset_waitq); 2286 } else { 2287 iwl_pcie_irq_handle_error(trans); 2288 } 2289 } 2290 2291 /* After checking FH register check HW register */ 2292 if (iwl_have_debug_level(IWL_DL_ISR)) { 2293 IWL_DEBUG_ISR(trans, 2294 "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", 2295 entry->entry, inta_hw, trans_pcie->hw_mask, 2296 iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); 2297 if (inta_hw & ~trans_pcie->hw_mask) 2298 IWL_DEBUG_ISR(trans, 2299 "We got a masked interrupt 0x%08x\n", 2300 inta_hw & ~trans_pcie->hw_mask); 2301 } 2302 2303 inta_hw &= trans_pcie->hw_mask; 2304 2305 /* Alive notification via Rx interrupt will do the real work */ 2306 if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { 2307 IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 2308 isr_stats->alive++; 2309 if (trans->trans_cfg->gen2) { 2310 /* We can restock, since firmware configured the RFH */ 2311 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); 2312 } 2313 } 2314 2315 /* 2316 * In some rare cases when the HW is in a bad state, we may 2317 * get this interrupt too early, when prph_info is still NULL. 2318 * So make sure that it's not NULL to prevent crashing. 2319 */ 2320 if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) { 2321 u32 sleep_notif = 2322 le32_to_cpu(trans_pcie->prph_info->sleep_notif); 2323 if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND || 2324 sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) { 2325 IWL_DEBUG_ISR(trans, 2326 "Sx interrupt: sleep notification = 0x%x\n", 2327 sleep_notif); 2328 trans_pcie->sx_complete = true; 2329 wake_up(&trans_pcie->sx_waitq); 2330 } else { 2331 /* uCode wakes up after power-down sleep */ 2332 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 2333 iwl_pcie_rxq_check_wrptr(trans); 2334 iwl_pcie_txq_check_wrptrs(trans); 2335 2336 isr_stats->wakeup++; 2337 } 2338 } 2339 2340 /* Chip got too hot and stopped itself */ 2341 if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { 2342 IWL_ERR(trans, "Microcode CT kill error detected.\n"); 2343 isr_stats->ctkill++; 2344 } 2345 2346 /* HW RF KILL switch toggled */ 2347 if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) 2348 iwl_pcie_handle_rfkill_irq(trans); 2349 2350 if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { 2351 IWL_ERR(trans, 2352 "Hardware error detected. Restarting.\n"); 2353 2354 isr_stats->hw++; 2355 trans->dbg.hw_error = true; 2356 iwl_pcie_irq_handle_error(trans); 2357 } 2358 2359 if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) { 2360 IWL_DEBUG_ISR(trans, "Reset flow completed\n"); 2361 trans_pcie->fw_reset_state = FW_RESET_OK; 2362 wake_up(&trans_pcie->fw_reset_waitq); 2363 } 2364 2365 if (!polling) 2366 iwl_pcie_clear_irq(trans, entry->entry); 2367 2368 lock_map_release(&trans->sync_cmd_lockdep_map); 2369 2370 return IRQ_HANDLED; 2371 } 2372