1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2003-2014, 2018-2022 Intel Corporation 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 */ 7 #include <linux/sched.h> 8 #include <linux/wait.h> 9 #include <linux/gfp.h> 10 11 #include "iwl-prph.h" 12 #include "iwl-io.h" 13 #include "internal.h" 14 #include "iwl-op-mode.h" 15 #include "iwl-context-info-gen3.h" 16 17 /****************************************************************************** 18 * 19 * RX path functions 20 * 21 ******************************************************************************/ 22 23 /* 24 * Rx theory of operation 25 * 26 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), 27 * each of which point to Receive Buffers to be filled by the NIC. These get 28 * used not only for Rx frames, but for any command response or notification 29 * from the NIC. The driver and NIC manage the Rx buffers by means 30 * of indexes into the circular buffer. 31 * 32 * Rx Queue Indexes 33 * The host/firmware share two index registers for managing the Rx buffers. 34 * 35 * The READ index maps to the first position that the firmware may be writing 36 * to -- the driver can read up to (but not including) this position and get 37 * good data. 38 * The READ index is managed by the firmware once the card is enabled. 39 * 40 * The WRITE index maps to the last position the driver has read from -- the 41 * position preceding WRITE is the last slot the firmware can place a packet. 42 * 43 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 44 * WRITE = READ. 45 * 46 * During initialization, the host sets up the READ queue position to the first 47 * INDEX position, and WRITE to the last (READ - 1 wrapped) 48 * 49 * When the firmware places a packet in a buffer, it will advance the READ index 50 * and fire the RX interrupt. The driver can then query the READ index and 51 * process as many packets as possible, moving the WRITE index forward as it 52 * resets the Rx queue buffers with new memory. 53 * 54 * The management in the driver is as follows: 55 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. 56 * When the interrupt handler is called, the request is processed. 57 * The page is either stolen - transferred to the upper layer 58 * or reused - added immediately to the iwl->rxq->rx_free list. 59 * + When the page is stolen - the driver updates the matching queue's used 60 * count, detaches the RBD and transfers it to the queue used list. 61 * When there are two used RBDs - they are transferred to the allocator empty 62 * list. Work is then scheduled for the allocator to start allocating 63 * eight buffers. 64 * When there are another 6 used RBDs - they are transferred to the allocator 65 * empty list and the driver tries to claim the pre-allocated buffers and 66 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them 67 * until ready. 68 * When there are 8+ buffers in the free list - either from allocation or from 69 * 8 reused unstolen pages - restock is called to update the FW and indexes. 70 * + In order to make sure the allocator always has RBDs to use for allocation 71 * the allocator has initial pool in the size of num_queues*(8-2) - the 72 * maximum missing RBDs per allocation request (request posted with 2 73 * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). 74 * The queues supplies the recycle of the rest of the RBDs. 75 * + A received packet is processed and handed to the kernel network stack, 76 * detached from the iwl->rxq. The driver 'processed' index is updated. 77 * + If there are no allocated buffers in iwl->rxq->rx_free, 78 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. 79 * If there were enough free buffers and RX_STALLED is set it is cleared. 80 * 81 * 82 * Driver sequence: 83 * 84 * iwl_rxq_alloc() Allocates rx_free 85 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls 86 * iwl_pcie_rxq_restock. 87 * Used only during initialization. 88 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx 89 * queue, updates firmware pointers, and updates 90 * the WRITE index. 91 * iwl_pcie_rx_allocator() Background work for allocating pages. 92 * 93 * -- enable interrupts -- 94 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the 95 * READ INDEX, detaching the SKB from the pool. 96 * Moves the packet buffer from queue to rx_used. 97 * Posts and claims requests to the allocator. 98 * Calls iwl_pcie_rxq_restock to refill any empty 99 * slots. 100 * 101 * RBD life-cycle: 102 * 103 * Init: 104 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue 105 * 106 * Regular Receive interrupt: 107 * Page Stolen: 108 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> 109 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue 110 * Page not Stolen: 111 * rxq.queue -> rxq.rx_free -> rxq.queue 112 * ... 113 * 114 */ 115 116 /* 117 * iwl_rxq_space - Return number of free slots available in queue. 118 */ 119 static int iwl_rxq_space(const struct iwl_rxq *rxq) 120 { 121 /* Make sure rx queue size is a power of 2 */ 122 WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); 123 124 /* 125 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity 126 * between empty and completely full queues. 127 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well 128 * defined for negative dividends. 129 */ 130 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); 131 } 132 133 /* 134 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 135 */ 136 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) 137 { 138 return cpu_to_le32((u32)(dma_addr >> 8)); 139 } 140 141 /* 142 * iwl_pcie_rx_stop - stops the Rx DMA 143 */ 144 int iwl_pcie_rx_stop(struct iwl_trans *trans) 145 { 146 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 147 /* TODO: remove this once fw does it */ 148 iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0); 149 return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3, 150 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); 151 } else if (trans->trans_cfg->mq_rx_supported) { 152 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); 153 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, 154 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); 155 } else { 156 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 157 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, 158 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 159 1000); 160 } 161 } 162 163 /* 164 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue 165 */ 166 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, 167 struct iwl_rxq *rxq) 168 { 169 u32 reg; 170 171 lockdep_assert_held(&rxq->lock); 172 173 /* 174 * explicitly wake up the NIC if: 175 * 1. shadow registers aren't enabled 176 * 2. there is a chance that the NIC is asleep 177 */ 178 if (!trans->trans_cfg->base_params->shadow_reg_enable && 179 test_bit(STATUS_TPOWER_PMI, &trans->status)) { 180 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 181 182 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 183 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", 184 reg); 185 iwl_set_bit(trans, CSR_GP_CNTRL, 186 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 187 rxq->need_update = true; 188 return; 189 } 190 } 191 192 rxq->write_actual = round_down(rxq->write, 8); 193 if (trans->trans_cfg->mq_rx_supported) 194 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), 195 rxq->write_actual); 196 else 197 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); 198 } 199 200 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) 201 { 202 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 203 int i; 204 205 for (i = 0; i < trans->num_rx_queues; i++) { 206 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 207 208 if (!rxq->need_update) 209 continue; 210 spin_lock_bh(&rxq->lock); 211 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 212 rxq->need_update = false; 213 spin_unlock_bh(&rxq->lock); 214 } 215 } 216 217 static void iwl_pcie_restock_bd(struct iwl_trans *trans, 218 struct iwl_rxq *rxq, 219 struct iwl_rx_mem_buffer *rxb) 220 { 221 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 222 struct iwl_rx_transfer_desc *bd = rxq->bd; 223 224 BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64)); 225 226 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma); 227 bd[rxq->write].rbid = cpu_to_le16(rxb->vid); 228 } else { 229 __le64 *bd = rxq->bd; 230 231 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); 232 } 233 234 IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n", 235 (u32)rxb->vid, rxq->id, rxq->write); 236 } 237 238 /* 239 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx 240 */ 241 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, 242 struct iwl_rxq *rxq) 243 { 244 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 245 struct iwl_rx_mem_buffer *rxb; 246 247 /* 248 * If the device isn't enabled - no need to try to add buffers... 249 * This can happen when we stop the device and still have an interrupt 250 * pending. We stop the APM before we sync the interrupts because we 251 * have to (see comment there). On the other hand, since the APM is 252 * stopped, we cannot access the HW (in particular not prph). 253 * So don't try to restock if the APM has been already stopped. 254 */ 255 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 256 return; 257 258 spin_lock_bh(&rxq->lock); 259 while (rxq->free_count) { 260 /* Get next free Rx buffer, remove from free list */ 261 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, 262 list); 263 list_del(&rxb->list); 264 rxb->invalid = false; 265 /* some low bits are expected to be unset (depending on hw) */ 266 WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask); 267 /* Point to Rx buffer via next RBD in circular buffer */ 268 iwl_pcie_restock_bd(trans, rxq, rxb); 269 rxq->write = (rxq->write + 1) & (rxq->queue_size - 1); 270 rxq->free_count--; 271 } 272 spin_unlock_bh(&rxq->lock); 273 274 /* 275 * If we've added more space for the firmware to place data, tell it. 276 * Increment device's write pointer in multiples of 8. 277 */ 278 if (rxq->write_actual != (rxq->write & ~0x7)) { 279 spin_lock_bh(&rxq->lock); 280 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 281 spin_unlock_bh(&rxq->lock); 282 } 283 } 284 285 /* 286 * iwl_pcie_rxsq_restock - restock implementation for single queue rx 287 */ 288 static void iwl_pcie_rxsq_restock(struct iwl_trans *trans, 289 struct iwl_rxq *rxq) 290 { 291 struct iwl_rx_mem_buffer *rxb; 292 293 /* 294 * If the device isn't enabled - not need to try to add buffers... 295 * This can happen when we stop the device and still have an interrupt 296 * pending. We stop the APM before we sync the interrupts because we 297 * have to (see comment there). On the other hand, since the APM is 298 * stopped, we cannot access the HW (in particular not prph). 299 * So don't try to restock if the APM has been already stopped. 300 */ 301 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 302 return; 303 304 spin_lock_bh(&rxq->lock); 305 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { 306 __le32 *bd = (__le32 *)rxq->bd; 307 /* The overwritten rxb must be a used one */ 308 rxb = rxq->queue[rxq->write]; 309 BUG_ON(rxb && rxb->page); 310 311 /* Get next free Rx buffer, remove from free list */ 312 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, 313 list); 314 list_del(&rxb->list); 315 rxb->invalid = false; 316 317 /* Point to Rx buffer via next RBD in circular buffer */ 318 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); 319 rxq->queue[rxq->write] = rxb; 320 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 321 rxq->free_count--; 322 } 323 spin_unlock_bh(&rxq->lock); 324 325 /* If we've added more space for the firmware to place data, tell it. 326 * Increment device's write pointer in multiples of 8. */ 327 if (rxq->write_actual != (rxq->write & ~0x7)) { 328 spin_lock_bh(&rxq->lock); 329 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 330 spin_unlock_bh(&rxq->lock); 331 } 332 } 333 334 /* 335 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool 336 * 337 * If there are slots in the RX queue that need to be restocked, 338 * and we have free pre-allocated buffers, fill the ranks as much 339 * as we can, pulling from rx_free. 340 * 341 * This moves the 'write' index forward to catch up with 'processed', and 342 * also updates the memory address in the firmware to reference the new 343 * target buffer. 344 */ 345 static 346 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) 347 { 348 if (trans->trans_cfg->mq_rx_supported) 349 iwl_pcie_rxmq_restock(trans, rxq); 350 else 351 iwl_pcie_rxsq_restock(trans, rxq); 352 } 353 354 /* 355 * iwl_pcie_rx_alloc_page - allocates and returns a page. 356 * 357 */ 358 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, 359 u32 *offset, gfp_t priority) 360 { 361 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 362 unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size); 363 unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order; 364 struct page *page; 365 gfp_t gfp_mask = priority; 366 367 if (trans_pcie->rx_page_order > 0) 368 gfp_mask |= __GFP_COMP; 369 370 if (trans_pcie->alloc_page) { 371 spin_lock_bh(&trans_pcie->alloc_page_lock); 372 /* recheck */ 373 if (trans_pcie->alloc_page) { 374 *offset = trans_pcie->alloc_page_used; 375 page = trans_pcie->alloc_page; 376 trans_pcie->alloc_page_used += rbsize; 377 if (trans_pcie->alloc_page_used >= allocsize) 378 trans_pcie->alloc_page = NULL; 379 else 380 get_page(page); 381 spin_unlock_bh(&trans_pcie->alloc_page_lock); 382 return page; 383 } 384 spin_unlock_bh(&trans_pcie->alloc_page_lock); 385 } 386 387 /* Alloc a new receive buffer */ 388 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); 389 if (!page) { 390 if (net_ratelimit()) 391 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", 392 trans_pcie->rx_page_order); 393 /* 394 * Issue an error if we don't have enough pre-allocated 395 * buffers. 396 */ 397 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) 398 IWL_CRIT(trans, 399 "Failed to alloc_pages\n"); 400 return NULL; 401 } 402 403 if (2 * rbsize <= allocsize) { 404 spin_lock_bh(&trans_pcie->alloc_page_lock); 405 if (!trans_pcie->alloc_page) { 406 get_page(page); 407 trans_pcie->alloc_page = page; 408 trans_pcie->alloc_page_used = rbsize; 409 } 410 spin_unlock_bh(&trans_pcie->alloc_page_lock); 411 } 412 413 *offset = 0; 414 return page; 415 } 416 417 /* 418 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD 419 * 420 * A used RBD is an Rx buffer that has been given to the stack. To use it again 421 * a page must be allocated and the RBD must point to the page. This function 422 * doesn't change the HW pointer but handles the list of pages that is used by 423 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 424 * allocated buffers. 425 */ 426 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, 427 struct iwl_rxq *rxq) 428 { 429 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 430 struct iwl_rx_mem_buffer *rxb; 431 struct page *page; 432 433 while (1) { 434 unsigned int offset; 435 436 spin_lock_bh(&rxq->lock); 437 if (list_empty(&rxq->rx_used)) { 438 spin_unlock_bh(&rxq->lock); 439 return; 440 } 441 spin_unlock_bh(&rxq->lock); 442 443 page = iwl_pcie_rx_alloc_page(trans, &offset, priority); 444 if (!page) 445 return; 446 447 spin_lock_bh(&rxq->lock); 448 449 if (list_empty(&rxq->rx_used)) { 450 spin_unlock_bh(&rxq->lock); 451 __free_pages(page, trans_pcie->rx_page_order); 452 return; 453 } 454 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, 455 list); 456 list_del(&rxb->list); 457 spin_unlock_bh(&rxq->lock); 458 459 BUG_ON(rxb->page); 460 rxb->page = page; 461 rxb->offset = offset; 462 /* Get physical address of the RB */ 463 rxb->page_dma = 464 dma_map_page(trans->dev, page, rxb->offset, 465 trans_pcie->rx_buf_bytes, 466 DMA_FROM_DEVICE); 467 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 468 rxb->page = NULL; 469 spin_lock_bh(&rxq->lock); 470 list_add(&rxb->list, &rxq->rx_used); 471 spin_unlock_bh(&rxq->lock); 472 __free_pages(page, trans_pcie->rx_page_order); 473 return; 474 } 475 476 spin_lock_bh(&rxq->lock); 477 478 list_add_tail(&rxb->list, &rxq->rx_free); 479 rxq->free_count++; 480 481 spin_unlock_bh(&rxq->lock); 482 } 483 } 484 485 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) 486 { 487 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 488 int i; 489 490 if (!trans_pcie->rx_pool) 491 return; 492 493 for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) { 494 if (!trans_pcie->rx_pool[i].page) 495 continue; 496 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, 497 trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE); 498 __free_pages(trans_pcie->rx_pool[i].page, 499 trans_pcie->rx_page_order); 500 trans_pcie->rx_pool[i].page = NULL; 501 } 502 } 503 504 /* 505 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues 506 * 507 * Allocates for each received request 8 pages 508 * Called as a scheduled work item. 509 */ 510 static void iwl_pcie_rx_allocator(struct iwl_trans *trans) 511 { 512 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 513 struct iwl_rb_allocator *rba = &trans_pcie->rba; 514 struct list_head local_empty; 515 int pending = atomic_read(&rba->req_pending); 516 517 IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending); 518 519 /* If we were scheduled - there is at least one request */ 520 spin_lock_bh(&rba->lock); 521 /* swap out the rba->rbd_empty to a local list */ 522 list_replace_init(&rba->rbd_empty, &local_empty); 523 spin_unlock_bh(&rba->lock); 524 525 while (pending) { 526 int i; 527 LIST_HEAD(local_allocated); 528 gfp_t gfp_mask = GFP_KERNEL; 529 530 /* Do not post a warning if there are only a few requests */ 531 if (pending < RX_PENDING_WATERMARK) 532 gfp_mask |= __GFP_NOWARN; 533 534 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { 535 struct iwl_rx_mem_buffer *rxb; 536 struct page *page; 537 538 /* List should never be empty - each reused RBD is 539 * returned to the list, and initial pool covers any 540 * possible gap between the time the page is allocated 541 * to the time the RBD is added. 542 */ 543 BUG_ON(list_empty(&local_empty)); 544 /* Get the first rxb from the rbd list */ 545 rxb = list_first_entry(&local_empty, 546 struct iwl_rx_mem_buffer, list); 547 BUG_ON(rxb->page); 548 549 /* Alloc a new receive buffer */ 550 page = iwl_pcie_rx_alloc_page(trans, &rxb->offset, 551 gfp_mask); 552 if (!page) 553 continue; 554 rxb->page = page; 555 556 /* Get physical address of the RB */ 557 rxb->page_dma = dma_map_page(trans->dev, page, 558 rxb->offset, 559 trans_pcie->rx_buf_bytes, 560 DMA_FROM_DEVICE); 561 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 562 rxb->page = NULL; 563 __free_pages(page, trans_pcie->rx_page_order); 564 continue; 565 } 566 567 /* move the allocated entry to the out list */ 568 list_move(&rxb->list, &local_allocated); 569 i++; 570 } 571 572 atomic_dec(&rba->req_pending); 573 pending--; 574 575 if (!pending) { 576 pending = atomic_read(&rba->req_pending); 577 if (pending) 578 IWL_DEBUG_TPT(trans, 579 "Got more pending allocation requests = %d\n", 580 pending); 581 } 582 583 spin_lock_bh(&rba->lock); 584 /* add the allocated rbds to the allocator allocated list */ 585 list_splice_tail(&local_allocated, &rba->rbd_allocated); 586 /* get more empty RBDs for current pending requests */ 587 list_splice_tail_init(&rba->rbd_empty, &local_empty); 588 spin_unlock_bh(&rba->lock); 589 590 atomic_inc(&rba->req_ready); 591 592 } 593 594 spin_lock_bh(&rba->lock); 595 /* return unused rbds to the allocator empty list */ 596 list_splice_tail(&local_empty, &rba->rbd_empty); 597 spin_unlock_bh(&rba->lock); 598 599 IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__); 600 } 601 602 /* 603 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages 604 .* 605 .* Called by queue when the queue posted allocation request and 606 * has freed 8 RBDs in order to restock itself. 607 * This function directly moves the allocated RBs to the queue's ownership 608 * and updates the relevant counters. 609 */ 610 static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, 611 struct iwl_rxq *rxq) 612 { 613 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 614 struct iwl_rb_allocator *rba = &trans_pcie->rba; 615 int i; 616 617 lockdep_assert_held(&rxq->lock); 618 619 /* 620 * atomic_dec_if_positive returns req_ready - 1 for any scenario. 621 * If req_ready is 0 atomic_dec_if_positive will return -1 and this 622 * function will return early, as there are no ready requests. 623 * atomic_dec_if_positive will perofrm the *actual* decrement only if 624 * req_ready > 0, i.e. - there are ready requests and the function 625 * hands one request to the caller. 626 */ 627 if (atomic_dec_if_positive(&rba->req_ready) < 0) 628 return; 629 630 spin_lock(&rba->lock); 631 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { 632 /* Get next free Rx buffer, remove it from free list */ 633 struct iwl_rx_mem_buffer *rxb = 634 list_first_entry(&rba->rbd_allocated, 635 struct iwl_rx_mem_buffer, list); 636 637 list_move(&rxb->list, &rxq->rx_free); 638 } 639 spin_unlock(&rba->lock); 640 641 rxq->used_count -= RX_CLAIM_REQ_ALLOC; 642 rxq->free_count += RX_CLAIM_REQ_ALLOC; 643 } 644 645 void iwl_pcie_rx_allocator_work(struct work_struct *data) 646 { 647 struct iwl_rb_allocator *rba_p = 648 container_of(data, struct iwl_rb_allocator, rx_alloc); 649 struct iwl_trans_pcie *trans_pcie = 650 container_of(rba_p, struct iwl_trans_pcie, rba); 651 652 iwl_pcie_rx_allocator(trans_pcie->trans); 653 } 654 655 static int iwl_pcie_free_bd_size(struct iwl_trans *trans) 656 { 657 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 658 return sizeof(struct iwl_rx_transfer_desc); 659 660 return trans->trans_cfg->mq_rx_supported ? 661 sizeof(__le64) : sizeof(__le32); 662 } 663 664 static int iwl_pcie_used_bd_size(struct iwl_trans *trans) 665 { 666 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 667 return sizeof(struct iwl_rx_completion_desc_bz); 668 669 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 670 return sizeof(struct iwl_rx_completion_desc); 671 672 return sizeof(__le32); 673 } 674 675 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, 676 struct iwl_rxq *rxq) 677 { 678 int free_size = iwl_pcie_free_bd_size(trans); 679 680 if (rxq->bd) 681 dma_free_coherent(trans->dev, 682 free_size * rxq->queue_size, 683 rxq->bd, rxq->bd_dma); 684 rxq->bd_dma = 0; 685 rxq->bd = NULL; 686 687 rxq->rb_stts_dma = 0; 688 rxq->rb_stts = NULL; 689 690 if (rxq->used_bd) 691 dma_free_coherent(trans->dev, 692 iwl_pcie_used_bd_size(trans) * 693 rxq->queue_size, 694 rxq->used_bd, rxq->used_bd_dma); 695 rxq->used_bd_dma = 0; 696 rxq->used_bd = NULL; 697 } 698 699 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, 700 struct iwl_rxq *rxq) 701 { 702 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 703 struct device *dev = trans->dev; 704 int i; 705 int free_size; 706 bool use_rx_td = (trans->trans_cfg->device_family >= 707 IWL_DEVICE_FAMILY_AX210); 708 size_t rb_stts_size = use_rx_td ? sizeof(__le16) : 709 sizeof(struct iwl_rb_status); 710 711 spin_lock_init(&rxq->lock); 712 if (trans->trans_cfg->mq_rx_supported) 713 rxq->queue_size = trans->cfg->num_rbds; 714 else 715 rxq->queue_size = RX_QUEUE_SIZE; 716 717 free_size = iwl_pcie_free_bd_size(trans); 718 719 /* 720 * Allocate the circular buffer of Read Buffer Descriptors 721 * (RBDs) 722 */ 723 rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size, 724 &rxq->bd_dma, GFP_KERNEL); 725 if (!rxq->bd) 726 goto err; 727 728 if (trans->trans_cfg->mq_rx_supported) { 729 rxq->used_bd = dma_alloc_coherent(dev, 730 iwl_pcie_used_bd_size(trans) * 731 rxq->queue_size, 732 &rxq->used_bd_dma, 733 GFP_KERNEL); 734 if (!rxq->used_bd) 735 goto err; 736 } 737 738 rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size; 739 rxq->rb_stts_dma = 740 trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size; 741 742 return 0; 743 744 err: 745 for (i = 0; i < trans->num_rx_queues; i++) { 746 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 747 748 iwl_pcie_free_rxq_dma(trans, rxq); 749 } 750 751 return -ENOMEM; 752 } 753 754 static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 755 { 756 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 757 struct iwl_rb_allocator *rba = &trans_pcie->rba; 758 int i, ret; 759 size_t rb_stts_size = trans->trans_cfg->device_family >= 760 IWL_DEVICE_FAMILY_AX210 ? 761 sizeof(__le16) : sizeof(struct iwl_rb_status); 762 763 if (WARN_ON(trans_pcie->rxq)) 764 return -EINVAL; 765 766 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), 767 GFP_KERNEL); 768 trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs), 769 sizeof(trans_pcie->rx_pool[0]), 770 GFP_KERNEL); 771 trans_pcie->global_table = 772 kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs), 773 sizeof(trans_pcie->global_table[0]), 774 GFP_KERNEL); 775 if (!trans_pcie->rxq || !trans_pcie->rx_pool || 776 !trans_pcie->global_table) { 777 ret = -ENOMEM; 778 goto err; 779 } 780 781 spin_lock_init(&rba->lock); 782 783 /* 784 * Allocate the driver's pointer to receive buffer status. 785 * Allocate for all queues continuously (HW requirement). 786 */ 787 trans_pcie->base_rb_stts = 788 dma_alloc_coherent(trans->dev, 789 rb_stts_size * trans->num_rx_queues, 790 &trans_pcie->base_rb_stts_dma, 791 GFP_KERNEL); 792 if (!trans_pcie->base_rb_stts) { 793 ret = -ENOMEM; 794 goto err; 795 } 796 797 for (i = 0; i < trans->num_rx_queues; i++) { 798 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 799 800 rxq->id = i; 801 ret = iwl_pcie_alloc_rxq_dma(trans, rxq); 802 if (ret) 803 goto err; 804 } 805 return 0; 806 807 err: 808 if (trans_pcie->base_rb_stts) { 809 dma_free_coherent(trans->dev, 810 rb_stts_size * trans->num_rx_queues, 811 trans_pcie->base_rb_stts, 812 trans_pcie->base_rb_stts_dma); 813 trans_pcie->base_rb_stts = NULL; 814 trans_pcie->base_rb_stts_dma = 0; 815 } 816 kfree(trans_pcie->rx_pool); 817 trans_pcie->rx_pool = NULL; 818 kfree(trans_pcie->global_table); 819 trans_pcie->global_table = NULL; 820 kfree(trans_pcie->rxq); 821 trans_pcie->rxq = NULL; 822 823 return ret; 824 } 825 826 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) 827 { 828 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 829 u32 rb_size; 830 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ 831 832 switch (trans_pcie->rx_buf_size) { 833 case IWL_AMSDU_4K: 834 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 835 break; 836 case IWL_AMSDU_8K: 837 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; 838 break; 839 case IWL_AMSDU_12K: 840 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K; 841 break; 842 default: 843 WARN_ON(1); 844 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 845 } 846 847 if (!iwl_trans_grab_nic_access(trans)) 848 return; 849 850 /* Stop Rx DMA */ 851 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 852 /* reset and flush pointers */ 853 iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); 854 iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); 855 iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0); 856 857 /* Reset driver's Rx queue write index */ 858 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 859 860 /* Tell device where to find RBD circular buffer in DRAM */ 861 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 862 (u32)(rxq->bd_dma >> 8)); 863 864 /* Tell device where in DRAM to update its Rx status */ 865 iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, 866 rxq->rb_stts_dma >> 4); 867 868 /* Enable Rx DMA 869 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in 870 * the credit mechanism in 5000 HW RX FIFO 871 * Direct rx interrupts to hosts 872 * Rx buffer size 4 or 8k or 12k 873 * RB timeout 0x10 874 * 256 RBDs 875 */ 876 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 877 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 878 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | 879 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 880 rb_size | 881 (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | 882 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 883 884 iwl_trans_release_nic_access(trans); 885 886 /* Set interrupt coalescing timer to default (2048 usecs) */ 887 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 888 889 /* W/A for interrupt coalescing bug in 7260 and 3160 */ 890 if (trans->cfg->host_interrupt_operation_mode) 891 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); 892 } 893 894 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) 895 { 896 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 897 u32 rb_size, enabled = 0; 898 int i; 899 900 switch (trans_pcie->rx_buf_size) { 901 case IWL_AMSDU_2K: 902 rb_size = RFH_RXF_DMA_RB_SIZE_2K; 903 break; 904 case IWL_AMSDU_4K: 905 rb_size = RFH_RXF_DMA_RB_SIZE_4K; 906 break; 907 case IWL_AMSDU_8K: 908 rb_size = RFH_RXF_DMA_RB_SIZE_8K; 909 break; 910 case IWL_AMSDU_12K: 911 rb_size = RFH_RXF_DMA_RB_SIZE_12K; 912 break; 913 default: 914 WARN_ON(1); 915 rb_size = RFH_RXF_DMA_RB_SIZE_4K; 916 } 917 918 if (!iwl_trans_grab_nic_access(trans)) 919 return; 920 921 /* Stop Rx DMA */ 922 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0); 923 /* disable free amd used rx queue operation */ 924 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0); 925 926 for (i = 0; i < trans->num_rx_queues; i++) { 927 /* Tell device where to find RBD free table in DRAM */ 928 iwl_write_prph64_no_grab(trans, 929 RFH_Q_FRBDCB_BA_LSB(i), 930 trans_pcie->rxq[i].bd_dma); 931 /* Tell device where to find RBD used table in DRAM */ 932 iwl_write_prph64_no_grab(trans, 933 RFH_Q_URBDCB_BA_LSB(i), 934 trans_pcie->rxq[i].used_bd_dma); 935 /* Tell device where in DRAM to update its Rx status */ 936 iwl_write_prph64_no_grab(trans, 937 RFH_Q_URBD_STTS_WPTR_LSB(i), 938 trans_pcie->rxq[i].rb_stts_dma); 939 /* Reset device indice tables */ 940 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0); 941 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0); 942 iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0); 943 944 enabled |= BIT(i) | BIT(i + 16); 945 } 946 947 /* 948 * Enable Rx DMA 949 * Rx buffer size 4 or 8k or 12k 950 * Min RB size 4 or 8 951 * Drop frames that exceed RB size 952 * 512 RBDs 953 */ 954 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 955 RFH_DMA_EN_ENABLE_VAL | rb_size | 956 RFH_RXF_DMA_MIN_RB_4_8 | 957 RFH_RXF_DMA_DROP_TOO_LARGE_MASK | 958 RFH_RXF_DMA_RBDCB_SIZE_512); 959 960 /* 961 * Activate DMA snooping. 962 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe 963 * Default queue is 0 964 */ 965 iwl_write_prph_no_grab(trans, RFH_GEN_CFG, 966 RFH_GEN_CFG_RFH_DMA_SNOOP | 967 RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) | 968 RFH_GEN_CFG_SERVICE_DMA_SNOOP | 969 RFH_GEN_CFG_VAL(RB_CHUNK_SIZE, 970 trans->trans_cfg->integrated ? 971 RFH_GEN_CFG_RB_CHUNK_SIZE_64 : 972 RFH_GEN_CFG_RB_CHUNK_SIZE_128)); 973 /* Enable the relevant rx queues */ 974 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled); 975 976 iwl_trans_release_nic_access(trans); 977 978 /* Set interrupt coalescing timer to default (2048 usecs) */ 979 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 980 } 981 982 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) 983 { 984 lockdep_assert_held(&rxq->lock); 985 986 INIT_LIST_HEAD(&rxq->rx_free); 987 INIT_LIST_HEAD(&rxq->rx_used); 988 rxq->free_count = 0; 989 rxq->used_count = 0; 990 } 991 992 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget); 993 994 static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget) 995 { 996 struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); 997 struct iwl_trans_pcie *trans_pcie; 998 struct iwl_trans *trans; 999 int ret; 1000 1001 trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); 1002 trans = trans_pcie->trans; 1003 1004 ret = iwl_pcie_rx_handle(trans, rxq->id, budget); 1005 1006 IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", 1007 rxq->id, ret, budget); 1008 1009 if (ret < budget) { 1010 spin_lock(&trans_pcie->irq_lock); 1011 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1012 _iwl_enable_interrupts(trans); 1013 spin_unlock(&trans_pcie->irq_lock); 1014 1015 napi_complete_done(&rxq->napi, ret); 1016 } 1017 1018 return ret; 1019 } 1020 1021 static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget) 1022 { 1023 struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); 1024 struct iwl_trans_pcie *trans_pcie; 1025 struct iwl_trans *trans; 1026 int ret; 1027 1028 trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); 1029 trans = trans_pcie->trans; 1030 1031 ret = iwl_pcie_rx_handle(trans, rxq->id, budget); 1032 IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret, 1033 budget); 1034 1035 if (ret < budget) { 1036 int irq_line = rxq->id; 1037 1038 /* FIRST_RSS is shared with line 0 */ 1039 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS && 1040 rxq->id == 1) 1041 irq_line = 0; 1042 1043 spin_lock(&trans_pcie->irq_lock); 1044 iwl_pcie_clear_irq(trans, irq_line); 1045 spin_unlock(&trans_pcie->irq_lock); 1046 1047 napi_complete_done(&rxq->napi, ret); 1048 } 1049 1050 return ret; 1051 } 1052 1053 static int _iwl_pcie_rx_init(struct iwl_trans *trans) 1054 { 1055 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1056 struct iwl_rxq *def_rxq; 1057 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1058 int i, err, queue_size, allocator_pool_size, num_alloc; 1059 1060 if (!trans_pcie->rxq) { 1061 err = iwl_pcie_rx_alloc(trans); 1062 if (err) 1063 return err; 1064 } 1065 def_rxq = trans_pcie->rxq; 1066 1067 cancel_work_sync(&rba->rx_alloc); 1068 1069 spin_lock_bh(&rba->lock); 1070 atomic_set(&rba->req_pending, 0); 1071 atomic_set(&rba->req_ready, 0); 1072 INIT_LIST_HEAD(&rba->rbd_allocated); 1073 INIT_LIST_HEAD(&rba->rbd_empty); 1074 spin_unlock_bh(&rba->lock); 1075 1076 /* free all first - we overwrite everything here */ 1077 iwl_pcie_free_rbs_pool(trans); 1078 1079 for (i = 0; i < RX_QUEUE_SIZE; i++) 1080 def_rxq->queue[i] = NULL; 1081 1082 for (i = 0; i < trans->num_rx_queues; i++) { 1083 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 1084 1085 spin_lock_bh(&rxq->lock); 1086 /* 1087 * Set read write pointer to reflect that we have processed 1088 * and used all buffers, but have not restocked the Rx queue 1089 * with fresh buffers 1090 */ 1091 rxq->read = 0; 1092 rxq->write = 0; 1093 rxq->write_actual = 0; 1094 memset(rxq->rb_stts, 0, 1095 (trans->trans_cfg->device_family >= 1096 IWL_DEVICE_FAMILY_AX210) ? 1097 sizeof(__le16) : sizeof(struct iwl_rb_status)); 1098 1099 iwl_pcie_rx_init_rxb_lists(rxq); 1100 1101 spin_unlock_bh(&rxq->lock); 1102 1103 if (!rxq->napi.poll) { 1104 int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll; 1105 1106 if (trans_pcie->msix_enabled) 1107 poll = iwl_pcie_napi_poll_msix; 1108 1109 netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, 1110 poll, NAPI_POLL_WEIGHT); 1111 napi_enable(&rxq->napi); 1112 } 1113 1114 } 1115 1116 /* move the pool to the default queue and allocator ownerships */ 1117 queue_size = trans->trans_cfg->mq_rx_supported ? 1118 trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE; 1119 allocator_pool_size = trans->num_rx_queues * 1120 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); 1121 num_alloc = queue_size + allocator_pool_size; 1122 1123 for (i = 0; i < num_alloc; i++) { 1124 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; 1125 1126 if (i < allocator_pool_size) 1127 list_add(&rxb->list, &rba->rbd_empty); 1128 else 1129 list_add(&rxb->list, &def_rxq->rx_used); 1130 trans_pcie->global_table[i] = rxb; 1131 rxb->vid = (u16)(i + 1); 1132 rxb->invalid = true; 1133 } 1134 1135 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); 1136 1137 return 0; 1138 } 1139 1140 int iwl_pcie_rx_init(struct iwl_trans *trans) 1141 { 1142 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1143 int ret = _iwl_pcie_rx_init(trans); 1144 1145 if (ret) 1146 return ret; 1147 1148 if (trans->trans_cfg->mq_rx_supported) 1149 iwl_pcie_rx_mq_hw_init(trans); 1150 else 1151 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); 1152 1153 iwl_pcie_rxq_restock(trans, trans_pcie->rxq); 1154 1155 spin_lock_bh(&trans_pcie->rxq->lock); 1156 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq); 1157 spin_unlock_bh(&trans_pcie->rxq->lock); 1158 1159 return 0; 1160 } 1161 1162 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans) 1163 { 1164 /* Set interrupt coalescing timer to default (2048 usecs) */ 1165 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 1166 1167 /* 1168 * We don't configure the RFH. 1169 * Restock will be done at alive, after firmware configured the RFH. 1170 */ 1171 return _iwl_pcie_rx_init(trans); 1172 } 1173 1174 void iwl_pcie_rx_free(struct iwl_trans *trans) 1175 { 1176 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1177 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1178 int i; 1179 size_t rb_stts_size = trans->trans_cfg->device_family >= 1180 IWL_DEVICE_FAMILY_AX210 ? 1181 sizeof(__le16) : sizeof(struct iwl_rb_status); 1182 1183 /* 1184 * if rxq is NULL, it means that nothing has been allocated, 1185 * exit now 1186 */ 1187 if (!trans_pcie->rxq) { 1188 IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); 1189 return; 1190 } 1191 1192 cancel_work_sync(&rba->rx_alloc); 1193 1194 iwl_pcie_free_rbs_pool(trans); 1195 1196 if (trans_pcie->base_rb_stts) { 1197 dma_free_coherent(trans->dev, 1198 rb_stts_size * trans->num_rx_queues, 1199 trans_pcie->base_rb_stts, 1200 trans_pcie->base_rb_stts_dma); 1201 trans_pcie->base_rb_stts = NULL; 1202 trans_pcie->base_rb_stts_dma = 0; 1203 } 1204 1205 for (i = 0; i < trans->num_rx_queues; i++) { 1206 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 1207 1208 iwl_pcie_free_rxq_dma(trans, rxq); 1209 1210 if (rxq->napi.poll) { 1211 napi_disable(&rxq->napi); 1212 netif_napi_del(&rxq->napi); 1213 } 1214 } 1215 kfree(trans_pcie->rx_pool); 1216 kfree(trans_pcie->global_table); 1217 kfree(trans_pcie->rxq); 1218 1219 if (trans_pcie->alloc_page) 1220 __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order); 1221 } 1222 1223 static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, 1224 struct iwl_rb_allocator *rba) 1225 { 1226 spin_lock(&rba->lock); 1227 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 1228 spin_unlock(&rba->lock); 1229 } 1230 1231 /* 1232 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs 1233 * 1234 * Called when a RBD can be reused. The RBD is transferred to the allocator. 1235 * When there are 2 empty RBDs - a request for allocation is posted 1236 */ 1237 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, 1238 struct iwl_rx_mem_buffer *rxb, 1239 struct iwl_rxq *rxq, bool emergency) 1240 { 1241 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1242 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1243 1244 /* Move the RBD to the used list, will be moved to allocator in batches 1245 * before claiming or posting a request*/ 1246 list_add_tail(&rxb->list, &rxq->rx_used); 1247 1248 if (unlikely(emergency)) 1249 return; 1250 1251 /* Count the allocator owned RBDs */ 1252 rxq->used_count++; 1253 1254 /* If we have RX_POST_REQ_ALLOC new released rx buffers - 1255 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is 1256 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, 1257 * after but we still need to post another request. 1258 */ 1259 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { 1260 /* Move the 2 RBDs to the allocator ownership. 1261 Allocator has another 6 from pool for the request completion*/ 1262 iwl_pcie_rx_move_to_allocator(rxq, rba); 1263 1264 atomic_inc(&rba->req_pending); 1265 queue_work(rba->alloc_wq, &rba->rx_alloc); 1266 } 1267 } 1268 1269 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, 1270 struct iwl_rxq *rxq, 1271 struct iwl_rx_mem_buffer *rxb, 1272 bool emergency, 1273 int i) 1274 { 1275 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1276 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; 1277 bool page_stolen = false; 1278 int max_len = trans_pcie->rx_buf_bytes; 1279 u32 offset = 0; 1280 1281 if (WARN_ON(!rxb)) 1282 return; 1283 1284 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); 1285 1286 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { 1287 struct iwl_rx_packet *pkt; 1288 bool reclaim; 1289 int len; 1290 struct iwl_rx_cmd_buffer rxcb = { 1291 ._offset = rxb->offset + offset, 1292 ._rx_page_order = trans_pcie->rx_page_order, 1293 ._page = rxb->page, 1294 ._page_stolen = false, 1295 .truesize = max_len, 1296 }; 1297 1298 pkt = rxb_addr(&rxcb); 1299 1300 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { 1301 IWL_DEBUG_RX(trans, 1302 "Q %d: RB end marker at offset %d\n", 1303 rxq->id, offset); 1304 break; 1305 } 1306 1307 WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> 1308 FH_RSCSR_RXQ_POS != rxq->id, 1309 "frame on invalid queue - is on %d and indicates %d\n", 1310 rxq->id, 1311 (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> 1312 FH_RSCSR_RXQ_POS); 1313 1314 IWL_DEBUG_RX(trans, 1315 "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", 1316 rxq->id, offset, 1317 iwl_get_cmd_string(trans, 1318 WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)), 1319 pkt->hdr.group_id, pkt->hdr.cmd, 1320 le16_to_cpu(pkt->hdr.sequence)); 1321 1322 len = iwl_rx_packet_len(pkt); 1323 len += sizeof(u32); /* account for status word */ 1324 1325 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); 1326 1327 /* check that what the device tells us made sense */ 1328 if (len < sizeof(*pkt) || offset > max_len) 1329 break; 1330 1331 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); 1332 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); 1333 1334 /* Reclaim a command buffer only if this packet is a response 1335 * to a (driver-originated) command. 1336 * If the packet (e.g. Rx frame) originated from uCode, 1337 * there is no command buffer to reclaim. 1338 * Ucode should set SEQ_RX_FRAME bit if ucode-originated, 1339 * but apparently a few don't get set; catch them here. */ 1340 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); 1341 if (reclaim && !pkt->hdr.group_id) { 1342 int i; 1343 1344 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { 1345 if (trans_pcie->no_reclaim_cmds[i] == 1346 pkt->hdr.cmd) { 1347 reclaim = false; 1348 break; 1349 } 1350 } 1351 } 1352 1353 if (rxq->id == trans_pcie->def_rx_queue) 1354 iwl_op_mode_rx(trans->op_mode, &rxq->napi, 1355 &rxcb); 1356 else 1357 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, 1358 &rxcb, rxq->id); 1359 1360 /* 1361 * After here, we should always check rxcb._page_stolen, 1362 * if it is true then one of the handlers took the page. 1363 */ 1364 1365 if (reclaim) { 1366 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1367 int index = SEQ_TO_INDEX(sequence); 1368 int cmd_index = iwl_txq_get_cmd_index(txq, index); 1369 1370 kfree_sensitive(txq->entries[cmd_index].free_buf); 1371 txq->entries[cmd_index].free_buf = NULL; 1372 1373 /* Invoke any callbacks, transfer the buffer to caller, 1374 * and fire off the (possibly) blocking 1375 * iwl_trans_send_cmd() 1376 * as we reclaim the driver command queue */ 1377 if (!rxcb._page_stolen) 1378 iwl_pcie_hcmd_complete(trans, &rxcb); 1379 else 1380 IWL_WARN(trans, "Claim null rxb?\n"); 1381 } 1382 1383 page_stolen |= rxcb._page_stolen; 1384 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 1385 break; 1386 } 1387 1388 /* page was stolen from us -- free our reference */ 1389 if (page_stolen) { 1390 __free_pages(rxb->page, trans_pcie->rx_page_order); 1391 rxb->page = NULL; 1392 } 1393 1394 /* Reuse the page if possible. For notification packets and 1395 * SKBs that fail to Rx correctly, add them back into the 1396 * rx_free list for reuse later. */ 1397 if (rxb->page != NULL) { 1398 rxb->page_dma = 1399 dma_map_page(trans->dev, rxb->page, rxb->offset, 1400 trans_pcie->rx_buf_bytes, 1401 DMA_FROM_DEVICE); 1402 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 1403 /* 1404 * free the page(s) as well to not break 1405 * the invariant that the items on the used 1406 * list have no page(s) 1407 */ 1408 __free_pages(rxb->page, trans_pcie->rx_page_order); 1409 rxb->page = NULL; 1410 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1411 } else { 1412 list_add_tail(&rxb->list, &rxq->rx_free); 1413 rxq->free_count++; 1414 } 1415 } else 1416 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1417 } 1418 1419 static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, 1420 struct iwl_rxq *rxq, int i, 1421 bool *join) 1422 { 1423 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1424 struct iwl_rx_mem_buffer *rxb; 1425 u16 vid; 1426 1427 BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); 1428 BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4); 1429 1430 if (!trans->trans_cfg->mq_rx_supported) { 1431 rxb = rxq->queue[i]; 1432 rxq->queue[i] = NULL; 1433 return rxb; 1434 } 1435 1436 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { 1437 struct iwl_rx_completion_desc_bz *cd = rxq->used_bd; 1438 1439 vid = le16_to_cpu(cd[i].rbid); 1440 *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; 1441 } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 1442 struct iwl_rx_completion_desc *cd = rxq->used_bd; 1443 1444 vid = le16_to_cpu(cd[i].rbid); 1445 *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; 1446 } else { 1447 __le32 *cd = rxq->used_bd; 1448 1449 vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */ 1450 } 1451 1452 if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs)) 1453 goto out_err; 1454 1455 rxb = trans_pcie->global_table[vid - 1]; 1456 if (rxb->invalid) 1457 goto out_err; 1458 1459 IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); 1460 1461 rxb->invalid = true; 1462 1463 return rxb; 1464 1465 out_err: 1466 WARN(1, "Invalid rxb from HW %u\n", (u32)vid); 1467 iwl_force_nmi(trans); 1468 return NULL; 1469 } 1470 1471 /* 1472 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw 1473 */ 1474 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget) 1475 { 1476 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1477 struct iwl_rxq *rxq; 1478 u32 r, i, count = 0, handled = 0; 1479 bool emergency = false; 1480 1481 if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd)) 1482 return budget; 1483 1484 rxq = &trans_pcie->rxq[queue]; 1485 1486 restart: 1487 spin_lock(&rxq->lock); 1488 /* uCode's read index (stored in shared DRAM) indicates the last Rx 1489 * buffer that the driver may process (last buffer filled by ucode). */ 1490 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; 1491 i = rxq->read; 1492 1493 /* W/A 9000 device step A0 wrap-around bug */ 1494 r &= (rxq->queue_size - 1); 1495 1496 /* Rx interrupt, but nothing sent from uCode */ 1497 if (i == r) 1498 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); 1499 1500 while (i != r && ++handled < budget) { 1501 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1502 struct iwl_rx_mem_buffer *rxb; 1503 /* number of RBDs still waiting for page allocation */ 1504 u32 rb_pending_alloc = 1505 atomic_read(&trans_pcie->rba.req_pending) * 1506 RX_CLAIM_REQ_ALLOC; 1507 bool join = false; 1508 1509 if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && 1510 !emergency)) { 1511 iwl_pcie_rx_move_to_allocator(rxq, rba); 1512 emergency = true; 1513 IWL_DEBUG_TPT(trans, 1514 "RX path is in emergency. Pending allocations %d\n", 1515 rb_pending_alloc); 1516 } 1517 1518 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); 1519 1520 rxb = iwl_pcie_get_rxb(trans, rxq, i, &join); 1521 if (!rxb) 1522 goto out; 1523 1524 if (unlikely(join || rxq->next_rb_is_fragment)) { 1525 rxq->next_rb_is_fragment = join; 1526 /* 1527 * We can only get a multi-RB in the following cases: 1528 * - firmware issue, sending a too big notification 1529 * - sniffer mode with a large A-MSDU 1530 * - large MTU frames (>2k) 1531 * since the multi-RB functionality is limited to newer 1532 * hardware that cannot put multiple entries into a 1533 * single RB. 1534 * 1535 * Right now, the higher layers aren't set up to deal 1536 * with that, so discard all of these. 1537 */ 1538 list_add_tail(&rxb->list, &rxq->rx_free); 1539 rxq->free_count++; 1540 } else { 1541 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); 1542 } 1543 1544 i = (i + 1) & (rxq->queue_size - 1); 1545 1546 /* 1547 * If we have RX_CLAIM_REQ_ALLOC released rx buffers - 1548 * try to claim the pre-allocated buffers from the allocator. 1549 * If not ready - will try to reclaim next time. 1550 * There is no need to reschedule work - allocator exits only 1551 * on success 1552 */ 1553 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) 1554 iwl_pcie_rx_allocator_get(trans, rxq); 1555 1556 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { 1557 /* Add the remaining empty RBDs for allocator use */ 1558 iwl_pcie_rx_move_to_allocator(rxq, rba); 1559 } else if (emergency) { 1560 count++; 1561 if (count == 8) { 1562 count = 0; 1563 if (rb_pending_alloc < rxq->queue_size / 3) { 1564 IWL_DEBUG_TPT(trans, 1565 "RX path exited emergency. Pending allocations %d\n", 1566 rb_pending_alloc); 1567 emergency = false; 1568 } 1569 1570 rxq->read = i; 1571 spin_unlock(&rxq->lock); 1572 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1573 iwl_pcie_rxq_restock(trans, rxq); 1574 goto restart; 1575 } 1576 } 1577 } 1578 out: 1579 /* Backtrack one entry */ 1580 rxq->read = i; 1581 spin_unlock(&rxq->lock); 1582 1583 /* 1584 * handle a case where in emergency there are some unallocated RBDs. 1585 * those RBDs are in the used list, but are not tracked by the queue's 1586 * used_count which counts allocator owned RBDs. 1587 * unallocated emergency RBDs must be allocated on exit, otherwise 1588 * when called again the function may not be in emergency mode and 1589 * they will be handed to the allocator with no tracking in the RBD 1590 * allocator counters, which will lead to them never being claimed back 1591 * by the queue. 1592 * by allocating them here, they are now in the queue free list, and 1593 * will be restocked by the next call of iwl_pcie_rxq_restock. 1594 */ 1595 if (unlikely(emergency && count)) 1596 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1597 1598 iwl_pcie_rxq_restock(trans, rxq); 1599 1600 return handled; 1601 } 1602 1603 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) 1604 { 1605 u8 queue = entry->entry; 1606 struct msix_entry *entries = entry - queue; 1607 1608 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); 1609 } 1610 1611 /* 1612 * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw 1613 * This interrupt handler should be used with RSS queue only. 1614 */ 1615 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) 1616 { 1617 struct msix_entry *entry = dev_id; 1618 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); 1619 struct iwl_trans *trans = trans_pcie->trans; 1620 struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry]; 1621 1622 trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); 1623 1624 if (WARN_ON(entry->entry >= trans->num_rx_queues)) 1625 return IRQ_NONE; 1626 1627 if (WARN_ONCE(!rxq, 1628 "[%d] Got MSI-X interrupt before we have Rx queues", 1629 entry->entry)) 1630 return IRQ_NONE; 1631 1632 lock_map_acquire(&trans->sync_cmd_lockdep_map); 1633 IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); 1634 1635 local_bh_disable(); 1636 if (napi_schedule_prep(&rxq->napi)) 1637 __napi_schedule(&rxq->napi); 1638 else 1639 iwl_pcie_clear_irq(trans, entry->entry); 1640 local_bh_enable(); 1641 1642 lock_map_release(&trans->sync_cmd_lockdep_map); 1643 1644 return IRQ_HANDLED; 1645 } 1646 1647 /* 1648 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card 1649 */ 1650 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) 1651 { 1652 int i; 1653 1654 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 1655 if (trans->cfg->internal_wimax_coex && 1656 !trans->cfg->apmg_not_supported && 1657 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 1658 APMS_CLK_VAL_MRB_FUNC_MODE) || 1659 (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 1660 APMG_PS_CTRL_VAL_RESET_REQ))) { 1661 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1662 iwl_op_mode_wimax_active(trans->op_mode); 1663 wake_up(&trans->wait_command_queue); 1664 return; 1665 } 1666 1667 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 1668 if (!trans->txqs.txq[i]) 1669 continue; 1670 del_timer(&trans->txqs.txq[i]->stuck_timer); 1671 } 1672 1673 /* The STATUS_FW_ERROR bit is set in this function. This must happen 1674 * before we wake up the command caller, to ensure a proper cleanup. */ 1675 iwl_trans_fw_error(trans, false); 1676 1677 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1678 wake_up(&trans->wait_command_queue); 1679 } 1680 1681 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) 1682 { 1683 u32 inta; 1684 1685 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); 1686 1687 trace_iwlwifi_dev_irq(trans->dev); 1688 1689 /* Discover which interrupts are active/pending */ 1690 inta = iwl_read32(trans, CSR_INT); 1691 1692 /* the thread will service interrupts and re-enable them */ 1693 return inta; 1694 } 1695 1696 /* a device (PCI-E) page is 4096 bytes long */ 1697 #define ICT_SHIFT 12 1698 #define ICT_SIZE (1 << ICT_SHIFT) 1699 #define ICT_COUNT (ICT_SIZE / sizeof(u32)) 1700 1701 /* interrupt handler using ict table, with this interrupt driver will 1702 * stop using INTA register to get device's interrupt, reading this register 1703 * is expensive, device will write interrupts in ICT dram table, increment 1704 * index then will fire interrupt to driver, driver will OR all ICT table 1705 * entries from current index up to table entry with 0 value. the result is 1706 * the interrupt we need to service, driver will set the entries back to 0 and 1707 * set index. 1708 */ 1709 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) 1710 { 1711 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1712 u32 inta; 1713 u32 val = 0; 1714 u32 read; 1715 1716 trace_iwlwifi_dev_irq(trans->dev); 1717 1718 /* Ignore interrupt if there's nothing in NIC to service. 1719 * This may be due to IRQ shared with another device, 1720 * or due to sporadic interrupts thrown from our NIC. */ 1721 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); 1722 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); 1723 if (!read) 1724 return 0; 1725 1726 /* 1727 * Collect all entries up to the first 0, starting from ict_index; 1728 * note we already read at ict_index. 1729 */ 1730 do { 1731 val |= read; 1732 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", 1733 trans_pcie->ict_index, read); 1734 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; 1735 trans_pcie->ict_index = 1736 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); 1737 1738 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); 1739 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, 1740 read); 1741 } while (read); 1742 1743 /* We should not get this value, just ignore it. */ 1744 if (val == 0xffffffff) 1745 val = 0; 1746 1747 /* 1748 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit 1749 * (bit 15 before shifting it to 31) to clear when using interrupt 1750 * coalescing. fortunately, bits 18 and 19 stay set when this happens 1751 * so we use them to decide on the real state of the Rx bit. 1752 * In order words, bit 15 is set if bit 18 or bit 19 are set. 1753 */ 1754 if (val & 0xC0000) 1755 val |= 0x8000; 1756 1757 inta = (0xff & val) | ((0xff00 & val) << 16); 1758 return inta; 1759 } 1760 1761 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) 1762 { 1763 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1764 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1765 bool hw_rfkill, prev, report; 1766 1767 mutex_lock(&trans_pcie->mutex); 1768 prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); 1769 hw_rfkill = iwl_is_rfkill_set(trans); 1770 if (hw_rfkill) { 1771 set_bit(STATUS_RFKILL_OPMODE, &trans->status); 1772 set_bit(STATUS_RFKILL_HW, &trans->status); 1773 } 1774 if (trans_pcie->opmode_down) 1775 report = hw_rfkill; 1776 else 1777 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); 1778 1779 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", 1780 hw_rfkill ? "disable radio" : "enable radio"); 1781 1782 isr_stats->rfkill++; 1783 1784 if (prev != report) 1785 iwl_trans_pcie_rf_kill(trans, report); 1786 mutex_unlock(&trans_pcie->mutex); 1787 1788 if (hw_rfkill) { 1789 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, 1790 &trans->status)) 1791 IWL_DEBUG_RF_KILL(trans, 1792 "Rfkill while SYNC HCMD in flight\n"); 1793 wake_up(&trans->wait_command_queue); 1794 } else { 1795 clear_bit(STATUS_RFKILL_HW, &trans->status); 1796 if (trans_pcie->opmode_down) 1797 clear_bit(STATUS_RFKILL_OPMODE, &trans->status); 1798 } 1799 } 1800 1801 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) 1802 { 1803 struct iwl_trans *trans = dev_id; 1804 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1805 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1806 u32 inta = 0; 1807 u32 handled = 0; 1808 bool polling = false; 1809 1810 lock_map_acquire(&trans->sync_cmd_lockdep_map); 1811 1812 spin_lock_bh(&trans_pcie->irq_lock); 1813 1814 /* dram interrupt table not set yet, 1815 * use legacy interrupt. 1816 */ 1817 if (likely(trans_pcie->use_ict)) 1818 inta = iwl_pcie_int_cause_ict(trans); 1819 else 1820 inta = iwl_pcie_int_cause_non_ict(trans); 1821 1822 if (iwl_have_debug_level(IWL_DL_ISR)) { 1823 IWL_DEBUG_ISR(trans, 1824 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", 1825 inta, trans_pcie->inta_mask, 1826 iwl_read32(trans, CSR_INT_MASK), 1827 iwl_read32(trans, CSR_FH_INT_STATUS)); 1828 if (inta & (~trans_pcie->inta_mask)) 1829 IWL_DEBUG_ISR(trans, 1830 "We got a masked interrupt (0x%08x)\n", 1831 inta & (~trans_pcie->inta_mask)); 1832 } 1833 1834 inta &= trans_pcie->inta_mask; 1835 1836 /* 1837 * Ignore interrupt if there's nothing in NIC to service. 1838 * This may be due to IRQ shared with another device, 1839 * or due to sporadic interrupts thrown from our NIC. 1840 */ 1841 if (unlikely(!inta)) { 1842 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 1843 /* 1844 * Re-enable interrupts here since we don't 1845 * have anything to service 1846 */ 1847 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1848 _iwl_enable_interrupts(trans); 1849 spin_unlock_bh(&trans_pcie->irq_lock); 1850 lock_map_release(&trans->sync_cmd_lockdep_map); 1851 return IRQ_NONE; 1852 } 1853 1854 if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 1855 /* 1856 * Hardware disappeared. It might have 1857 * already raised an interrupt. 1858 */ 1859 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); 1860 spin_unlock_bh(&trans_pcie->irq_lock); 1861 goto out; 1862 } 1863 1864 /* Ack/clear/reset pending uCode interrupts. 1865 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1866 */ 1867 /* There is a hardware bug in the interrupt mask function that some 1868 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if 1869 * they are disabled in the CSR_INT_MASK register. Furthermore the 1870 * ICT interrupt handling mechanism has another bug that might cause 1871 * these unmasked interrupts fail to be detected. We workaround the 1872 * hardware bugs here by ACKing all the possible interrupts so that 1873 * interrupt coalescing can still be achieved. 1874 */ 1875 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); 1876 1877 if (iwl_have_debug_level(IWL_DL_ISR)) 1878 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", 1879 inta, iwl_read32(trans, CSR_INT_MASK)); 1880 1881 spin_unlock_bh(&trans_pcie->irq_lock); 1882 1883 /* Now service all interrupt bits discovered above. */ 1884 if (inta & CSR_INT_BIT_HW_ERR) { 1885 IWL_ERR(trans, "Hardware error detected. Restarting.\n"); 1886 1887 /* Tell the device to stop sending interrupts */ 1888 iwl_disable_interrupts(trans); 1889 1890 isr_stats->hw++; 1891 iwl_pcie_irq_handle_error(trans); 1892 1893 handled |= CSR_INT_BIT_HW_ERR; 1894 1895 goto out; 1896 } 1897 1898 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1899 if (inta & CSR_INT_BIT_SCD) { 1900 IWL_DEBUG_ISR(trans, 1901 "Scheduler finished to transmit the frame/frames.\n"); 1902 isr_stats->sch++; 1903 } 1904 1905 /* Alive notification via Rx interrupt will do the real work */ 1906 if (inta & CSR_INT_BIT_ALIVE) { 1907 IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 1908 isr_stats->alive++; 1909 if (trans->trans_cfg->gen2) { 1910 /* 1911 * We can restock, since firmware configured 1912 * the RFH 1913 */ 1914 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); 1915 } 1916 1917 handled |= CSR_INT_BIT_ALIVE; 1918 } 1919 1920 /* Safely ignore these bits for debug checks below */ 1921 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1922 1923 /* HW RF KILL switch toggled */ 1924 if (inta & CSR_INT_BIT_RF_KILL) { 1925 iwl_pcie_handle_rfkill_irq(trans); 1926 handled |= CSR_INT_BIT_RF_KILL; 1927 } 1928 1929 /* Chip got too hot and stopped itself */ 1930 if (inta & CSR_INT_BIT_CT_KILL) { 1931 IWL_ERR(trans, "Microcode CT kill error detected.\n"); 1932 isr_stats->ctkill++; 1933 handled |= CSR_INT_BIT_CT_KILL; 1934 } 1935 1936 /* Error detected by uCode */ 1937 if (inta & CSR_INT_BIT_SW_ERR) { 1938 IWL_ERR(trans, "Microcode SW error detected. " 1939 " Restarting 0x%X.\n", inta); 1940 isr_stats->sw++; 1941 iwl_pcie_irq_handle_error(trans); 1942 handled |= CSR_INT_BIT_SW_ERR; 1943 } 1944 1945 /* uCode wakes up after power-down sleep */ 1946 if (inta & CSR_INT_BIT_WAKEUP) { 1947 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 1948 iwl_pcie_rxq_check_wrptr(trans); 1949 iwl_pcie_txq_check_wrptrs(trans); 1950 1951 isr_stats->wakeup++; 1952 1953 handled |= CSR_INT_BIT_WAKEUP; 1954 } 1955 1956 /* All uCode command responses, including Tx command responses, 1957 * Rx "responses" (frame-received notification), and other 1958 * notifications from uCode come through here*/ 1959 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 1960 CSR_INT_BIT_RX_PERIODIC)) { 1961 IWL_DEBUG_ISR(trans, "Rx interrupt\n"); 1962 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1963 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1964 iwl_write32(trans, CSR_FH_INT_STATUS, 1965 CSR_FH_INT_RX_MASK); 1966 } 1967 if (inta & CSR_INT_BIT_RX_PERIODIC) { 1968 handled |= CSR_INT_BIT_RX_PERIODIC; 1969 iwl_write32(trans, 1970 CSR_INT, CSR_INT_BIT_RX_PERIODIC); 1971 } 1972 /* Sending RX interrupt require many steps to be done in the 1973 * the device: 1974 * 1- write interrupt to current index in ICT table. 1975 * 2- dma RX frame. 1976 * 3- update RX shared data to indicate last write index. 1977 * 4- send interrupt. 1978 * This could lead to RX race, driver could receive RX interrupt 1979 * but the shared data changes does not reflect this; 1980 * periodic interrupt will detect any dangling Rx activity. 1981 */ 1982 1983 /* Disable periodic interrupt; we use it as just a one-shot. */ 1984 iwl_write8(trans, CSR_INT_PERIODIC_REG, 1985 CSR_INT_PERIODIC_DIS); 1986 1987 /* 1988 * Enable periodic interrupt in 8 msec only if we received 1989 * real RX interrupt (instead of just periodic int), to catch 1990 * any dangling Rx interrupt. If it was just the periodic 1991 * interrupt, there was no dangling Rx activity, and no need 1992 * to extend the periodic interrupt; one-shot is enough. 1993 */ 1994 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 1995 iwl_write8(trans, CSR_INT_PERIODIC_REG, 1996 CSR_INT_PERIODIC_ENA); 1997 1998 isr_stats->rx++; 1999 2000 local_bh_disable(); 2001 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) { 2002 polling = true; 2003 __napi_schedule(&trans_pcie->rxq[0].napi); 2004 } 2005 local_bh_enable(); 2006 } 2007 2008 /* This "Tx" DMA channel is used only for loading uCode */ 2009 if (inta & CSR_INT_BIT_FH_TX) { 2010 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); 2011 IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 2012 isr_stats->tx++; 2013 handled |= CSR_INT_BIT_FH_TX; 2014 /* Wake up uCode load routine, now that load is complete */ 2015 trans_pcie->ucode_write_complete = true; 2016 wake_up(&trans_pcie->ucode_write_waitq); 2017 /* Wake up IMR write routine, now that write to SRAM is complete */ 2018 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { 2019 trans_pcie->imr_status = IMR_D2S_COMPLETED; 2020 wake_up(&trans_pcie->ucode_write_waitq); 2021 } 2022 } 2023 2024 if (inta & ~handled) { 2025 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 2026 isr_stats->unhandled++; 2027 } 2028 2029 if (inta & ~(trans_pcie->inta_mask)) { 2030 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", 2031 inta & ~trans_pcie->inta_mask); 2032 } 2033 2034 if (!polling) { 2035 spin_lock_bh(&trans_pcie->irq_lock); 2036 /* only Re-enable all interrupt if disabled by irq */ 2037 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 2038 _iwl_enable_interrupts(trans); 2039 /* we are loading the firmware, enable FH_TX interrupt only */ 2040 else if (handled & CSR_INT_BIT_FH_TX) 2041 iwl_enable_fw_load_int(trans); 2042 /* Re-enable RF_KILL if it occurred */ 2043 else if (handled & CSR_INT_BIT_RF_KILL) 2044 iwl_enable_rfkill_int(trans); 2045 /* Re-enable the ALIVE / Rx interrupt if it occurred */ 2046 else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) 2047 iwl_enable_fw_load_int_ctx_info(trans); 2048 spin_unlock_bh(&trans_pcie->irq_lock); 2049 } 2050 2051 out: 2052 lock_map_release(&trans->sync_cmd_lockdep_map); 2053 return IRQ_HANDLED; 2054 } 2055 2056 /****************************************************************************** 2057 * 2058 * ICT functions 2059 * 2060 ******************************************************************************/ 2061 2062 /* Free dram table */ 2063 void iwl_pcie_free_ict(struct iwl_trans *trans) 2064 { 2065 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2066 2067 if (trans_pcie->ict_tbl) { 2068 dma_free_coherent(trans->dev, ICT_SIZE, 2069 trans_pcie->ict_tbl, 2070 trans_pcie->ict_tbl_dma); 2071 trans_pcie->ict_tbl = NULL; 2072 trans_pcie->ict_tbl_dma = 0; 2073 } 2074 } 2075 2076 /* 2077 * allocate dram shared table, it is an aligned memory 2078 * block of ICT_SIZE. 2079 * also reset all data related to ICT table interrupt. 2080 */ 2081 int iwl_pcie_alloc_ict(struct iwl_trans *trans) 2082 { 2083 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2084 2085 trans_pcie->ict_tbl = 2086 dma_alloc_coherent(trans->dev, ICT_SIZE, 2087 &trans_pcie->ict_tbl_dma, GFP_KERNEL); 2088 if (!trans_pcie->ict_tbl) 2089 return -ENOMEM; 2090 2091 /* just an API sanity check ... it is guaranteed to be aligned */ 2092 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { 2093 iwl_pcie_free_ict(trans); 2094 return -EINVAL; 2095 } 2096 2097 return 0; 2098 } 2099 2100 /* Device is going up inform it about using ICT interrupt table, 2101 * also we need to tell the driver to start using ICT interrupt. 2102 */ 2103 void iwl_pcie_reset_ict(struct iwl_trans *trans) 2104 { 2105 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2106 u32 val; 2107 2108 if (!trans_pcie->ict_tbl) 2109 return; 2110 2111 spin_lock_bh(&trans_pcie->irq_lock); 2112 _iwl_disable_interrupts(trans); 2113 2114 memset(trans_pcie->ict_tbl, 0, ICT_SIZE); 2115 2116 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; 2117 2118 val |= CSR_DRAM_INT_TBL_ENABLE | 2119 CSR_DRAM_INIT_TBL_WRAP_CHECK | 2120 CSR_DRAM_INIT_TBL_WRITE_POINTER; 2121 2122 IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); 2123 2124 iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); 2125 trans_pcie->use_ict = true; 2126 trans_pcie->ict_index = 0; 2127 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); 2128 _iwl_enable_interrupts(trans); 2129 spin_unlock_bh(&trans_pcie->irq_lock); 2130 } 2131 2132 /* Device is going down disable ict interrupt usage */ 2133 void iwl_pcie_disable_ict(struct iwl_trans *trans) 2134 { 2135 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2136 2137 spin_lock_bh(&trans_pcie->irq_lock); 2138 trans_pcie->use_ict = false; 2139 spin_unlock_bh(&trans_pcie->irq_lock); 2140 } 2141 2142 irqreturn_t iwl_pcie_isr(int irq, void *data) 2143 { 2144 struct iwl_trans *trans = data; 2145 2146 if (!trans) 2147 return IRQ_NONE; 2148 2149 /* Disable (but don't clear!) interrupts here to avoid 2150 * back-to-back ISRs and sporadic interrupts from our NIC. 2151 * If we have something to service, the tasklet will re-enable ints. 2152 * If we *don't* have something, we'll re-enable before leaving here. 2153 */ 2154 iwl_write32(trans, CSR_INT_MASK, 0x00000000); 2155 2156 return IRQ_WAKE_THREAD; 2157 } 2158 2159 irqreturn_t iwl_pcie_msix_isr(int irq, void *data) 2160 { 2161 return IRQ_WAKE_THREAD; 2162 } 2163 2164 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) 2165 { 2166 struct msix_entry *entry = dev_id; 2167 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); 2168 struct iwl_trans *trans = trans_pcie->trans; 2169 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2170 u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE; 2171 u32 inta_fh, inta_hw; 2172 bool polling = false; 2173 bool sw_err; 2174 2175 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) 2176 inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0; 2177 2178 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) 2179 inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1; 2180 2181 lock_map_acquire(&trans->sync_cmd_lockdep_map); 2182 2183 spin_lock_bh(&trans_pcie->irq_lock); 2184 inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD); 2185 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); 2186 /* 2187 * Clear causes registers to avoid being handling the same cause. 2188 */ 2189 iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk); 2190 iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); 2191 spin_unlock_bh(&trans_pcie->irq_lock); 2192 2193 trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw); 2194 2195 if (unlikely(!(inta_fh | inta_hw))) { 2196 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 2197 lock_map_release(&trans->sync_cmd_lockdep_map); 2198 return IRQ_NONE; 2199 } 2200 2201 if (iwl_have_debug_level(IWL_DL_ISR)) { 2202 IWL_DEBUG_ISR(trans, 2203 "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", 2204 entry->entry, inta_fh, trans_pcie->fh_mask, 2205 iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); 2206 if (inta_fh & ~trans_pcie->fh_mask) 2207 IWL_DEBUG_ISR(trans, 2208 "We got a masked interrupt (0x%08x)\n", 2209 inta_fh & ~trans_pcie->fh_mask); 2210 } 2211 2212 inta_fh &= trans_pcie->fh_mask; 2213 2214 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && 2215 inta_fh & MSIX_FH_INT_CAUSES_Q0) { 2216 local_bh_disable(); 2217 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) { 2218 polling = true; 2219 __napi_schedule(&trans_pcie->rxq[0].napi); 2220 } 2221 local_bh_enable(); 2222 } 2223 2224 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && 2225 inta_fh & MSIX_FH_INT_CAUSES_Q1) { 2226 local_bh_disable(); 2227 if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) { 2228 polling = true; 2229 __napi_schedule(&trans_pcie->rxq[1].napi); 2230 } 2231 local_bh_enable(); 2232 } 2233 2234 /* This "Tx" DMA channel is used only for loading uCode */ 2235 if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM && 2236 trans_pcie->imr_status == IMR_D2S_REQUESTED) { 2237 IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n"); 2238 isr_stats->tx++; 2239 2240 /* Wake up IMR routine once write to SRAM is complete */ 2241 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { 2242 trans_pcie->imr_status = IMR_D2S_COMPLETED; 2243 wake_up(&trans_pcie->ucode_write_waitq); 2244 } 2245 } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { 2246 IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 2247 isr_stats->tx++; 2248 /* 2249 * Wake up uCode load routine, 2250 * now that load is complete 2251 */ 2252 trans_pcie->ucode_write_complete = true; 2253 wake_up(&trans_pcie->ucode_write_waitq); 2254 2255 /* Wake up IMR routine once write to SRAM is complete */ 2256 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { 2257 trans_pcie->imr_status = IMR_D2S_COMPLETED; 2258 wake_up(&trans_pcie->ucode_write_waitq); 2259 } 2260 } 2261 2262 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 2263 sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; 2264 else 2265 sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR; 2266 2267 /* Error detected by uCode */ 2268 if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) { 2269 IWL_ERR(trans, 2270 "Microcode SW error detected. Restarting 0x%X.\n", 2271 inta_fh); 2272 isr_stats->sw++; 2273 /* during FW reset flow report errors from there */ 2274 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { 2275 trans_pcie->imr_status = IMR_D2S_ERROR; 2276 wake_up(&trans_pcie->imr_waitq); 2277 } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { 2278 trans_pcie->fw_reset_state = FW_RESET_ERROR; 2279 wake_up(&trans_pcie->fw_reset_waitq); 2280 } else { 2281 iwl_pcie_irq_handle_error(trans); 2282 } 2283 } 2284 2285 /* After checking FH register check HW register */ 2286 if (iwl_have_debug_level(IWL_DL_ISR)) { 2287 IWL_DEBUG_ISR(trans, 2288 "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", 2289 entry->entry, inta_hw, trans_pcie->hw_mask, 2290 iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); 2291 if (inta_hw & ~trans_pcie->hw_mask) 2292 IWL_DEBUG_ISR(trans, 2293 "We got a masked interrupt 0x%08x\n", 2294 inta_hw & ~trans_pcie->hw_mask); 2295 } 2296 2297 inta_hw &= trans_pcie->hw_mask; 2298 2299 /* Alive notification via Rx interrupt will do the real work */ 2300 if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { 2301 IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 2302 isr_stats->alive++; 2303 if (trans->trans_cfg->gen2) { 2304 /* We can restock, since firmware configured the RFH */ 2305 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); 2306 } 2307 } 2308 2309 /* 2310 * In some rare cases when the HW is in a bad state, we may 2311 * get this interrupt too early, when prph_info is still NULL. 2312 * So make sure that it's not NULL to prevent crashing. 2313 */ 2314 if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) { 2315 u32 sleep_notif = 2316 le32_to_cpu(trans_pcie->prph_info->sleep_notif); 2317 if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND || 2318 sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) { 2319 IWL_DEBUG_ISR(trans, 2320 "Sx interrupt: sleep notification = 0x%x\n", 2321 sleep_notif); 2322 trans_pcie->sx_complete = true; 2323 wake_up(&trans_pcie->sx_waitq); 2324 } else { 2325 /* uCode wakes up after power-down sleep */ 2326 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 2327 iwl_pcie_rxq_check_wrptr(trans); 2328 iwl_pcie_txq_check_wrptrs(trans); 2329 2330 isr_stats->wakeup++; 2331 } 2332 } 2333 2334 /* Chip got too hot and stopped itself */ 2335 if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { 2336 IWL_ERR(trans, "Microcode CT kill error detected.\n"); 2337 isr_stats->ctkill++; 2338 } 2339 2340 /* HW RF KILL switch toggled */ 2341 if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) 2342 iwl_pcie_handle_rfkill_irq(trans); 2343 2344 if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { 2345 IWL_ERR(trans, 2346 "Hardware error detected. Restarting.\n"); 2347 2348 isr_stats->hw++; 2349 trans->dbg.hw_error = true; 2350 iwl_pcie_irq_handle_error(trans); 2351 } 2352 2353 if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) { 2354 IWL_DEBUG_ISR(trans, "Reset flow completed\n"); 2355 trans_pcie->fw_reset_state = FW_RESET_OK; 2356 wake_up(&trans_pcie->fw_reset_waitq); 2357 } 2358 2359 if (!polling) 2360 iwl_pcie_clear_irq(trans, entry->entry); 2361 2362 lock_map_release(&trans->sync_cmd_lockdep_map); 2363 2364 return IRQ_HANDLED; 2365 } 2366